code_text
stringlengths
604
999k
repo_name
stringlengths
4
100
file_path
stringlengths
4
873
language
stringclasses
23 values
license
stringclasses
15 values
size
int32
1.02k
999k
#include "amd64_edac.h" #define EDAC_DCT_ATTR_SHOW(reg) \ static ssize_t amd64_##reg##_show(struct device *dev, \ struct device_attribute *mattr, \ char *data) \ { \ struct mem_ctl_info *mci = to_mci(dev); \ struct amd64_pvt *pvt = mci->pvt_info; \ return sprintf(data, "0x%016llx\n", (u64)pvt->reg); \ } EDAC_DCT_ATTR_SHOW(dhar); EDAC_DCT_ATTR_SHOW(dbam0); EDAC_DCT_ATTR_SHOW(top_mem); EDAC_DCT_ATTR_SHOW(top_mem2); static ssize_t amd64_hole_show(struct device *dev, struct device_attribute *mattr, char *data) { struct mem_ctl_info *mci = to_mci(dev); u64 hole_base = 0; u64 hole_offset = 0; u64 hole_size = 0; amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size); return sprintf(data, "%llx %llx %llx\n", hole_base, hole_offset, hole_size); } /* * update NUM_DBG_ATTRS in case you add new members */ static DEVICE_ATTR(dhar, S_IRUGO, amd64_dhar_show, NULL); static DEVICE_ATTR(dbam, S_IRUGO, amd64_dbam0_show, NULL); static DEVICE_ATTR(topmem, S_IRUGO, amd64_top_mem_show, NULL); static DEVICE_ATTR(topmem2, S_IRUGO, amd64_top_mem2_show, NULL); static DEVICE_ATTR(dram_hole, S_IRUGO, amd64_hole_show, NULL); int amd64_create_sysfs_dbg_files(struct mem_ctl_info *mci) { int rc; rc = device_create_file(&mci->dev, &dev_attr_dhar); if (rc < 0) return rc; rc = device_create_file(&mci->dev, &dev_attr_dbam); if (rc < 0) return rc; rc = device_create_file(&mci->dev, &dev_attr_topmem); if (rc < 0) return rc; rc = device_create_file(&mci->dev, &dev_attr_topmem2); if (rc < 0) return rc; rc = device_create_file(&mci->dev, &dev_attr_dram_hole); if (rc < 0) return rc; return 0; } void amd64_remove_sysfs_dbg_files(struct mem_ctl_info *mci) { device_remove_file(&mci->dev, &dev_attr_dhar); device_remove_file(&mci->dev, &dev_attr_dbam); device_remove_file(&mci->dev, &dev_attr_topmem); device_remove_file(&mci->dev, &dev_attr_topmem2); device_remove_file(&mci->dev, &dev_attr_dram_hole); }
XMelancholy/android_kernel_huawei_h60
drivers/edac/amd64_edac_dbg.c
C
gpl-2.0
2,023
/* * DMA mapping support for platforms lacking IOMMUs. * * Copyright (C) 2009 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/dma-mapping.h> #include <linux/io.h> static dma_addr_t nommu_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { dma_addr_t addr = page_to_phys(page) + offset; WARN_ON(size == 0); dma_cache_sync(dev, page_address(page) + offset, size, dir); return addr; } static int nommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, struct dma_attrs *attrs) { struct scatterlist *s; int i; WARN_ON(nents == 0 || sg[0].length == 0); for_each_sg(sg, s, nents, i) { BUG_ON(!sg_page(s)); dma_cache_sync(dev, sg_virt(s), s->length, dir); s->dma_address = sg_phys(s); s->dma_length = s->length; } return nents; } #ifdef CONFIG_DMA_NONCOHERENT static void nommu_sync_single(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { dma_cache_sync(dev, phys_to_virt(addr), size, dir); } static void nommu_sync_sg(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) { struct scatterlist *s; int i; for_each_sg(sg, s, nelems, i) dma_cache_sync(dev, sg_virt(s), s->length, dir); } #endif struct dma_map_ops nommu_dma_ops = { .alloc_coherent = dma_generic_alloc_coherent, .free_coherent = dma_generic_free_coherent, .map_page = nommu_map_page, .map_sg = nommu_map_sg, #ifdef CONFIG_DMA_NONCOHERENT .sync_single_for_device = nommu_sync_single, .sync_sg_for_device = nommu_sync_sg, #endif .is_phys = 1, }; void __init no_iommu_init(void) { if (dma_ops) return; dma_ops = &nommu_dma_ops; }
NovaFusion/android_kernel_samsung_ux500
arch/sh/kernel/dma-nommu.c
C
gpl-2.0
1,928
/* Copyright (c) 2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <media/rc-map.h> static struct rc_map_table ue_rf4ce[] = { { 0x0a, KEY_SETUP }, { 0x6b, KEY_POWER }, { 0x00, KEY_OK }, { 0x03, KEY_LEFT }, { 0x04, KEY_RIGHT }, { 0x01, KEY_UP }, { 0x02, KEY_DOWN }, { 0x53, KEY_HOMEPAGE }, { 0x0d, KEY_EXIT }, { 0x72, KEY_TV }, { 0x73, KEY_VIDEO }, { 0x74, KEY_PC }, { 0x71, KEY_AUX }, { 0x45, KEY_STOP }, { 0x0b, KEY_LIST }, { 0x47, KEY_RECORD }, { 0x48, KEY_REWIND }, { 0x44, KEY_PLAY }, { 0x49, KEY_FASTFORWARD }, { 0x4c, KEY_BACK }, { 0x46, KEY_PAUSE }, { 0x4b, KEY_NEXT }, { 0x41, KEY_VOLUMEUP }, { 0x42, KEY_VOLUMEDOWN }, { 0x32, KEY_LAST }, { 0x43, KEY_MUTE }, { 0x30, KEY_CHANNELUP }, { 0x31, KEY_CHANNELDOWN }, { 0x20, KEY_0 }, { 0x21, KEY_1 }, { 0x22, KEY_2 }, { 0x23, KEY_3 }, { 0x24, KEY_4 }, { 0x25, KEY_5 }, { 0x26, KEY_6 }, { 0x27, KEY_7 }, { 0x28, KEY_8 }, { 0x29, KEY_9 }, { 0x34, KEY_TV2 }, { 0x2b, KEY_ENTER }, { 0x35, KEY_INFO }, { 0x09, KEY_MENU }, }; static struct rc_map_list ue_rf4ce_map = { .map = { .scan = ue_rf4ce, .size = ARRAY_SIZE(ue_rf4ce), .rc_type = RC_TYPE_OTHER, .name = RC_MAP_UE_RF4CE, } }; static int __init init_rc_map_ue_rf4ce(void) { return rc_map_register(&ue_rf4ce_map); } static void __exit exit_rc_map_ue_rf4ce(void) { rc_map_unregister(&ue_rf4ce_map); } module_init(init_rc_map_ue_rf4ce) module_exit(exit_rc_map_ue_rf4ce) MODULE_DESCRIPTION("UE RF4CE Remote Keymap "); MODULE_LICENSE("GPL v2");
MotoG-2013/android_kernel_motorola_msm8226
drivers/media/rc/keymaps/rc-ue-rf4ce.c
C
gpl-2.0
2,008
/* * Support for dynamic reconfiguration for PCI, Memory, and CPU * Hotplug and Dynamic Logical Partitioning on RPA platforms. * * Copyright (C) 2009 Nathan Fontenot * Copyright (C) 2009 IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/kref.h> #include <linux/notifier.h> #include <linux/proc_fs.h> #include <linux/spinlock.h> #include <linux/cpu.h> #include <linux/slab.h> #include "offline_states.h" #include <asm/prom.h> #include <asm/machdep.h> #include <asm/uaccess.h> #include <asm/rtas.h> #include <asm/pSeries_reconfig.h> struct cc_workarea { u32 drc_index; u32 zero; u32 name_offset; u32 prop_length; u32 prop_offset; }; void dlpar_free_cc_property(struct property *prop) { kfree(prop->name); kfree(prop->value); kfree(prop); } static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa) { struct property *prop; char *name; char *value; prop = kzalloc(sizeof(*prop), GFP_KERNEL); if (!prop) return NULL; name = (char *)ccwa + ccwa->name_offset; prop->name = kstrdup(name, GFP_KERNEL); prop->length = ccwa->prop_length; value = (char *)ccwa + ccwa->prop_offset; prop->value = kmemdup(value, prop->length, GFP_KERNEL); if (!prop->value) { dlpar_free_cc_property(prop); return NULL; } return prop; } static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa) { struct device_node *dn; char *name; dn = kzalloc(sizeof(*dn), GFP_KERNEL); if (!dn) return NULL; /* The configure connector reported name does not contain a * preceding '/', so we allocate a buffer large enough to * prepend this to the full_name. 
*/ name = (char *)ccwa + ccwa->name_offset; dn->full_name = kasprintf(GFP_KERNEL, "/%s", name); if (!dn->full_name) { kfree(dn); return NULL; } return dn; } static void dlpar_free_one_cc_node(struct device_node *dn) { struct property *prop; while (dn->properties) { prop = dn->properties; dn->properties = prop->next; dlpar_free_cc_property(prop); } kfree(dn->full_name); kfree(dn); } void dlpar_free_cc_nodes(struct device_node *dn) { if (dn->child) dlpar_free_cc_nodes(dn->child); if (dn->sibling) dlpar_free_cc_nodes(dn->sibling); dlpar_free_one_cc_node(dn); } #define COMPLETE 0 #define NEXT_SIBLING 1 #define NEXT_CHILD 2 #define NEXT_PROPERTY 3 #define PREV_PARENT 4 #define MORE_MEMORY 5 #define CALL_AGAIN -2 #define ERR_CFG_USE -9003 struct device_node *dlpar_configure_connector(u32 drc_index) { struct device_node *dn; struct device_node *first_dn = NULL; struct device_node *last_dn = NULL; struct property *property; struct property *last_property = NULL; struct cc_workarea *ccwa; char *data_buf; int cc_token; int rc = -1; cc_token = rtas_token("ibm,configure-connector"); if (cc_token == RTAS_UNKNOWN_SERVICE) return NULL; data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL); if (!data_buf) return NULL; ccwa = (struct cc_workarea *)&data_buf[0]; ccwa->drc_index = drc_index; ccwa->zero = 0; do { /* Since we release the rtas_data_buf lock between configure * connector calls we want to re-populate the rtas_data_buffer * with the contents of the previous call. 
*/ spin_lock(&rtas_data_buf_lock); memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE); rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL); memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE); spin_unlock(&rtas_data_buf_lock); switch (rc) { case COMPLETE: break; case NEXT_SIBLING: dn = dlpar_parse_cc_node(ccwa); if (!dn) goto cc_error; dn->parent = last_dn->parent; last_dn->sibling = dn; last_dn = dn; break; case NEXT_CHILD: dn = dlpar_parse_cc_node(ccwa); if (!dn) goto cc_error; if (!first_dn) first_dn = dn; else { dn->parent = last_dn; if (last_dn) last_dn->child = dn; } last_dn = dn; break; case NEXT_PROPERTY: property = dlpar_parse_cc_property(ccwa); if (!property) goto cc_error; if (!last_dn->properties) last_dn->properties = property; else last_property->next = property; last_property = property; break; case PREV_PARENT: last_dn = last_dn->parent; break; case CALL_AGAIN: break; case MORE_MEMORY: case ERR_CFG_USE: default: printk(KERN_ERR "Unexpected Error (%d) " "returned from configure-connector\n", rc); goto cc_error; } } while (rc); cc_error: kfree(data_buf); if (rc) { if (first_dn) dlpar_free_cc_nodes(first_dn); return NULL; } return first_dn; } static struct device_node *derive_parent(const char *path) { struct device_node *parent; char *last_slash; last_slash = strrchr(path, '/'); if (last_slash == path) { parent = of_find_node_by_path("/"); } else { char *parent_path; int parent_path_len = last_slash - path + 1; parent_path = kmalloc(parent_path_len, GFP_KERNEL); if (!parent_path) return NULL; strlcpy(parent_path, path, parent_path_len); parent = of_find_node_by_path(parent_path); kfree(parent_path); } return parent; } int dlpar_attach_node(struct device_node *dn) { #ifdef CONFIG_PROC_DEVICETREE struct proc_dir_entry *ent; #endif int rc; of_node_set_flag(dn, OF_DYNAMIC); kref_init(&dn->kref); dn->parent = derive_parent(dn->full_name); if (!dn->parent) return -ENOMEM; rc = pSeries_reconfig_notify(PSERIES_RECONFIG_ADD, dn); if (rc) { 
printk(KERN_ERR "Failed to add device node %s\n", dn->full_name); return rc; } of_attach_node(dn); #ifdef CONFIG_PROC_DEVICETREE ent = proc_mkdir(strrchr(dn->full_name, '/') + 1, dn->parent->pde); if (ent) proc_device_tree_add_node(dn, ent); #endif of_node_put(dn->parent); return 0; } int dlpar_detach_node(struct device_node *dn) { #ifdef CONFIG_PROC_DEVICETREE struct device_node *parent = dn->parent; struct property *prop = dn->properties; while (prop) { remove_proc_entry(prop->name, dn->pde); prop = prop->next; } if (dn->pde) remove_proc_entry(dn->pde->name, parent->pde); #endif pSeries_reconfig_notify(PSERIES_RECONFIG_REMOVE, dn); of_detach_node(dn); of_node_put(dn); /* Must decrement the refcount */ return 0; } #define DR_ENTITY_SENSE 9003 #define DR_ENTITY_PRESENT 1 #define DR_ENTITY_UNUSABLE 2 #define ALLOCATION_STATE 9003 #define ALLOC_UNUSABLE 0 #define ALLOC_USABLE 1 #define ISOLATION_STATE 9001 #define ISOLATE 0 #define UNISOLATE 1 int dlpar_acquire_drc(u32 drc_index) { int dr_status, rc; rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status, DR_ENTITY_SENSE, drc_index); if (rc || dr_status != DR_ENTITY_UNUSABLE) return -1; rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE); if (rc) return rc; rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE); if (rc) { rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE); return rc; } return 0; } int dlpar_release_drc(u32 drc_index) { int dr_status, rc; rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status, DR_ENTITY_SENSE, drc_index); if (rc || dr_status != DR_ENTITY_PRESENT) return -1; rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE); if (rc) return rc; rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE); if (rc) { rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE); return rc; } return 0; } #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE static int dlpar_online_cpu(struct device_node *dn) { int rc = 0; unsigned int cpu; int len, 
nthreads, i; const u32 *intserv; intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len); if (!intserv) return -EINVAL; nthreads = len / sizeof(u32); cpu_maps_update_begin(); for (i = 0; i < nthreads; i++) { for_each_present_cpu(cpu) { if (get_hard_smp_processor_id(cpu) != intserv[i]) continue; BUG_ON(get_cpu_current_state(cpu) != CPU_STATE_OFFLINE); cpu_maps_update_done(); rc = cpu_up(cpu); if (rc) goto out; cpu_maps_update_begin(); break; } if (cpu == num_possible_cpus()) printk(KERN_WARNING "Could not find cpu to online " "with physical id 0x%x\n", intserv[i]); } cpu_maps_update_done(); out: return rc; } static ssize_t dlpar_cpu_probe(const char *buf, size_t count) { struct device_node *dn; unsigned long drc_index; char *cpu_name; int rc; cpu_hotplug_driver_lock(); rc = strict_strtoul(buf, 0, &drc_index); if (rc) { rc = -EINVAL; goto out; } dn = dlpar_configure_connector(drc_index); if (!dn) { rc = -EINVAL; goto out; } /* configure-connector reports cpus as living in the base * directory of the device tree. CPUs actually live in the * cpus directory so we need to fixup the full_name. */ cpu_name = kasprintf(GFP_KERNEL, "/cpus%s", dn->full_name); if (!cpu_name) { dlpar_free_cc_nodes(dn); rc = -ENOMEM; goto out; } kfree(dn->full_name); dn->full_name = cpu_name; rc = dlpar_acquire_drc(drc_index); if (rc) { dlpar_free_cc_nodes(dn); rc = -EINVAL; goto out; } rc = dlpar_attach_node(dn); if (rc) { dlpar_release_drc(drc_index); dlpar_free_cc_nodes(dn); goto out; } rc = dlpar_online_cpu(dn); out: cpu_hotplug_driver_unlock(); return rc ? 
rc : count; } static int dlpar_offline_cpu(struct device_node *dn) { int rc = 0; unsigned int cpu; int len, nthreads, i; const u32 *intserv; intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len); if (!intserv) return -EINVAL; nthreads = len / sizeof(u32); cpu_maps_update_begin(); for (i = 0; i < nthreads; i++) { for_each_present_cpu(cpu) { if (get_hard_smp_processor_id(cpu) != intserv[i]) continue; if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE) break; if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) { set_preferred_offline_state(cpu, CPU_STATE_OFFLINE); cpu_maps_update_done(); rc = cpu_down(cpu); if (rc) goto out; cpu_maps_update_begin(); break; } /* * The cpu is in CPU_STATE_INACTIVE. * Upgrade it's state to CPU_STATE_OFFLINE. */ set_preferred_offline_state(cpu, CPU_STATE_OFFLINE); BUG_ON(plpar_hcall_norets(H_PROD, intserv[i]) != H_SUCCESS); __cpu_die(cpu); break; } if (cpu == num_possible_cpus()) printk(KERN_WARNING "Could not find cpu to offline " "with physical id 0x%x\n", intserv[i]); } cpu_maps_update_done(); out: return rc; } static ssize_t dlpar_cpu_release(const char *buf, size_t count) { struct device_node *dn; const u32 *drc_index; int rc; dn = of_find_node_by_path(buf); if (!dn) return -EINVAL; drc_index = of_get_property(dn, "ibm,my-drc-index", NULL); if (!drc_index) { of_node_put(dn); return -EINVAL; } cpu_hotplug_driver_lock(); rc = dlpar_offline_cpu(dn); if (rc) { of_node_put(dn); rc = -EINVAL; goto out; } rc = dlpar_release_drc(*drc_index); if (rc) { of_node_put(dn); goto out; } rc = dlpar_detach_node(dn); if (rc) { dlpar_acquire_drc(*drc_index); goto out; } of_node_put(dn); out: cpu_hotplug_driver_unlock(); return rc ? rc : count; } static int __init pseries_dlpar_init(void) { ppc_md.cpu_probe = dlpar_cpu_probe; ppc_md.cpu_release = dlpar_cpu_release; return 0; } machine_device_initcall(pseries, pseries_dlpar_init); #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
Compunctus/android_kernel_lge_g3
arch/powerpc/platforms/pseries/dlpar.c
C
gpl-2.0
11,515
/* * decompress.c * * Detect the decompression method based on magic number */ #include <linux/decompress/generic.h> #include <linux/decompress/bunzip2.h> #include <linux/decompress/unlzma.h> #include <linux/decompress/unxz.h> #include <linux/decompress/inflate.h> #include <linux/decompress/unlzo.h> #include <linux/types.h> #include <linux/string.h> #ifndef CONFIG_DECOMPRESS_GZIP # define gunzip NULL #endif #ifndef CONFIG_DECOMPRESS_BZIP2 # define bunzip2 NULL #endif #ifndef CONFIG_DECOMPRESS_LZMA # define unlzma NULL #endif #ifndef CONFIG_DECOMPRESS_XZ # define unxz NULL #endif #ifndef CONFIG_DECOMPRESS_LZO # define unlzo NULL #endif static const struct compress_format { unsigned char magic[2]; const char *name; decompress_fn decompressor; } compressed_formats[] = { { {037, 0213}, "gzip", gunzip }, { {037, 0236}, "gzip", gunzip }, { {0x42, 0x5a}, "bzip2", bunzip2 }, { {0x5d, 0x00}, "lzma", unlzma }, { {0xfd, 0x37}, "xz", unxz }, { {0x89, 0x4c}, "lzo", unlzo }, { {0, 0}, NULL, NULL } }; decompress_fn decompress_method(const unsigned char *inbuf, int len, const char **name) { const struct compress_format *cf; if (len < 2) return NULL; /* Need at least this much... */ for (cf = compressed_formats; cf->name; cf++) { if (!memcmp(inbuf, cf->magic, 2)) break; } if (name) *name = cf->name; return cf->decompressor; }
UniqueDroid/lge-kernel-p880
lib/decompress.c
C
gpl-2.0
1,373
/* * linux/arch/arm/mach-pxa/generic.c * * Author: Nicolas Pitre * Created: Jun 15, 2001 * Copyright: MontaVista Software Inc. * * Code common to all PXA machines. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Since this file should be linked before any other machine specific file, * the __initcall() here will be executed first. This serves as default * initialization stuff for PXA machines which can be overridden later if * need be. */ #include <linux/gpio.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <mach/hardware.h> #include <asm/mach/map.h> #include <asm/mach-types.h> #include <mach/reset.h> #include <mach/smemc.h> #include <mach/pxa3xx-regs.h> #include "generic.h" void clear_reset_status(unsigned int mask) { if (cpu_is_pxa2xx()) pxa2xx_clear_reset_status(mask); else { /* RESET_STATUS_* has a 1:1 mapping with ARSR */ ARSR = mask; } } unsigned long get_clock_tick_rate(void) { unsigned long clock_tick_rate; if (cpu_is_pxa25x()) clock_tick_rate = 3686400; else if (machine_is_mainstone()) clock_tick_rate = 3249600; else clock_tick_rate = 3250000; return clock_tick_rate; } EXPORT_SYMBOL(get_clock_tick_rate); /* * Get the clock frequency as reflected by CCCR and the turbo flag. * We assume these values have been applied via a fcs. * If info is not 0 we also display the current settings. */ unsigned int get_clk_frequency_khz(int info) { if (cpu_is_pxa25x()) return pxa25x_get_clk_frequency_khz(info); else if (cpu_is_pxa27x()) return pxa27x_get_clk_frequency_khz(info); return 0; } EXPORT_SYMBOL(get_clk_frequency_khz); /* * Intel PXA2xx internal register mapping. * * Note: virtual 0xfffe0000-0xffffffff is reserved for the vector table * and cache flush area. 
*/ static struct map_desc common_io_desc[] __initdata = { { /* Devs */ .virtual = 0xf2000000, .pfn = __phys_to_pfn(0x40000000), .length = 0x02000000, .type = MT_DEVICE }, { /* UNCACHED_PHYS_0 */ .virtual = 0xff000000, .pfn = __phys_to_pfn(0x00000000), .length = 0x00100000, .type = MT_DEVICE } }; void __init pxa_map_io(void) { iotable_init(ARRAY_AND_SIZE(common_io_desc)); }
lgoptimusdev/lge-kernel-lproj
arch/arm/mach-pxa/generic.c
C
gpl-2.0
2,347
/* * arch/arm/kernel/kprobes-test-thumb.c * * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include "kprobes-test.h" #define TEST_ISA "16" #define DONT_TEST_IN_ITBLOCK(tests) \ kprobe_test_flags |= TEST_FLAG_NO_ITBLOCK; \ tests \ kprobe_test_flags &= ~TEST_FLAG_NO_ITBLOCK; #define CONDITION_INSTRUCTIONS(cc_pos, tests) \ kprobe_test_cc_position = cc_pos; \ DONT_TEST_IN_ITBLOCK(tests) \ kprobe_test_cc_position = 0; #define TEST_ITBLOCK(code) \ kprobe_test_flags |= TEST_FLAG_FULL_ITBLOCK; \ TESTCASE_START(code) \ TEST_ARG_END("") \ "50: nop \n\t" \ "1: "code" \n\t" \ " mov r1, #0x11 \n\t" \ " mov r2, #0x22 \n\t" \ " mov r3, #0x33 \n\t" \ "2: nop \n\t" \ TESTCASE_END \ kprobe_test_flags &= ~TEST_FLAG_FULL_ITBLOCK; #define TEST_THUMB_TO_ARM_INTERWORK_P(code1, reg, val, code2) \ TESTCASE_START(code1 #reg code2) \ TEST_ARG_PTR(reg, val) \ TEST_ARG_REG(14, 99f+1) \ TEST_ARG_MEM(15, 3f) \ TEST_ARG_END("") \ " nop \n\t" /* To align 1f */ \ "50: nop \n\t" \ "1: "code1 #reg code2" \n\t" \ " bx lr \n\t" \ ".arm \n\t" \ "3: adr lr, 2f+1 \n\t" \ " bx lr \n\t" \ ".thumb \n\t" \ "2: nop \n\t" \ TESTCASE_END void kprobe_thumb16_test_cases(void) { kprobe_test_flags = TEST_FLAG_NARROW_INSTR; TEST_GROUP("Shift (immediate), add, subtract, move, and compare") TEST_R( "lsls r7, r",0,VAL1,", #5") TEST_R( "lsls r0, r",7,VAL2,", #11") TEST_R( "lsrs r7, r",0,VAL1,", #5") TEST_R( "lsrs r0, r",7,VAL2,", #11") TEST_R( "asrs r7, r",0,VAL1,", #5") TEST_R( "asrs r0, r",7,VAL2,", #11") TEST_RR( "adds r2, r",0,VAL1,", r",7,VAL2,"") TEST_RR( "adds r5, r",7,VAL2,", r",0,VAL2,"") TEST_RR( "subs r2, r",0,VAL1,", r",7,VAL2,"") TEST_RR( "subs r5, r",7,VAL2,", r",0,VAL2,"") TEST_R( "adds r7, r",0,VAL1,", #5") TEST_R( "adds r0, r",7,VAL2,", #2") TEST_R( "subs r7, 
r",0,VAL1,", #5") TEST_R( "subs r0, r",7,VAL2,", #2") TEST( "movs.n r0, #0x5f") TEST( "movs.n r7, #0xa0") TEST_R( "cmp.n r",0,0x5e, ", #0x5f") TEST_R( "cmp.n r",5,0x15f,", #0x5f") TEST_R( "cmp.n r",7,0xa0, ", #0xa0") TEST_R( "adds.n r",0,VAL1,", #0x5f") TEST_R( "adds.n r",7,VAL2,", #0xa0") TEST_R( "subs.n r",0,VAL1,", #0x5f") TEST_R( "subs.n r",7,VAL2,", #0xa0") TEST_GROUP("16-bit Thumb data-processing instructions") #define DATA_PROCESSING16(op,val) \ TEST_RR( op" r",0,VAL1,", r",7,val,"") \ TEST_RR( op" r",7,VAL2,", r",0,val,"") DATA_PROCESSING16("ands",0xf00f00ff) DATA_PROCESSING16("eors",0xf00f00ff) DATA_PROCESSING16("lsls",11) DATA_PROCESSING16("lsrs",11) DATA_PROCESSING16("asrs",11) DATA_PROCESSING16("adcs",VAL2) DATA_PROCESSING16("sbcs",VAL2) DATA_PROCESSING16("rors",11) DATA_PROCESSING16("tst",0xf00f00ff) TEST_R("rsbs r",0,VAL1,", #0") TEST_R("rsbs r",7,VAL2,", #0") DATA_PROCESSING16("cmp",0xf00f00ff) DATA_PROCESSING16("cmn",0xf00f00ff) DATA_PROCESSING16("orrs",0xf00f00ff) DATA_PROCESSING16("muls",VAL2) DATA_PROCESSING16("bics",0xf00f00ff) DATA_PROCESSING16("mvns",VAL2) TEST_GROUP("Special data instructions and branch and exchange") TEST_RR( "add r",0, VAL1,", r",7,VAL2,"") TEST_RR( "add r",3, VAL2,", r",8,VAL3,"") TEST_RR( "add r",8, VAL3,", r",0,VAL1,"") TEST_R( "add sp" ", r",8,-8, "") TEST_R( "add r",14,VAL1,", pc") TEST_BF_R("add pc" ", r",0,2f-1f-8,"") TEST_UNSUPPORTED(".short 0x44ff @ add pc, pc") TEST_RR( "cmp r",3,VAL1,", r",8,VAL2,"") TEST_RR( "cmp r",8,VAL2,", r",0,VAL1,"") TEST_R( "cmp sp" ", r",8,-8, "") TEST_R( "mov r0, r",7,VAL2,"") TEST_R( "mov r3, r",8,VAL3,"") TEST_R( "mov r8, r",0,VAL1,"") TEST_P( "mov sp, r",8,-8, "") TEST( "mov lr, pc") TEST_BF_R("mov pc, r",0,2f, "") TEST_BF_R("bx r",0, 2f+1,"") TEST_BF_R("bx r",14,2f+1,"") TESTCASE_START("bx pc") TEST_ARG_REG(14, 99f+1) TEST_ARG_END("") " nop \n\t" /* To align the bx pc*/ "50: nop \n\t" "1: bx pc \n\t" " bx lr \n\t" ".arm \n\t" " adr lr, 2f+1 \n\t" " bx lr \n\t" ".thumb \n\t" "2: nop 
\n\t" TESTCASE_END TEST_BF_R("blx r",0, 2f+1,"") TEST_BB_R("blx r",14,2f+1,"") TEST_UNSUPPORTED(".short 0x47f8 @ blx pc") TEST_GROUP("Load from Literal Pool") TEST_X( "ldr r0, 3f", ".align \n\t" "3: .word "__stringify(VAL1)) TEST_X( "ldr r7, 3f", ".space 128 \n\t" ".align \n\t" "3: .word "__stringify(VAL2)) TEST_GROUP("16-bit Thumb Load/store instructions") TEST_RPR("str r",0, VAL1,", [r",1, 24,", r",2, 48,"]") TEST_RPR("str r",7, VAL2,", [r",6, 24,", r",5, 48,"]") TEST_RPR("strh r",0, VAL1,", [r",1, 24,", r",2, 48,"]") TEST_RPR("strh r",7, VAL2,", [r",6, 24,", r",5, 48,"]") TEST_RPR("strb r",0, VAL1,", [r",1, 24,", r",2, 48,"]") TEST_RPR("strb r",7, VAL2,", [r",6, 24,", r",5, 48,"]") TEST_PR( "ldrsb r0, [r",1, 24,", r",2, 48,"]") TEST_PR( "ldrsb r7, [r",6, 24,", r",5, 50,"]") TEST_PR( "ldr r0, [r",1, 24,", r",2, 48,"]") TEST_PR( "ldr r7, [r",6, 24,", r",5, 48,"]") TEST_PR( "ldrh r0, [r",1, 24,", r",2, 48,"]") TEST_PR( "ldrh r7, [r",6, 24,", r",5, 50,"]") TEST_PR( "ldrb r0, [r",1, 24,", r",2, 48,"]") TEST_PR( "ldrb r7, [r",6, 24,", r",5, 50,"]") TEST_PR( "ldrsh r0, [r",1, 24,", r",2, 48,"]") TEST_PR( "ldrsh r7, [r",6, 24,", r",5, 50,"]") TEST_RP("str r",0, VAL1,", [r",1, 24,", #120]") TEST_RP("str r",7, VAL2,", [r",6, 24,", #120]") TEST_P( "ldr r0, [r",1, 24,", #120]") TEST_P( "ldr r7, [r",6, 24,", #120]") TEST_RP("strb r",0, VAL1,", [r",1, 24,", #30]") TEST_RP("strb r",7, VAL2,", [r",6, 24,", #30]") TEST_P( "ldrb r0, [r",1, 24,", #30]") TEST_P( "ldrb r7, [r",6, 24,", #30]") TEST_RP("strh r",0, VAL1,", [r",1, 24,", #60]") TEST_RP("strh r",7, VAL2,", [r",6, 24,", #60]") TEST_P( "ldrh r0, [r",1, 24,", #60]") TEST_P( "ldrh r7, [r",6, 24,", #60]") TEST_R( "str r",0, VAL1,", [sp, #0]") TEST_R( "str r",7, VAL2,", [sp, #160]") TEST( "ldr r0, [sp, #0]") TEST( "ldr r7, [sp, #160]") TEST_RP("str r",0, VAL1,", [r",0, 24,"]") TEST_P( "ldr r0, [r",0, 24,"]") TEST_GROUP("Generate PC-/SP-relative address") TEST("add r0, pc, #4") TEST("add r7, pc, #1020") TEST("add r0, sp, #4") 
TEST("add r7, sp, #1020") TEST_GROUP("Miscellaneous 16-bit instructions") TEST_UNSUPPORTED( "cpsie i") TEST_UNSUPPORTED( "cpsid i") TEST_UNSUPPORTED( "setend le") TEST_UNSUPPORTED( "setend be") TEST("add sp, #"__stringify(TEST_MEMORY_SIZE)) /* Assumes TEST_MEMORY_SIZE < 0x400 */ TEST("sub sp, #0x7f*4") DONT_TEST_IN_ITBLOCK( TEST_BF_R( "cbnz r",0,0, ", 2f") TEST_BF_R( "cbz r",2,-1,", 2f") TEST_BF_RX( "cbnz r",4,1, ", 2f", SPACE_0x20) TEST_BF_RX( "cbz r",7,0, ", 2f", SPACE_0x40) ) TEST_R("sxth r0, r",7, HH1,"") TEST_R("sxth r7, r",0, HH2,"") TEST_R("sxtb r0, r",7, HH1,"") TEST_R("sxtb r7, r",0, HH2,"") TEST_R("uxth r0, r",7, HH1,"") TEST_R("uxth r7, r",0, HH2,"") TEST_R("uxtb r0, r",7, HH1,"") TEST_R("uxtb r7, r",0, HH2,"") TEST_R("rev r0, r",7, VAL1,"") TEST_R("rev r7, r",0, VAL2,"") TEST_R("rev16 r0, r",7, VAL1,"") TEST_R("rev16 r7, r",0, VAL2,"") TEST_UNSUPPORTED(".short 0xba80") TEST_UNSUPPORTED(".short 0xbabf") TEST_R("revsh r0, r",7, VAL1,"") TEST_R("revsh r7, r",0, VAL2,"") #define TEST_POPPC(code, offset) \ TESTCASE_START(code) \ TEST_ARG_PTR(13, offset) \ TEST_ARG_END("") \ TEST_BRANCH_F(code) \ TESTCASE_END TEST("push {r0}") TEST("push {r7}") TEST("push {r14}") TEST("push {r0-r7,r14}") TEST("push {r0,r2,r4,r6,r14}") TEST("push {r1,r3,r5,r7}") TEST("pop {r0}") TEST("pop {r7}") TEST("pop {r0,r2,r4,r6}") TEST_POPPC("pop {pc}",15*4) TEST_POPPC("pop {r0-r7,pc}",7*4) TEST_POPPC("pop {r1,r3,r5,r7,pc}",11*4) TEST_THUMB_TO_ARM_INTERWORK_P("pop {pc} @ ",13,15*4,"") TEST_THUMB_TO_ARM_INTERWORK_P("pop {r0-r7,pc} @ ",13,7*4,"") TEST_UNSUPPORTED("bkpt.n 0") TEST_UNSUPPORTED("bkpt.n 255") TEST_SUPPORTED("yield") TEST("sev") TEST("nop") TEST("wfi") TEST_SUPPORTED("wfe") TEST_UNSUPPORTED(".short 0xbf50") /* Unassigned hints */ TEST_UNSUPPORTED(".short 0xbff0") /* Unassigned hints */ #define TEST_IT(code, code2) \ TESTCASE_START(code) \ TEST_ARG_END("") \ "50: nop \n\t" \ "1: "code" \n\t" \ " "code2" \n\t" \ "2: nop \n\t" \ TESTCASE_END DONT_TEST_IN_ITBLOCK( TEST_IT("it 
eq","moveq r0,#0") TEST_IT("it vc","movvc r0,#0") TEST_IT("it le","movle r0,#0") TEST_IT("ite eq","moveq r0,#0\n\t movne r1,#1") TEST_IT("itet vc","movvc r0,#0\n\t movvs r1,#1\n\t movvc r2,#2") TEST_IT("itete le","movle r0,#0\n\t movgt r1,#1\n\t movle r2,#2\n\t movgt r3,#3") TEST_IT("itttt le","movle r0,#0\n\t movle r1,#1\n\t movle r2,#2\n\t movle r3,#3") TEST_IT("iteee le","movle r0,#0\n\t movgt r1,#1\n\t movgt r2,#2\n\t movgt r3,#3") ) TEST_GROUP("Load and store multiple") TEST_P("ldmia r",4, 16*4,"!, {r0,r7}") TEST_P("ldmia r",7, 16*4,"!, {r0-r6}") TEST_P("stmia r",4, 16*4,"!, {r0,r7}") TEST_P("stmia r",0, 16*4,"!, {r0-r7}") TEST_GROUP("Conditional branch and Supervisor Call instructions") CONDITION_INSTRUCTIONS(8, TEST_BF("beq 2f") TEST_BB("bne 2b") TEST_BF("bgt 2f") TEST_BB("blt 2b") ) TEST_UNSUPPORTED(".short 0xde00") TEST_UNSUPPORTED(".short 0xdeff") TEST_UNSUPPORTED("svc #0x00") TEST_UNSUPPORTED("svc #0xff") TEST_GROUP("Unconditional branch") TEST_BF( "b 2f") TEST_BB( "b 2b") TEST_BF_X("b 2f", SPACE_0x400) TEST_BB_X("b 2b", SPACE_0x400) TEST_GROUP("Testing instructions in IT blocks") TEST_ITBLOCK("subs.n r0, r0") verbose("\n"); } void kprobe_thumb32_test_cases(void) { kprobe_test_flags = 0; TEST_GROUP("Load/store multiple") TEST_UNSUPPORTED("rfedb sp") TEST_UNSUPPORTED("rfeia sp") TEST_UNSUPPORTED("rfedb sp!") TEST_UNSUPPORTED("rfeia sp!") TEST_P( "stmia r",0, 16*4,", {r0,r8}") TEST_P( "stmia r",4, 16*4,", {r0-r12,r14}") TEST_P( "stmia r",7, 16*4,"!, {r8-r12,r14}") TEST_P( "stmia r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_P( "ldmia r",0, 16*4,", {r0,r8}") TEST_P( "ldmia r",4, 0, ", {r0-r12,r14}") TEST_BF_P("ldmia r",5, 8*4, "!, {r6-r12,r15}") TEST_P( "ldmia r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_BF_P("ldmia r",14,14*4,"!, {r4,pc}") TEST_P( "stmdb r",0, 16*4,", {r0,r8}") TEST_P( "stmdb r",4, 16*4,", {r0-r12,r14}") TEST_P( "stmdb r",5, 16*4,"!, {r8-r12,r14}") TEST_P( "stmdb r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_P( "ldmdb r",0, 16*4,", 
{r0,r8}") TEST_P( "ldmdb r",4, 16*4,", {r0-r12,r14}") TEST_BF_P("ldmdb r",5, 16*4,"!, {r6-r12,r15}") TEST_P( "ldmdb r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_BF_P("ldmdb r",14,16*4,"!, {r4,pc}") TEST_P( "stmdb r",13,16*4,"!, {r3-r12,lr}") TEST_P( "stmdb r",13,16*4,"!, {r3-r12}") TEST_P( "stmdb r",2, 16*4,", {r3-r12,lr}") TEST_P( "stmdb r",13,16*4,"!, {r2-r12,lr}") TEST_P( "stmdb r",0, 16*4,", {r0-r12}") TEST_P( "stmdb r",0, 16*4,", {r0-r12,lr}") TEST_BF_P("ldmia r",13,5*4, "!, {r3-r12,pc}") TEST_P( "ldmia r",13,5*4, "!, {r3-r12}") TEST_BF_P("ldmia r",2, 5*4, "!, {r3-r12,pc}") TEST_BF_P("ldmia r",13,4*4, "!, {r2-r12,pc}") TEST_P( "ldmia r",0, 16*4,", {r0-r12}") TEST_P( "ldmia r",0, 16*4,", {r0-r12,lr}") TEST_THUMB_TO_ARM_INTERWORK_P("ldmia r",0,14*4,", {r12,pc}") TEST_THUMB_TO_ARM_INTERWORK_P("ldmia r",13,2*4,", {r0-r12,pc}") TEST_UNSUPPORTED(".short 0xe88f,0x0101 @ stmia pc, {r0,r8}") TEST_UNSUPPORTED(".short 0xe92f,0x5f00 @ stmdb pc!, {r8-r12,r14}") TEST_UNSUPPORTED(".short 0xe8bd,0xc000 @ ldmia r13!, {r14,pc}") TEST_UNSUPPORTED(".short 0xe93e,0xc000 @ ldmdb r14!, {r14,pc}") TEST_UNSUPPORTED(".short 0xe8a7,0x3f00 @ stmia r7!, {r8-r12,sp}") TEST_UNSUPPORTED(".short 0xe8a7,0x9f00 @ stmia r7!, {r8-r12,pc}") TEST_UNSUPPORTED(".short 0xe93e,0x2010 @ ldmdb r14!, {r4,sp}") TEST_GROUP("Load/store double or exclusive, table branch") TEST_P( "ldrd r0, r1, [r",1, 24,", #-16]") TEST( "ldrd r12, r14, [sp, #16]") TEST_P( "ldrd r1, r0, [r",7, 24,", #-16]!") TEST( "ldrd r14, r12, [sp, #16]!") TEST_P( "ldrd r1, r0, [r",7, 24,"], #16") TEST( "ldrd r7, r8, [sp], #-16") TEST_X( "ldrd r12, r14, 3f", ".align 3 \n\t" "3: .word "__stringify(VAL1)" \n\t" " .word "__stringify(VAL2)) TEST_UNSUPPORTED(".short 0xe9ff,0xec04 @ ldrd r14, r12, [pc, #16]!") TEST_UNSUPPORTED(".short 0xe8ff,0xec04 @ ldrd r14, r12, [pc], #16") TEST_UNSUPPORTED(".short 0xe9d4,0xd800 @ ldrd sp, r8, [r4]") TEST_UNSUPPORTED(".short 0xe9d4,0xf800 @ ldrd pc, r8, [r4]") TEST_UNSUPPORTED(".short 0xe9d4,0x7d00 @ ldrd 
r7, sp, [r4]") TEST_UNSUPPORTED(".short 0xe9d4,0x7f00 @ ldrd r7, pc, [r4]") TEST_RRP("strd r",0, VAL1,", r",1, VAL2,", [r",1, 24,", #-16]") TEST_RR( "strd r",12,VAL2,", r",14,VAL1,", [sp, #16]") TEST_RRP("strd r",1, VAL1,", r",0, VAL2,", [r",7, 24,", #-16]!") TEST_RR( "strd r",14,VAL2,", r",12,VAL1,", [sp, #16]!") TEST_RRP("strd r",1, VAL1,", r",0, VAL2,", [r",7, 24,"], #16") TEST_RR( "strd r",7, VAL2,", r",8, VAL1,", [sp], #-16") TEST_UNSUPPORTED(".short 0xe9ef,0xec04 @ strd r14, r12, [pc, #16]!") TEST_UNSUPPORTED(".short 0xe8ef,0xec04 @ strd r14, r12, [pc], #16") TEST_RX("tbb [pc, r",0, (9f-(1f+4)),"]", "9: \n\t" ".byte (2f-1b-4)>>1 \n\t" ".byte (3f-1b-4)>>1 \n\t" "3: mvn r0, r0 \n\t" "2: nop \n\t") TEST_RX("tbb [pc, r",4, (9f-(1f+4)+1),"]", "9: \n\t" ".byte (2f-1b-4)>>1 \n\t" ".byte (3f-1b-4)>>1 \n\t" "3: mvn r0, r0 \n\t" "2: nop \n\t") TEST_RRX("tbb [r",1,9f,", r",2,0,"]", "9: \n\t" ".byte (2f-1b-4)>>1 \n\t" ".byte (3f-1b-4)>>1 \n\t" "3: mvn r0, r0 \n\t" "2: nop \n\t") TEST_RX("tbh [pc, r",7, (9f-(1f+4))>>1,"]", "9: \n\t" ".short (2f-1b-4)>>1 \n\t" ".short (3f-1b-4)>>1 \n\t" "3: mvn r0, r0 \n\t" "2: nop \n\t") TEST_RX("tbh [pc, r",12, ((9f-(1f+4))>>1)+1,"]", "9: \n\t" ".short (2f-1b-4)>>1 \n\t" ".short (3f-1b-4)>>1 \n\t" "3: mvn r0, r0 \n\t" "2: nop \n\t") TEST_RRX("tbh [r",1,9f, ", r",14,1,"]", "9: \n\t" ".short (2f-1b-4)>>1 \n\t" ".short (3f-1b-4)>>1 \n\t" "3: mvn r0, r0 \n\t" "2: nop \n\t") TEST_UNSUPPORTED(".short 0xe8d1,0xf01f @ tbh [r1, pc]") TEST_UNSUPPORTED(".short 0xe8d1,0xf01d @ tbh [r1, sp]") TEST_UNSUPPORTED(".short 0xe8dd,0xf012 @ tbh [sp, r2]") TEST_UNSUPPORTED("strexb r0, r1, [r2]") TEST_UNSUPPORTED("strexh r0, r1, [r2]") TEST_UNSUPPORTED("strexd r0, r1, [r2]") TEST_UNSUPPORTED("ldrexb r0, [r1]") TEST_UNSUPPORTED("ldrexh r0, [r1]") TEST_UNSUPPORTED("ldrexd r0, [r1]") TEST_GROUP("Data-processing (shifted register) and (modified immediate)") #define _DATA_PROCESSING32_DNM(op,s,val) \ TEST_RR(op s".w r0, r",1, VAL1,", r",2, val, "") \ TEST_RR(op s" 
r1, r",1, VAL1,", r",2, val, ", lsl #3") \ TEST_RR(op s" r2, r",3, VAL1,", r",2, val, ", lsr #4") \ TEST_RR(op s" r3, r",3, VAL1,", r",2, val, ", asr #5") \ TEST_RR(op s" r4, r",5, VAL1,", r",2, N(val),", asr #6") \ TEST_RR(op s" r5, r",5, VAL1,", r",2, val, ", ror #7") \ TEST_RR(op s" r8, r",9, VAL1,", r",10,val, ", rrx") \ TEST_R( op s" r0, r",11,VAL1,", #0x00010001") \ TEST_R( op s" r11, r",0, VAL1,", #0xf5000000") \ TEST_R( op s" r7, r",8, VAL2,", #0x000af000") #define DATA_PROCESSING32_DNM(op,val) \ _DATA_PROCESSING32_DNM(op,"",val) \ _DATA_PROCESSING32_DNM(op,"s",val) #define DATA_PROCESSING32_NM(op,val) \ TEST_RR(op".w r",1, VAL1,", r",2, val, "") \ TEST_RR(op" r",1, VAL1,", r",2, val, ", lsl #3") \ TEST_RR(op" r",3, VAL1,", r",2, val, ", lsr #4") \ TEST_RR(op" r",3, VAL1,", r",2, val, ", asr #5") \ TEST_RR(op" r",5, VAL1,", r",2, N(val),", asr #6") \ TEST_RR(op" r",5, VAL1,", r",2, val, ", ror #7") \ TEST_RR(op" r",9, VAL1,", r",10,val, ", rrx") \ TEST_R( op" r",11,VAL1,", #0x00010001") \ TEST_R( op" r",0, VAL1,", #0xf5000000") \ TEST_R( op" r",8, VAL2,", #0x000af000") #define _DATA_PROCESSING32_DM(op,s,val) \ TEST_R( op s".w r0, r",14, val, "") \ TEST_R( op s" r1, r",12, val, ", lsl #3") \ TEST_R( op s" r2, r",11, val, ", lsr #4") \ TEST_R( op s" r3, r",10, val, ", asr #5") \ TEST_R( op s" r4, r",9, N(val),", asr #6") \ TEST_R( op s" r5, r",8, val, ", ror #7") \ TEST_R( op s" r8, r",7,val, ", rrx") \ TEST( op s" r0, #0x00010001") \ TEST( op s" r11, #0xf5000000") \ TEST( op s" r7, #0x000af000") \ TEST( op s" r4, #0x00005a00") #define DATA_PROCESSING32_DM(op,val) \ _DATA_PROCESSING32_DM(op,"",val) \ _DATA_PROCESSING32_DM(op,"s",val) DATA_PROCESSING32_DNM("and",0xf00f00ff) DATA_PROCESSING32_NM("tst",0xf00f00ff) DATA_PROCESSING32_DNM("bic",0xf00f00ff) DATA_PROCESSING32_DNM("orr",0xf00f00ff) DATA_PROCESSING32_DM("mov",VAL2) DATA_PROCESSING32_DNM("orn",0xf00f00ff) DATA_PROCESSING32_DM("mvn",VAL2) DATA_PROCESSING32_DNM("eor",0xf00f00ff) 
DATA_PROCESSING32_NM("teq",0xf00f00ff) DATA_PROCESSING32_DNM("add",VAL2) DATA_PROCESSING32_NM("cmn",VAL2) DATA_PROCESSING32_DNM("adc",VAL2) DATA_PROCESSING32_DNM("sbc",VAL2) DATA_PROCESSING32_DNM("sub",VAL2) DATA_PROCESSING32_NM("cmp",VAL2) DATA_PROCESSING32_DNM("rsb",VAL2) TEST_RR("pkhbt r0, r",0, HH1,", r",1, HH2,"") TEST_RR("pkhbt r14,r",12, HH1,", r",10,HH2,", lsl #2") TEST_RR("pkhtb r0, r",0, HH1,", r",1, HH2,"") TEST_RR("pkhtb r14,r",12, HH1,", r",10,HH2,", asr #2") TEST_UNSUPPORTED(".short 0xea17,0x0f0d @ tst.w r7, sp") TEST_UNSUPPORTED(".short 0xea17,0x0f0f @ tst.w r7, pc") TEST_UNSUPPORTED(".short 0xea1d,0x0f07 @ tst.w sp, r7") TEST_UNSUPPORTED(".short 0xea1f,0x0f07 @ tst.w pc, r7") TEST_UNSUPPORTED(".short 0xf01d,0x1f08 @ tst sp, #0x00080008") TEST_UNSUPPORTED(".short 0xf01f,0x1f08 @ tst pc, #0x00080008") TEST_UNSUPPORTED(".short 0xea97,0x0f0d @ teq.w r7, sp") TEST_UNSUPPORTED(".short 0xea97,0x0f0f @ teq.w r7, pc") TEST_UNSUPPORTED(".short 0xea9d,0x0f07 @ teq.w sp, r7") TEST_UNSUPPORTED(".short 0xea9f,0x0f07 @ teq.w pc, r7") TEST_UNSUPPORTED(".short 0xf09d,0x1f08 @ tst sp, #0x00080008") TEST_UNSUPPORTED(".short 0xf09f,0x1f08 @ tst pc, #0x00080008") TEST_UNSUPPORTED(".short 0xeb17,0x0f0d @ cmn.w r7, sp") TEST_UNSUPPORTED(".short 0xeb17,0x0f0f @ cmn.w r7, pc") TEST_P("cmn.w sp, r",7,0,"") TEST_UNSUPPORTED(".short 0xeb1f,0x0f07 @ cmn.w pc, r7") TEST( "cmn sp, #0x00080008") TEST_UNSUPPORTED(".short 0xf11f,0x1f08 @ cmn pc, #0x00080008") TEST_UNSUPPORTED(".short 0xebb7,0x0f0d @ cmp.w r7, sp") TEST_UNSUPPORTED(".short 0xebb7,0x0f0f @ cmp.w r7, pc") TEST_P("cmp.w sp, r",7,0,"") TEST_UNSUPPORTED(".short 0xebbf,0x0f07 @ cmp.w pc, r7") TEST( "cmp sp, #0x00080008") TEST_UNSUPPORTED(".short 0xf1bf,0x1f08 @ cmp pc, #0x00080008") TEST_UNSUPPORTED(".short 0xea5f,0x070d @ movs.w r7, sp") TEST_UNSUPPORTED(".short 0xea5f,0x070f @ movs.w r7, pc") TEST_UNSUPPORTED(".short 0xea5f,0x0d07 @ movs.w sp, r7") TEST_UNSUPPORTED(".short 0xea4f,0x0f07 @ mov.w pc, r7") 
TEST_UNSUPPORTED(".short 0xf04f,0x1d08 @ mov sp, #0x00080008") TEST_UNSUPPORTED(".short 0xf04f,0x1f08 @ mov pc, #0x00080008") TEST_R("add.w r0, sp, r",1, 4,"") TEST_R("adds r0, sp, r",1, 4,", asl #3") TEST_R("add r0, sp, r",1, 4,", asl #4") TEST_R("add r0, sp, r",1, 16,", ror #1") TEST_R("add.w sp, sp, r",1, 4,"") TEST_R("add sp, sp, r",1, 4,", asl #3") TEST_UNSUPPORTED(".short 0xeb0d,0x1d01 @ add sp, sp, r1, asl #4") TEST_UNSUPPORTED(".short 0xeb0d,0x0d71 @ add sp, sp, r1, ror #1") TEST( "add.w r0, sp, #24") TEST( "add.w sp, sp, #24") TEST_UNSUPPORTED(".short 0xeb0d,0x0f01 @ add pc, sp, r1") TEST_UNSUPPORTED(".short 0xeb0d,0x000f @ add r0, sp, pc") TEST_UNSUPPORTED(".short 0xeb0d,0x000d @ add r0, sp, sp") TEST_UNSUPPORTED(".short 0xeb0d,0x0d0f @ add sp, sp, pc") TEST_UNSUPPORTED(".short 0xeb0d,0x0d0d @ add sp, sp, sp") TEST_R("sub.w r0, sp, r",1, 4,"") TEST_R("subs r0, sp, r",1, 4,", asl #3") TEST_R("sub r0, sp, r",1, 4,", asl #4") TEST_R("sub r0, sp, r",1, 16,", ror #1") TEST_R("sub.w sp, sp, r",1, 4,"") TEST_R("sub sp, sp, r",1, 4,", asl #3") TEST_UNSUPPORTED(".short 0xebad,0x1d01 @ sub sp, sp, r1, asl #4") TEST_UNSUPPORTED(".short 0xebad,0x0d71 @ sub sp, sp, r1, ror #1") TEST_UNSUPPORTED(".short 0xebad,0x0f01 @ sub pc, sp, r1") TEST( "sub.w r0, sp, #24") TEST( "sub.w sp, sp, #24") TEST_UNSUPPORTED(".short 0xea02,0x010f @ and r1, r2, pc") TEST_UNSUPPORTED(".short 0xea0f,0x0103 @ and r1, pc, r3") TEST_UNSUPPORTED(".short 0xea02,0x0f03 @ and pc, r2, r3") TEST_UNSUPPORTED(".short 0xea02,0x010d @ and r1, r2, sp") TEST_UNSUPPORTED(".short 0xea0d,0x0103 @ and r1, sp, r3") TEST_UNSUPPORTED(".short 0xea02,0x0d03 @ and sp, r2, r3") TEST_UNSUPPORTED(".short 0xf00d,0x1108 @ and r1, sp, #0x00080008") TEST_UNSUPPORTED(".short 0xf00f,0x1108 @ and r1, pc, #0x00080008") TEST_UNSUPPORTED(".short 0xf002,0x1d08 @ and sp, r8, #0x00080008") TEST_UNSUPPORTED(".short 0xf002,0x1f08 @ and pc, r8, #0x00080008") TEST_UNSUPPORTED(".short 0xeb02,0x010f @ add r1, r2, pc") 
TEST_UNSUPPORTED(".short 0xeb0f,0x0103 @ add r1, pc, r3") TEST_UNSUPPORTED(".short 0xeb02,0x0f03 @ add pc, r2, r3") TEST_UNSUPPORTED(".short 0xeb02,0x010d @ add r1, r2, sp") TEST_SUPPORTED( ".short 0xeb0d,0x0103 @ add r1, sp, r3") TEST_UNSUPPORTED(".short 0xeb02,0x0d03 @ add sp, r2, r3") TEST_SUPPORTED( ".short 0xf10d,0x1108 @ add r1, sp, #0x00080008") TEST_UNSUPPORTED(".short 0xf10d,0x1f08 @ add pc, sp, #0x00080008") TEST_UNSUPPORTED(".short 0xf10f,0x1108 @ add r1, pc, #0x00080008") TEST_UNSUPPORTED(".short 0xf102,0x1d08 @ add sp, r8, #0x00080008") TEST_UNSUPPORTED(".short 0xf102,0x1f08 @ add pc, r8, #0x00080008") TEST_UNSUPPORTED(".short 0xeaa0,0x0000") TEST_UNSUPPORTED(".short 0xeaf0,0x0000") TEST_UNSUPPORTED(".short 0xeb20,0x0000") TEST_UNSUPPORTED(".short 0xeb80,0x0000") TEST_UNSUPPORTED(".short 0xebe0,0x0000") TEST_UNSUPPORTED(".short 0xf0a0,0x0000") TEST_UNSUPPORTED(".short 0xf0c0,0x0000") TEST_UNSUPPORTED(".short 0xf0f0,0x0000") TEST_UNSUPPORTED(".short 0xf120,0x0000") TEST_UNSUPPORTED(".short 0xf180,0x0000") TEST_UNSUPPORTED(".short 0xf1e0,0x0000") TEST_GROUP("Coprocessor instructions") TEST_UNSUPPORTED(".short 0xec00,0x0000") TEST_UNSUPPORTED(".short 0xeff0,0x0000") TEST_UNSUPPORTED(".short 0xfc00,0x0000") TEST_UNSUPPORTED(".short 0xfff0,0x0000") TEST_GROUP("Data-processing (plain binary immediate)") TEST_R("addw r0, r",1, VAL1,", #0x123") TEST( "addw r14, sp, #0xf5a") TEST( "addw sp, sp, #0x20") TEST( "addw r7, pc, #0x888") TEST_UNSUPPORTED(".short 0xf20f,0x1f20 @ addw pc, pc, #0x120") TEST_UNSUPPORTED(".short 0xf20d,0x1f20 @ addw pc, sp, #0x120") TEST_UNSUPPORTED(".short 0xf20f,0x1d20 @ addw sp, pc, #0x120") TEST_UNSUPPORTED(".short 0xf200,0x1d20 @ addw sp, r0, #0x120") TEST_R("subw r0, r",1, VAL1,", #0x123") TEST( "subw r14, sp, #0xf5a") TEST( "subw sp, sp, #0x20") TEST( "subw r7, pc, #0x888") TEST_UNSUPPORTED(".short 0xf2af,0x1f20 @ subw pc, pc, #0x120") TEST_UNSUPPORTED(".short 0xf2ad,0x1f20 @ subw pc, sp, #0x120") TEST_UNSUPPORTED(".short 
0xf2af,0x1d20 @ subw sp, pc, #0x120") TEST_UNSUPPORTED(".short 0xf2a0,0x1d20 @ subw sp, r0, #0x120") TEST("movw r0, #0") TEST("movw r0, #0xffff") TEST("movw lr, #0xffff") TEST_UNSUPPORTED(".short 0xf240,0x0d00 @ movw sp, #0") TEST_UNSUPPORTED(".short 0xf240,0x0f00 @ movw pc, #0") TEST_R("movt r",0, VAL1,", #0") TEST_R("movt r",0, VAL2,", #0xffff") TEST_R("movt r",14,VAL1,", #0xffff") TEST_UNSUPPORTED(".short 0xf2c0,0x0d00 @ movt sp, #0") TEST_UNSUPPORTED(".short 0xf2c0,0x0f00 @ movt pc, #0") TEST_R( "ssat r0, #24, r",0, VAL1,"") TEST_R( "ssat r14, #24, r",12, VAL2,"") TEST_R( "ssat r0, #24, r",0, VAL1,", lsl #8") TEST_R( "ssat r14, #24, r",12, VAL2,", asr #8") TEST_UNSUPPORTED(".short 0xf30c,0x0d17 @ ssat sp, #24, r12") TEST_UNSUPPORTED(".short 0xf30c,0x0f17 @ ssat pc, #24, r12") TEST_UNSUPPORTED(".short 0xf30d,0x0c17 @ ssat r12, #24, sp") TEST_UNSUPPORTED(".short 0xf30f,0x0c17 @ ssat r12, #24, pc") TEST_R( "usat r0, #24, r",0, VAL1,"") TEST_R( "usat r14, #24, r",12, VAL2,"") TEST_R( "usat r0, #24, r",0, VAL1,", lsl #8") TEST_R( "usat r14, #24, r",12, VAL2,", asr #8") TEST_UNSUPPORTED(".short 0xf38c,0x0d17 @ usat sp, #24, r12") TEST_UNSUPPORTED(".short 0xf38c,0x0f17 @ usat pc, #24, r12") TEST_UNSUPPORTED(".short 0xf38d,0x0c17 @ usat r12, #24, sp") TEST_UNSUPPORTED(".short 0xf38f,0x0c17 @ usat r12, #24, pc") TEST_R( "ssat16 r0, #12, r",0, HH1,"") TEST_R( "ssat16 r14, #12, r",12, HH2,"") TEST_UNSUPPORTED(".short 0xf32c,0x0d0b @ ssat16 sp, #12, r12") TEST_UNSUPPORTED(".short 0xf32c,0x0f0b @ ssat16 pc, #12, r12") TEST_UNSUPPORTED(".short 0xf32d,0x0c0b @ ssat16 r12, #12, sp") TEST_UNSUPPORTED(".short 0xf32f,0x0c0b @ ssat16 r12, #12, pc") TEST_R( "usat16 r0, #12, r",0, HH1,"") TEST_R( "usat16 r14, #12, r",12, HH2,"") TEST_UNSUPPORTED(".short 0xf3ac,0x0d0b @ usat16 sp, #12, r12") TEST_UNSUPPORTED(".short 0xf3ac,0x0f0b @ usat16 pc, #12, r12") TEST_UNSUPPORTED(".short 0xf3ad,0x0c0b @ usat16 r12, #12, sp") TEST_UNSUPPORTED(".short 0xf3af,0x0c0b @ usat16 r12, #12, pc") 
TEST_R( "sbfx r0, r",0 , VAL1,", #0, #31") TEST_R( "sbfx r14, r",12, VAL2,", #8, #16") TEST_R( "sbfx r4, r",10, VAL1,", #16, #15") TEST_UNSUPPORTED(".short 0xf34c,0x2d0f @ sbfx sp, r12, #8, #16") TEST_UNSUPPORTED(".short 0xf34c,0x2f0f @ sbfx pc, r12, #8, #16") TEST_UNSUPPORTED(".short 0xf34d,0x2c0f @ sbfx r12, sp, #8, #16") TEST_UNSUPPORTED(".short 0xf34f,0x2c0f @ sbfx r12, pc, #8, #16") TEST_R( "ubfx r0, r",0 , VAL1,", #0, #31") TEST_R( "ubfx r14, r",12, VAL2,", #8, #16") TEST_R( "ubfx r4, r",10, VAL1,", #16, #15") TEST_UNSUPPORTED(".short 0xf3cc,0x2d0f @ ubfx sp, r12, #8, #16") TEST_UNSUPPORTED(".short 0xf3cc,0x2f0f @ ubfx pc, r12, #8, #16") TEST_UNSUPPORTED(".short 0xf3cd,0x2c0f @ ubfx r12, sp, #8, #16") TEST_UNSUPPORTED(".short 0xf3cf,0x2c0f @ ubfx r12, pc, #8, #16") TEST_R( "bfc r",0, VAL1,", #4, #20") TEST_R( "bfc r",14,VAL2,", #4, #20") TEST_R( "bfc r",7, VAL1,", #0, #31") TEST_R( "bfc r",8, VAL2,", #0, #31") TEST_UNSUPPORTED(".short 0xf36f,0x0d1e @ bfc sp, #0, #31") TEST_UNSUPPORTED(".short 0xf36f,0x0f1e @ bfc pc, #0, #31") TEST_RR( "bfi r",0, VAL1,", r",0 , VAL2,", #0, #31") TEST_RR( "bfi r",12,VAL1,", r",14 , VAL2,", #4, #20") TEST_UNSUPPORTED(".short 0xf36e,0x1d17 @ bfi sp, r14, #4, #20") TEST_UNSUPPORTED(".short 0xf36e,0x1f17 @ bfi pc, r14, #4, #20") TEST_UNSUPPORTED(".short 0xf36d,0x1e17 @ bfi r14, sp, #4, #20") TEST_GROUP("Branches and miscellaneous control") CONDITION_INSTRUCTIONS(22, TEST_BF("beq.w 2f") TEST_BB("bne.w 2b") TEST_BF("bgt.w 2f") TEST_BB("blt.w 2b") TEST_BF_X("bpl.w 2f", SPACE_0x1000) ) TEST_UNSUPPORTED("msr cpsr, r0") TEST_UNSUPPORTED("msr cpsr_f, r1") TEST_UNSUPPORTED("msr spsr, r2") TEST_UNSUPPORTED("cpsie.w i") TEST_UNSUPPORTED("cpsid.w i") TEST_UNSUPPORTED("cps 0x13") TEST_SUPPORTED("yield.w") TEST("sev.w") TEST("nop.w") TEST("wfi.w") TEST_SUPPORTED("wfe.w") TEST_UNSUPPORTED("dbg.w #0") TEST_UNSUPPORTED("clrex") TEST_UNSUPPORTED("dsb") TEST_UNSUPPORTED("dmb") TEST_UNSUPPORTED("isb") TEST_UNSUPPORTED("bxj r0") TEST_UNSUPPORTED("subs 
pc, lr, #4") TEST("mrs r0, cpsr") TEST("mrs r14, cpsr") TEST_UNSUPPORTED(".short 0xf3ef,0x8d00 @ mrs sp, spsr") TEST_UNSUPPORTED(".short 0xf3ef,0x8f00 @ mrs pc, spsr") TEST_UNSUPPORTED("mrs r0, spsr") TEST_UNSUPPORTED("mrs lr, spsr") TEST_UNSUPPORTED(".short 0xf7f0,0x8000 @ smc #0") TEST_UNSUPPORTED(".short 0xf7f0,0xa000 @ undefeined") TEST_BF( "b.w 2f") TEST_BB( "b.w 2b") TEST_BF_X("b.w 2f", SPACE_0x1000) TEST_BF( "bl.w 2f") TEST_BB( "bl.w 2b") TEST_BB_X("bl.w 2b", SPACE_0x1000) TEST_X( "blx __dummy_arm_subroutine", ".arm \n\t" ".align \n\t" ".type __dummy_arm_subroutine, %%function \n\t" "__dummy_arm_subroutine: \n\t" "mov r0, pc \n\t" "bx lr \n\t" ".thumb \n\t" ) TEST( "blx __dummy_arm_subroutine") TEST_GROUP("Store single data item") #define SINGLE_STORE(size) \ TEST_RP( "str"size" r",0, VAL1,", [r",11,-1024,", #1024]") \ TEST_RP( "str"size" r",14,VAL2,", [r",1, -1024,", #1080]") \ TEST_RP( "str"size" r",0, VAL1,", [r",11,256, ", #-120]") \ TEST_RP( "str"size" r",14,VAL2,", [r",1, 256, ", #-128]") \ TEST_RP( "str"size" r",0, VAL1,", [r",11,24, "], #120") \ TEST_RP( "str"size" r",14,VAL2,", [r",1, 24, "], #128") \ TEST_RP( "str"size" r",0, VAL1,", [r",11,24, "], #-120") \ TEST_RP( "str"size" r",14,VAL2,", [r",1, 24, "], #-128") \ TEST_RP( "str"size" r",0, VAL1,", [r",11,24, ", #120]!") \ TEST_RP( "str"size" r",14,VAL2,", [r",1, 24, ", #128]!") \ TEST_RP( "str"size" r",0, VAL1,", [r",11,256, ", #-120]!") \ TEST_RP( "str"size" r",14,VAL2,", [r",1, 256, ", #-128]!") \ TEST_RPR("str"size".w r",0, VAL1,", [r",1, 0,", r",2, 4,"]") \ TEST_RPR("str"size" r",14,VAL2,", [r",10,0,", r",11,4,", lsl #1]") \ TEST_R( "str"size".w r",7, VAL1,", [sp, #24]") \ TEST_RP( "str"size".w r",0, VAL2,", [r",0,0, "]") \ TEST_UNSUPPORTED("str"size"t r0, [r1, #4]") SINGLE_STORE("b") SINGLE_STORE("h") SINGLE_STORE("") TEST("str sp, [sp]") TEST_UNSUPPORTED(".short 0xf8cf,0xe000 @ str r14, [pc]") TEST_UNSUPPORTED(".short 0xf8ce,0xf000 @ str pc, [r14]") TEST_GROUP("Advanced SIMD element or 
structure load/store instructions") TEST_UNSUPPORTED(".short 0xf900,0x0000") TEST_UNSUPPORTED(".short 0xf92f,0xffff") TEST_UNSUPPORTED(".short 0xf980,0x0000") TEST_UNSUPPORTED(".short 0xf9ef,0xffff") TEST_GROUP("Load single data item and memory hints") #define SINGLE_LOAD(size) \ TEST_P( "ldr"size" r0, [r",11,-1024, ", #1024]") \ TEST_P( "ldr"size" r14, [r",1, -1024,", #1080]") \ TEST_P( "ldr"size" r0, [r",11,256, ", #-120]") \ TEST_P( "ldr"size" r14, [r",1, 256, ", #-128]") \ TEST_P( "ldr"size" r0, [r",11,24, "], #120") \ TEST_P( "ldr"size" r14, [r",1, 24, "], #128") \ TEST_P( "ldr"size" r0, [r",11,24, "], #-120") \ TEST_P( "ldr"size" r14, [r",1,24, "], #-128") \ TEST_P( "ldr"size" r0, [r",11,24, ", #120]!") \ TEST_P( "ldr"size" r14, [r",1, 24, ", #128]!") \ TEST_P( "ldr"size" r0, [r",11,256, ", #-120]!") \ TEST_P( "ldr"size" r14, [r",1, 256, ", #-128]!") \ TEST_PR("ldr"size".w r0, [r",1, 0,", r",2, 4,"]") \ TEST_PR("ldr"size" r14, [r",10,0,", r",11,4,", lsl #1]") \ TEST_X( "ldr"size".w r0, 3f", \ ".align 3 \n\t" \ "3: .word "__stringify(VAL1)) \ TEST_X( "ldr"size".w r14, 3f", \ ".align 3 \n\t" \ "3: .word "__stringify(VAL2)) \ TEST( "ldr"size".w r7, 3b") \ TEST( "ldr"size".w r7, [sp, #24]") \ TEST_P( "ldr"size".w r0, [r",0,0, "]") \ TEST_UNSUPPORTED("ldr"size"t r0, [r1, #4]") SINGLE_LOAD("b") SINGLE_LOAD("sb") SINGLE_LOAD("h") SINGLE_LOAD("sh") SINGLE_LOAD("") TEST_BF_P("ldr pc, [r",14, 15*4,"]") TEST_P( "ldr sp, [r",14, 13*4,"]") TEST_BF_R("ldr pc, [sp, r",14, 15*4,"]") TEST_R( "ldr sp, [sp, r",14, 13*4,"]") TEST_THUMB_TO_ARM_INTERWORK_P("ldr pc, [r",0,0,", #15*4]") TEST_SUPPORTED("ldr sp, 99f") TEST_SUPPORTED("ldr pc, 99f") TEST_UNSUPPORTED(".short 0xf854,0x700d @ ldr r7, [r4, sp]") TEST_UNSUPPORTED(".short 0xf854,0x700f @ ldr r7, [r4, pc]") TEST_UNSUPPORTED(".short 0xf814,0x700d @ ldrb r7, [r4, sp]") TEST_UNSUPPORTED(".short 0xf814,0x700f @ ldrb r7, [r4, pc]") TEST_UNSUPPORTED(".short 0xf89f,0xd004 @ ldrb sp, 99f") TEST_UNSUPPORTED(".short 0xf814,0xd008 @ ldrb 
sp, [r4, r8]") TEST_UNSUPPORTED(".short 0xf894,0xd000 @ ldrb sp, [r4]") TEST_UNSUPPORTED(".short 0xf860,0x0000") /* Unallocated space */ TEST_UNSUPPORTED(".short 0xf9ff,0xffff") /* Unallocated space */ TEST_UNSUPPORTED(".short 0xf950,0x0000") /* Unallocated space */ TEST_UNSUPPORTED(".short 0xf95f,0xffff") /* Unallocated space */ TEST_UNSUPPORTED(".short 0xf800,0x0800") /* Unallocated space */ TEST_UNSUPPORTED(".short 0xf97f,0xfaff") /* Unallocated space */ TEST( "pli [pc, #4]") TEST( "pli [pc, #-4]") TEST( "pld [pc, #4]") TEST( "pld [pc, #-4]") TEST_P( "pld [r",0,-1024,", #1024]") TEST( ".short 0xf8b0,0xf400 @ pldw [r0, #1024]") TEST_P( "pli [r",4, 0b,", #1024]") TEST_P( "pld [r",7, 120,", #-120]") TEST( ".short 0xf837,0xfc78 @ pldw [r7, #-120]") TEST_P( "pli [r",11,120,", #-120]") TEST( "pld [sp, #0]") TEST_PR("pld [r",7, 24, ", r",0, 16,"]") TEST_PR("pld [r",8, 24, ", r",12,16,", lsl #3]") TEST_SUPPORTED(".short 0xf837,0xf000 @ pldw [r7, r0]") TEST_SUPPORTED(".short 0xf838,0xf03c @ pldw [r8, r12, lsl #3]"); TEST_RR("pli [r",12,0b,", r",0, 16,"]") TEST_RR("pli [r",0, 0b,", r",12,16,", lsl #3]") TEST_R( "pld [sp, r",1, 16,"]") TEST_UNSUPPORTED(".short 0xf817,0xf00d @pld [r7, sp]") TEST_UNSUPPORTED(".short 0xf817,0xf00f @pld [r7, pc]") TEST_GROUP("Data-processing (register)") #define SHIFTS32(op) \ TEST_RR(op" r0, r",1, VAL1,", r",2, 3, "") \ TEST_RR(op" r14, r",12,VAL2,", r",11,10,"") SHIFTS32("lsl") SHIFTS32("lsls") SHIFTS32("lsr") SHIFTS32("lsrs") SHIFTS32("asr") SHIFTS32("asrs") SHIFTS32("ror") SHIFTS32("rors") TEST_UNSUPPORTED(".short 0xfa01,0xff02 @ lsl pc, r1, r2") TEST_UNSUPPORTED(".short 0xfa01,0xfd02 @ lsl sp, r1, r2") TEST_UNSUPPORTED(".short 0xfa0f,0xf002 @ lsl r0, pc, r2") TEST_UNSUPPORTED(".short 0xfa0d,0xf002 @ lsl r0, sp, r2") TEST_UNSUPPORTED(".short 0xfa01,0xf00f @ lsl r0, r1, pc") TEST_UNSUPPORTED(".short 0xfa01,0xf00d @ lsl r0, r1, sp") TEST_RR( "sxtah r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "sxtah r14,r",12, HH2,", r",10,HH1,", ror #8") TEST_R( 
"sxth r8, r",7, HH1,"") TEST_UNSUPPORTED(".short 0xfa0f,0xff87 @ sxth pc, r7"); TEST_UNSUPPORTED(".short 0xfa0f,0xfd87 @ sxth sp, r7"); TEST_UNSUPPORTED(".short 0xfa0f,0xf88f @ sxth r8, pc"); TEST_UNSUPPORTED(".short 0xfa0f,0xf88d @ sxth r8, sp"); TEST_RR( "uxtah r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "uxtah r14,r",12, HH2,", r",10,HH1,", ror #8") TEST_R( "uxth r8, r",7, HH1,"") TEST_RR( "sxtab16 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "sxtab16 r14,r",12, HH2,", r",10,HH1,", ror #8") TEST_R( "sxtb16 r8, r",7, HH1,"") TEST_RR( "uxtab16 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "uxtab16 r14,r",12, HH2,", r",10,HH1,", ror #8") TEST_R( "uxtb16 r8, r",7, HH1,"") TEST_RR( "sxtab r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "sxtab r14,r",12, HH2,", r",10,HH1,", ror #8") TEST_R( "sxtb r8, r",7, HH1,"") TEST_RR( "uxtab r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "uxtab r14,r",12, HH2,", r",10,HH1,", ror #8") TEST_R( "uxtb r8, r",7, HH1,"") TEST_UNSUPPORTED(".short 0xfa60,0x00f0") TEST_UNSUPPORTED(".short 0xfa7f,0xffff") #define PARALLEL_ADD_SUB(op) \ TEST_RR( op"add16 r0, r",0, HH1,", r",1, HH2,"") \ TEST_RR( op"add16 r14, r",12,HH2,", r",10,HH1,"") \ TEST_RR( op"asx r0, r",0, HH1,", r",1, HH2,"") \ TEST_RR( op"asx r14, r",12,HH2,", r",10,HH1,"") \ TEST_RR( op"sax r0, r",0, HH1,", r",1, HH2,"") \ TEST_RR( op"sax r14, r",12,HH2,", r",10,HH1,"") \ TEST_RR( op"sub16 r0, r",0, HH1,", r",1, HH2,"") \ TEST_RR( op"sub16 r14, r",12,HH2,", r",10,HH1,"") \ TEST_RR( op"add8 r0, r",0, HH1,", r",1, HH2,"") \ TEST_RR( op"add8 r14, r",12,HH2,", r",10,HH1,"") \ TEST_RR( op"sub8 r0, r",0, HH1,", r",1, HH2,"") \ TEST_RR( op"sub8 r14, r",12,HH2,", r",10,HH1,"") TEST_GROUP("Parallel addition and subtraction, signed") PARALLEL_ADD_SUB("s") PARALLEL_ADD_SUB("q") PARALLEL_ADD_SUB("sh") TEST_GROUP("Parallel addition and subtraction, unsigned") PARALLEL_ADD_SUB("u") PARALLEL_ADD_SUB("uq") PARALLEL_ADD_SUB("uh") TEST_GROUP("Miscellaneous operations") TEST_RR("qadd r0, r",1, VAL1,", r",2, VAL2,"") 
TEST_RR("qadd lr, r",9, VAL2,", r",8, VAL1,"") TEST_RR("qsub r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR("qsub lr, r",9, VAL2,", r",8, VAL1,"") TEST_RR("qdadd r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR("qdadd lr, r",9, VAL2,", r",8, VAL1,"") TEST_RR("qdsub r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR("qdsub lr, r",9, VAL2,", r",8, VAL1,"") TEST_R("rev.w r0, r",0, VAL1,"") TEST_R("rev r14, r",12, VAL2,"") TEST_R("rev16.w r0, r",0, VAL1,"") TEST_R("rev16 r14, r",12, VAL2,"") TEST_R("rbit r0, r",0, VAL1,"") TEST_R("rbit r14, r",12, VAL2,"") TEST_R("revsh.w r0, r",0, VAL1,"") TEST_R("revsh r14, r",12, VAL2,"") TEST_UNSUPPORTED(".short 0xfa9c,0xff8c @ rev pc, r12"); TEST_UNSUPPORTED(".short 0xfa9c,0xfd8c @ rev sp, r12"); TEST_UNSUPPORTED(".short 0xfa9f,0xfe8f @ rev r14, pc"); TEST_UNSUPPORTED(".short 0xfa9d,0xfe8d @ rev r14, sp"); TEST_RR("sel r0, r",0, VAL1,", r",1, VAL2,"") TEST_RR("sel r14, r",12,VAL1,", r",10, VAL2,"") TEST_R("clz r0, r",0, 0x0,"") TEST_R("clz r7, r",14,0x1,"") TEST_R("clz lr, r",7, 0xffffffff,"") TEST_UNSUPPORTED(".short 0xfa80,0xf030") /* Unallocated space */ TEST_UNSUPPORTED(".short 0xfaff,0xff7f") /* Unallocated space */ TEST_UNSUPPORTED(".short 0xfab0,0xf000") /* Unallocated space */ TEST_UNSUPPORTED(".short 0xfaff,0xff7f") /* Unallocated space */ TEST_GROUP("Multiply, multiply accumulate, and absolute difference operations") TEST_RR( "mul r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR( "mul r7, r",8, VAL2,", r",9, VAL2,"") TEST_UNSUPPORTED(".short 0xfb08,0xff09 @ mul pc, r8, r9") TEST_UNSUPPORTED(".short 0xfb08,0xfd09 @ mul sp, r8, r9") TEST_UNSUPPORTED(".short 0xfb0f,0xf709 @ mul r7, pc, r9") TEST_UNSUPPORTED(".short 0xfb0d,0xf709 @ mul r7, sp, r9") TEST_UNSUPPORTED(".short 0xfb08,0xf70f @ mul r7, r8, pc") TEST_UNSUPPORTED(".short 0xfb08,0xf70d @ mul r7, r8, sp") TEST_RRR( "mla r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") TEST_RRR( "mla r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_UNSUPPORTED(".short 0xfb08,0xaf09 @ mla pc, r8, r9, r10"); 
TEST_UNSUPPORTED(".short 0xfb08,0xad09 @ mla sp, r8, r9, r10"); TEST_UNSUPPORTED(".short 0xfb0f,0xa709 @ mla r7, pc, r9, r10"); TEST_UNSUPPORTED(".short 0xfb0d,0xa709 @ mla r7, sp, r9, r10"); TEST_UNSUPPORTED(".short 0xfb08,0xa70f @ mla r7, r8, pc, r10"); TEST_UNSUPPORTED(".short 0xfb08,0xa70d @ mla r7, r8, sp, r10"); TEST_UNSUPPORTED(".short 0xfb08,0xd709 @ mla r7, r8, r9, sp"); TEST_RRR( "mls r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") TEST_RRR( "mls r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_RRR( "smlabb r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") TEST_RRR( "smlabb r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_RRR( "smlatb r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") TEST_RRR( "smlatb r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_RRR( "smlabt r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") TEST_RRR( "smlabt r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_RRR( "smlatt r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") TEST_RRR( "smlatt r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_RR( "smulbb r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR( "smulbb r7, r",8, VAL3,", r",9, VAL1,"") TEST_RR( "smultb r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR( "smultb r7, r",8, VAL3,", r",9, VAL1,"") TEST_RR( "smulbt r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR( "smulbt r7, r",8, VAL3,", r",9, VAL1,"") TEST_RR( "smultt r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR( "smultt r7, r",8, VAL3,", r",9, VAL1,"") TEST_RRR( "smlad r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"") TEST_RRR( "smlad r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"") TEST_RRR( "smladx r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"") TEST_RRR( "smladx r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"") TEST_RR( "smuad r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "smuad r14, r",12,HH2,", r",10,HH1,"") TEST_RR( "smuadx r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "smuadx r14, r",12,HH2,", r",10,HH1,"") TEST_RRR( "smlawb r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") TEST_RRR( "smlawb r7, r",8, VAL3,", r",9, VAL1,", r",10, 
VAL2,"") TEST_RRR( "smlawt r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") TEST_RRR( "smlawt r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_RR( "smulwb r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR( "smulwb r7, r",8, VAL3,", r",9, VAL1,"") TEST_RR( "smulwt r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR( "smulwt r7, r",8, VAL3,", r",9, VAL1,"") TEST_RRR( "smlsd r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"") TEST_RRR( "smlsd r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"") TEST_RRR( "smlsdx r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"") TEST_RRR( "smlsdx r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"") TEST_RR( "smusd r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "smusd r14, r",12,HH2,", r",10,HH1,"") TEST_RR( "smusdx r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "smusdx r14, r",12,HH2,", r",10,HH1,"") TEST_RRR( "smmla r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"") TEST_RRR( "smmla r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"") TEST_RRR( "smmlar r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"") TEST_RRR( "smmlar r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"") TEST_RR( "smmul r0, r",0, VAL1,", r",1, VAL2,"") TEST_RR( "smmul r14, r",12,VAL2,", r",10,VAL1,"") TEST_RR( "smmulr r0, r",0, VAL1,", r",1, VAL2,"") TEST_RR( "smmulr r14, r",12,VAL2,", r",10,VAL1,"") TEST_RRR( "smmls r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"") TEST_RRR( "smmls r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"") TEST_RRR( "smmlsr r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"") TEST_RRR( "smmlsr r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"") TEST_RRR( "usada8 r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL3,"") TEST_RRR( "usada8 r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL3,"") TEST_RR( "usad8 r0, r",0, VAL1,", r",1, VAL2,"") TEST_RR( "usad8 r14, r",12,VAL2,", r",10,VAL1,"") TEST_UNSUPPORTED(".short 0xfb00,0xf010") /* Unallocated space */ TEST_UNSUPPORTED(".short 0xfb0f,0xff1f") /* Unallocated space */ TEST_UNSUPPORTED(".short 0xfb70,0xf010") /* Unallocated space */ TEST_UNSUPPORTED(".short 0xfb7f,0xff1f") /* Unallocated space */ 
TEST_UNSUPPORTED(".short 0xfb70,0x0010") /* Unallocated space */ TEST_UNSUPPORTED(".short 0xfb7f,0xff1f") /* Unallocated space */ TEST_GROUP("Long multiply, long multiply accumulate, and divide") TEST_RR( "smull r0, r1, r",2, VAL1,", r",3, VAL2,"") TEST_RR( "smull r7, r8, r",9, VAL2,", r",10, VAL1,"") TEST_UNSUPPORTED(".short 0xfb89,0xf80a @ smull pc, r8, r9, r10"); TEST_UNSUPPORTED(".short 0xfb89,0xd80a @ smull sp, r8, r9, r10"); TEST_UNSUPPORTED(".short 0xfb89,0x7f0a @ smull r7, pc, r9, r10"); TEST_UNSUPPORTED(".short 0xfb89,0x7d0a @ smull r7, sp, r9, r10"); TEST_UNSUPPORTED(".short 0xfb8f,0x780a @ smull r7, r8, pc, r10"); TEST_UNSUPPORTED(".short 0xfb8d,0x780a @ smull r7, r8, sp, r10"); TEST_UNSUPPORTED(".short 0xfb89,0x780f @ smull r7, r8, r9, pc"); TEST_UNSUPPORTED(".short 0xfb89,0x780d @ smull r7, r8, r9, sp"); TEST_RR( "umull r0, r1, r",2, VAL1,", r",3, VAL2,"") TEST_RR( "umull r7, r8, r",9, VAL2,", r",10, VAL1,"") TEST_RRRR( "smlal r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "smlal r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRRR( "smlalbb r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "smlalbb r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRRR( "smlalbt r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "smlalbt r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRRR( "smlaltb r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "smlaltb r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRRR( "smlaltt r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "smlaltt r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRRR( "smlald r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2) TEST_RRRR( "smlald r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1) TEST_RRRR( "smlaldx r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2) TEST_RRRR( "smlaldx r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1) TEST_RRRR( "smlsld r",0, VAL1,", 
r",1, VAL2, ", r",0, HH1,", r",1, HH2) TEST_RRRR( "smlsld r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1) TEST_RRRR( "smlsldx r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2) TEST_RRRR( "smlsldx r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1) TEST_RRRR( "umlal r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "umlal r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRRR( "umaal r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "umaal r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_GROUP("Coprocessor instructions") TEST_UNSUPPORTED(".short 0xfc00,0x0000") TEST_UNSUPPORTED(".short 0xffff,0xffff") TEST_GROUP("Testing instructions in IT blocks") TEST_ITBLOCK("sub.w r0, r0") verbose("\n"); }
junkyde/vikinger-stock
arch/arm/kernel/kprobes-test-thumb.c
C
gpl-2.0
44,841
/*
 * arch/arm/kernel/kprobes-test-thumb.c
 *
 * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>

/* Test harness: supplies the TEST_* / TESTCASE_* macros, kprobe_test_flags,
 * kprobe_test_cc_position, VAL1/VAL2/VAL3, HH1/HH2, SPACE_0x*, verbose(), etc.
 * See that header for the exact testcase expansion semantics. */
#include "kprobes-test.h"


#define TEST_ISA "16"

/*
 * Run 'tests' with TEST_FLAG_NO_ITBLOCK set, i.e. excluded from being
 * tested inside an IT block, restoring the flag afterwards.
 */
#define DONT_TEST_IN_ITBLOCK(tests)			\
	kprobe_test_flags |= TEST_FLAG_NO_ITBLOCK;	\
	tests						\
	kprobe_test_flags &= ~TEST_FLAG_NO_ITBLOCK;

/*
 * Run 'tests' for instructions carrying a condition-code field at bit
 * position 'cc_pos' (recorded in kprobe_test_cc_position for the
 * harness); such tests are also kept out of IT blocks.
 */
#define CONDITION_INSTRUCTIONS(cc_pos, tests)		\
	kprobe_test_cc_position = cc_pos;		\
	DONT_TEST_IN_ITBLOCK(tests)			\
	kprobe_test_cc_position = 0;

/*
 * Testcase for 'code' probed with TEST_FLAG_FULL_ITBLOCK set: the probed
 * instruction at label 1 is followed by three mov's, forming a full
 * four-instruction IT block body.
 */
#define TEST_ITBLOCK(code)				\
	kprobe_test_flags |= TEST_FLAG_FULL_ITBLOCK;	\
	TESTCASE_START(code)				\
	TEST_ARG_END("")				\
	"50: nop \n\t"					\
	"1: "code" \n\t"				\
	" mov r1, #0x11 \n\t"				\
	" mov r2, #0x22 \n\t"				\
	" mov r3, #0x33 \n\t"				\
	"2: nop \n\t"					\
	TESTCASE_END					\
	kprobe_test_flags &= ~TEST_FLAG_FULL_ITBLOCK;

/*
 * Testcase for a Thumb instruction expected to interwork to ARM state:
 * execution is steered to the ARM stub at label 3, which bounces back
 * to the Thumb code at label 2 (note 2f+1: bit 0 set selects Thumb).
 */
#define TEST_THUMB_TO_ARM_INTERWORK_P(code1, reg, val, code2)	\
	TESTCASE_START(code1 #reg code2)	\
	TEST_ARG_PTR(reg, val)			\
	TEST_ARG_REG(14, 99f+1)			\
	TEST_ARG_MEM(15, 3f)			\
	TEST_ARG_END("")			\
	" nop \n\t" /* To align 1f */		\
	"50: nop \n\t"				\
	"1: "code1 #reg code2" \n\t"		\
	" bx lr \n\t"				\
	".arm \n\t"				\
	"3: adr lr, 2f+1 \n\t"			\
	" bx lr \n\t"				\
	".thumb \n\t"				\
	"2: nop \n\t"				\
	TESTCASE_END


/*
 * Generate the test cases for 16-bit Thumb instructions.  Each TEST_*
 * line describes one instruction form plus the register arguments to
 * prime before single-stepping it; TEST_SUPPORTED/TEST_UNSUPPORTED
 * appear to assert whether the decoder accepts the encoding at all
 * (exact semantics are defined in kprobes-test.h).
 */
void kprobe_thumb16_test_cases(void)
{
	kprobe_test_flags = TEST_FLAG_NARROW_INSTR;

	TEST_GROUP("Shift (immediate), add, subtract, move, and compare")

	TEST_R(    "lsls r7, r",0,VAL1,", #5")
	TEST_R(    "lsls r0, r",7,VAL2,", #11")
	TEST_R(    "lsrs r7, r",0,VAL1,", #5")
	TEST_R(    "lsrs r0, r",7,VAL2,", #11")
	TEST_R(    "asrs r7, r",0,VAL1,", #5")
	TEST_R(    "asrs r0, r",7,VAL2,", #11")
	TEST_RR(   "adds r2, r",0,VAL1,", r",7,VAL2,"")
	TEST_RR(   "adds r5, r",7,VAL2,", r",0,VAL2,"")
	TEST_RR(   "subs r2, r",0,VAL1,", r",7,VAL2,"")
	TEST_RR(   "subs r5, r",7,VAL2,", r",0,VAL2,"")
	TEST_R(    "adds r7, r",0,VAL1,", #5")
	TEST_R(    "adds r0, r",7,VAL2,", #2")
	TEST_R(    "subs r7, r",0,VAL1,", #5")
	TEST_R(    "subs r0, r",7,VAL2,", #2")
	TEST(      "movs.n r0, #0x5f")
	TEST(      "movs.n r7, #0xa0")
	TEST_R(    "cmp.n r",0,0x5e, ", #0x5f")
	TEST_R(    "cmp.n r",5,0x15f,", #0x5f")
	TEST_R(    "cmp.n r",7,0xa0, ", #0xa0")
	TEST_R(    "adds.n r",0,VAL1,", #0x5f")
	TEST_R(    "adds.n r",7,VAL2,", #0xa0")
	TEST_R(    "subs.n r",0,VAL1,", #0x5f")
	TEST_R(    "subs.n r",7,VAL2,", #0xa0")

	TEST_GROUP("16-bit Thumb data-processing instructions")

/* Two-register data-processing form: op Rd, Rm in both directions. */
#define DATA_PROCESSING16(op,val)		\
	TEST_RR(   op" r",0,VAL1,", r",7,val,"")	\
	TEST_RR(   op" r",7,VAL2,", r",0,val,"")

	DATA_PROCESSING16("ands",0xf00f00ff)
	DATA_PROCESSING16("eors",0xf00f00ff)
	DATA_PROCESSING16("lsls",11)
	DATA_PROCESSING16("lsrs",11)
	DATA_PROCESSING16("asrs",11)
	DATA_PROCESSING16("adcs",VAL2)
	DATA_PROCESSING16("sbcs",VAL2)
	DATA_PROCESSING16("rors",11)
	DATA_PROCESSING16("tst",0xf00f00ff)
	TEST_R("rsbs r",0,VAL1,", #0")
	TEST_R("rsbs r",7,VAL2,", #0")
	DATA_PROCESSING16("cmp",0xf00f00ff)
	DATA_PROCESSING16("cmn",0xf00f00ff)
	DATA_PROCESSING16("orrs",0xf00f00ff)
	DATA_PROCESSING16("muls",VAL2)
	DATA_PROCESSING16("bics",0xf00f00ff)
	DATA_PROCESSING16("mvns",VAL2)

	TEST_GROUP("Special data instructions and branch and exchange")

	TEST_RR(  "add r",0, VAL1,", r",7,VAL2,"")
	TEST_RR(  "add r",3, VAL2,", r",8,VAL3,"")
	TEST_RR(  "add r",8, VAL3,", r",0,VAL1,"")
	TEST_R(   "add sp" ", r",8,-8, "")
	TEST_R(   "add r",14,VAL1,", pc")
	TEST_BF_R("add pc" ", r",0,2f-1f-8,"")
	TEST_UNSUPPORTED(".short 0x44ff @ add pc, pc")

	TEST_RR(  "cmp r",3,VAL1,", r",8,VAL2,"")
	TEST_RR(  "cmp r",8,VAL2,", r",0,VAL1,"")
	TEST_R(   "cmp sp" ", r",8,-8, "")

	TEST_R(   "mov r0, r",7,VAL2,"")
	TEST_R(   "mov r3, r",8,VAL3,"")
	TEST_R(   "mov r8, r",0,VAL1,"")
	TEST_P(   "mov sp, r",8,-8, "")
	TEST(     "mov lr, pc")
	TEST_BF_R("mov pc, r",0,2f, "")

	TEST_BF_R("bx r",0, 2f+1,"")
	TEST_BF_R("bx r",14,2f+1,"")
	/* Hand-rolled testcase: "bx pc" lands in the ARM stub below, which
	 * returns to the Thumb code at label 2. */
	TESTCASE_START("bx pc")
	TEST_ARG_REG(14, 99f+1)
	TEST_ARG_END("")
	" nop \n\t" /* To align the bx pc*/
	"50: nop \n\t"
	"1: bx pc \n\t"
	" bx lr \n\t"
	".arm \n\t"
	" adr lr, 2f+1 \n\t"
	" bx lr \n\t"
	".thumb \n\t"
	"2: nop \n\t"
	TESTCASE_END

	TEST_BF_R("blx r",0, 2f+1,"")
	TEST_BB_R("blx r",14,2f+1,"")
	TEST_UNSUPPORTED(".short 0x47f8 @ blx pc")

	TEST_GROUP("Load from Literal Pool")

	TEST_X( "ldr r0, 3f",
		".align \n\t"
		"3: .word "__stringify(VAL1))
	TEST_X(	"ldr r7, 3f",
		".space 128 \n\t"
		".align \n\t"
		"3: .word "__stringify(VAL2))

	TEST_GROUP("16-bit Thumb Load/store instructions")

	TEST_RPR("str r",0, VAL1,", [r",1, 24,", r",2,  48,"]")
	TEST_RPR("str r",7, VAL2,", [r",6, 24,", r",5,  48,"]")
	TEST_RPR("strh r",0, VAL1,", [r",1, 24,", r",2, 48,"]")
	TEST_RPR("strh r",7, VAL2,", [r",6, 24,", r",5, 48,"]")
	TEST_RPR("strb r",0, VAL1,", [r",1, 24,", r",2, 48,"]")
	TEST_RPR("strb r",7, VAL2,", [r",6, 24,", r",5, 48,"]")
	TEST_PR( "ldrsb r0, [r",1, 24,", r",2, 48,"]")
	TEST_PR( "ldrsb r7, [r",6, 24,", r",5, 50,"]")
	TEST_PR( "ldr r0, [r",1, 24,", r",2, 48,"]")
	TEST_PR( "ldr r7, [r",6, 24,", r",5, 48,"]")
	TEST_PR( "ldrh r0, [r",1, 24,", r",2, 48,"]")
	TEST_PR( "ldrh r7, [r",6, 24,", r",5, 50,"]")
	TEST_PR( "ldrb r0, [r",1, 24,", r",2, 48,"]")
	TEST_PR( "ldrb r7, [r",6, 24,", r",5, 50,"]")
	TEST_PR( "ldrsh r0, [r",1, 24,", r",2, 48,"]")
	TEST_PR( "ldrsh r7, [r",6, 24,", r",5, 50,"]")
	TEST_RP("str r",0, VAL1,", [r",1, 24,", #120]")
	TEST_RP("str r",7, VAL2,", [r",6, 24,", #120]")
	TEST_P( "ldr r0, [r",1, 24,", #120]")
	TEST_P( "ldr r7, [r",6, 24,", #120]")
	TEST_RP("strb r",0, VAL1,", [r",1, 24,", #30]")
	TEST_RP("strb r",7, VAL2,", [r",6, 24,", #30]")
	TEST_P( "ldrb r0, [r",1, 24,", #30]")
	TEST_P( "ldrb r7, [r",6, 24,", #30]")
	TEST_RP("strh r",0, VAL1,", [r",1, 24,", #60]")
	TEST_RP("strh r",7, VAL2,", [r",6, 24,", #60]")
	TEST_P( "ldrh r0, [r",1, 24,", #60]")
	TEST_P( "ldrh r7, [r",6, 24,", #60]")
	TEST_R( "str r",0, VAL1,", [sp, #0]")
	TEST_R( "str r",7, VAL2,", [sp, #160]")
	TEST(   "ldr r0, [sp, #0]")
	TEST(   "ldr r7, [sp, #160]")
	TEST_RP("str r",0, VAL1,", [r",0, 24,"]")
	TEST_P( "ldr r0, [r",0, 24,"]")

	TEST_GROUP("Generate PC-/SP-relative address")

	TEST("add r0, pc, #4")
	TEST("add r7, pc, #1020")
	TEST("add r0, sp, #4")
	TEST("add r7, sp, #1020")

	TEST_GROUP("Miscellaneous 16-bit instructions")

	TEST_UNSUPPORTED( "cpsie i")
	TEST_UNSUPPORTED( "cpsid i")
	TEST_UNSUPPORTED( "setend le")
	TEST_UNSUPPORTED( "setend be")

	TEST("add sp, #"__stringify(TEST_MEMORY_SIZE)) /* Assumes TEST_MEMORY_SIZE < 0x400 */
	TEST("sub sp, #0x7f*4")

	/* cbz/cbnz are unpredictable inside an IT block, hence excluded. */
	DONT_TEST_IN_ITBLOCK(
		TEST_BF_R(  "cbnz r",0,0, ", 2f")
		TEST_BF_R(  "cbz r",2,-1,", 2f")
		TEST_BF_RX( "cbnz r",4,1, ", 2f", SPACE_0x20)
		TEST_BF_RX( "cbz r",7,0, ", 2f", SPACE_0x40)
	)
	TEST_R("sxth r0, r",7, HH1,"")
	TEST_R("sxth r7, r",0, HH2,"")
	TEST_R("sxtb r0, r",7, HH1,"")
	TEST_R("sxtb r7, r",0, HH2,"")
	TEST_R("uxth r0, r",7, HH1,"")
	TEST_R("uxth r7, r",0, HH2,"")
	TEST_R("uxtb r0, r",7, HH1,"")
	TEST_R("uxtb r7, r",0, HH2,"")
	TEST_R("rev r0, r",7, VAL1,"")
	TEST_R("rev r7, r",0, VAL2,"")
	TEST_R("rev16 r0, r",7, VAL1,"")
	TEST_R("rev16 r7, r",0, VAL2,"")
	TEST_UNSUPPORTED(".short 0xba80")
	TEST_UNSUPPORTED(".short 0xbabf")
	TEST_R("revsh r0, r",7, VAL1,"")
	TEST_R("revsh r7, r",0, VAL2,"")

/* Testcase for a pop form which loads PC; 'offset' positions the stack
 * pointer argument so the popped PC is the branch target. */
#define TEST_POPPC(code, offset)	\
	TESTCASE_START(code)		\
	TEST_ARG_PTR(13, offset)	\
	TEST_ARG_END("")		\
	TEST_BRANCH_F(code)		\
	TESTCASE_END

	TEST("push {r0}")
	TEST("push {r7}")
	TEST("push {r14}")
	TEST("push {r0-r7,r14}")
	TEST("push {r0,r2,r4,r6,r14}")
	TEST("push {r1,r3,r5,r7}")
	TEST("pop {r0}")
	TEST("pop {r7}")
	TEST("pop {r0,r2,r4,r6}")
	TEST_POPPC("pop {pc}",15*4)
	TEST_POPPC("pop {r0-r7,pc}",7*4)
	TEST_POPPC("pop {r1,r3,r5,r7,pc}",11*4)
	TEST_THUMB_TO_ARM_INTERWORK_P("pop {pc} @ ",13,15*4,"")
	TEST_THUMB_TO_ARM_INTERWORK_P("pop {r0-r7,pc} @ ",13,7*4,"")

	TEST_UNSUPPORTED("bkpt.n 0")
	TEST_UNSUPPORTED("bkpt.n 255")

	TEST_SUPPORTED("yield")
	TEST("sev")
	TEST("nop")
	TEST("wfi")
	TEST_SUPPORTED("wfe")
	TEST_UNSUPPORTED(".short 0xbf50") /* Unassigned hints */
	TEST_UNSUPPORTED(".short 0xbff0") /* Unassigned hints */

/* Testcase for an IT instruction 'code' followed by its conditional
 * block 'code2'. */
#define TEST_IT(code, code2)			\
	TESTCASE_START(code)			\
	TEST_ARG_END("")			\
	"50: nop \n\t"				\
	"1: "code" \n\t"			\
	" "code2" \n\t"				\
	"2: nop \n\t"				\
	TESTCASE_END

	DONT_TEST_IN_ITBLOCK(
		TEST_IT("it eq","moveq r0,#0")
		TEST_IT("it vc","movvc r0,#0")
		TEST_IT("it le","movle r0,#0")
		TEST_IT("ite eq","moveq r0,#0\n\t movne r1,#1")
		TEST_IT("itet vc","movvc r0,#0\n\t movvs r1,#1\n\t movvc r2,#2")
		TEST_IT("itete le","movle r0,#0\n\t movgt r1,#1\n\t movle r2,#2\n\t movgt r3,#3")
		TEST_IT("itttt le","movle r0,#0\n\t movle r1,#1\n\t movle r2,#2\n\t movle r3,#3")
		TEST_IT("iteee le","movle r0,#0\n\t movgt r1,#1\n\t movgt r2,#2\n\t movgt r3,#3")
	)

	TEST_GROUP("Load and store multiple")

	TEST_P("ldmia r",4, 16*4,"!, {r0,r7}")
	TEST_P("ldmia r",7, 16*4,"!, {r0-r6}")
	TEST_P("stmia r",4, 16*4,"!, {r0,r7}")
	TEST_P("stmia r",0, 16*4,"!, {r0-r7}")

	TEST_GROUP("Conditional branch and Supervisor Call instructions")

	/* 16-bit conditional branches keep their condition code at bit 8. */
	CONDITION_INSTRUCTIONS(8,
	TEST_BF("beq 2f")
	TEST_BB("bne 2b")
	TEST_BF("bgt 2f")
	TEST_BB("blt 2b")
	)
	TEST_UNSUPPORTED(".short 0xde00")
	TEST_UNSUPPORTED(".short 0xdeff")
	TEST_UNSUPPORTED("svc #0x00")
	TEST_UNSUPPORTED("svc #0xff")

	TEST_GROUP("Unconditional branch")

	TEST_BF(  "b 2f")
	TEST_BB(  "b 2b")
	TEST_BF_X("b 2f", SPACE_0x400)
	TEST_BB_X("b 2b", SPACE_0x400)

	TEST_GROUP("Testing instructions in IT blocks")

	TEST_ITBLOCK("subs.n r0, r0")

	verbose("\n");
}


/*
 * Generate the test cases for 32-bit Thumb instructions.  Same structure
 * as kprobe_thumb16_test_cases() above, using the TEST_* macros from
 * kprobes-test.h.
 */
void kprobe_thumb32_test_cases(void)
{
	kprobe_test_flags = 0;

	TEST_GROUP("Load/store multiple")

	TEST_UNSUPPORTED("rfedb sp")
	TEST_UNSUPPORTED("rfeia sp")
	TEST_UNSUPPORTED("rfedb sp!")
	TEST_UNSUPPORTED("rfeia sp!")

	TEST_P(   "stmia r",0, 16*4,", {r0,r8}")
	TEST_P(   "stmia r",4, 16*4,", {r0-r12,r14}")
	TEST_P(   "stmia r",7, 16*4,"!, {r8-r12,r14}")
	TEST_P(   "stmia r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")

	TEST_P(   "ldmia r",0, 16*4,", {r0,r8}")
	TEST_P(   "ldmia r",4, 0,   ", {r0-r12,r14}")
	TEST_BF_P("ldmia r",5, 8*4, "!, {r6-r12,r15}")
	TEST_P(   "ldmia r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
	TEST_BF_P("ldmia r",14,14*4,"!, {r4,pc}")

	TEST_P(   "stmdb r",0, 16*4,", {r0,r8}")
	TEST_P(   "stmdb r",4, 16*4,", {r0-r12,r14}")
	TEST_P(   "stmdb r",5, 16*4,"!, {r8-r12,r14}")
	TEST_P(   "stmdb r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")

	TEST_P(   "ldmdb r",0, 16*4,", {r0,r8}")
	TEST_P(   "ldmdb r",4, 16*4,", {r0-r12,r14}")
	TEST_BF_P("ldmdb r",5, 16*4,"!, {r6-r12,r15}")
	TEST_P(   "ldmdb r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
	TEST_BF_P("ldmdb r",14,16*4,"!, {r4,pc}")

	TEST_P(   "stmdb r",13,16*4,"!, {r3-r12,lr}")
	TEST_P(   "stmdb r",13,16*4,"!, {r3-r12}")
	TEST_P(   "stmdb r",2, 16*4,", {r3-r12,lr}")
	TEST_P(   "stmdb r",13,16*4,"!, {r2-r12,lr}")
	TEST_P(   "stmdb r",0, 16*4,", {r0-r12}")
	TEST_P(   "stmdb r",0, 16*4,", {r0-r12,lr}")

	TEST_BF_P("ldmia r",13,5*4, "!, {r3-r12,pc}")
	TEST_P(   "ldmia r",13,5*4, "!, {r3-r12}")
	TEST_BF_P("ldmia r",2, 5*4, "!, {r3-r12,pc}")
	TEST_BF_P("ldmia r",13,4*4, "!, {r2-r12,pc}")
	TEST_P(   "ldmia r",0, 16*4,", {r0-r12}")
	TEST_P(   "ldmia r",0, 16*4,", {r0-r12,lr}")

	TEST_THUMB_TO_ARM_INTERWORK_P("ldmia r",0,14*4,", {r12,pc}")
	TEST_THUMB_TO_ARM_INTERWORK_P("ldmia r",13,2*4,", {r0-r12,pc}")

	TEST_UNSUPPORTED(".short 0xe88f,0x0101 @ stmia pc, {r0,r8}")
	TEST_UNSUPPORTED(".short 0xe92f,0x5f00 @ stmdb pc!, {r8-r12,r14}")
	TEST_UNSUPPORTED(".short 0xe8bd,0xc000 @ ldmia r13!, {r14,pc}")
	TEST_UNSUPPORTED(".short 0xe93e,0xc000 @ ldmdb r14!, {r14,pc}")
	TEST_UNSUPPORTED(".short 0xe8a7,0x3f00 @ stmia r7!, {r8-r12,sp}")
	TEST_UNSUPPORTED(".short 0xe8a7,0x9f00 @ stmia r7!, {r8-r12,pc}")
	TEST_UNSUPPORTED(".short 0xe93e,0x2010 @ ldmdb r14!, {r4,sp}")

	TEST_GROUP("Load/store double or exclusive, table branch")

	TEST_P( "ldrd r0, r1, [r",1, 24,", #-16]")
	TEST(   "ldrd r12, r14, [sp, #16]")
	TEST_P( "ldrd r1, r0, [r",7, 24,", #-16]!")
	TEST(   "ldrd r14, r12, [sp, #16]!")
	TEST_P( "ldrd r1, r0, [r",7, 24,"], #16")
	TEST(   "ldrd r7, r8, [sp], #-16")

	TEST_X( "ldrd r12, r14, 3f",
		".align 3 \n\t"
		"3: .word "__stringify(VAL1)" \n\t"
		"   .word "__stringify(VAL2))

	TEST_UNSUPPORTED(".short 0xe9ff,0xec04 @ ldrd r14, r12, [pc, #16]!")
	TEST_UNSUPPORTED(".short 0xe8ff,0xec04 @ ldrd r14, r12, [pc], #16")
	TEST_UNSUPPORTED(".short 0xe9d4,0xd800 @ ldrd sp, r8, [r4]")
	TEST_UNSUPPORTED(".short 0xe9d4,0xf800 @ ldrd pc, r8, [r4]")
	TEST_UNSUPPORTED(".short 0xe9d4,0x7d00 @ ldrd r7, sp, [r4]")
	TEST_UNSUPPORTED(".short 0xe9d4,0x7f00 @ ldrd r7, pc, [r4]")

	TEST_RRP("strd r",0, VAL1,", r",1, VAL2,", [r",1, 24,", #-16]")
	TEST_RR( "strd r",12,VAL2,", r",14,VAL1,", [sp, #16]")
	TEST_RRP("strd r",1, VAL1,", r",0, VAL2,", [r",7, 24,", #-16]!")
	TEST_RR( "strd r",14,VAL2,", r",12,VAL1,", [sp, #16]!")
	TEST_RRP("strd r",1, VAL1,", r",0, VAL2,", [r",7, 24,"], #16")
	TEST_RR( "strd r",7, VAL2,", r",8, VAL1,", [sp], #-16")
	TEST_UNSUPPORTED(".short 0xe9ef,0xec04 @ strd r14, r12, [pc, #16]!")
	TEST_UNSUPPORTED(".short 0xe8ef,0xec04 @ strd r14, r12, [pc], #16")

	/* Table-branch cases: labels 9 hold the branch offset tables. */
	TEST_RX("tbb [pc, r",0, (9f-(1f+4)),"]",
		"9: \n\t"
		".byte (2f-1b-4)>>1 \n\t"
		".byte (3f-1b-4)>>1 \n\t"
		"3: mvn r0, r0 \n\t"
		"2: nop \n\t")

	TEST_RX("tbb [pc, r",4, (9f-(1f+4)+1),"]",
		"9: \n\t"
		".byte (2f-1b-4)>>1 \n\t"
		".byte (3f-1b-4)>>1 \n\t"
		"3: mvn r0, r0 \n\t"
		"2: nop \n\t")

	TEST_RRX("tbb [r",1,9f,", r",2,0,"]",
		"9: \n\t"
		".byte (2f-1b-4)>>1 \n\t"
		".byte (3f-1b-4)>>1 \n\t"
		"3: mvn r0, r0 \n\t"
		"2: nop \n\t")

	TEST_RX("tbh [pc, r",7, (9f-(1f+4))>>1,"]",
		"9: \n\t"
		".short (2f-1b-4)>>1 \n\t"
		".short (3f-1b-4)>>1 \n\t"
		"3: mvn r0, r0 \n\t"
		"2: nop \n\t")

	TEST_RX("tbh [pc, r",12, ((9f-(1f+4))>>1)+1,"]",
		"9: \n\t"
		".short (2f-1b-4)>>1 \n\t"
		".short (3f-1b-4)>>1 \n\t"
		"3: mvn r0, r0 \n\t"
		"2: nop \n\t")

	TEST_RRX("tbh [r",1,9f, ", r",14,1,"]",
		"9: \n\t"
		".short (2f-1b-4)>>1 \n\t"
		".short (3f-1b-4)>>1 \n\t"
		"3: mvn r0, r0 \n\t"
		"2: nop \n\t")

	TEST_UNSUPPORTED(".short 0xe8d1,0xf01f @ tbh [r1, pc]")
	TEST_UNSUPPORTED(".short 0xe8d1,0xf01d @ tbh [r1, sp]")
	TEST_UNSUPPORTED(".short 0xe8dd,0xf012 @ tbh [sp, r2]")

	TEST_UNSUPPORTED("strexb r0, r1, [r2]")
	TEST_UNSUPPORTED("strexh r0, r1, [r2]")
	TEST_UNSUPPORTED("strexd r0, r1, [r2]")
	TEST_UNSUPPORTED("ldrexb r0, [r1]")
	TEST_UNSUPPORTED("ldrexh r0, [r1]")
	TEST_UNSUPPORTED("ldrexd r0, [r1]")

	TEST_GROUP("Data-processing (shifted register) and (modified immediate)")

/* op Rd, Rn, Rm|#imm forms (three registers / immediate), optionally
 * with the flag-setting "s" suffix supplied by the wrappers below. */
#define _DATA_PROCESSING32_DNM(op,s,val)				\
	TEST_RR(op s".w r0, r",1, VAL1,", r",2, val, "")	\
	TEST_RR(op s" r1, r",1, VAL1,", r",2, val, ", lsl #3")	\
	TEST_RR(op s" r2, r",3, VAL1,", r",2, val, ", lsr #4")	\
	TEST_RR(op s" r3, r",3, VAL1,", r",2, val, ", asr #5")	\
	TEST_RR(op s" r4, r",5, VAL1,", r",2, N(val),", asr #6")	\
	TEST_RR(op s" r5, r",5, VAL1,", r",2, val, ", ror #7")	\
	TEST_RR(op s" r8, r",9, VAL1,", r",10,val, ", rrx")	\
	TEST_R( op s" r0, r",11,VAL1,", #0x00010001")		\
	TEST_R( op s" r11, r",0, VAL1,", #0xf5000000")		\
	TEST_R( op s" r7, r",8, VAL2,", #0x000af000")

#define DATA_PROCESSING32_DNM(op,val)		\
	_DATA_PROCESSING32_DNM(op,"",val)	\
	_DATA_PROCESSING32_DNM(op,"s",val)

/* op Rn, Rm|#imm forms (no destination, e.g. tst/teq/cmp/cmn). */
#define DATA_PROCESSING32_NM(op,val)				\
	TEST_RR(op".w r",1, VAL1,", r",2, val, "")		\
	TEST_RR(op" r",1, VAL1,", r",2, val, ", lsl #3")	\
	TEST_RR(op" r",3, VAL1,", r",2, val, ", lsr #4")	\
	TEST_RR(op" r",3, VAL1,", r",2, val, ", asr #5")	\
	TEST_RR(op" r",5, VAL1,", r",2, N(val),", asr #6")	\
	TEST_RR(op" r",5, VAL1,", r",2, val, ", ror #7")	\
	TEST_RR(op" r",9, VAL1,", r",10,val, ", rrx")		\
	TEST_R( op" r",11,VAL1,", #0x00010001")			\
	TEST_R( op" r",0, VAL1,", #0xf5000000")			\
	TEST_R( op" r",8, VAL2,", #0x000af000")

/* op Rd, Rm|#imm forms (no first operand, e.g. mov/mvn). */
#define _DATA_PROCESSING32_DM(op,s,val)			\
	TEST_R( op s".w r0, r",14, val, "")		\
	TEST_R( op s" r1, r",12, val, ", lsl #3")	\
	TEST_R( op s" r2, r",11, val, ", lsr #4")	\
	TEST_R( op s" r3, r",10, val, ", asr #5")	\
	TEST_R( op s" r4, r",9, N(val),", asr #6")	\
	TEST_R( op s" r5, r",8, val, ", ror #7")	\
	TEST_R( op s" r8, r",7,val, ", rrx")		\
	TEST(   op s" r0, #0x00010001")			\
	TEST(   op s" r11, #0xf5000000")		\
	TEST(   op s" r7, #0x000af000")			\
	TEST(   op s" r4, #0x00005a00")

#define DATA_PROCESSING32_DM(op,val)		\
	_DATA_PROCESSING32_DM(op,"",val)	\
	_DATA_PROCESSING32_DM(op,"s",val)

	DATA_PROCESSING32_DNM("and",0xf00f00ff)
	DATA_PROCESSING32_NM("tst",0xf00f00ff)
	DATA_PROCESSING32_DNM("bic",0xf00f00ff)
	DATA_PROCESSING32_DNM("orr",0xf00f00ff)
	DATA_PROCESSING32_DM("mov",VAL2)
	DATA_PROCESSING32_DNM("orn",0xf00f00ff)
	DATA_PROCESSING32_DM("mvn",VAL2)
	DATA_PROCESSING32_DNM("eor",0xf00f00ff)
	DATA_PROCESSING32_NM("teq",0xf00f00ff)
	DATA_PROCESSING32_DNM("add",VAL2)
	DATA_PROCESSING32_NM("cmn",VAL2)
	DATA_PROCESSING32_DNM("adc",VAL2)
	DATA_PROCESSING32_DNM("sbc",VAL2)
	DATA_PROCESSING32_DNM("sub",VAL2)
	DATA_PROCESSING32_NM("cmp",VAL2)
	DATA_PROCESSING32_DNM("rsb",VAL2)

	TEST_RR("pkhbt r0, r",0, HH1,", r",1, HH2,"")
	TEST_RR("pkhbt r14,r",12, HH1,", r",10,HH2,", lsl #2")
	TEST_RR("pkhtb r0, r",0, HH1,", r",1, HH2,"")
	TEST_RR("pkhtb r14,r",12, HH1,", r",10,HH2,", asr #2")

	TEST_UNSUPPORTED(".short 0xea17,0x0f0d @ tst.w r7, sp")
	TEST_UNSUPPORTED(".short 0xea17,0x0f0f @ tst.w r7, pc")
	TEST_UNSUPPORTED(".short 0xea1d,0x0f07 @ tst.w sp, r7")
	TEST_UNSUPPORTED(".short 0xea1f,0x0f07 @ tst.w pc, r7")
	TEST_UNSUPPORTED(".short 0xf01d,0x1f08 @ tst sp, #0x00080008")
	TEST_UNSUPPORTED(".short 0xf01f,0x1f08 @ tst pc, #0x00080008")

	TEST_UNSUPPORTED(".short 0xea97,0x0f0d @ teq.w r7, sp")
	TEST_UNSUPPORTED(".short 0xea97,0x0f0f @ teq.w r7, pc")
	TEST_UNSUPPORTED(".short 0xea9d,0x0f07 @ teq.w sp, r7")
	TEST_UNSUPPORTED(".short 0xea9f,0x0f07 @ teq.w pc, r7")
	TEST_UNSUPPORTED(".short 0xf09d,0x1f08 @ tst sp, #0x00080008")
	TEST_UNSUPPORTED(".short 0xf09f,0x1f08 @ tst pc, #0x00080008")

	TEST_UNSUPPORTED(".short 0xeb17,0x0f0d @ cmn.w r7, sp")
	TEST_UNSUPPORTED(".short 0xeb17,0x0f0f @ cmn.w r7, pc")
	TEST_P("cmn.w sp, r",7,0,"")
	TEST_UNSUPPORTED(".short 0xeb1f,0x0f07 @ cmn.w pc, r7")
	TEST(  "cmn sp, #0x00080008")
	TEST_UNSUPPORTED(".short 0xf11f,0x1f08 @ cmn pc, #0x00080008")

	TEST_UNSUPPORTED(".short 0xebb7,0x0f0d @ cmp.w r7, sp")
	TEST_UNSUPPORTED(".short 0xebb7,0x0f0f @ cmp.w r7, pc")
	TEST_P("cmp.w sp, r",7,0,"")
	TEST_UNSUPPORTED(".short 0xebbf,0x0f07 @ cmp.w pc, r7")
	TEST(  "cmp sp, #0x00080008")
	TEST_UNSUPPORTED(".short 0xf1bf,0x1f08 @ cmp pc, #0x00080008")

	TEST_UNSUPPORTED(".short 0xea5f,0x070d @ movs.w r7, sp")
	TEST_UNSUPPORTED(".short 0xea5f,0x070f @ movs.w r7, pc")
	TEST_UNSUPPORTED(".short 0xea5f,0x0d07 @ movs.w sp, r7")
	TEST_UNSUPPORTED(".short 0xea4f,0x0f07 @ mov.w pc, r7")
	TEST_UNSUPPORTED(".short 0xf04f,0x1d08 @ mov sp, #0x00080008")
	TEST_UNSUPPORTED(".short 0xf04f,0x1f08 @ mov pc, #0x00080008")

	TEST_R("add.w r0, sp, r",1, 4,"")
	TEST_R("adds r0, sp, r",1, 4,", asl #3")
	TEST_R("add r0, sp, r",1, 4,", asl #4")
	TEST_R("add r0, sp, r",1, 16,", ror #1")
	TEST_R("add.w sp, sp, r",1, 4,"")
	TEST_R("add sp, sp, r",1, 4,", asl #3")
	TEST_UNSUPPORTED(".short 0xeb0d,0x1d01 @ add sp, sp, r1, asl #4")
	TEST_UNSUPPORTED(".short 0xeb0d,0x0d71 @ add sp, sp, r1, ror #1")
	TEST(  "add.w r0, sp, #24")
	TEST(  "add.w sp, sp, #24")
	TEST_UNSUPPORTED(".short 0xeb0d,0x0f01 @ add pc, sp, r1")
	TEST_UNSUPPORTED(".short 0xeb0d,0x000f @ add r0, sp, pc")
	TEST_UNSUPPORTED(".short 0xeb0d,0x000d @ add r0, sp, sp")
	TEST_UNSUPPORTED(".short 0xeb0d,0x0d0f @ add sp, sp, pc")
	TEST_UNSUPPORTED(".short 0xeb0d,0x0d0d @ add sp, sp, sp")

	TEST_R("sub.w r0, sp, r",1, 4,"")
	TEST_R("subs r0, sp, r",1, 4,", asl #3")
	TEST_R("sub r0, sp, r",1, 4,", asl #4")
	TEST_R("sub r0, sp, r",1, 16,", ror #1")
	TEST_R("sub.w sp, sp, r",1, 4,"")
	TEST_R("sub sp, sp, r",1, 4,", asl #3")
	TEST_UNSUPPORTED(".short 0xebad,0x1d01 @ sub sp, sp, r1, asl #4")
	TEST_UNSUPPORTED(".short 0xebad,0x0d71 @ sub sp, sp, r1, ror #1")
	TEST_UNSUPPORTED(".short 0xebad,0x0f01 @ sub pc, sp, r1")
	TEST(  "sub.w r0, sp, #24")
	TEST(  "sub.w sp, sp, #24")

	TEST_UNSUPPORTED(".short 0xea02,0x010f @ and r1, r2, pc")
	TEST_UNSUPPORTED(".short 0xea0f,0x0103 @ and r1, pc, r3")
	TEST_UNSUPPORTED(".short 0xea02,0x0f03 @ and pc, r2, r3")
	TEST_UNSUPPORTED(".short 0xea02,0x010d @ and r1, r2, sp")
	TEST_UNSUPPORTED(".short 0xea0d,0x0103 @ and r1, sp, r3")
	TEST_UNSUPPORTED(".short 0xea02,0x0d03 @ and sp, r2, r3")
	TEST_UNSUPPORTED(".short 0xf00d,0x1108 @ and r1, sp, #0x00080008")
	TEST_UNSUPPORTED(".short 0xf00f,0x1108 @ and r1, pc, #0x00080008")
	TEST_UNSUPPORTED(".short 0xf002,0x1d08 @ and sp, r8, #0x00080008")
	TEST_UNSUPPORTED(".short 0xf002,0x1f08 @ and pc, r8, #0x00080008")

	TEST_UNSUPPORTED(".short 0xeb02,0x010f @ add r1, r2, pc")
	TEST_UNSUPPORTED(".short 0xeb0f,0x0103 @ add r1, pc, r3")
	TEST_UNSUPPORTED(".short 0xeb02,0x0f03 @ add pc, r2, r3")
	TEST_UNSUPPORTED(".short 0xeb02,0x010d @ add r1, r2, sp")
	TEST_SUPPORTED(  ".short 0xeb0d,0x0103 @ add r1, sp, r3")
	TEST_UNSUPPORTED(".short 0xeb02,0x0d03 @ add sp, r2, r3")
	TEST_SUPPORTED(  ".short 0xf10d,0x1108 @ add r1, sp, #0x00080008")
	TEST_UNSUPPORTED(".short 0xf10d,0x1f08 @ add pc, sp, #0x00080008")
	TEST_UNSUPPORTED(".short 0xf10f,0x1108 @ add r1, pc, #0x00080008")
	TEST_UNSUPPORTED(".short 0xf102,0x1d08 @ add sp, r8, #0x00080008")
	TEST_UNSUPPORTED(".short 0xf102,0x1f08 @ add pc, r8, #0x00080008")

	TEST_UNSUPPORTED(".short 0xeaa0,0x0000")
	TEST_UNSUPPORTED(".short 0xeaf0,0x0000")
	TEST_UNSUPPORTED(".short 0xeb20,0x0000")
	TEST_UNSUPPORTED(".short 0xeb80,0x0000")
	TEST_UNSUPPORTED(".short 0xebe0,0x0000")
	TEST_UNSUPPORTED(".short 0xf0a0,0x0000")
	TEST_UNSUPPORTED(".short 0xf0c0,0x0000")
	TEST_UNSUPPORTED(".short 0xf0f0,0x0000")
	TEST_UNSUPPORTED(".short 0xf120,0x0000")
	TEST_UNSUPPORTED(".short 0xf180,0x0000")
	TEST_UNSUPPORTED(".short 0xf1e0,0x0000")

	TEST_GROUP("Coprocessor instructions")

	TEST_UNSUPPORTED(".short 0xec00,0x0000")
	TEST_UNSUPPORTED(".short 0xeff0,0x0000")
	TEST_UNSUPPORTED(".short 0xfc00,0x0000")
	TEST_UNSUPPORTED(".short 0xfff0,0x0000")

	TEST_GROUP("Data-processing (plain binary immediate)")

	TEST_R("addw r0, r",1, VAL1,", #0x123")
	TEST(  "addw r14, sp, #0xf5a")
	TEST(  "addw sp, sp, #0x20")
	TEST(  "addw r7, pc, #0x888")
	TEST_UNSUPPORTED(".short 0xf20f,0x1f20 @ addw pc, pc, #0x120")
	TEST_UNSUPPORTED(".short 0xf20d,0x1f20 @ addw pc, sp, #0x120")
	TEST_UNSUPPORTED(".short 0xf20f,0x1d20 @ addw sp, pc, #0x120")
	TEST_UNSUPPORTED(".short 0xf200,0x1d20 @ addw sp, r0, #0x120")

	TEST_R("subw r0, r",1, VAL1,", #0x123")
	TEST(  "subw r14, sp, #0xf5a")
	TEST(  "subw sp, sp, #0x20")
	TEST(  "subw r7, pc, #0x888")
	TEST_UNSUPPORTED(".short 0xf2af,0x1f20 @ subw pc, pc, #0x120")
	TEST_UNSUPPORTED(".short 0xf2ad,0x1f20 @ subw pc, sp, #0x120")
	TEST_UNSUPPORTED(".short 0xf2af,0x1d20 @ subw sp, pc, #0x120")
	TEST_UNSUPPORTED(".short 0xf2a0,0x1d20 @ subw sp, r0, #0x120")

	TEST("movw r0, #0")
	TEST("movw r0, #0xffff")
	TEST("movw lr, #0xffff")
	TEST_UNSUPPORTED(".short 0xf240,0x0d00 @ movw sp, #0")
	TEST_UNSUPPORTED(".short 0xf240,0x0f00 @ movw pc, #0")

	TEST_R("movt r",0, VAL1,", #0")
	TEST_R("movt r",0, VAL2,", #0xffff")
	TEST_R("movt r",14,VAL1,", #0xffff")
	TEST_UNSUPPORTED(".short 0xf2c0,0x0d00 @ movt sp, #0")
	TEST_UNSUPPORTED(".short 0xf2c0,0x0f00 @ movt pc, #0")

	TEST_R( "ssat r0, #24, r",0, VAL1,"")
	TEST_R( "ssat r14, #24, r",12, VAL2,"")
	TEST_R( "ssat r0, #24, r",0, VAL1,", lsl #8")
	TEST_R( "ssat r14, #24, r",12, VAL2,", asr #8")
	TEST_UNSUPPORTED(".short 0xf30c,0x0d17 @ ssat sp, #24, r12")
	TEST_UNSUPPORTED(".short 0xf30c,0x0f17 @ ssat pc, #24, r12")
	TEST_UNSUPPORTED(".short 0xf30d,0x0c17 @ ssat r12, #24, sp")
	TEST_UNSUPPORTED(".short 0xf30f,0x0c17 @ ssat r12, #24, pc")

	TEST_R( "usat r0, #24, r",0, VAL1,"")
	TEST_R( "usat r14, #24, r",12, VAL2,"")
	TEST_R( "usat r0, #24, r",0, VAL1,", lsl #8")
	TEST_R( "usat r14, #24, r",12, VAL2,", asr #8")
	TEST_UNSUPPORTED(".short 0xf38c,0x0d17 @ usat sp, #24, r12")
	TEST_UNSUPPORTED(".short 0xf38c,0x0f17 @ usat pc, #24, r12")
	TEST_UNSUPPORTED(".short 0xf38d,0x0c17 @ usat r12, #24, sp")
	TEST_UNSUPPORTED(".short 0xf38f,0x0c17 @ usat r12, #24, pc")

	TEST_R( "ssat16 r0, #12, r",0, HH1,"")
	TEST_R( "ssat16 r14, #12, r",12, HH2,"")
	TEST_UNSUPPORTED(".short 0xf32c,0x0d0b @ ssat16 sp, #12, r12")
	TEST_UNSUPPORTED(".short 0xf32c,0x0f0b @ ssat16 pc, #12, r12")
	TEST_UNSUPPORTED(".short 0xf32d,0x0c0b @ ssat16 r12, #12, sp")
	TEST_UNSUPPORTED(".short 0xf32f,0x0c0b @ ssat16 r12, #12, pc")

	TEST_R( "usat16 r0, #12, r",0, HH1,"")
	TEST_R( "usat16 r14, #12, r",12, HH2,"")
	TEST_UNSUPPORTED(".short 0xf3ac,0x0d0b @ usat16 sp, #12, r12")
	TEST_UNSUPPORTED(".short 0xf3ac,0x0f0b @ usat16 pc, #12, r12")
	TEST_UNSUPPORTED(".short 0xf3ad,0x0c0b @ usat16 r12, #12, sp")
	TEST_UNSUPPORTED(".short 0xf3af,0x0c0b @ usat16 r12, #12, pc")

	TEST_R( "sbfx r0, r",0 , VAL1,", #0, #31")
	TEST_R( "sbfx r14, r",12, VAL2,", #8, #16")
	TEST_R( "sbfx r4, r",10, VAL1,", #16, #15")
	TEST_UNSUPPORTED(".short 0xf34c,0x2d0f @ sbfx sp, r12, #8, #16")
	TEST_UNSUPPORTED(".short 0xf34c,0x2f0f @ sbfx pc, r12, #8, #16")
	TEST_UNSUPPORTED(".short 0xf34d,0x2c0f @ sbfx r12, sp, #8, #16")
	TEST_UNSUPPORTED(".short 0xf34f,0x2c0f @ sbfx r12, pc, #8, #16")

	TEST_R( "ubfx r0, r",0 , VAL1,", #0, #31")
	TEST_R( "ubfx r14, r",12, VAL2,", #8, #16")
	TEST_R( "ubfx r4, r",10, VAL1,", #16, #15")
	TEST_UNSUPPORTED(".short 0xf3cc,0x2d0f @ ubfx sp, r12, #8, #16")
	TEST_UNSUPPORTED(".short 0xf3cc,0x2f0f @ ubfx pc, r12, #8, #16")
	TEST_UNSUPPORTED(".short 0xf3cd,0x2c0f @ ubfx r12, sp, #8, #16")
	TEST_UNSUPPORTED(".short 0xf3cf,0x2c0f @ ubfx r12, pc, #8, #16")

	TEST_R( "bfc r",0, VAL1,", #4, #20")
	TEST_R( "bfc r",14,VAL2,", #4, #20")
	TEST_R( "bfc r",7, VAL1,", #0, #31")
	TEST_R( "bfc r",8, VAL2,", #0, #31")
	TEST_UNSUPPORTED(".short 0xf36f,0x0d1e @ bfc sp, #0, #31")
	TEST_UNSUPPORTED(".short 0xf36f,0x0f1e @ bfc pc, #0, #31")

	TEST_RR( "bfi r",0, VAL1,", r",0 , VAL2,", #0, #31")
	TEST_RR( "bfi r",12,VAL1,", r",14 , VAL2,", #4, #20")
	TEST_UNSUPPORTED(".short 0xf36e,0x1d17 @ bfi sp, r14, #4, #20")
	TEST_UNSUPPORTED(".short 0xf36e,0x1f17 @ bfi pc, r14, #4, #20")
	TEST_UNSUPPORTED(".short 0xf36d,0x1e17 @ bfi r14, sp, #4, #20")

	TEST_GROUP("Branches and miscellaneous control")

	/* 32-bit conditional branches keep their condition code at bit 22. */
	CONDITION_INSTRUCTIONS(22,
	TEST_BF("beq.w 2f")
	TEST_BB("bne.w 2b")
	TEST_BF("bgt.w 2f")
	TEST_BB("blt.w 2b")
	TEST_BF_X("bpl.w 2f", SPACE_0x1000)
	)

	TEST_UNSUPPORTED("msr cpsr, r0")
	TEST_UNSUPPORTED("msr cpsr_f, r1")
	TEST_UNSUPPORTED("msr spsr, r2")

	TEST_UNSUPPORTED("cpsie.w i")
	TEST_UNSUPPORTED("cpsid.w i")
	TEST_UNSUPPORTED("cps 0x13")

	TEST_SUPPORTED("yield.w")
	TEST("sev.w")
	TEST("nop.w")
	TEST("wfi.w")
	TEST_SUPPORTED("wfe.w")
	TEST_UNSUPPORTED("dbg.w #0")

	TEST_UNSUPPORTED("clrex")
	TEST_UNSUPPORTED("dsb")
	TEST_UNSUPPORTED("dmb")
	TEST_UNSUPPORTED("isb")

	TEST_UNSUPPORTED("bxj r0")

	TEST_UNSUPPORTED("subs pc, lr, #4")

	TEST("mrs r0, cpsr")
	TEST("mrs r14, cpsr")
	TEST_UNSUPPORTED(".short 0xf3ef,0x8d00 @ mrs sp, spsr")
	TEST_UNSUPPORTED(".short 0xf3ef,0x8f00 @ mrs pc, spsr")
	TEST_UNSUPPORTED("mrs r0, spsr")
	TEST_UNSUPPORTED("mrs lr, spsr")

	TEST_UNSUPPORTED(".short 0xf7f0,0x8000 @ smc #0")

	TEST_UNSUPPORTED(".short 0xf7f0,0xa000 @ undefeined")

	TEST_BF(  "b.w 2f")
	TEST_BB(  "b.w 2b")
	TEST_BF_X("b.w 2f", SPACE_0x1000)

	TEST_BF(  "bl.w 2f")
	TEST_BB(  "bl.w 2b")
	TEST_BB_X("bl.w 2b", SPACE_0x1000)

	/* blx to an ARM-state subroutine assembled inline below. */
	TEST_X(	"blx __dummy_arm_subroutine",
		".arm \n\t"
		".align \n\t"
		".type __dummy_arm_subroutine, %%function \n\t"
		"__dummy_arm_subroutine: \n\t"
		"mov r0, pc \n\t"
		"bx lr \n\t"
		".thumb \n\t"
	)
	TEST(	"blx __dummy_arm_subroutine")

	TEST_GROUP("Store single data item")

/* All addressing modes for str{b,h,} of a given access 'size';
 * the privileged strt forms are expected to be rejected. */
#define SINGLE_STORE(size)						\
	TEST_RP( "str"size" r",0, VAL1,", [r",11,-1024,", #1024]")	\
	TEST_RP( "str"size" r",14,VAL2,", [r",1, -1024,", #1080]")	\
	TEST_RP( "str"size" r",0, VAL1,", [r",11,256, ", #-120]")	\
	TEST_RP( "str"size" r",14,VAL2,", [r",1, 256, ", #-128]")	\
	TEST_RP( "str"size" r",0, VAL1,", [r",11,24, "], #120")	\
	TEST_RP( "str"size" r",14,VAL2,", [r",1, 24, "], #128")	\
	TEST_RP( "str"size" r",0, VAL1,", [r",11,24, "], #-120")	\
	TEST_RP( "str"size" r",14,VAL2,", [r",1, 24, "], #-128")	\
	TEST_RP( "str"size" r",0, VAL1,", [r",11,24, ", #120]!")	\
	TEST_RP( "str"size" r",14,VAL2,", [r",1, 24, ", #128]!")	\
	TEST_RP( "str"size" r",0, VAL1,", [r",11,256, ", #-120]!")	\
	TEST_RP( "str"size" r",14,VAL2,", [r",1, 256, ", #-128]!")	\
	TEST_RPR("str"size".w r",0, VAL1,", [r",1, 0,", r",2, 4,"]")	\
	TEST_RPR("str"size" r",14,VAL2,", [r",10,0,", r",11,4,", lsl #1]") \
	TEST_R(  "str"size".w r",7, VAL1,", [sp, #24]")		\
	TEST_RP( "str"size".w r",0, VAL2,", [r",0,0, "]")		\
	TEST_UNSUPPORTED("str"size"t r0, [r1, #4]")

	SINGLE_STORE("b")
	SINGLE_STORE("h")
	SINGLE_STORE("")

	TEST("str sp, [sp]")
	TEST_UNSUPPORTED(".short 0xf8cf,0xe000 @ str r14, [pc]")
	TEST_UNSUPPORTED(".short 0xf8ce,0xf000 @ str pc, [r14]")

	TEST_GROUP("Advanced SIMD element or structure load/store instructions")

	TEST_UNSUPPORTED(".short 0xf900,0x0000")
	TEST_UNSUPPORTED(".short 0xf92f,0xffff")
	TEST_UNSUPPORTED(".short 0xf980,0x0000")
	TEST_UNSUPPORTED(".short 0xf9ef,0xffff")

	TEST_GROUP("Load single data item and memory hints")

/* All addressing modes for ldr{b,sb,h,sh,} of a given 'size', including
 * literal-pool loads; the privileged ldrt forms are rejected. */
#define SINGLE_LOAD(size)					\
	TEST_P( "ldr"size" r0, [r",11,-1024, ", #1024]")	\
	TEST_P( "ldr"size" r14, [r",1, -1024,", #1080]")	\
	TEST_P( "ldr"size" r0, [r",11,256, ", #-120]")		\
	TEST_P( "ldr"size" r14, [r",1, 256, ", #-128]")		\
	TEST_P( "ldr"size" r0, [r",11,24, "], #120")		\
	TEST_P( "ldr"size" r14, [r",1, 24, "], #128")		\
	TEST_P( "ldr"size" r0, [r",11,24, "], #-120")		\
	TEST_P( "ldr"size" r14, [r",1,24, "], #-128")		\
	TEST_P( "ldr"size" r0, [r",11,24, ", #120]!")		\
	TEST_P( "ldr"size" r14, [r",1, 24, ", #128]!")		\
	TEST_P( "ldr"size" r0, [r",11,256, ", #-120]!")		\
	TEST_P( "ldr"size" r14, [r",1, 256, ", #-128]!")	\
	TEST_PR("ldr"size".w r0, [r",1, 0,", r",2, 4,"]")	\
	TEST_PR("ldr"size" r14, [r",10,0,", r",11,4,", lsl #1]") \
	TEST_X( "ldr"size".w r0, 3f",				\
		".align 3 \n\t"					\
		"3: .word "__stringify(VAL1))			\
	TEST_X( "ldr"size".w r14, 3f",				\
		".align 3 \n\t"					\
		"3: .word "__stringify(VAL2))			\
	TEST(   "ldr"size".w r7, 3b")				\
	TEST(   "ldr"size".w r7, [sp, #24]")			\
	TEST_P( "ldr"size".w r0, [r",0,0, "]")			\
	TEST_UNSUPPORTED("ldr"size"t r0, [r1, #4]")

	SINGLE_LOAD("b")
	SINGLE_LOAD("sb")
	SINGLE_LOAD("h")
	SINGLE_LOAD("sh")
	SINGLE_LOAD("")

	TEST_BF_P("ldr pc, [r",14, 15*4,"]")
	TEST_P(   "ldr sp, [r",14, 13*4,"]")
	TEST_BF_R("ldr pc, [sp, r",14, 15*4,"]")
	TEST_R(   "ldr sp, [sp, r",14, 13*4,"]")
	TEST_THUMB_TO_ARM_INTERWORK_P("ldr pc, [r",0,0,", #15*4]")
	TEST_SUPPORTED("ldr sp, 99f")
	TEST_SUPPORTED("ldr pc, 99f")

	TEST_UNSUPPORTED(".short 0xf854,0x700d @ ldr r7, [r4, sp]")
	TEST_UNSUPPORTED(".short 0xf854,0x700f @ ldr r7, [r4, pc]")
	TEST_UNSUPPORTED(".short 0xf814,0x700d @ ldrb r7, [r4, sp]")
	TEST_UNSUPPORTED(".short 0xf814,0x700f @ ldrb r7, [r4, pc]")
	TEST_UNSUPPORTED(".short 0xf89f,0xd004 @ ldrb sp, 99f")
	TEST_UNSUPPORTED(".short 0xf814,0xd008 @ ldrb sp, [r4, r8]")
	TEST_UNSUPPORTED(".short 0xf894,0xd000 @ ldrb sp, [r4]")

	TEST_UNSUPPORTED(".short 0xf860,0x0000") /* Unallocated space */
	TEST_UNSUPPORTED(".short 0xf9ff,0xffff") /* Unallocated space */
	TEST_UNSUPPORTED(".short 0xf950,0x0000") /* Unallocated space */
	TEST_UNSUPPORTED(".short 0xf95f,0xffff") /* Unallocated space */
	TEST_UNSUPPORTED(".short 0xf800,0x0800") /* Unallocated space */
	TEST_UNSUPPORTED(".short 0xf97f,0xfaff") /* Unallocated space */

	TEST( "pli [pc, #4]")
	TEST( "pli [pc, #-4]")
	TEST( "pld [pc, #4]")
	TEST( "pld [pc, #-4]")
	TEST_P( "pld [r",0,-1024,", #1024]")
	TEST( ".short 0xf8b0,0xf400 @ pldw [r0, #1024]")
	TEST_P( "pli [r",4, 0b,", #1024]")
	TEST_P( "pld [r",7, 120,", #-120]")
	TEST( ".short 0xf837,0xfc78 @ pldw [r7, #-120]")
	TEST_P( "pli [r",11,120,", #-120]")
	TEST( "pld [sp, #0]")
	TEST_PR("pld [r",7, 24, ", r",0, 16,"]")
	TEST_PR("pld [r",8, 24, ", r",12,16,", lsl #3]")
	TEST_SUPPORTED(".short 0xf837,0xf000 @ pldw [r7, r0]")
	TEST_SUPPORTED(".short 0xf838,0xf03c @ pldw [r8, r12, lsl #3]");
	TEST_RR("pli [r",12,0b,", r",0, 16,"]")
	TEST_RR("pli [r",0, 0b,", r",12,16,", lsl #3]")
	TEST_R( "pld [sp, r",1, 16,"]")
	TEST_UNSUPPORTED(".short 0xf817,0xf00d @pld [r7, sp]")
	TEST_UNSUPPORTED(".short 0xf817,0xf00f @pld [r7, pc]")

	TEST_GROUP("Data-processing (register)")

/* Register-shift-by-register forms: op Rd, Rn, Rm. */
#define SHIFTS32(op)					\
	TEST_RR(op" r0, r",1, VAL1,", r",2, 3, "")	\
	TEST_RR(op" r14, r",12,VAL2,", r",11,10,"")

	SHIFTS32("lsl")
	SHIFTS32("lsls")
	SHIFTS32("lsr")
	SHIFTS32("lsrs")
	SHIFTS32("asr")
	SHIFTS32("asrs")
	SHIFTS32("ror")
	SHIFTS32("rors")

	TEST_UNSUPPORTED(".short 0xfa01,0xff02 @ lsl pc, r1, r2")
	TEST_UNSUPPORTED(".short 0xfa01,0xfd02 @ lsl sp, r1, r2")
	TEST_UNSUPPORTED(".short 0xfa0f,0xf002 @ lsl r0, pc, r2")
	TEST_UNSUPPORTED(".short 0xfa0d,0xf002 @ lsl r0, sp, r2")
	TEST_UNSUPPORTED(".short 0xfa01,0xf00f @ lsl r0, r1, pc")
	TEST_UNSUPPORTED(".short 0xfa01,0xf00d @ lsl r0, r1, sp")

	TEST_RR( "sxtah r0, r",0,  HH1,", r",1, HH2,"")
	TEST_RR( "sxtah r14,r",12, HH2,", r",10,HH1,", ror #8")
	TEST_R(  "sxth r8, r",7,  HH1,"")

	TEST_UNSUPPORTED(".short 0xfa0f,0xff87 @ sxth pc, r7");
	TEST_UNSUPPORTED(".short 0xfa0f,0xfd87 @ sxth sp, r7");
	TEST_UNSUPPORTED(".short 0xfa0f,0xf88f @ sxth r8, pc");
	TEST_UNSUPPORTED(".short 0xfa0f,0xf88d @ sxth r8, sp");

	TEST_RR( "uxtah r0, r",0,  HH1,", r",1, HH2,"")
	TEST_RR( "uxtah r14,r",12, HH2,", r",10,HH1,", ror #8")
	TEST_R(  "uxth r8, r",7,  HH1,"")

	TEST_RR( "sxtab16 r0, r",0,  HH1,", r",1, HH2,"")
	TEST_RR( "sxtab16 r14,r",12, HH2,", r",10,HH1,", ror #8")
	TEST_R(  "sxtb16 r8, r",7,  HH1,"")

	TEST_RR( "uxtab16 r0, r",0,  HH1,", r",1, HH2,"")
	TEST_RR( "uxtab16 r14,r",12, HH2,", r",10,HH1,", ror #8")
	TEST_R(  "uxtb16 r8, r",7,  HH1,"")

	TEST_RR( "sxtab r0, r",0,  HH1,", r",1, HH2,"")
	TEST_RR( "sxtab r14,r",12, HH2,", r",10,HH1,", ror #8")
	TEST_R(  "sxtb r8, r",7,  HH1,"")

	TEST_RR( "uxtab r0, r",0,  HH1,", r",1, HH2,"")
	TEST_RR( "uxtab r14,r",12, HH2,", r",10,HH1,", ror #8")
	TEST_R(  "uxtb r8, r",7,  HH1,"")

	TEST_UNSUPPORTED(".short 0xfa60,0x00f0")
	TEST_UNSUPPORTED(".short 0xfa7f,0xffff")

/* All six parallel add/sub variants for a given prefix 'op'
 * (s/q/sh and u/uq/uh families). */
#define PARALLEL_ADD_SUB(op)					\
	TEST_RR(  op"add16 r0, r",0,  HH1,", r",1, HH2,"")	\
	TEST_RR(  op"add16 r14, r",12,HH2,", r",10,HH1,"")	\
	TEST_RR(  op"asx r0, r",0,  HH1,", r",1, HH2,"")	\
	TEST_RR(  op"asx r14, r",12,HH2,", r",10,HH1,"")	\
	TEST_RR(  op"sax r0, r",0,  HH1,", r",1, HH2,"")	\
	TEST_RR(  op"sax r14, r",12,HH2,", r",10,HH1,"")	\
	TEST_RR(  op"sub16 r0, r",0,  HH1,", r",1, HH2,"")	\
	TEST_RR(  op"sub16 r14, r",12,HH2,", r",10,HH1,"")	\
	TEST_RR(  op"add8 r0, r",0,  HH1,", r",1, HH2,"")	\
	TEST_RR(  op"add8 r14, r",12,HH2,", r",10,HH1,"")	\
	TEST_RR(  op"sub8 r0, r",0,  HH1,", r",1, HH2,"")	\
	TEST_RR(  op"sub8 r14, r",12,HH2,", r",10,HH1,"")

	TEST_GROUP("Parallel addition and subtraction, signed")

	PARALLEL_ADD_SUB("s")
	PARALLEL_ADD_SUB("q")
	PARALLEL_ADD_SUB("sh")

	TEST_GROUP("Parallel addition and subtraction, unsigned")

	PARALLEL_ADD_SUB("u")
	PARALLEL_ADD_SUB("uq")
	PARALLEL_ADD_SUB("uh")

	TEST_GROUP("Miscellaneous operations")

	TEST_RR("qadd r0, r",1, VAL1,", r",2, VAL2,"")
	TEST_RR("qadd lr, r",9, VAL2,", r",8, VAL1,"")
	TEST_RR("qsub r0, r",1, VAL1,", r",2, VAL2,"")
	TEST_RR("qsub lr, r",9, VAL2,", r",8, VAL1,"")
	TEST_RR("qdadd r0, r",1, VAL1,", r",2, VAL2,"")
	TEST_RR("qdadd lr, r",9, VAL2,", r",8, VAL1,"")
	TEST_RR("qdsub r0, r",1, VAL1,", r",2, VAL2,"")
	TEST_RR("qdsub lr, r",9, VAL2,", r",8, VAL1,"")

	TEST_R("rev.w r0, r",0,   VAL1,"")
	TEST_R("rev r14, r",12, VAL2,"")
	TEST_R("rev16.w r0, r",0,   VAL1,"")
	TEST_R("rev16 r14, r",12, VAL2,"")
	TEST_R("rbit r0, r",0,   VAL1,"")
	TEST_R("rbit r14, r",12, VAL2,"")
	TEST_R("revsh.w r0, r",0,   VAL1,"")
	TEST_R("revsh r14, r",12, VAL2,"")

	TEST_UNSUPPORTED(".short 0xfa9c,0xff8c @ rev pc, r12");
	TEST_UNSUPPORTED(".short 0xfa9c,0xfd8c @ rev sp, r12");
	TEST_UNSUPPORTED(".short 0xfa9f,0xfe8f @ rev r14, pc");
	TEST_UNSUPPORTED(".short 0xfa9d,0xfe8d @ rev r14, sp");

	TEST_RR("sel r0, r",0,  VAL1,", r",1, VAL2,"")
	TEST_RR("sel r14, r",12,VAL1,", r",10, VAL2,"")

	TEST_R("clz r0, r",0, 0x0,"")
	TEST_R("clz r7, r",14,0x1,"")
	TEST_R("clz lr, r",7, 0xffffffff,"")

	TEST_UNSUPPORTED(".short 0xfa80,0xf030") /* Unallocated space */
	TEST_UNSUPPORTED(".short 0xfaff,0xff7f") /* Unallocated space */
	TEST_UNSUPPORTED(".short 0xfab0,0xf000") /* Unallocated space */
	TEST_UNSUPPORTED(".short 0xfaff,0xff7f") /* Unallocated space */

	TEST_GROUP("Multiply, multiply accumulate, and absolute difference operations")

	TEST_RR(    "mul r0, r",1, VAL1,", r",2, VAL2,"")
	TEST_RR(    "mul r7, r",8, VAL2,", r",9, VAL2,"")
	TEST_UNSUPPORTED(".short 0xfb08,0xff09 @ mul pc, r8, r9")
	TEST_UNSUPPORTED(".short 0xfb08,0xfd09 @ mul sp, r8, r9")
	TEST_UNSUPPORTED(".short 0xfb0f,0xf709 @ mul r7, pc, r9")
	TEST_UNSUPPORTED(".short 0xfb0d,0xf709 @ mul r7, sp, r9")
	TEST_UNSUPPORTED(".short 0xfb08,0xf70f @ mul r7, r8, pc")
	TEST_UNSUPPORTED(".short 0xfb08,0xf70d @ mul r7, r8, sp")

	TEST_RRR(   "mla r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
	TEST_RRR(   "mla r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
	TEST_UNSUPPORTED(".short 0xfb08,0xaf09 @ mla pc, r8, r9, r10");
	TEST_UNSUPPORTED(".short 0xfb08,0xad09 @ mla sp, r8, r9, r10");
	TEST_UNSUPPORTED(".short 0xfb0f,0xa709 @ mla r7, pc, r9, r10");
	TEST_UNSUPPORTED(".short 0xfb0d,0xa709 @ mla r7, sp, r9, r10");
	TEST_UNSUPPORTED(".short 0xfb08,0xa70f @ mla r7, r8, pc, r10");
	TEST_UNSUPPORTED(".short 0xfb08,0xa70d @ mla r7, r8, sp, r10");
	TEST_UNSUPPORTED(".short 0xfb08,0xd709 @ mla r7, r8, r9, sp");

	TEST_RRR(   "mls r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
	TEST_RRR(   "mls r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")

	TEST_RRR(   "smlabb r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
	TEST_RRR(   "smlabb r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
	TEST_RRR(   "smlatb r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
	TEST_RRR(   "smlatb r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
	TEST_RRR(   "smlabt r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
	TEST_RRR(   "smlabt r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
	TEST_RRR(   "smlatt r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
	TEST_RRR(   "smlatt r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
	TEST_RR(    "smulbb r0, r",1, VAL1,", r",2, VAL2,"")
	TEST_RR(    "smulbb r7, r",8, VAL3,", r",9, VAL1,"")
	TEST_RR(    "smultb r0, r",1, VAL1,", r",2, VAL2,"")
	TEST_RR(    "smultb r7, r",8, VAL3,", r",9, VAL1,"")
	TEST_RR(    "smulbt r0, r",1, VAL1,", r",2, VAL2,"")
	TEST_RR(    "smulbt r7, r",8, VAL3,", r",9, VAL1,"")
	TEST_RR(    "smultt r0, r",1, VAL1,", r",2, VAL2,"")
	TEST_RR(    "smultt r7, r",8, VAL3,", r",9, VAL1,"")

	TEST_RRR(   "smlad r0, r",0,  HH1,", r",1, HH2,", r",2, VAL1,"")
	TEST_RRR(   "smlad r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
	TEST_RRR(   "smladx r0, r",0,  HH1,", r",1, HH2,", r",2, VAL1,"")
	TEST_RRR(   "smladx r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
	TEST_RR(    "smuad r0, r",0,  HH1,", r",1, HH2,"")
	TEST_RR(    "smuad r14, r",12,HH2,", r",10,HH1,"")
	TEST_RR(    "smuadx r0, r",0,  HH1,", r",1, HH2,"")
	TEST_RR(    "smuadx r14, r",12,HH2,", r",10,HH1,"")

	TEST_RRR(   "smlawb r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
	TEST_RRR(   "smlawb r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
	TEST_RRR(   "smlawt r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
	TEST_RRR(   "smlawt r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
	TEST_RR(    "smulwb r0, r",1, VAL1,", r",2, VAL2,"")
	TEST_RR(    "smulwb r7, r",8, VAL3,", r",9, VAL1,"")
	TEST_RR(    "smulwt r0, r",1, VAL1,", r",2, VAL2,"")
	TEST_RR(    "smulwt r7, r",8, VAL3,", r",9, VAL1,"")

	TEST_RRR(   "smlsd r0, r",0,  HH1,", r",1, HH2,", r",2, VAL1,"")
	TEST_RRR(   "smlsd r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
	TEST_RRR(   "smlsdx r0, r",0,  HH1,", r",1, HH2,", r",2, VAL1,"")
	TEST_RRR(   "smlsdx r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
	TEST_RR(    "smusd r0, r",0,  HH1,", r",1, HH2,"")
	TEST_RR(    "smusd r14, r",12,HH2,", r",10,HH1,"")
	TEST_RR(    "smusdx r0, r",0,  HH1,", r",1, HH2,"")
	TEST_RR(    "smusdx r14, r",12,HH2,", r",10,HH1,"")

	TEST_RRR(   "smmla r0, r",0,  VAL1,", r",1, VAL2,", r",2, VAL1,"")
	TEST_RRR(   "smmla r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
	TEST_RRR(   "smmlar r0, r",0,  VAL1,", r",1, VAL2,", r",2, VAL1,"")
	TEST_RRR(   "smmlar r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
	TEST_RR(    "smmul r0, r",0,  VAL1,", r",1, VAL2,"")
	TEST_RR(    "smmul r14, r",12,VAL2,", r",10,VAL1,"")
	TEST_RR(    "smmulr r0, r",0,  VAL1,", r",1, VAL2,"")
	TEST_RR(    "smmulr r14, r",12,VAL2,", r",10,VAL1,"")

	TEST_RRR(   "smmls r0, r",0,  VAL1,", r",1, VAL2,", r",2, VAL1,"")
	TEST_RRR(   "smmls r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
	TEST_RRR(   "smmlsr r0, r",0,  VAL1,", r",1, VAL2,", r",2, VAL1,"")
	TEST_RRR(   "smmlsr r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")

	TEST_RRR(   "usada8 r0, r",0,  VAL1,", r",1, VAL2,", r",2, VAL3,"")
	TEST_RRR(   "usada8 r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL3,"")
	TEST_RR(    "usad8 r0, r",0,  VAL1,", r",1, VAL2,"")
	TEST_RR(    "usad8 r14, r",12,VAL2,", r",10,VAL1,"")

	TEST_UNSUPPORTED(".short 0xfb00,0xf010") /* Unallocated space */
	TEST_UNSUPPORTED(".short 0xfb0f,0xff1f") /* Unallocated space */
	TEST_UNSUPPORTED(".short 0xfb70,0xf010") /* Unallocated space */
	TEST_UNSUPPORTED(".short 0xfb7f,0xff1f") /* Unallocated space */
TEST_UNSUPPORTED(".short 0xfb70,0x0010") /* Unallocated space */ TEST_UNSUPPORTED(".short 0xfb7f,0xff1f") /* Unallocated space */ TEST_GROUP("Long multiply, long multiply accumulate, and divide") TEST_RR( "smull r0, r1, r",2, VAL1,", r",3, VAL2,"") TEST_RR( "smull r7, r8, r",9, VAL2,", r",10, VAL1,"") TEST_UNSUPPORTED(".short 0xfb89,0xf80a @ smull pc, r8, r9, r10"); TEST_UNSUPPORTED(".short 0xfb89,0xd80a @ smull sp, r8, r9, r10"); TEST_UNSUPPORTED(".short 0xfb89,0x7f0a @ smull r7, pc, r9, r10"); TEST_UNSUPPORTED(".short 0xfb89,0x7d0a @ smull r7, sp, r9, r10"); TEST_UNSUPPORTED(".short 0xfb8f,0x780a @ smull r7, r8, pc, r10"); TEST_UNSUPPORTED(".short 0xfb8d,0x780a @ smull r7, r8, sp, r10"); TEST_UNSUPPORTED(".short 0xfb89,0x780f @ smull r7, r8, r9, pc"); TEST_UNSUPPORTED(".short 0xfb89,0x780d @ smull r7, r8, r9, sp"); TEST_RR( "umull r0, r1, r",2, VAL1,", r",3, VAL2,"") TEST_RR( "umull r7, r8, r",9, VAL2,", r",10, VAL1,"") TEST_RRRR( "smlal r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "smlal r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRRR( "smlalbb r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "smlalbb r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRRR( "smlalbt r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "smlalbt r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRRR( "smlaltb r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "smlaltb r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRRR( "smlaltt r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "smlaltt r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRRR( "smlald r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2) TEST_RRRR( "smlald r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1) TEST_RRRR( "smlaldx r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2) TEST_RRRR( "smlaldx r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1) TEST_RRRR( "smlsld r",0, VAL1,", 
r",1, VAL2, ", r",0, HH1,", r",1, HH2) TEST_RRRR( "smlsld r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1) TEST_RRRR( "smlsldx r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2) TEST_RRRR( "smlsldx r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1) TEST_RRRR( "umlal r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "umlal r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRRR( "umaal r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "umaal r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_GROUP("Coprocessor instructions") TEST_UNSUPPORTED(".short 0xfc00,0x0000") TEST_UNSUPPORTED(".short 0xffff,0xffff") TEST_GROUP("Testing instructions in IT blocks") TEST_ITBLOCK("sub.w r0, r0") verbose("\n"); }
OneOfMany07/tty-ng
arch/arm/kernel/kprobes-test-thumb.c
C
gpl-2.0
44,841
/* * drivers/net/phy/cicada.c * * Driver for Cicada PHYs * * Author: Andy Fleming * * Copyright (c) 2004 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/unistd.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/phy.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/uaccess.h> /* Cicada Extended Control Register 1 */ #define MII_CIS8201_EXT_CON1 0x17 #define MII_CIS8201_EXTCON1_INIT 0x0000 /* Cicada Interrupt Mask Register */ #define MII_CIS8201_IMASK 0x19 #define MII_CIS8201_IMASK_IEN 0x8000 #define MII_CIS8201_IMASK_SPEED 0x4000 #define MII_CIS8201_IMASK_LINK 0x2000 #define MII_CIS8201_IMASK_DUPLEX 0x1000 #define MII_CIS8201_IMASK_MASK 0xf000 /* Cicada Interrupt Status Register */ #define MII_CIS8201_ISTAT 0x1a #define MII_CIS8201_ISTAT_STATUS 0x8000 #define MII_CIS8201_ISTAT_SPEED 0x4000 #define MII_CIS8201_ISTAT_LINK 0x2000 #define MII_CIS8201_ISTAT_DUPLEX 0x1000 /* Cicada Auxiliary Control/Status Register */ #define MII_CIS8201_AUX_CONSTAT 0x1c #define MII_CIS8201_AUXCONSTAT_INIT 0x0004 #define MII_CIS8201_AUXCONSTAT_DUPLEX 0x0020 #define MII_CIS8201_AUXCONSTAT_SPEED 0x0018 #define MII_CIS8201_AUXCONSTAT_GBIT 0x0010 #define MII_CIS8201_AUXCONSTAT_100 0x0008 MODULE_DESCRIPTION("Cicadia PHY driver"); MODULE_AUTHOR("Andy Fleming"); MODULE_LICENSE("GPL"); static int cis820x_config_init(struct phy_device *phydev) { int err; err = phy_write(phydev, MII_CIS8201_AUX_CONSTAT, 
MII_CIS8201_AUXCONSTAT_INIT); if (err < 0) return err; err = phy_write(phydev, MII_CIS8201_EXT_CON1, MII_CIS8201_EXTCON1_INIT); return err; } static int cis820x_ack_interrupt(struct phy_device *phydev) { int err = phy_read(phydev, MII_CIS8201_ISTAT); return (err < 0) ? err : 0; } static int cis820x_config_intr(struct phy_device *phydev) { int err; if(phydev->interrupts == PHY_INTERRUPT_ENABLED) err = phy_write(phydev, MII_CIS8201_IMASK, MII_CIS8201_IMASK_MASK); else err = phy_write(phydev, MII_CIS8201_IMASK, 0); return err; } /* Cicada 8201, a.k.a Vitesse VSC8201 */ static struct phy_driver cis8201_driver = { .phy_id = 0x000fc410, .name = "Cicada Cis8201", .phy_id_mask = 0x000ffff0, .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = &cis820x_config_init, .config_aneg = &genphy_config_aneg, .read_status = &genphy_read_status, .ack_interrupt = &cis820x_ack_interrupt, .config_intr = &cis820x_config_intr, .driver = { .owner = THIS_MODULE,}, }; /* Cicada 8204 */ static struct phy_driver cis8204_driver = { .phy_id = 0x000fc440, .name = "Cicada Cis8204", .phy_id_mask = 0x000fffc0, .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = &cis820x_config_init, .config_aneg = &genphy_config_aneg, .read_status = &genphy_read_status, .ack_interrupt = &cis820x_ack_interrupt, .config_intr = &cis820x_config_intr, .driver = { .owner = THIS_MODULE,}, }; static int __init cicada_init(void) { int ret; ret = phy_driver_register(&cis8204_driver); if (ret) goto err1; ret = phy_driver_register(&cis8201_driver); if (ret) goto err2; return 0; err2: phy_driver_unregister(&cis8204_driver); err1: return ret; } static void __exit cicada_exit(void) { phy_driver_unregister(&cis8204_driver); phy_driver_unregister(&cis8201_driver); } module_init(cicada_init); module_exit(cicada_exit); static struct mdio_device_id __maybe_unused cicada_tbl[] = { { 0x000fc410, 0x000ffff0 }, { 0x000fc440, 0x000fffc0 }, { } }; MODULE_DEVICE_TABLE(mdio, cicada_tbl);
PatrikKT/KofilaKernel
drivers/net/phy/cicada.c
C
gpl-2.0
4,138
/* * sound/oss/dev_table.c * * Device call tables. * * * Copyright (C) by Hannu Savolainen 1993-1997 * * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL) * Version 2 (June 1991). See the "COPYING" file distributed with this software * for more info. */ #include <linux/init.h> #include "sound_config.h" struct audio_operations *audio_devs[MAX_AUDIO_DEV]; EXPORT_SYMBOL(audio_devs); int num_audiodevs; EXPORT_SYMBOL(num_audiodevs); struct mixer_operations *mixer_devs[MAX_MIXER_DEV]; EXPORT_SYMBOL(mixer_devs); int num_mixers; EXPORT_SYMBOL(num_mixers); struct synth_operations *synth_devs[MAX_SYNTH_DEV+MAX_MIDI_DEV]; EXPORT_SYMBOL(synth_devs); int num_synths; struct midi_operations *midi_devs[MAX_MIDI_DEV]; EXPORT_SYMBOL(midi_devs); int num_midis; EXPORT_SYMBOL(num_midis); struct sound_timer_operations *sound_timer_devs[MAX_TIMER_DEV] = { &default_sound_timer, NULL }; EXPORT_SYMBOL(sound_timer_devs); int num_sound_timers = 1; static int sound_alloc_audiodev(void); int sound_install_audiodrv(int vers, char *name, struct audio_driver *driver, int driver_size, int flags, unsigned int format_mask, void *devc, int dma1, int dma2) { struct audio_driver *d; struct audio_operations *op; int num; if (vers != AUDIO_DRIVER_VERSION || driver_size > sizeof(struct audio_driver)) { printk(KERN_ERR "Sound: Incompatible audio driver for %s\n", name); return -(EINVAL); } num = sound_alloc_audiodev(); if (num == -1) { printk(KERN_ERR "sound: Too many audio drivers\n"); return -(EBUSY); } d = (struct audio_driver *) (sound_mem_blocks[sound_nblocks] = vmalloc(sizeof(struct audio_driver))); sound_nblocks++; if (sound_nblocks >= MAX_MEM_BLOCKS) sound_nblocks = MAX_MEM_BLOCKS - 1; op = (struct audio_operations *) (sound_mem_blocks[sound_nblocks] = vzalloc(sizeof(struct audio_operations))); sound_nblocks++; if (sound_nblocks >= MAX_MEM_BLOCKS) sound_nblocks = MAX_MEM_BLOCKS - 1; if (d == NULL || op == NULL) { printk(KERN_ERR "Sound: Can't allocate driver for (%s)\n", 
name); sound_unload_audiodev(num); return -(ENOMEM); } init_waitqueue_head(&op->in_sleeper); init_waitqueue_head(&op->out_sleeper); init_waitqueue_head(&op->poll_sleeper); if (driver_size < sizeof(struct audio_driver)) memset((char *) d, 0, sizeof(struct audio_driver)); memcpy((char *) d, (char *) driver, driver_size); op->d = d; strlcpy(op->name, name, sizeof(op->name)); op->flags = flags; op->format_mask = format_mask; op->devc = devc; /* * Hardcoded defaults */ audio_devs[num] = op; DMAbuf_init(num, dma1, dma2); audio_init_devices(); return num; } EXPORT_SYMBOL(sound_install_audiodrv); int sound_install_mixer(int vers, char *name, struct mixer_operations *driver, int driver_size, void *devc) { struct mixer_operations *op; int n = sound_alloc_mixerdev(); if (n == -1) { printk(KERN_ERR "Sound: Too many mixer drivers\n"); return -EBUSY; } if (vers != MIXER_DRIVER_VERSION || driver_size > sizeof(struct mixer_operations)) { printk(KERN_ERR "Sound: Incompatible mixer driver for %s\n", name); return -EINVAL; } /* FIXME: This leaks a mixer_operations struct every time its called until you unload sound! 
*/ op = (struct mixer_operations *) (sound_mem_blocks[sound_nblocks] = vzalloc(sizeof(struct mixer_operations))); sound_nblocks++; if (sound_nblocks >= MAX_MEM_BLOCKS) sound_nblocks = MAX_MEM_BLOCKS - 1; if (op == NULL) { printk(KERN_ERR "Sound: Can't allocate mixer driver for (%s)\n", name); return -ENOMEM; } memcpy((char *) op, (char *) driver, driver_size); strlcpy(op->name, name, sizeof(op->name)); op->devc = devc; mixer_devs[n] = op; return n; } EXPORT_SYMBOL(sound_install_mixer); void sound_unload_audiodev(int dev) { if (dev != -1) { DMAbuf_deinit(dev); audio_devs[dev] = NULL; unregister_sound_dsp((dev<<4)+3); } } EXPORT_SYMBOL(sound_unload_audiodev); static int sound_alloc_audiodev(void) { int i = register_sound_dsp(&oss_sound_fops, -1); if(i==-1) return i; i>>=4; if(i>=num_audiodevs) num_audiodevs = i + 1; return i; } int sound_alloc_mididev(void) { int i = register_sound_midi(&oss_sound_fops, -1); if(i==-1) return i; i>>=4; if(i>=num_midis) num_midis = i + 1; return i; } EXPORT_SYMBOL(sound_alloc_mididev); int sound_alloc_synthdev(void) { int i; for (i = 0; i < MAX_SYNTH_DEV; i++) { if (synth_devs[i] == NULL) { if (i >= num_synths) num_synths++; return i; } } return -1; } EXPORT_SYMBOL(sound_alloc_synthdev); int sound_alloc_mixerdev(void) { int i = register_sound_mixer(&oss_sound_fops, -1); if(i==-1) return -1; i>>=4; if(i>=num_mixers) num_mixers = i + 1; return i; } EXPORT_SYMBOL(sound_alloc_mixerdev); int sound_alloc_timerdev(void) { int i; for (i = 0; i < MAX_TIMER_DEV; i++) { if (sound_timer_devs[i] == NULL) { if (i >= num_sound_timers) num_sound_timers++; return i; } } return -1; } EXPORT_SYMBOL(sound_alloc_timerdev); void sound_unload_mixerdev(int dev) { if (dev != -1) { mixer_devs[dev] = NULL; unregister_sound_mixer(dev<<4); num_mixers--; } } EXPORT_SYMBOL(sound_unload_mixerdev); void sound_unload_mididev(int dev) { if (dev != -1) { midi_devs[dev] = NULL; unregister_sound_midi((dev<<4)+2); } } EXPORT_SYMBOL(sound_unload_mididev); void 
sound_unload_synthdev(int dev) { if (dev != -1) synth_devs[dev] = NULL; } EXPORT_SYMBOL(sound_unload_synthdev); void sound_unload_timerdev(int dev) { if (dev != -1) sound_timer_devs[dev] = NULL; } EXPORT_SYMBOL(sound_unload_timerdev);
drsn0w/android_kernel_lge_bullhead
sound/oss/dev_table.c
C
gpl-2.0
5,590
<?php /** * @package Joomla.Libraries * @subpackage Helper * * @copyright Copyright (C) 2005 - 2014 Open Source Matters, Inc. All rights reserved. * @license GNU General Public License version 2 or later; see LICENSE */ defined('JPATH_PLATFORM') or die; /** * Versions helper class, provides methods to perform various tasks relevant * versioning of content. * * @since 3.2 */ class JHelperContenthistory extends JHelper { /** * Alias for storing type in versions table * * @var string * @since 3.2 */ public $typeAlias = null; /** * Constructor * * @param string $typeAlias The type of content to be versioned (for example, 'com_content.article'). * * @since 3.2 */ public function __construct($typeAlias = null) { $this->typeAlias = $typeAlias; } /** * Method to delete the history for an item. * * @param JTable $table JTable object being versioned * * @return boolean true on success, otherwise false. * * @since 3.2 */ public function deleteHistory($table) { $key = $table->getKeyName(); $id = $table->$key; $typeTable = JTable::getInstance('Contenttype', 'JTable'); $typeId = $typeTable->getTypeId($this->typeAlias); $db = JFactory::getDbo(); $query = $db->getQuery(true); $query->delete($db->quoteName('#__ucm_history')) ->where($db->quoteName('ucm_item_id') . ' = ' . (int) $id) ->where($db->quoteName('ucm_type_id') . ' = ' . (int) $typeId); $db->setQuery($query); return $db->execute(); } /** * Method to get a list of available versions of this item. * * @param integer $typeId Type id for this component item. * @param mixed $id Primary key of row to get history for. * * @return mixed The return value or null if the query failed. * * @since 3.2 */ public function getHistory($typeId, $id) { $db = JFactory::getDbo(); $query = $db->getQuery(true); $query->select($db->quoteName('h.version_note') . ',' . $db->quoteName('h.save_date') . ',' . $db->quoteName('u.name')) ->from($db->quoteName('#__ucm_history') . ' AS h ') ->leftJoin($db->quoteName('#__users') . ' AS u ON ' . 
$db->quoteName('u.id') . ' = ' . $db->quoteName('h.editor_user_id')) ->where($db->quoteName('ucm_item_id') . ' = ' . $db->quote($id)) ->where($db->quoteName('ucm_type_id') . ' = ' . (int) $typeId) ->order($db->quoteName('save_date') . ' DESC '); $db->setQuery($query); return $db->loadObjectList(); } /** * Method to save a version snapshot to the content history table. * * @param JTable $table JTable object being versioned * * @return boolean True on success, otherwise false. * * @since 3.2 */ public function store($table) { $dataObject = $this->getDataObject($table); $historyTable = JTable::getInstance('Contenthistory', 'JTable'); $typeTable = JTable::getInstance('Contenttype', 'JTable'); $typeTable->load(array('type_alias' => $this->typeAlias)); $historyTable->set('ucm_type_id', $typeTable->type_id); $key = $table->getKeyName(); $historyTable->set('ucm_item_id', $table->$key); // Don't store unless we have a non-zero item id if (!$historyTable->ucm_item_id) { return true; } $historyTable->set('version_data', json_encode($dataObject)); $input = JFactory::getApplication()->input; $data = $input->get('jform', array(), 'array'); $versionName = false; if (isset($data['version_note'])) { $versionName = JFilterInput::getInstance()->clean($data['version_note'], 'string'); $historyTable->set('version_note', $versionName); } // Don't save if hash already exists and same version note $historyTable->set('sha1_hash', $historyTable->getSha1($dataObject, $typeTable)); if ($historyRow = $historyTable->getHashMatch()) { if (!$versionName || ($historyRow->version_note == $versionName)) { return true; } else { // Update existing row to set version note $historyTable->set('version_id', $historyRow->version_id); } } $result = $historyTable->store(); if ($maxVersions = JComponentHelper::getParams('com_content')->get('history_limit', 0)) { $historyTable->deleteOldVersions($maxVersions); } return $result; } }
joomla-projects/GSoC-SQL-Optimization
libraries/cms/helper/contenthistory.php
PHP
gpl-2.0
4,231
/* packet-credssp.c * Routines for CredSSP (Credential Security Support Provider) packet dissection * Graeme Lunt 2011 * * $Id$ * * Wireshark - Network traffic analyzer * By Gerald Combs <gerald@wireshark.org> * Copyright 1998 Gerald Combs * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "config.h" #include <glib.h> #include <epan/packet.h> #include <epan/asn1.h> #include <epan/tap.h> #include <epan/exported_pdu.h> #include "packet-ber.h" #include "packet-credssp.h" #define PNAME "Credential Security Support Provider" #define PSNAME "CredSSP" #define PFNAME "credssp" #define TS_PASSWORD_CREDS 1 #define TS_SMARTCARD_CREDS 2 static gint creds_type; static gint exported_pdu_tap = -1; /* Initialize the protocol and registered fields */ static int proto_credssp = -1; /* List of dissectors to call for negoToken data */ static heur_dissector_list_t credssp_heur_subdissector_list; static int hf_credssp_TSPasswordCreds = -1; /* TSPasswordCreds */ static int hf_credssp_TSSmartCardCreds = -1; /* TSSmartCardCreds */ static int hf_credssp_TSCredentials = -1; /* TSCredentials */ #include "packet-credssp-hf.c" /* Initialize the subtree pointers */ static gint ett_credssp = -1; #include "packet-credssp-ett.c" #include "packet-credssp-fn.c" /* * Dissect CredSSP PDUs */ static void dissect_credssp(tvbuff_t *tvb, packet_info 
*pinfo, proto_tree *parent_tree) { proto_item *item=NULL; proto_tree *tree=NULL; if(parent_tree){ item = proto_tree_add_item(parent_tree, proto_credssp, tvb, 0, -1, ENC_NA); tree = proto_item_add_subtree(item, ett_credssp); } col_set_str(pinfo->cinfo, COL_PROTOCOL, "CredSSP"); col_clear(pinfo->cinfo, COL_INFO); creds_type = -1; dissect_TSRequest_PDU(tvb, pinfo, tree); } static gboolean dissect_credssp_heur(tvbuff_t *tvb, packet_info *pinfo, proto_tree *parent_tree, void *data _U_) { asn1_ctx_t asn1_ctx; int offset = 0; gint8 ber_class; gboolean pc; gint32 tag; guint32 length; asn1_ctx_init(&asn1_ctx, ASN1_ENC_BER, TRUE, pinfo); /* Look for SEQUENCE, CONTEXT 0, and INTEGER 2 */ if(tvb_length(tvb) > 7) { offset = get_ber_identifier(tvb, offset, &ber_class, &pc, &tag); if((ber_class == BER_CLASS_UNI) && (tag == BER_UNI_TAG_SEQUENCE) && (pc == TRUE)) { offset = get_ber_length(tvb, offset, NULL, NULL); offset = get_ber_identifier(tvb, offset, &ber_class, &pc, &tag); if((ber_class == BER_CLASS_CON) && (tag == 0)) { offset = get_ber_length(tvb, offset, NULL, NULL); offset = get_ber_identifier(tvb, offset, &ber_class, &pc, &tag); if((ber_class == BER_CLASS_UNI) && (tag == BER_UNI_TAG_INTEGER)) { offset = get_ber_length(tvb, offset, &length, NULL); if((length == 1) && (tvb_get_guint8(tvb, offset) == 2)) { if (have_tap_listener(exported_pdu_tap)) { exp_pdu_data_t *exp_pdu_data; exp_pdu_data = load_export_pdu_tags(pinfo, "credssp", -1, (EXP_PDU_TAG_IP_SRC_BIT | EXP_PDU_TAG_IP_DST_BIT | EXP_PDU_TAG_SRC_PORT_BIT | EXP_PDU_TAG_DST_PORT_BIT | EXP_PDU_TAG_ORIG_FNO_BIT)); exp_pdu_data->tvb_length = tvb_length(tvb); exp_pdu_data->pdu_tvb = tvb; tap_queue_packet(exported_pdu_tap, pinfo, exp_pdu_data); } dissect_credssp(tvb, pinfo, parent_tree); return TRUE; } } } } } return FALSE; } /*--- proto_register_credssp -------------------------------------------*/ void proto_register_credssp(void) { /* List of fields */ static hf_register_info hf[] = { { &hf_credssp_TSPasswordCreds, { 
"TSPasswordCreds", "credssp.TSPasswordCreds", FT_NONE, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_credssp_TSSmartCardCreds, { "TSSmartCardCreds", "credssp.TSSmartCardCreds", FT_NONE, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_credssp_TSCredentials, { "TSCredentials", "credssp.TSCredentials", FT_NONE, BASE_NONE, NULL, 0, NULL, HFILL }}, #include "packet-credssp-hfarr.c" }; /* List of subtrees */ static gint *ett[] = { &ett_credssp, #include "packet-credssp-ettarr.c" }; /* Register protocol */ proto_credssp = proto_register_protocol(PNAME, PSNAME, PFNAME); register_dissector("credssp", dissect_credssp, proto_credssp); /* Register fields and subtrees */ proto_register_field_array(proto_credssp, hf, array_length(hf)); proto_register_subtree_array(ett, array_length(ett)); /* heuristic dissectors for any premable e.g. CredSSP before RDP */ register_heur_dissector_list("credssp", &credssp_heur_subdissector_list); } /*--- proto_reg_handoff_credssp --- */ void proto_reg_handoff_credssp(void) { heur_dissector_add("ssl", dissect_credssp_heur, proto_credssp); exported_pdu_tap = find_tap_id(EXPORT_PDU_TAP_NAME_LAYER_7); }
hashbrowncipher/wireshark
asn1/credssp/packet-credssp-template.c
C
gpl-2.0
5,658
/* Portions of this file are subject to the following copyrights. See * the Net-SNMP's COPYING file for more details and other copyrights * that may apply: */ /* * Copyright © 2003 Sun Microsystems, Inc. All rights reserved. * Use is subject to license terms specified in the COPYING file * distributed with the Net-SNMP package. */ #include <net-snmp/net-snmp-config.h> #include <sys/types.h> #include <sys/stat.h> #if TIME_WITH_SYS_TIME # ifdef WIN32 # include <sys/timeb.h> # else # include <sys/time.h> # endif # include <time.h> #else # if HAVE_SYS_TIME_H # include <sys/time.h> # else # include <time.h> # endif #endif #if HAVE_WINSOCK_H #include <winsock.h> #endif #if HAVE_STRING_H #include <string.h> #endif #if HAVE_STRING_H #include <string.h> #endif #include <net-snmp/net-snmp-includes.h> #include <net-snmp/agent/net-snmp-agent-includes.h> #include "struct.h" #include "file.h" #include "util_funcs.h" #if HAVE_DMALLOC_H #include <dmalloc.h> #endif #define MAXFILE 20 struct filestat fileTable[MAXFILE]; int fileCount; void init_file(void) { struct variable2 file_table[] = { {FILE_INDEX, ASN_INTEGER, RONLY, var_file_table, 1, {1}}, {FILE_NAME, ASN_OCTET_STR, RONLY, var_file_table, 1, {2}}, {FILE_SIZE, ASN_INTEGER, RONLY, var_file_table, 1, {3}}, {FILE_MAX, ASN_INTEGER, RONLY, var_file_table, 1, {4}}, {FILE_ERROR, ASN_INTEGER, RONLY, var_file_table, 1, {100}}, {FILE_MSG, ASN_OCTET_STR, RONLY, var_file_table, 1, {101}} }; /* * Define the OID pointer to the top of the mib tree that we're * registering underneath */ oid file_variables_oid[] = { UCDAVIS_MIB, 15, 1 }; /* * register ourselves with the agent to handle our mib tree */ REGISTER_MIB("ucd-snmp/file", file_table, variable2, file_variables_oid); snmpd_register_config_handler("file", file_parse_config, file_free_config, "file [maxsize]"); } void file_free_config(void) { fileCount = 0; } void file_parse_config(const char *token, char *cptr) { char *cp; if (fileCount < MAXFILE) { fileTable[fileCount].max = -1; cp = 
copy_nword(cptr, fileTable[fileCount].name, FILE_NAME_MAX); if (strlen(fileTable[fileCount].name) >= FILE_NAME_MAX - 1) { config_perror("file name too long"); return; } if (cp) fileTable[fileCount].max = strtoul(cp, NULL, 10); else fileTable[fileCount].max = -1; fileCount++; } } void updateFile(int iindex) { struct stat sb; if (stat(fileTable[iindex].name, &sb) == 0) fileTable[iindex].size = sb.st_size >> 10; } /* * OID functions */ u_char * var_file_table(struct variable *vp, oid * name, size_t * length, int exact, size_t * var_len, WriteMethod ** write_method) { static long long_ret; static char error[256]; int iindex; struct filestat *file; if (header_simple_table (vp, name, length, exact, var_len, write_method, fileCount)) return (NULL); iindex = name[*length - 1] - 1; updateFile(iindex); file = &fileTable[iindex]; switch (vp->magic) { case FILE_INDEX: long_ret = iindex + 1; return (u_char *) & long_ret; case FILE_NAME: *var_len = strlen(file->name); return (u_char *) file->name; case FILE_SIZE: long_ret = file->size; return (u_char *) & long_ret; case FILE_MAX: long_ret = file->max; return (u_char *) & long_ret; case FILE_ERROR: if (file->max >= 0 && file->size > file->max) long_ret = 1; else long_ret = 0; return (u_char *) & long_ret; case FILE_MSG: if (file->max >= 0 && file->size > file->max) snprintf(error, sizeof(error), FILE_ERROR_MSG, file->name, file->max, file->size); else strcpy(error, ""); *var_len = strlen(error); return (u_char *) error; default: DEBUGMSGTL(("snmpd", "unknown sub-id %d in var_file_table\n", vp->magic)); } return NULL; }
scs/uclinux
user/net-snmp/net-snmp-5.2.1/agent/mibgroup/ucd-snmp/file.c
C
gpl-2.0
4,297
/* * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. 
* */ #include "precompiled.hpp" #include "classfile/javaClasses.hpp" #include "classfile/systemDictionary.hpp" #include "classfile/vmSymbols.hpp" #include "code/codeCache.hpp" #include "code/debugInfoRec.hpp" #include "code/nmethod.hpp" #include "code/pcDesc.hpp" #include "code/scopeDesc.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/oopMapCache.hpp" #include "memory/resourceArea.hpp" #include "oops/instanceKlass.hpp" #include "oops/oop.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/objectMonitor.hpp" #include "runtime/objectMonitor.inline.hpp" #include "runtime/signature.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/synchronizer.hpp" #include "runtime/vframe.hpp" #include "runtime/vframeArray.hpp" #include "runtime/vframe_hp.hpp" PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC vframe::vframe(const frame* fr, const RegisterMap* reg_map, JavaThread* thread) : _reg_map(reg_map), _thread(thread) { assert(fr != NULL, "must have frame"); _fr = *fr; } vframe::vframe(const frame* fr, JavaThread* thread) : _reg_map(thread), _thread(thread) { assert(fr != NULL, "must have frame"); _fr = *fr; } vframe* vframe::new_vframe(const frame* f, const RegisterMap* reg_map, JavaThread* thread) { // Interpreter frame if (f->is_interpreted_frame()) { return new interpretedVFrame(f, reg_map, thread); } // Compiled frame CodeBlob* cb = f->cb(); if (cb != NULL) { if (cb->is_nmethod()) { nmethod* nm = (nmethod*)cb; return new compiledVFrame(f, reg_map, thread, nm); } if (f->is_runtime_frame()) { // Skip this frame and try again. 
RegisterMap temp_map = *reg_map; frame s = f->sender(&temp_map); return new_vframe(&s, &temp_map, thread); } } // External frame return new externalVFrame(f, reg_map, thread); } vframe* vframe::sender() const { RegisterMap temp_map = *register_map(); assert(is_top(), "just checking"); if (_fr.is_entry_frame() && _fr.is_first_frame()) return NULL; frame s = _fr.real_sender(&temp_map); if (s.is_first_frame()) return NULL; return vframe::new_vframe(&s, &temp_map, thread()); } vframe* vframe::top() const { vframe* vf = (vframe*) this; while (!vf->is_top()) vf = vf->sender(); return vf; } javaVFrame* vframe::java_sender() const { vframe* f = sender(); while (f != NULL) { if (f->is_java_frame()) return javaVFrame::cast(f); f = f->sender(); } return NULL; } // ------------- javaVFrame -------------- GrowableArray<MonitorInfo*>* javaVFrame::locked_monitors() { assert(SafepointSynchronize::is_at_safepoint() || JavaThread::current() == thread(), "must be at safepoint or it's a java frame of the current thread"); GrowableArray<MonitorInfo*>* mons = monitors(); GrowableArray<MonitorInfo*>* result = new GrowableArray<MonitorInfo*>(mons->length()); if (mons->is_empty()) return result; bool found_first_monitor = false; ObjectMonitor *pending_monitor = thread()->current_pending_monitor(); ObjectMonitor *waiting_monitor = thread()->current_waiting_monitor(); oop pending_obj = (pending_monitor != NULL ? (oop) pending_monitor->object() : (oop) NULL); oop waiting_obj = (waiting_monitor != NULL ? 
(oop) waiting_monitor->object() : (oop) NULL); for (int index = (mons->length()-1); index >= 0; index--) { MonitorInfo* monitor = mons->at(index); if (monitor->eliminated() && is_compiled_frame()) continue; // skip eliminated monitor oop obj = monitor->owner(); if (obj == NULL) continue; // skip unowned monitor // // Skip the monitor that the thread is blocked to enter or waiting on // if (!found_first_monitor && (obj == pending_obj || obj == waiting_obj)) { continue; } found_first_monitor = true; result->append(monitor); } return result; } static void print_locked_object_class_name(outputStream* st, Handle obj, const char* lock_state) { if (obj.not_null()) { st->print("\t- %s <" INTPTR_FORMAT "> ", lock_state, (address)obj()); if (obj->klass() == SystemDictionary::Class_klass()) { Klass* target_klass = java_lang_Class::as_Klass(obj()); st->print_cr("(a java.lang.Class for %s)", InstanceKlass::cast(target_klass)->external_name()); } else { Klass* k = obj->klass(); st->print_cr("(a %s)", k->external_name()); } } } void javaVFrame::print_lock_info_on(outputStream* st, int frame_count) { ResourceMark rm; // If this is the first frame, and java.lang.Object.wait(...) then print out the receiver. 
if (frame_count == 0) { if (method()->name() == vmSymbols::wait_name() && method()->method_holder()->name() == vmSymbols::java_lang_Object()) { StackValueCollection* locs = locals(); if (!locs->is_empty()) { StackValue* sv = locs->at(0); if (sv->type() == T_OBJECT) { Handle o = locs->at(0)->get_obj(); print_locked_object_class_name(st, o, "waiting on"); } } } else if (thread()->current_park_blocker() != NULL) { oop obj = thread()->current_park_blocker(); Klass* k = obj->klass(); st->print_cr("\t- %s <" INTPTR_FORMAT "> (a %s)", "parking to wait for ", (address)obj, k->external_name()); } } // Print out all monitors that we have locked or are trying to lock GrowableArray<MonitorInfo*>* mons = monitors(); if (!mons->is_empty()) { bool found_first_monitor = false; for (int index = (mons->length()-1); index >= 0; index--) { MonitorInfo* monitor = mons->at(index); if (monitor->eliminated() && is_compiled_frame()) { // Eliminated in compiled code if (monitor->owner_is_scalar_replaced()) { Klass* k = java_lang_Class::as_Klass(monitor->owner_klass()); st->print("\t- eliminated <owner is scalar replaced> (a %s)", k->external_name()); } else { oop obj = monitor->owner(); if (obj != NULL) { print_locked_object_class_name(st, obj, "eliminated"); } } continue; } if (monitor->owner() != NULL) { // the monitor is associated with an object, i.e., it is locked // First, assume we have the monitor locked. 
If we haven't found an // owned monitor before and this is the first frame, then we need to // see if we have completed the lock or we are blocked trying to // acquire it - we can only be blocked if the monitor is inflated const char *lock_state = "locked"; // assume we have the monitor locked if (!found_first_monitor && frame_count == 0) { markOop mark = monitor->owner()->mark(); if (mark->has_monitor() && ( // we have marked ourself as pending on this monitor mark->monitor() == thread()->current_pending_monitor() || // we are not the owner of this monitor !mark->monitor()->is_entered(thread()) )) { lock_state = "waiting to lock"; } } found_first_monitor = true; print_locked_object_class_name(st, monitor->owner(), lock_state); } } } } // ------------- interpretedVFrame -------------- u_char* interpretedVFrame::bcp() const { return fr().interpreter_frame_bcp(); } void interpretedVFrame::set_bcp(u_char* bcp) { fr().interpreter_frame_set_bcp(bcp); } intptr_t* interpretedVFrame::locals_addr_at(int offset) const { assert(fr().is_interpreted_frame(), "frame should be an interpreted frame"); return fr().interpreter_frame_local_at(offset); } GrowableArray<MonitorInfo*>* interpretedVFrame::monitors() const { GrowableArray<MonitorInfo*>* result = new GrowableArray<MonitorInfo*>(5); for (BasicObjectLock* current = (fr().previous_monitor_in_interpreter_frame(fr().interpreter_frame_monitor_begin())); current >= fr().interpreter_frame_monitor_end(); current = fr().previous_monitor_in_interpreter_frame(current)) { result->push(new MonitorInfo(current->obj(), current->lock(), false, false)); } return result; } int interpretedVFrame::bci() const { return method()->bci_from(bcp()); } Method* interpretedVFrame::method() const { return fr().interpreter_frame_method(); } static StackValue* create_stack_value_from_oop_map(const InterpreterOopMap& oop_mask, int index, const intptr_t* const addr) { assert(index >= 0 && index < oop_mask.number_of_entries(), "invariant"); // categorize 
using oop_mask if (oop_mask.is_oop(index)) { // reference (oop) "r" Handle h(addr != NULL ? (*(oop*)addr) : (oop)NULL); return new StackValue(h); } // value (integer) "v" return new StackValue(addr != NULL ? *addr : 0); } static bool is_in_expression_stack(const frame& fr, const intptr_t* const addr) { assert(addr != NULL, "invariant"); // Ensure to be 'inside' the expresion stack (i.e., addr >= sp for Intel). // In case of exceptions, the expression stack is invalid and the sp // will be reset to express this condition. if (frame::interpreter_frame_expression_stack_direction() > 0) { return addr <= fr.interpreter_frame_tos_address(); } return addr >= fr.interpreter_frame_tos_address(); } static void stack_locals(StackValueCollection* result, int length, const InterpreterOopMap& oop_mask, const frame& fr) { assert(result != NULL, "invariant"); for (int i = 0; i < length; ++i) { const intptr_t* const addr = fr.interpreter_frame_local_at(i); assert(addr != NULL, "invariant"); assert(addr >= fr.sp(), "must be inside the frame"); StackValue* const sv = create_stack_value_from_oop_map(oop_mask, i, addr); assert(sv != NULL, "sanity check"); result->add(sv); } } static void stack_expressions(StackValueCollection* result, int length, int max_locals, const InterpreterOopMap& oop_mask, const frame& fr) { assert(result != NULL, "invariant"); for (int i = 0; i < length; ++i) { const intptr_t* addr = fr.interpreter_frame_expression_stack_at(i); assert(addr != NULL, "invariant"); if (!is_in_expression_stack(fr, addr)) { // Need to ensure no bogus escapes. 
addr = NULL; } StackValue* const sv = create_stack_value_from_oop_map(oop_mask, i + max_locals, addr); assert(sv != NULL, "sanity check"); result->add(sv); } } StackValueCollection* interpretedVFrame::locals() const { return stack_data(false); } StackValueCollection* interpretedVFrame::expressions() const { return stack_data(true); } /* * Worker routine for fetching references and/or values * for a particular bci in the interpretedVFrame. * * Returns data for either "locals" or "expressions", * using bci relative oop_map (oop_mask) information. * * @param expressions bool switch controlling what data to return (false == locals / true == expressions) * */ StackValueCollection* interpretedVFrame::stack_data(bool expressions) const { InterpreterOopMap oop_mask; // oopmap for current bci if (TraceDeoptimization && Verbose) { methodHandle m_h(Thread::current(), method()); OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask); } else { method()->mask_for(bci(), &oop_mask); } const int mask_len = oop_mask.number_of_entries(); // If the method is native, method()->max_locals() is not telling the truth. // For our purposes, max locals instead equals the size of parameters. const int max_locals = method()->is_native() ? method()->size_of_parameters() : method()->max_locals(); assert(mask_len >= max_locals, "invariant"); const int length = expressions ? mask_len - max_locals : max_locals; assert(length >= 0, "invariant"); StackValueCollection* const result = new StackValueCollection(length); if (0 == length) { return result; } if (expressions) { stack_expressions(result, length, max_locals, oop_mask, fr()); } else { stack_locals(result, length, oop_mask, fr()); } assert(length == result->size(), "invariant"); return result; } void interpretedVFrame::set_locals(StackValueCollection* values) const { if (values == NULL || values->size() == 0) return; // If the method is native, max_locals is not telling the truth. 
// maxlocals then equals the size of parameters const int max_locals = method()->is_native() ? method()->size_of_parameters() : method()->max_locals(); assert(max_locals == values->size(), "Mismatch between actual stack format and supplied data"); // handle locals for (int i = 0; i < max_locals; i++) { // Find stack location intptr_t *addr = locals_addr_at(i); // Depending on oop/int put it in the right package const StackValue* const sv = values->at(i); assert(sv != NULL, "sanity check"); if (sv->type() == T_OBJECT) { *(oop *) addr = (sv->get_obj())(); } else { // integer *addr = sv->get_int(); } } } // ------------- cChunk -------------- entryVFrame::entryVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread) : externalVFrame(fr, reg_map, thread) {} void vframeStreamCommon::found_bad_method_frame() { // 6379830 Cut point for an assertion that occasionally fires when // we are using the performance analyzer. // Disable this assert when testing the analyzer with fastdebug. // -XX:SuppressErrorAt=vframe.cpp:XXX (XXX=following line number) assert(false, "invalid bci or invalid scope desc"); } // top-frame will be skipped vframeStream::vframeStream(JavaThread* thread, frame top_frame, bool stop_at_java_call_stub) : vframeStreamCommon(thread) { _stop_at_java_call_stub = stop_at_java_call_stub; // skip top frame, as it may not be at safepoint _frame = top_frame.sender(&_reg_map); while (!fill_from_frame()) { _frame = _frame.sender(&_reg_map); } } // Step back n frames, skip any pseudo frames in between. // This function is used in Class.forName, Class.newInstance, Method.Invoke, // AccessController.doPrivileged. void vframeStreamCommon::security_get_caller_frame(int depth) { assert(depth >= 0, err_msg("invalid depth: %d", depth)); for (int n = 0; !at_end(); security_next()) { if (!method()->is_ignored_by_security_stack_walk()) { if (n == depth) { // We have reached the desired depth; return. 
return; } n++; // this is a non-skipped frame; count it against the depth } } // NOTE: At this point there were not enough frames on the stack // to walk to depth. Callers of this method have to check for at_end. } void vframeStreamCommon::security_next() { if (method()->is_prefixed_native()) { skip_prefixed_method_and_wrappers(); // calls next() } else { next(); } } void vframeStreamCommon::skip_prefixed_method_and_wrappers() { ResourceMark rm; HandleMark hm; int method_prefix_count = 0; char** method_prefixes = JvmtiExport::get_all_native_method_prefixes(&method_prefix_count); KlassHandle prefixed_klass(method()->method_holder()); const char* prefixed_name = method()->name()->as_C_string(); size_t prefixed_name_len = strlen(prefixed_name); int prefix_index = method_prefix_count-1; while (!at_end()) { next(); if (method()->method_holder() != prefixed_klass()) { break; // classes don't match, can't be a wrapper } const char* name = method()->name()->as_C_string(); size_t name_len = strlen(name); size_t prefix_len = prefixed_name_len - name_len; if (prefix_len <= 0 || strcmp(name, prefixed_name + prefix_len) != 0) { break; // prefixed name isn't prefixed version of method name, can't be a wrapper } for (; prefix_index >= 0; --prefix_index) { const char* possible_prefix = method_prefixes[prefix_index]; size_t possible_prefix_len = strlen(possible_prefix); if (possible_prefix_len == prefix_len && strncmp(possible_prefix, prefixed_name, prefix_len) == 0) { break; // matching prefix found } } if (prefix_index < 0) { break; // didn't find the prefix, can't be a wrapper } prefixed_name = name; prefixed_name_len = name_len; } } void vframeStreamCommon::skip_reflection_related_frames() { while (!at_end() && (JDK_Version::is_gte_jdk14x_version() && UseNewReflection && (method()->method_holder()->is_subclass_of(SystemDictionary::reflect_MethodAccessorImpl_klass()) || method()->method_holder()->is_subclass_of(SystemDictionary::reflect_ConstructorAccessorImpl_klass())))) { 
next(); } } #ifndef PRODUCT void vframe::print() { if (WizardMode) _fr.print_value_on(tty,NULL); } void vframe::print_value() const { ((vframe*)this)->print(); } void entryVFrame::print_value() const { ((entryVFrame*)this)->print(); } void entryVFrame::print() { vframe::print(); tty->print_cr("C Chunk inbetween Java"); tty->print_cr("C link " INTPTR_FORMAT, _fr.link()); } // ------------- javaVFrame -------------- static void print_stack_values(const char* title, StackValueCollection* values) { if (values->is_empty()) return; tty->print_cr("\t%s:", title); values->print(); } void javaVFrame::print() { ResourceMark rm; vframe::print(); tty->print("\t"); method()->print_value(); tty->cr(); tty->print_cr("\tbci: %d", bci()); print_stack_values("locals", locals()); print_stack_values("expressions", expressions()); GrowableArray<MonitorInfo*>* list = monitors(); if (list->is_empty()) return; tty->print_cr("\tmonitor list:"); for (int index = (list->length()-1); index >= 0; index--) { MonitorInfo* monitor = list->at(index); tty->print("\t obj\t"); if (monitor->owner_is_scalar_replaced()) { Klass* k = java_lang_Class::as_Klass(monitor->owner_klass()); tty->print("( is scalar replaced %s)", k->external_name()); } else if (monitor->owner() == NULL) { tty->print("( null )"); } else { monitor->owner()->print_value(); tty->print("(" INTPTR_FORMAT ")", (address)monitor->owner()); } if (monitor->eliminated() && is_compiled_frame()) tty->print(" ( lock is eliminated )"); tty->cr(); tty->print("\t "); monitor->lock()->print_on(tty); tty->cr(); } } void javaVFrame::print_value() const { Method* m = method(); InstanceKlass* k = m->method_holder(); tty->print_cr("frame( sp=" INTPTR_FORMAT ", unextended_sp=" INTPTR_FORMAT ", fp=" INTPTR_FORMAT ", pc=" INTPTR_FORMAT ")", _fr.sp(), _fr.unextended_sp(), _fr.fp(), _fr.pc()); tty->print("%s.%s", k->internal_name(), m->name()->as_C_string()); if (!m->is_native()) { Symbol* source_name = k->source_file_name(); int line_number = 
m->line_number_from_bci(bci()); if (source_name != NULL && (line_number != -1)) { tty->print("(%s:%d)", source_name->as_C_string(), line_number); } } else { tty->print("(Native Method)"); } // Check frame size and print warning if it looks suspiciously large if (fr().sp() != NULL) { RegisterMap map = *register_map(); uint size = fr().frame_size(&map); #ifdef _LP64 if (size > 8*K) warning("SUSPICIOUSLY LARGE FRAME (%d)", size); #else if (size > 4*K) warning("SUSPICIOUSLY LARGE FRAME (%d)", size); #endif } } bool javaVFrame::structural_compare(javaVFrame* other) { // Check static part if (method() != other->method()) return false; if (bci() != other->bci()) return false; // Check locals StackValueCollection *locs = locals(); StackValueCollection *other_locs = other->locals(); assert(locs->size() == other_locs->size(), "sanity check"); int i; for(i = 0; i < locs->size(); i++) { // it might happen the compiler reports a conflict and // the interpreter reports a bogus int. if ( is_compiled_frame() && locs->at(i)->type() == T_CONFLICT) continue; if (other->is_compiled_frame() && other_locs->at(i)->type() == T_CONFLICT) continue; if (!locs->at(i)->equal(other_locs->at(i))) return false; } // Check expressions StackValueCollection* exprs = expressions(); StackValueCollection* other_exprs = other->expressions(); assert(exprs->size() == other_exprs->size(), "sanity check"); for(i = 0; i < exprs->size(); i++) { if (!exprs->at(i)->equal(other_exprs->at(i))) return false; } return true; } void javaVFrame::print_activation(int index) const { // frame number and method tty->print("%2d - ", index); ((vframe*)this)->print_value(); tty->cr(); if (WizardMode) { ((vframe*)this)->print(); tty->cr(); } } void javaVFrame::verify() const { } void interpretedVFrame::verify() const { } // ------------- externalVFrame -------------- void externalVFrame::print() { _fr.print_value_on(tty,NULL); } void externalVFrame::print_value() const { ((vframe*)this)->print(); } #endif // PRODUCT
bloodstars/OpenJDK
hotspot/src/share/vm/runtime/vframe.cpp
C++
gpl-2.0
22,350
/* Copyright (c) 2003-2015, CKSource - Frederico Knabben. All rights reserved. For licensing, see LICENSE.html or http://ckeditor.com/license */ CKEDITOR.lang['hu']={"editor":"HTML szerkesztő","editorPanel":"Rich Text szerkesztő panel","common":{"editorHelp":"Segítségért nyomjon ALT 0","browseServer":"Böngészés a szerveren","url":"Hivatkozás","protocol":"Protokoll","upload":"Feltöltés","uploadSubmit":"Küldés a szerverre","image":"Kép","flash":"Flash","form":"Űrlap","checkbox":"Jelölőnégyzet","radio":"Választógomb","textField":"Szövegmező","textarea":"Szövegterület","hiddenField":"Rejtettmező","button":"Gomb","select":"Legördülő lista","imageButton":"Képgomb","notSet":"<nincs beállítva>","id":"Azonosító","name":"Név","langDir":"Írás iránya","langDirLtr":"Balról jobbra","langDirRtl":"Jobbról balra","langCode":"Nyelv kódja","longDescr":"Részletes leírás webcíme","cssClass":"Stíluskészlet","advisoryTitle":"Súgócimke","cssStyle":"Stílus","ok":"Rendben","cancel":"Mégsem","close":"Bezárás","preview":"Előnézet","resize":"Húzza az átméretezéshez","generalTab":"Általános","advancedTab":"További opciók","validateNumberFailed":"A mezőbe csak számokat írhat.","confirmNewPage":"Minden nem mentett változás el fog veszni! Biztosan be szeretné tölteni az oldalt?","confirmCancel":"Az űrlap tartalma megváltozott, ám a változásokat nem rögzítette. 
Biztosan be szeretné zárni az űrlapot?","options":"Beállítások","target":"Cél","targetNew":"Új ablak (_blank)","targetTop":"Legfelső ablak (_top)","targetSelf":"Aktuális ablakban (_self)","targetParent":"Szülő ablak (_parent)","langDirLTR":"Balról jobbra (LTR)","langDirRTL":"Jobbról balra (RTL)","styles":"Stílus","cssClasses":"Stíluslap osztály","width":"Szélesség","height":"Magasság","align":"Igazítás","alignLeft":"Bal","alignRight":"Jobbra","alignCenter":"Középre","alignJustify":"Sorkizárt","alignTop":"Tetejére","alignMiddle":"Középre","alignBottom":"Aljára","alignNone":"None","invalidValue":"Érvénytelen érték.","invalidHeight":"A magasság mezőbe csak számokat írhat.","invalidWidth":"A szélesség mezőbe csak számokat írhat.","invalidCssLength":"\"%1\"-hez megadott érték csakis egy pozitív szám lehet, esetleg egy érvényes CSS egységgel megjelölve(px, %, in, cm, mm, em, ex, pt vagy pc).","invalidHtmlLength":"\"%1\"-hez megadott érték csakis egy pozitív szám lehet, esetleg egy érvényes HTML egységgel megjelölve(px vagy %).","invalidInlineStyle":"Az inline stílusnak megadott értéknek tartalmaznia kell egy vagy több rekordot a \"name : value\" formátumban, pontosvesszővel elválasztva.","cssLengthTooltip":"Adjon meg egy számot értéknek pixelekben vagy egy számot érvényes CSS mértékegységben (px, %, in, cm, mm, em, ex, pt, vagy pc).","unavailable":"%1<span class=\"cke_accessibility\">, nem elérhető</span>"},"about":{"copy":"Copyright &copy; $1. 
Minden jog fenntartva.","dlgTitle":"CKEditor névjegy","help":"Itt találsz segítséget: $1","moreInfo":"Licenszelési információkért kérjük látogassa meg weboldalunkat:","title":"CKEditor névjegy","userGuide":"CKEditor Felhasználói útmutató"},"basicstyles":{"bold":"Félkövér","italic":"Dőlt","strike":"Áthúzott","subscript":"Alsó index","superscript":"Felső index","underline":"Aláhúzott"},"bidi":{"ltr":"Szöveg iránya balról jobbra","rtl":"Szöveg iránya jobbról balra"},"blockquote":{"toolbar":"Idézet blokk"},"clipboard":{"copy":"Másolás","copyError":"A böngésző biztonsági beállításai nem engedélyezik a szerkesztőnek, hogy végrehajtsa a másolás műveletet. Használja az alábbi billentyűkombinációt (Ctrl/Cmd+X).","cut":"Kivágás","cutError":"A böngésző biztonsági beállításai nem engedélyezik a szerkesztőnek, hogy végrehajtsa a kivágás műveletet. Használja az alábbi billentyűkombinációt (Ctrl/Cmd+X).","paste":"Beillesztés","pasteArea":"Beszúrás mező","pasteMsg":"Másolja be az alábbi mezőbe a <STRONG>Ctrl/Cmd+V</STRONG> billentyűk lenyomásával, majd nyomjon <STRONG>Rendben</STRONG>-t.","securityMsg":"A böngésző biztonsági beállításai miatt a szerkesztő nem képes hozzáférni a vágólap adataihoz. 
Illeszd be újra ebben az ablakban.","title":"Beillesztés"},"button":{"selectedLabel":"%1 (Kiválasztva)"},"colorbutton":{"auto":"Automatikus","bgColorTitle":"Háttérszín","colors":{"000":"Fekete","800000":"Bordó","8B4513":"Barna","2F4F4F":"Sötét türkiz","008080":"Türkiz","000080":"Király kék","4B0082":"Indigó kék","696969":"Szürke","B22222":"Tégla vörös","A52A2A":"Vörös","DAA520":"Arany sárga","006400":"Sötét zöld","40E0D0":"Türkiz","0000CD":"Kék","800080":"Lila","808080":"Szürke","F00":"Piros","FF8C00":"Sötét narancs","FFD700":"Arany","008000":"Zöld","0FF":"Türkiz","00F":"Kék","EE82EE":"Rózsaszín","A9A9A9":"Sötét szürke","FFA07A":"Lazac","FFA500":"Narancs","FFFF00":"Citromsárga","00FF00":"Neon zöld","AFEEEE":"Világos türkiz","ADD8E6":"Világos kék","DDA0DD":"Világos lila","D3D3D3":"Világos szürke","FFF0F5":"Lavender Blush","FAEBD7":"Törtfehér","FFFFE0":"Világos sárga","F0FFF0":"Menta","F0FFFF":"Azúr kék","F0F8FF":"Halvány kék","E6E6FA":"Lavender","FFF":"Fehér"},"more":"További színek...","panelTitle":"Színek","textColorTitle":"Betűszín"},"colordialog":{"clear":"Ürítés","highlight":"Nagyítás","options":"Szín opciók","selected":"Kiválasztott","title":"Válasszon színt"},"templates":{"button":"Sablonok","emptyListMsg":"(Nincs sablon megadva)","insertOption":"Kicseréli a jelenlegi tartalmat","options":"Sablon opciók","selectPromptMsg":"Válassza ki melyik sablon nyíljon meg a szerkesztőben<br>(a jelenlegi tartalom elveszik):","title":"Elérhető sablonok"},"contextmenu":{"options":"Helyi menü opciók"},"div":{"IdInputLabel":"Azonosító","advisoryTitleInputLabel":"Tipp szöveg","cssClassInputLabel":"Stíluslap osztály","edit":"DIV szerkesztése","inlineStyleInputLabel":"Inline stílus","langDirLTRLabel":"Balról jobbra (LTR)","langDirLabel":"Nyelvi irány","langDirRTLLabel":"Jobbról balra (RTL)","languageCodeInputLabel":" Nyelv kódja","remove":"DIV eltávolítása","styleSelectLabel":"Stílus","title":"DIV tároló létrehozása","toolbar":"DIV tároló 
létrehozása"},"toolbar":{"toolbarCollapse":"Eszköztár összecsukása","toolbarExpand":"Eszköztár szétnyitása","toolbarGroups":{"document":"Dokumentum","clipboard":"Vágólap/Visszavonás","editing":"Szerkesztés","forms":"Űrlapok","basicstyles":"Alapstílusok","paragraph":"Bekezdés","links":"Hivatkozások","insert":"Beszúrás","styles":"Stílusok","colors":"Színek","tools":"Eszközök"},"toolbars":"Szerkesztő Eszköztár"},"elementspath":{"eleLabel":"Elem utak","eleTitle":"%1 elem"},"find":{"find":"Keresés","findOptions":"Find Options","findWhat":"Keresett szöveg:","matchCase":"kis- és nagybetű megkülönböztetése","matchCyclic":"Ciklikus keresés","matchWord":"csak ha ez a teljes szó","notFoundMsg":"A keresett szöveg nem található.","replace":"Csere","replaceAll":"Az összes cseréje","replaceSuccessMsg":"%1 egyezőség cserélve.","replaceWith":"Csere erre:","title":"Keresés és csere"},"fakeobjects":{"anchor":"Horgony","flash":"Flash animáció","hiddenfield":"Rejtett mezõ","iframe":"IFrame","unknown":"Ismeretlen objektum"},"flash":{"access":"Szkript hozzáférés","accessAlways":"Mindig","accessNever":"Soha","accessSameDomain":"Azonos domainről","alignAbsBottom":"Legaljára","alignAbsMiddle":"Közepére","alignBaseline":"Alapvonalhoz","alignTextTop":"Szöveg tetejére","bgcolor":"Háttérszín","chkFull":"Teljes képernyő engedélyezése","chkLoop":"Folyamatosan","chkMenu":"Flash menü engedélyezése","chkPlay":"Automata lejátszás","flashvars":"Flash változók","hSpace":"Vízsz. táv","properties":"Flash tulajdonságai","propertiesTab":"Tulajdonságok","quality":"Minőség","qualityAutoHigh":"Automata jó","qualityAutoLow":"Automata gyenge","qualityBest":"Legjobb","qualityHigh":"Jó","qualityLow":"Gyenge","qualityMedium":"Közepes","scale":"Méretezés","scaleAll":"Mindent mutat","scaleFit":"Teljes kitöltés","scaleNoBorder":"Keret nélkül","title":"Flash tulajdonságai","vSpace":"Függ. 
táv","validateHSpace":"A vízszintes távolsűág mezőbe csak számokat írhat.","validateSrc":"Adja meg a hivatkozás webcímét","validateVSpace":"A függőleges távolsűág mezőbe csak számokat írhat.","windowMode":"Ablak mód","windowModeOpaque":"Opaque","windowModeTransparent":"Transparent","windowModeWindow":"Window"},"font":{"fontSize":{"label":"Méret","voiceLabel":"Betűméret","panelTitle":"Méret"},"label":"Betűtípus","panelTitle":"Betűtípus","voiceLabel":"Betűtípus"},"forms":{"button":{"title":"Gomb tulajdonságai","text":"Szöveg (Érték)","type":"Típus","typeBtn":"Gomb","typeSbm":"Küldés","typeRst":"Alaphelyzet"},"checkboxAndRadio":{"checkboxTitle":"Jelölőnégyzet tulajdonságai","radioTitle":"Választógomb tulajdonságai","value":"Érték","selected":"Kiválasztott"},"form":{"title":"Űrlap tulajdonságai","menu":"Űrlap tulajdonságai","action":"Adatfeldolgozást végző hivatkozás","method":"Adatküldés módja","encoding":"Kódolás"},"hidden":{"title":"Rejtett mező tulajdonságai","name":"Név","value":"Érték"},"select":{"title":"Legördülő lista tulajdonságai","selectInfo":"Alaptulajdonságok","opAvail":"Elérhető opciók","value":"Érték","size":"Méret","lines":"sor","chkMulti":"több sor is kiválasztható","opText":"Szöveg","opValue":"Érték","btnAdd":"Hozzáad","btnModify":"Módosít","btnUp":"Fel","btnDown":"Le","btnSetValue":"Legyen az alapértelmezett érték","btnDelete":"Töröl"},"textarea":{"title":"Szövegterület tulajdonságai","cols":"Karakterek száma egy sorban","rows":"Sorok száma"},"textfield":{"title":"Szövegmező tulajdonságai","name":"Név","value":"Érték","charWidth":"Megjelenített karakterek száma","maxChars":"Maximális karakterszám","type":"Típus","typeText":"Szöveg","typePass":"Jelszó","typeEmail":"Ímél","typeSearch":"Keresés","typeTel":"Telefonszám","typeUrl":"URL"}},"format":{"label":"Formátum","panelTitle":"Formátum","tag_address":"Címsor","tag_div":"Bekezdés (DIV)","tag_h1":"Fejléc 1","tag_h2":"Fejléc 2","tag_h3":"Fejléc 3","tag_h4":"Fejléc 4","tag_h5":"Fejléc 5","tag_h6":"Fejléc 
6","tag_p":"Normál","tag_pre":"Formázott"},"horizontalrule":{"toolbar":"Elválasztóvonal beillesztése"},"iframe":{"border":"Legyen keret","noUrl":"Kérem írja be a iframe URL-t","scrolling":"Gördítősáv bekapcsolása","title":"IFrame Tulajdonságok","toolbar":"IFrame"},"image":{"alertUrl":"Töltse ki a kép webcímét","alt":"Buborék szöveg","border":"Keret","btnUpload":"Küldés a szerverre","button2Img":"A kiválasztott képgombból sima képet szeretne csinálni?","hSpace":"Vízsz. táv","img2Button":"A kiválasztott képből képgombot szeretne csinálni?","infoTab":"Alaptulajdonságok","linkTab":"Hivatkozás","lockRatio":"Arány megtartása","menu":"Kép tulajdonságai","resetSize":"Eredeti méret","title":"Kép tulajdonságai","titleButton":"Képgomb tulajdonságai","upload":"Feltöltés","urlMissing":"Hiányzik a kép URL-je","vSpace":"Függ. táv","validateBorder":"A keret méretének egész számot kell beírni!","validateHSpace":"Vízszintes távolságnak egész számot kell beírni!","validateVSpace":"Függőleges távolságnak egész számot kell beírni!"},"indent":{"indent":"Behúzás növelése","outdent":"Behúzás csökkentése"},"smiley":{"options":"Hangulatjel opciók","title":"Hangulatjel beszúrása","toolbar":"Hangulatjelek"},"justify":{"block":"Sorkizárt","center":"Középre","left":"Balra","right":"Jobbra"},"language":{"button":"Nyelv beállítása","remove":"Nyelv eltávolítása"},"link":{"acccessKey":"Billentyűkombináció","advanced":"További opciók","advisoryContentType":"Súgó tartalomtípusa","advisoryTitle":"Súgócimke","anchor":{"toolbar":"Horgony beillesztése/szerkesztése","menu":"Horgony tulajdonságai","title":"Horgony tulajdonságai","name":"Horgony neve","errorName":"Kérem adja meg a horgony nevét","remove":"Horgony eltávolítása"},"anchorId":"Azonosító szerint","anchorName":"Horgony név szerint","charset":"Hivatkozott tartalom kódlapja","cssClasses":"Stíluskészlet","emailAddress":"E-Mail cím","emailBody":"Üzenet","emailSubject":"Üzenet tárgya","id":"Id","info":"Alaptulajdonságok","langCode":"Írás 
iránya","langDir":"Írás iránya","langDirLTR":"Balról jobbra","langDirRTL":"Jobbról balra","menu":"Hivatkozás módosítása","name":"Név","noAnchors":"(Nincs horgony a dokumentumban)","noEmail":"Adja meg az E-Mail címet","noUrl":"Adja meg a hivatkozás webcímét","other":"<más>","popupDependent":"Szülőhöz kapcsolt (csak Netscape)","popupFeatures":"Felugró ablak jellemzői","popupFullScreen":"Teljes képernyő (csak IE)","popupLeft":"Bal pozíció","popupLocationBar":"Címsor","popupMenuBar":"Menü sor","popupResizable":"Átméretezés","popupScrollBars":"Gördítősáv","popupStatusBar":"Állapotsor","popupToolbar":"Eszköztár","popupTop":"Felső pozíció","rel":"Kapcsolat típusa","selectAnchor":"Horgony választása","styles":"Stílus","tabIndex":"Tabulátor index","target":"Tartalom megjelenítése","targetFrame":"<keretben>","targetFrameName":"Keret neve","targetPopup":"<felugró ablakban>","targetPopupName":"Felugró ablak neve","title":"Hivatkozás tulajdonságai","toAnchor":"Horgony az oldalon","toEmail":"E-Mail","toUrl":"URL","toolbar":"Hivatkozás beillesztése/módosítása","type":"Hivatkozás típusa","unlink":"Hivatkozás törlése","upload":"Feltöltés"},"list":{"bulletedlist":"Felsorolás","numberedlist":"Számozás"},"liststyle":{"armenian":"Örmény számozás","bulletedTitle":"Pontozott lista tulajdonságai","circle":"Kör","decimal":"Arab számozás (1, 2, 3, stb.)","decimalLeadingZero":"Számozás bevezető nullákkal (01, 02, 03, stb.)","disc":"Korong","georgian":"Grúz számozás (an, ban, gan, stb.)","lowerAlpha":"Kisbetűs (a, b, c, d, e, stb.)","lowerGreek":"Görög (alpha, beta, gamma, stb.)","lowerRoman":"Római kisbetűs (i, ii, iii, iv, v, stb.)","none":"Nincs","notset":"<Nincs beállítva>","numberedTitle":"Sorszámozott lista tulajdonságai","square":"Négyzet","start":"Kezdőszám","type":"Típus","upperAlpha":"Nagybetűs (A, B, C, D, E, stb.)","upperRoman":"Római nagybetűs (I, II, III, IV, V, stb.)","validateStartNumber":"A kezdőszám nem lehet tört érték."},"magicline":{"title":"Szúrja be a bekezdést 
ide"},"maximize":{"maximize":"Teljes méret","minimize":"Kis méret"},"newpage":{"toolbar":"Új oldal"},"pagebreak":{"alt":"Oldaltörés","toolbar":"Oldaltörés beillesztése"},"pastetext":{"button":"Beillesztés formázatlan szövegként","title":"Beillesztés formázatlan szövegként"},"pastefromword":{"confirmCleanup":"Úgy tűnik a beillesztett szöveget Word-ből másolt át. Meg szeretné tisztítani a szöveget? (ajánlott)","error":"Egy belső hiba miatt nem sikerült megtisztítani a szöveget","title":"Beillesztés Word-ből","toolbar":"Beillesztés Word-ből"},"preview":{"preview":"Előnézet"},"print":{"toolbar":"Nyomtatás"},"removeformat":{"toolbar":"Formázás eltávolítása"},"save":{"toolbar":"Mentés"},"selectall":{"toolbar":"Mindent kijelöl"},"showblocks":{"toolbar":"Blokkok megjelenítése"},"sourcearea":{"toolbar":"Forráskód"},"specialchar":{"options":"Speciális karakter opciók","title":"Speciális karakter választása","toolbar":"Speciális karakter beillesztése"},"scayt":{"btn_about":"SCAYT névjegy","btn_dictionaries":"Szótár","btn_disable":"SCAYT letiltása","btn_enable":"SCAYT engedélyezése","btn_langs":"Nyelvek","btn_options":"Beállítások","text_title":"Helyesírás ellenőrzés gépelés közben"},"stylescombo":{"label":"Stílus","panelTitle":"Formázási stílusok","panelTitle1":"Blokk stílusok","panelTitle2":"Inline stílusok","panelTitle3":"Objektum stílusok"},"table":{"border":"Szegélyméret","caption":"Felirat","cell":{"menu":"Cella","insertBefore":"Beszúrás balra","insertAfter":"Beszúrás jobbra","deleteCell":"Cellák törlése","merge":"Cellák egyesítése","mergeRight":"Cellák egyesítése jobbra","mergeDown":"Cellák egyesítése lefelé","splitHorizontal":"Cellák szétválasztása vízszintesen","splitVertical":"Cellák szétválasztása függőlegesen","title":"Cella tulajdonságai","cellType":"Cella típusa","rowSpan":"Függőleges egyesítés","colSpan":"Vízszintes egyesítés","wordWrap":"Hosszú sorok törése","hAlign":"Vízszintes igazítás","vAlign":"Függőleges 
igazítás","alignBaseline":"Alapvonalra","bgColor":"Háttér színe","borderColor":"Keret színe","data":"Adat","header":"Fejléc","yes":"Igen","no":"Nem","invalidWidth":"A szélesség mezőbe csak számokat írhat.","invalidHeight":"A magasság mezőbe csak számokat írhat.","invalidRowSpan":"A függőleges egyesítés mezőbe csak számokat írhat.","invalidColSpan":"A vízszintes egyesítés mezőbe csak számokat írhat.","chooseColor":"Válasszon"},"cellPad":"Cella belső margó","cellSpace":"Cella térköz","column":{"menu":"Oszlop","insertBefore":"Beszúrás balra","insertAfter":"Beszúrás jobbra","deleteColumn":"Oszlopok törlése"},"columns":"Oszlopok","deleteTable":"Táblázat törlése","headers":"Fejlécek","headersBoth":"Mindkettő","headersColumn":"Első oszlop","headersNone":"Nincsenek","headersRow":"Első sor","invalidBorder":"A szegélyméret mezőbe csak számokat írhat.","invalidCellPadding":"A cella belső margó mezőbe csak számokat írhat.","invalidCellSpacing":"A cella térköz mezőbe csak számokat írhat.","invalidCols":"Az oszlopok számának nagyobbnak kell lenni mint 0.","invalidHeight":"A magasság mezőbe csak számokat írhat.","invalidRows":"A sorok számának nagyobbnak kell lenni mint 0.","invalidWidth":"A szélesség mezőbe csak számokat írhat.","menu":"Táblázat tulajdonságai","row":{"menu":"Sor","insertBefore":"Beszúrás fölé","insertAfter":"Beszúrás alá","deleteRow":"Sorok törlése"},"rows":"Sorok","summary":"Leírás","title":"Táblázat tulajdonságai","toolbar":"Táblázat","widthPc":"százalék","widthPx":"képpont","widthUnit":"Szélesség egység"},"undo":{"redo":"Ismétlés","undo":"Visszavonás"},"wsc":{"btnIgnore":"Kihagyja","btnIgnoreAll":"Mindet kihagyja","btnReplace":"Csere","btnReplaceAll":"Összes cseréje","btnUndo":"Visszavonás","changeTo":"Módosítás","errorLoading":"Hiba a szolgáltatás host betöltése közben: %s.","ieSpellDownload":"A helyesírás-ellenőrző nincs telepítve. 
Szeretné letölteni most?","manyChanges":"Helyesírás-ellenőrzés kész: %1 szó cserélve","noChanges":"Helyesírás-ellenőrzés kész: Nincs változtatott szó","noMispell":"Helyesírás-ellenőrzés kész: Nem találtam hibát","noSuggestions":"Nincs javaslat","notAvailable":"Sajnálom, de a szolgáltatás jelenleg nem elérhető.","notInDic":"Nincs a szótárban","oneChange":"Helyesírás-ellenőrzés kész: Egy szó cserélve","progress":"Helyesírás-ellenőrzés folyamatban...","title":"Helyesírás ellenörző","toolbar":"Helyesírás-ellenőrzés"}};
Wainaina3/CourseWare
plugins/ckeditor/lang/hu.js
JavaScript
gpl-2.0
19,242
//file: _insn_test_shl_X1.c //op=188 #include <stdio.h> #include <stdlib.h> void func_exit(void) { printf("%s\n", __func__); exit(0); } void func_call(void) { printf("%s\n", __func__); exit(0); } unsigned long mem[2] = { 0x53094f44a07b725d, 0xe61730d0d9026b62 }; int main(void) { unsigned long a[4] = { 0, 0 }; asm __volatile__ ( "moveli r23, -28145\n" "shl16insli r23, r23, -7915\n" "shl16insli r23, r23, 2786\n" "shl16insli r23, r23, 4140\n" "moveli r19, -17474\n" "shl16insli r19, r19, 5541\n" "shl16insli r19, r19, 25238\n" "shl16insli r19, r19, 6803\n" "moveli r48, -9071\n" "shl16insli r48, r48, -18527\n" "shl16insli r48, r48, -17369\n" "shl16insli r48, r48, 13142\n" "{ fnop ; shl r23, r19, r48 }\n" "move %0, r23\n" "move %1, r19\n" "move %2, r48\n" :"=r"(a[0]),"=r"(a[1]),"=r"(a[2])); printf("%016lx\n", a[0]); printf("%016lx\n", a[1]); printf("%016lx\n", a[2]); return 0; }
acarno/slicer
valgrind/none/tests/tilegx/insn_test_shl_X1.c
C
gpl-2.0
1,309
/* Config file crap Supports various things, as virtual config entries and such crap Works surprisingly well considering how old it is ... */ #define _CRT_SECURE_NO_DEPRECATE (1) #include <errno.h> #include "cfg.h" #include "ini.h" string cfgPath; bool save_config = true; ConfigFile cfgdb; void savecfgf() { FILE* cfgfile = fopen(cfgPath.c_str(),"wt"); if (!cfgfile) { printf("Error : Unable to open file for saving \n"); } else { cfgdb.save(cfgfile); fclose(cfgfile); } } void cfgSaveStr(const wchar * Section, const wchar * Key, const wchar * String) { cfgdb.set(string(Section), string(Key), string(String)); if(save_config) { savecfgf(); } //WritePrivateProfileString(Section,Key,String,cfgPath); } //New config code /* I want config to be really flexible .. so , here is the new implementation : Functions : cfgLoadInt : Load an int , if it does not exist save the default value to it and return it cfgSaveInt : Save an int cfgLoadStr : Load a str , if it does not exist save the default value to it and return it cfgSaveStr : Save a str cfgExists : Returns true if the Section:Key exists. 
If Key is null , it retuns true if Section exists Config parameters can be read from the config file , and can be given at the command line -cfg section:key=value -> defines a value at command line If a cfgSave* is made on a value defined by command line , then the command line value is replaced by it cfg values set by command line are not written to the cfg file , unless a cfgSave* is used There are some special values , all of em are on the emu namespace :) These are readonly : emu:AppPath : Returns the path where the emulator is stored emu:PluginPath : Returns the path where the plugins are loaded from emu:DataPath : Returns the path where the bios/data files are emu:FullName : str,returns the emulator's name + version string (ex."nullDC v1.0.0 Private Beta 2 built on {datetime}") emu:ShortName : str,returns the emulator's name + version string , short form (ex."nullDC 1.0.0pb2") emu:Name : str,returns the emulator's name (ex."nullDC") These are read/write emu:Caption : str , get/set the window caption */ /////////////////////////////// /* ** This will verify there is a working file @ ./szIniFn ** - if not present, it will write defaults */ bool cfgOpen() { const char* filename = "/emu.cfg"; string config_path_read = get_readonly_config_path(filename); cfgPath = get_writable_config_path(filename); FILE* cfgfile = fopen(config_path_read.c_str(),"r"); if(cfgfile != NULL) { cfgdb.parse(cfgfile); fclose(cfgfile); } else { // Config file can't be opened int error_code = errno; printf("Warning: Unable to open the config file '%s' for reading (%s)\n", config_path_read.c_str(), strerror(error_code)); if (error_code == ENOENT || cfgPath != config_path_read) { // Config file didn't exist printf("Creating new empty config file at '%s'\n", cfgPath.c_str()); savecfgf(); } else { // There was some other error (may be a permissions problem or something like that) save_config = false; } } return true; } //Implementations of the interface :) //Section must be set //If key is 0 , 
it looks for the section //0 : not found //1 : found section , key was 0 //2 : found section & key s32 cfgExists(const wchar * Section, const wchar * Key) { if(cfgdb.has_entry(string(Section), string(Key))) { return 2; } else { return (cfgdb.has_section(string(Section)) ? 1 : 0); } } void cfgLoadStr(const wchar * Section, const wchar * Key, wchar * Return,const wchar* Default) { string value = cfgdb.get(Section, Key, Default); strcpy(Return, value.c_str()); } string cfgLoadStr(const wchar * Section, const wchar * Key, const wchar* Default) { if(!cfgdb.has_entry(string(Section), string(Key))) { cfgSaveStr(Section, Key, Default); } return cfgdb.get(string(Section), string(Key), string(Default)); } //These are helpers , mainly :) void cfgSaveInt(const wchar * Section, const wchar * Key, s32 Int) { cfgdb.set_int(string(Section), string(Key), Int); if(save_config) { savecfgf(); } } s32 cfgLoadInt(const wchar * Section, const wchar * Key,s32 Default) { if(!cfgdb.has_entry(string(Section), string(Key))) { cfgSaveInt(Section, Key, Default); } return cfgdb.get_int(string(Section), string(Key), Default); } void cfgSetVirtual(const wchar * Section, const wchar * Key, const wchar * String) { cfgdb.set(string(Section), string(Key), string(String), true); }
Alcaro/reicast-emulator
core/cfg/cfg.cpp
C++
gpl-2.0
4,521
/* * Copyright (C) 2008-2013 TrinityCore <http://www.trinitycore.org/> * Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "ScriptMgr.h" #include "ScriptedCreature.h" #include "blackrock_spire.h" enum Spells { SPELL_CROWDPUMMEL = 10887, SPELL_MIGHTYBLOW = 14099, }; enum Events { EVENT_CROWD_PUMMEL = 1, EVENT_MIGHTY_BLOW = 2, }; const Position SummonLocation = { -169.839f, -324.961f, 64.401f, 3.124f }; class boss_halycon : public CreatureScript { public: boss_halycon() : CreatureScript("boss_halycon") { } CreatureAI* GetAI(Creature* creature) const { return new boss_halyconAI(creature); } struct boss_halyconAI : public BossAI { boss_halyconAI(Creature* creature) : BossAI(creature, DATA_HALYCON) {} bool Summoned; void Reset() { _Reset(); Summoned = false; } void EnterCombat(Unit* /*who*/) { _EnterCombat(); events.ScheduleEvent(EVENT_CROWD_PUMMEL, 8 * IN_MILLISECONDS); events.ScheduleEvent(EVENT_MIGHTY_BLOW, 14 * IN_MILLISECONDS); } void JustDied(Unit* /*killer*/) { _JustDied(); } void UpdateAI(uint32 diff) { if (!UpdateVictim()) return; //Summon Gizrul if (!Summoned && HealthBelowPct(25)) { me->SummonCreature(NPC_GIZRUL_THE_SLAVENER, SummonLocation, TEMPSUMMON_TIMED_DESPAWN, 300 * IN_MILLISECONDS); Summoned = true; } events.Update(diff); if (me->HasUnitState(UNIT_STATE_CASTING)) return; while (uint32 
eventId = events.ExecuteEvent()) { switch (eventId) { case EVENT_CROWD_PUMMEL: DoCastVictim(SPELL_CROWDPUMMEL); events.ScheduleEvent(EVENT_CROWD_PUMMEL, 14 * IN_MILLISECONDS); break; case EVENT_MIGHTY_BLOW: DoCastVictim(SPELL_MIGHTYBLOW); events.ScheduleEvent(EVENT_MIGHTY_BLOW, 10 * IN_MILLISECONDS); break; } } DoMeleeAttackIfReady(); } }; }; void AddSC_boss_halycon() { new boss_halycon(); }
Darthye/OverEmu
src/server/scripts/EasternKingdoms/BlackrockSpire/boss_halycon.cpp
C++
gpl-2.0
3,146
/*jshint maxlen:10000000 */ Discourse.SiteSettingsOriginal = {"title":"Discourse Meta","logo_url":"/assets/logo.png","logo_small_url":"/assets/logo-single.png","traditional_markdown_linebreaks":false,"top_menu":"latest|new|unread|read|starred|categories","post_menu":"like|edit|flag|delete|share|bookmark|admin|reply","share_links":"twitter|facebook|google+|email","track_external_right_clicks":false,"must_approve_users":false,"ga_tracking_code":"UA-33736483-2","ga_domain_name":"","enable_long_polling":true,"polling_interval":3000,"anon_polling_interval":30000,"min_post_length":20,"max_post_length":16000,"min_topic_title_length":15,"max_topic_title_length":255,"min_private_message_title_length":2,"allow_uncategorized_topics":true,"min_search_term_length":3,"flush_timings_secs":5,"suppress_reply_directly_below":true,"email_domains_blacklist":"mailinator.com","email_domains_whitelist":null,"version_checks":true,"min_title_similar_length":10,"min_body_similar_length":15,"category_colors":"BF1E2E|F1592A|F7941D|9EB83B|3AB54A|12A89D|25AAE2|0E76BD|652D90|92278F|ED207B|8C6238|231F20|808281|B3B5B4|283890","max_upload_size_kb":1024,"category_featured_topics":6,"favicon_url":"/assets/favicon.ico","dynamic_favicon":false,"uncategorized_name":"uncategorized","uncategorized_color":"AB9364","uncategorized_text_color":"FFFFFF","invite_only":false,"login_required":false,"min_password_length":8,"enable_local_logins":true,"enable_google_logins":true,"enable_yahoo_logins":true,"enable_twitter_logins":true,"enable_facebook_logins":true,"enable_cas_logins":false,"enable_github_logins":true,"enable_persona_logins":true,"educate_until_posts":2,"topic_views_heat_low":1000,"topic_views_heat_medium":2000,"topic_views_heat_high":5000,"min_private_message_post_length":5,"faq_url":"","tos_url":"","privacy_policy_url":"","authorized_extensions":".jpg|.jpeg|.png|.gif|.txt","relative_date_duration":14,"delete_removed_posts_after":24,"delete_user_max_post_age":7, "default_code_lang": "lang-auto", 
"suppress_uncategorized_badge": true}; Discourse.SiteSettings = jQuery.extend(true, {}, Discourse.SiteSettingsOriginal);
AlphaStaxLLC/discourse
test/javascripts/fixtures/site_settings_fixtures.js
JavaScript
gpl-2.0
2,116
/* * Copyright (c) 1998-2012 Caucho Technology -- all rights reserved * * This file is part of Resin(R) Open Source * * Each copy or derived work must preserve the copyright notice and this * notice unmodified. * * Resin Open Source is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Resin Open Source is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, or any warranty * of NON-INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with Resin Open Source; if not, write to the * * Free Software Foundation, Inc. * 59 Temple Place, Suite 330 * Boston, MA 02111-1307 USA * * @author Scott Ferguson */ package com.caucho.jca.cfg; import java.util.Properties; import javax.mail.Authenticator; import javax.mail.PasswordAuthentication; import javax.mail.Session; import com.caucho.config.ConfigException; import com.caucho.config.cfg.AbstractBeanConfig; /** * Configuration for a javamail. 
*/ public class JavaMailConfig extends AbstractBeanConfig { private Properties _props = new Properties(); private Authenticator _auth; private String _user; private String _password; private Session _session; public JavaMailConfig() { setClass(Session.class); } /** * Sets the authenticator */ public void setAuthenticator(Authenticator auth) { _auth = auth; } // // well-known attributes // /** * mail.from */ public void setFrom(String from) { setProperty("mail.from", from); } /** * mail.host */ public void setHost(String host) { setProperty("mail.host", host); } /** * mail.imap.host */ public void setImapHost(String host) { setProperty("mail.imap.host", host); } /** * mail.imap.user */ public void setImapUser(String user) { setProperty("mail.imap.user", user); } /** * mail.pop3.host */ public void setPop3Host(String host) { setProperty("mail.pop3.host", host); } /** * mail.pop3.user */ public void setPop3User(String user) { setProperty("mail.pop3.user", user); } /** * mail.smtp.auth */ public void setSmtpAuth(boolean isEnable) { setProperty("mail.smtp.auth", isEnable ? 
"true" : "false"); } /** * mail.smtp.host */ public void setSmtpHost(String host) { setProperty("mail.smtp.host", host); } /** * mail.smtp.ssl */ public void setSmtpSsl(boolean ssl) { setProperty("mail.smtp.ssl", String.valueOf(ssl)); } /** * mail.smtp.port */ public void setSmtpPort(int port) { setProperty("mail.smtp.port", String.valueOf(port)); } /** * mail.smtp.user */ public void setSmtpUser(String user) { setProperty("mail.smtp.user", user); } /** * mail.store.protocol */ public void setStoreProtocol(String protocol) { setProperty("mail.store.protocol", protocol); } /** * mail.transport.protocol */ public void setTransportProtocol(String protocol) { setProperty("mail.transport.protocol", protocol); } /** * mail.user */ public void setUser(String user) { _user = user; setProperty("mail.user", user); } /** * password */ public void setPassword(String password) { _password = password; } /** * Sets an attribute. */ public void setProperty(String name, String value) { _props.put(name, value); } public void setProperties(Properties props) { _props.putAll(props); } public void setValue(Properties props) { _props.putAll(props); } @Override public void initImpl() throws ConfigException { super.initImpl(); try { if (getInit() != null) getInit().configure(this); Authenticator auth = _auth; if (auth == null && _user != null && _password != null) auth = new StandardAuthenticator(_user, _password); if (auth != null) _session = Session.getInstance(_props, auth); else _session = Session.getInstance(_props); deploy(); } catch (Exception e) { throw ConfigException.create(e); } } @Override public Object replaceObject() { return _session; } static class StandardAuthenticator extends Authenticator { private final String _userName; private final PasswordAuthentication _passwordAuth; StandardAuthenticator(String userName, String password) { _userName = userName; _passwordAuth = new PasswordAuthentication(userName, password); } @Override protected PasswordAuthentication 
getPasswordAuthentication() { return _passwordAuth; } @Override public String toString() { return getClass().getSimpleName() + "[" + _userName + "]"; } } }
bertrama/resin
modules/resin/src/com/caucho/jca/cfg/JavaMailConfig.java
Java
gpl-2.0
5,150
/* SecureClassLoader.java --- A Secure Class Loader Copyright (C) 1999 Free Software Foundation, Inc. This file is part of GNU Classpath. GNU Classpath is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GNU Classpath is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GNU Classpath; see the file COPYING. If not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. Linking this library statically or dynamically with other modules is making a combined work based on this library. Thus, the terms and conditions of the GNU General Public License cover the whole combination. As a special exception, the copyright holders of this library give you permission to link this library with independent modules to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify this library, you may extend this exception to your version of the library, but you are not obligated to do so. If you do not wish to do so, delete this exception statement from your version. */ package java.security; /** A Secure Class Loader for loading classes with additional support for specifying code source and permissions when they are retrieved by the system policy handler. 
@since JDK 1.2 @author Mark Benvenuto */ public class SecureClassLoader extends ClassLoader { protected SecureClassLoader(ClassLoader parent) { super(parent); SecurityManager sm = System.getSecurityManager(); if(sm != null) sm.checkCreateClassLoader(); } protected SecureClassLoader() { SecurityManager sm = System.getSecurityManager(); if(sm != null) sm.checkCreateClassLoader(); } /** Creates a class using an array of bytes and a CodeSource. @param name the name to give the class. null if unknown. @param b the data representing the classfile, in classfile format. @param off the offset into the data where the classfile starts. @param len the length of the classfile data in the array. @param cs the CodeSource for the class or null when unknown. @return the class that was defined and optional CodeSource. @exception ClassFormatError if the byte array is not in proper classfile format. */ protected final Class defineClass(String name, byte[] b, int off, int len, CodeSource cs) { // FIXME: Need to cache ProtectionDomains according to 1.3 docs. if (cs != null) { ProtectionDomain protectionDomain = new ProtectionDomain(cs, getPermissions(cs)); return super.defineClass(name, b, off, len, protectionDomain); } else return super.defineClass(name, b, off, len); } /** Returns a PermissionCollection for the specified CodeSource. The default implmentation invokes java.security.Policy.getPermissions. This method is called by defineClass that takes a CodeSource arguement to build a proper ProtectionDomain for the class being defined. */ protected PermissionCollection getPermissions(CodeSource cs) { Policy policy = Policy.getPolicy(); return policy.getPermissions(cs); } }
aosm/gccfast
libjava/java/security/SecureClassLoader.java
Java
gpl-2.0
3,852
#!/usr/bin/env python '''Test that an empty document doesn't break. Press ESC to exit the test. ''' __docformat__ = 'restructuredtext' __version__ = '$Id: STYLE.py 1754 2008-02-10 13:26:52Z Alex.Holkner $' __noninteractive = True import unittest from pyglet import app from pyglet import gl from pyglet import graphics from pyglet.text import caret from pyglet.text import document from pyglet.text import layout from pyglet import window from pyglet.window import key, mouse class TestWindow(window.Window): def __init__(self, doctype, *args, **kwargs): super(TestWindow, self).__init__(*args, **kwargs) self.batch = graphics.Batch() self.document = doctype() self.layout = layout.IncrementalTextLayout(self.document, self.width, self.height, batch=self.batch) def on_draw(self): gl.glClearColor(1, 1, 1, 1) self.clear() self.batch.draw() class TestCase(unittest.TestCase): def testUnformatted(self): self.window = TestWindow(document.UnformattedDocument) self.window.dispatch_events() self.window.close() def testFormatted(self): self.window = TestWindow(document.FormattedDocument) self.window.dispatch_events() self.window.close() if __name__ == '__main__': unittest.main()
shrimpboyho/flappy-bird-py
pyglet-1.1.4/tests/text/EMPTY.py
Python
gpl-2.0
1,326
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!-- NewPage --> <html lang="pt"> <head> <!-- Generated by javadoc (version 1.7.0_11) on Fri Jan 25 09:33:30 BRST 2013 --> <title>FileMappingServiceSecurity (Cyclos 3 Documentation)</title> <meta name="date" content="2013-01-25"> <link rel="stylesheet" type="text/css" href="../../../../../../../stylesheet.css" title="Style"> </head> <body> <script type="text/javascript"><!-- if (location.href.indexOf('is-external=true') == -1) { parent.document.title="FileMappingServiceSecurity (Cyclos 3 Documentation)"; } //--> </script> <noscript> <div>JavaScript is disabled on your browser.</div> </noscript> <!-- ========= START OF TOP NAVBAR ======= --> <div class="topNav"><a name="navbar_top"> <!-- --> </a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../../../../overview-summary.html">Overview</a></li> <li><a href="package-summary.html">Package</a></li> <li class="navBarCell1Rev">Class</li> <li><a href="package-tree.html">Tree</a></li> <li><a href="../../../../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../../../../index-all.html">Index</a></li> <li><a href="../../../../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li><a href="../../../../../../../nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingServiceLocal.html" title="interface in nl.strohalm.cyclos.services.accounts.external.filemapping"><span class="strong">Prev Class</span></a></li> <li>Next Class</li> </ul> <ul class="navList"> <li><a href="../../../../../../../index.html?nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingServiceSecurity.html" target="_top">Frames</a></li> <li><a href="FileMappingServiceSecurity.html" target="_top">No Frames</a></li> </ul> <ul class="navList" 
id="allclasses_navbar_top"> <li><a href="../../../../../../../allclasses-noframe.html">All Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_top"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <div> <ul class="subNavList"> <li>Summary:&nbsp;</li> <li>Nested&nbsp;|&nbsp;</li> <li><a href="#fields_inherited_from_class_nl.strohalm.cyclos.services.BaseServiceSecurity">Field</a>&nbsp;|&nbsp;</li> <li><a href="#constructor_summary">Constr</a>&nbsp;|&nbsp;</li> <li><a href="#method_summary">Method</a></li> </ul> <ul class="subNavList"> <li>Detail:&nbsp;</li> <li>Field&nbsp;|&nbsp;</li> <li><a href="#constructor_detail">Constr</a>&nbsp;|&nbsp;</li> <li><a href="#method_detail">Method</a></li> </ul> </div> <a name="skip-navbar_top"> <!-- --> </a></div> <!-- ========= END OF TOP NAVBAR ========= --> <!-- ======== START OF CLASS DATA ======== --> <div class="header"> <div class="subTitle">nl.strohalm.cyclos.services.accounts.external.filemapping</div> <h2 title="Class FileMappingServiceSecurity" class="title">Class FileMappingServiceSecurity</h2> </div> <div class="contentContainer"> <ul class="inheritance"> <li>java.lang.Object</li> <li> <ul class="inheritance"> <li><a href="../../../../../../../nl/strohalm/cyclos/services/BaseServiceSecurity.html" title="class in nl.strohalm.cyclos.services">nl.strohalm.cyclos.services.BaseServiceSecurity</a></li> <li> <ul class="inheritance"> <li>nl.strohalm.cyclos.services.accounts.external.filemapping.FileMappingServiceSecurity</li> </ul> </li> </ul> </li> </ul> <div class="description"> <ul class="blockList"> <li class="blockList"> <dl> <dt>All Implemented Interfaces:</dt> <dd><a href="../../../../../../../nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingService.html" title="interface in 
nl.strohalm.cyclos.services.accounts.external.filemapping">FileMappingService</a>, <a href="../../../../../../../nl/strohalm/cyclos/services/Service.html" title="interface in nl.strohalm.cyclos.services">Service</a>, <a href="../../../../../../../nl/strohalm/cyclos/services/ServiceSecurity.html" title="interface in nl.strohalm.cyclos.services">ServiceSecurity</a></dd> </dl> <hr> <br> <pre>public class <span class="strong">FileMappingServiceSecurity</span> extends <a href="../../../../../../../nl/strohalm/cyclos/services/BaseServiceSecurity.html" title="class in nl.strohalm.cyclos.services">BaseServiceSecurity</a> implements <a href="../../../../../../../nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingService.html" title="interface in nl.strohalm.cyclos.services.accounts.external.filemapping">FileMappingService</a></pre> <div class="block">Security implementation for <a href="../../../../../../../nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingService.html" title="interface in nl.strohalm.cyclos.services.accounts.external.filemapping"><code>FileMappingService</code></a></div> <dl><dt><span class="strong">Author:</span></dt> <dd>jcomas</dd></dl> </li> </ul> </div> <div class="summary"> <ul class="blockList"> <li class="blockList"> <!-- =========== FIELD SUMMARY =========== --> <ul class="blockList"> <li class="blockList"><a name="field_summary"> <!-- --> </a> <h3>Field Summary</h3> <ul class="blockList"> <li class="blockList"><a name="fields_inherited_from_class_nl.strohalm.cyclos.services.BaseServiceSecurity"> <!-- --> </a> <h3>Fields inherited from class&nbsp;nl.strohalm.cyclos.services.<a href="../../../../../../../nl/strohalm/cyclos/services/BaseServiceSecurity.html" title="class in nl.strohalm.cyclos.services">BaseServiceSecurity</a></h3> <code><a href="../../../../../../../nl/strohalm/cyclos/services/BaseServiceSecurity.html#fetchService">fetchService</a>, <a 
href="../../../../../../../nl/strohalm/cyclos/services/BaseServiceSecurity.html#permissionService">permissionService</a></code></li> </ul> </li> </ul> <!-- ======== CONSTRUCTOR SUMMARY ======== --> <ul class="blockList"> <li class="blockList"><a name="constructor_summary"> <!-- --> </a> <h3>Constructor Summary</h3> <table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Constructor Summary table, listing constructors, and an explanation"> <caption><span>Constructors</span><span class="tabEnd">&nbsp;</span></caption> <tr> <th class="colOne" scope="col">Constructor and Description</th> </tr> <tr class="altColor"> <td class="colOne"><code><strong><a href="../../../../../../../nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingServiceSecurity.html#FileMappingServiceSecurity()">FileMappingServiceSecurity</a></strong>()</code>&nbsp;</td> </tr> </table> </li> </ul> <!-- ========== METHOD SUMMARY =========== --> <ul class="blockList"> <li class="blockList"><a name="method_summary"> <!-- --> </a> <h3>Method Summary</h3> <table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Method Summary table, listing methods, and an explanation"> <caption><span>Methods</span><span class="tabEnd">&nbsp;</span></caption> <tr> <th class="colFirst" scope="col">Modifier and Type</th> <th class="colLast" scope="col">Method and Description</th> </tr> <tr class="altColor"> <td class="colFirst"><code><a href="../../../../../../../nl/strohalm/cyclos/entities/accounts/external/filemapping/FileMapping.html" title="class in nl.strohalm.cyclos.entities.accounts.external.filemapping">FileMapping</a></code></td> <td class="colLast"><code><strong><a href="../../../../../../../nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingServiceSecurity.html#load(java.lang.Long, nl.strohalm.cyclos.entities.Relationship...)">load</a></strong>(java.lang.Long&nbsp;id, <a 
href="../../../../../../../nl/strohalm/cyclos/entities/Relationship.html" title="interface in nl.strohalm.cyclos.entities">Relationship</a>...&nbsp;fetch)</code> <div class="block">Loads a file mapping by id</div> </td> </tr> <tr class="rowColor"> <td class="colFirst"><code>void</code></td> <td class="colLast"><code><strong><a href="../../../../../../../nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingServiceSecurity.html#reset(nl.strohalm.cyclos.entities.accounts.external.filemapping.FileMapping)">reset</a></strong>(<a href="../../../../../../../nl/strohalm/cyclos/entities/accounts/external/filemapping/FileMapping.html" title="class in nl.strohalm.cyclos.entities.accounts.external.filemapping">FileMapping</a>&nbsp;fileMapping)</code> <div class="block">Reset the file mapping reference on the external account, removing it from the database</div> </td> </tr> <tr class="altColor"> <td class="colFirst"><code><a href="../../../../../../../nl/strohalm/cyclos/entities/accounts/external/filemapping/FileMapping.html" title="class in nl.strohalm.cyclos.entities.accounts.external.filemapping">FileMapping</a></code></td> <td class="colLast"><code><strong><a href="../../../../../../../nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingServiceSecurity.html#save(nl.strohalm.cyclos.entities.accounts.external.filemapping.FileMapping)">save</a></strong>(<a href="../../../../../../../nl/strohalm/cyclos/entities/accounts/external/filemapping/FileMapping.html" title="class in nl.strohalm.cyclos.entities.accounts.external.filemapping">FileMapping</a>&nbsp;fileMapping)</code> <div class="block">Saves the file mapping</div> </td> </tr> <tr class="rowColor"> <td class="colFirst"><code>void</code></td> <td class="colLast"><code><strong><a 
href="../../../../../../../nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingServiceSecurity.html#setFileMappingServiceLocal(nl.strohalm.cyclos.services.accounts.external.filemapping.FileMappingServiceLocal)">setFileMappingServiceLocal</a></strong>(<a href="../../../../../../../nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingServiceLocal.html" title="interface in nl.strohalm.cyclos.services.accounts.external.filemapping">FileMappingServiceLocal</a>&nbsp;fileMappingService)</code>&nbsp;</td> </tr> <tr class="altColor"> <td class="colFirst"><code>void</code></td> <td class="colLast"><code><strong><a href="../../../../../../../nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingServiceSecurity.html#validate(nl.strohalm.cyclos.entities.accounts.external.filemapping.FileMapping)">validate</a></strong>(<a href="../../../../../../../nl/strohalm/cyclos/entities/accounts/external/filemapping/FileMapping.html" title="class in nl.strohalm.cyclos.entities.accounts.external.filemapping">FileMapping</a>&nbsp;fileMapping)</code> <div class="block">Validate the specified file mapping</div> </td> </tr> </table> <ul class="blockList"> <li class="blockList"><a name="methods_inherited_from_class_nl.strohalm.cyclos.services.BaseServiceSecurity"> <!-- --> </a> <h3>Methods inherited from class&nbsp;nl.strohalm.cyclos.services.<a href="../../../../../../../nl/strohalm/cyclos/services/BaseServiceSecurity.html" title="class in nl.strohalm.cyclos.services">BaseServiceSecurity</a></h3> <code><a href="../../../../../../../nl/strohalm/cyclos/services/BaseServiceSecurity.html#addToFetch(nl.strohalm.cyclos.entities.Relationship[], nl.strohalm.cyclos.entities.Relationship...)">addToFetch</a>, <a href="../../../../../../../nl/strohalm/cyclos/services/BaseServiceSecurity.html#checkHasUser()">checkHasUser</a>, <a href="../../../../../../../nl/strohalm/cyclos/services/BaseServiceSecurity.html#checkIsSystem()">checkIsSystem</a>, <a 
href="../../../../../../../nl/strohalm/cyclos/services/BaseServiceSecurity.html#getPermissionServiceLocal()">getPermissionServiceLocal</a>, <a href="../../../../../../../nl/strohalm/cyclos/services/BaseServiceSecurity.html#getSession()">getSession</a>, <a href="../../../../../../../nl/strohalm/cyclos/services/BaseServiceSecurity.html#getSessionFactory()">getSessionFactory</a>, <a href="../../../../../../../nl/strohalm/cyclos/services/BaseServiceSecurity.html#hasPermission(nl.strohalm.cyclos.access.Permission...)">hasPermission</a>, <a href="../../../../../../../nl/strohalm/cyclos/services/BaseServiceSecurity.html#hasPermissionFor(nl.strohalm.cyclos.access.Permission, nl.strohalm.cyclos.entities.Entity...)">hasPermissionFor</a>, <a href="../../../../../../../nl/strohalm/cyclos/services/BaseServiceSecurity.html#load(java.lang.Class, java.lang.Long)">load</a>, <a href="../../../../../../../nl/strohalm/cyclos/services/BaseServiceSecurity.html#setFetchServiceLocal(nl.strohalm.cyclos.services.fetch.FetchServiceLocal)">setFetchServiceLocal</a>, <a href="../../../../../../../nl/strohalm/cyclos/services/BaseServiceSecurity.html#setPermissionServiceLocal(nl.strohalm.cyclos.services.permissions.PermissionServiceLocal)">setPermissionServiceLocal</a>, <a href="../../../../../../../nl/strohalm/cyclos/services/BaseServiceSecurity.html#setSessionFactory(org.hibernate.SessionFactory)">setSessionFactory</a></code></li> </ul> <ul class="blockList"> <li class="blockList"><a name="methods_inherited_from_class_java.lang.Object"> <!-- --> </a> <h3>Methods inherited from class&nbsp;java.lang.Object</h3> <code>clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait</code></li> </ul> </li> </ul> </li> </ul> </div> <div class="details"> <ul class="blockList"> <li class="blockList"> <!-- ========= CONSTRUCTOR DETAIL ======== --> <ul class="blockList"> <li class="blockList"><a name="constructor_detail"> <!-- --> </a> <h3>Constructor Detail</h3> <a 
name="FileMappingServiceSecurity()"> <!-- --> </a> <ul class="blockListLast"> <li class="blockList"> <h4>FileMappingServiceSecurity</h4> <pre>public&nbsp;FileMappingServiceSecurity()</pre> </li> </ul> </li> </ul> <!-- ============ METHOD DETAIL ========== --> <ul class="blockList"> <li class="blockList"><a name="method_detail"> <!-- --> </a> <h3>Method Detail</h3> <a name="load(java.lang.Long, nl.strohalm.cyclos.entities.Relationship...)"> <!-- --> </a> <ul class="blockList"> <li class="blockList"> <h4>load</h4> <pre>public&nbsp;<a href="../../../../../../../nl/strohalm/cyclos/entities/accounts/external/filemapping/FileMapping.html" title="class in nl.strohalm.cyclos.entities.accounts.external.filemapping">FileMapping</a>&nbsp;load(java.lang.Long&nbsp;id, <a href="../../../../../../../nl/strohalm/cyclos/entities/Relationship.html" title="interface in nl.strohalm.cyclos.entities">Relationship</a>...&nbsp;fetch)</pre> <div class="block"><strong>Description copied from interface:&nbsp;<code><a href="../../../../../../../nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingService.html#load(java.lang.Long, nl.strohalm.cyclos.entities.Relationship...)">FileMappingService</a></code></strong></div> <div class="block">Loads a file mapping by id</div> <dl> <dt><strong>Specified by:</strong></dt> <dd><code><a href="../../../../../../../nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingService.html#load(java.lang.Long, nl.strohalm.cyclos.entities.Relationship...)">load</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../../../nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingService.html" title="interface in nl.strohalm.cyclos.services.accounts.external.filemapping">FileMappingService</a></code></dd> </dl> </li> </ul> <a name="reset(nl.strohalm.cyclos.entities.accounts.external.filemapping.FileMapping)"> <!-- --> </a> <ul class="blockList"> <li class="blockList"> <h4>reset</h4> 
<pre>public&nbsp;void&nbsp;reset(<a href="../../../../../../../nl/strohalm/cyclos/entities/accounts/external/filemapping/FileMapping.html" title="class in nl.strohalm.cyclos.entities.accounts.external.filemapping">FileMapping</a>&nbsp;fileMapping)</pre> <div class="block"><strong>Description copied from interface:&nbsp;<code><a href="../../../../../../../nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingService.html#reset(nl.strohalm.cyclos.entities.accounts.external.filemapping.FileMapping)">FileMappingService</a></code></strong></div> <div class="block">Reset the file mapping reference on the external account, removing it from the database</div> <dl> <dt><strong>Specified by:</strong></dt> <dd><code><a href="../../../../../../../nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingService.html#reset(nl.strohalm.cyclos.entities.accounts.external.filemapping.FileMapping)">reset</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../../../nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingService.html" title="interface in nl.strohalm.cyclos.services.accounts.external.filemapping">FileMappingService</a></code></dd> </dl> </li> </ul> <a name="save(nl.strohalm.cyclos.entities.accounts.external.filemapping.FileMapping)"> <!-- --> </a> <ul class="blockList"> <li class="blockList"> <h4>save</h4> <pre>public&nbsp;<a href="../../../../../../../nl/strohalm/cyclos/entities/accounts/external/filemapping/FileMapping.html" title="class in nl.strohalm.cyclos.entities.accounts.external.filemapping">FileMapping</a>&nbsp;save(<a href="../../../../../../../nl/strohalm/cyclos/entities/accounts/external/filemapping/FileMapping.html" title="class in nl.strohalm.cyclos.entities.accounts.external.filemapping">FileMapping</a>&nbsp;fileMapping)</pre> <div class="block"><strong>Description copied from interface:&nbsp;<code><a 
href="../../../../../../../nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingService.html#save(nl.strohalm.cyclos.entities.accounts.external.filemapping.FileMapping)">FileMappingService</a></code></strong></div> <div class="block">Saves the file mapping</div> <dl> <dt><strong>Specified by:</strong></dt> <dd><code><a href="../../../../../../../nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingService.html#save(nl.strohalm.cyclos.entities.accounts.external.filemapping.FileMapping)">save</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../../../nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingService.html" title="interface in nl.strohalm.cyclos.services.accounts.external.filemapping">FileMappingService</a></code></dd> </dl> </li> </ul> <a name="setFileMappingServiceLocal(nl.strohalm.cyclos.services.accounts.external.filemapping.FileMappingServiceLocal)"> <!-- --> </a> <ul class="blockList"> <li class="blockList"> <h4>setFileMappingServiceLocal</h4> <pre>public&nbsp;void&nbsp;setFileMappingServiceLocal(<a href="../../../../../../../nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingServiceLocal.html" title="interface in nl.strohalm.cyclos.services.accounts.external.filemapping">FileMappingServiceLocal</a>&nbsp;fileMappingService)</pre> </li> </ul> <a name="validate(nl.strohalm.cyclos.entities.accounts.external.filemapping.FileMapping)"> <!-- --> </a> <ul class="blockListLast"> <li class="blockList"> <h4>validate</h4> <pre>public&nbsp;void&nbsp;validate(<a href="../../../../../../../nl/strohalm/cyclos/entities/accounts/external/filemapping/FileMapping.html" title="class in nl.strohalm.cyclos.entities.accounts.external.filemapping">FileMapping</a>&nbsp;fileMapping)</pre> <div class="block"><strong>Description copied from interface:&nbsp;<code><a 
href="../../../../../../../nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingService.html#validate(nl.strohalm.cyclos.entities.accounts.external.filemapping.FileMapping)">FileMappingService</a></code></strong></div> <div class="block">Validate the specified file mapping</div> <dl> <dt><strong>Specified by:</strong></dt> <dd><code><a href="../../../../../../../nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingService.html#validate(nl.strohalm.cyclos.entities.accounts.external.filemapping.FileMapping)">validate</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../../../nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingService.html" title="interface in nl.strohalm.cyclos.services.accounts.external.filemapping">FileMappingService</a></code></dd> </dl> </li> </ul> </li> </ul> </li> </ul> </div> </div> <!-- ========= END OF CLASS DATA ========= --> <!-- ======= START OF BOTTOM NAVBAR ====== --> <div class="bottomNav"><a name="navbar_bottom"> <!-- --> </a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../../../../overview-summary.html">Overview</a></li> <li><a href="package-summary.html">Package</a></li> <li class="navBarCell1Rev">Class</li> <li><a href="package-tree.html">Tree</a></li> <li><a href="../../../../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../../../../index-all.html">Index</a></li> <li><a href="../../../../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li><a href="../../../../../../../nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingServiceLocal.html" title="interface in nl.strohalm.cyclos.services.accounts.external.filemapping"><span class="strong">Prev Class</span></a></li> <li>Next Class</li> </ul> <ul class="navList"> <li><a 
href="../../../../../../../index.html?nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingServiceSecurity.html" target="_top">Frames</a></li> <li><a href="FileMappingServiceSecurity.html" target="_top">No Frames</a></li> </ul> <ul class="navList" id="allclasses_navbar_bottom"> <li><a href="../../../../../../../allclasses-noframe.html">All Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_bottom"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <div> <ul class="subNavList"> <li>Summary:&nbsp;</li> <li>Nested&nbsp;|&nbsp;</li> <li><a href="#fields_inherited_from_class_nl.strohalm.cyclos.services.BaseServiceSecurity">Field</a>&nbsp;|&nbsp;</li> <li><a href="#constructor_summary">Constr</a>&nbsp;|&nbsp;</li> <li><a href="#method_summary">Method</a></li> </ul> <ul class="subNavList"> <li>Detail:&nbsp;</li> <li>Field&nbsp;|&nbsp;</li> <li><a href="#constructor_detail">Constr</a>&nbsp;|&nbsp;</li> <li><a href="#method_detail">Method</a></li> </ul> </div> <a name="skip-navbar_bottom"> <!-- --> </a></div> <!-- ======== END OF BOTTOM NAVBAR ======= --> </body> </html>
korczis/cyclos
docs/api/nl/strohalm/cyclos/services/accounts/external/filemapping/FileMappingServiceSecurity.html
HTML
gpl-2.0
22,591
/* Copyright (c) 2009-2011, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/errno.h> #include <linux/cpufreq.h> #include <linux/cpu.h> #include <linux/regulator/consumer.h> #include <asm/cpu.h> #include <mach/board.h> #include <mach/msm_iomap.h> #include <mach/msm_bus.h> #include <mach/msm_bus_board.h> #include <mach/socinfo.h> #include <mach/rpm-regulator.h> #include "acpuclock.h" #include "avs.h" #ifdef CONFIG_SEC_DEBUG_DCVS_LOG #include <mach/sec_debug.h> #endif /* Frequency switch modes. */ #define SHOT_SWITCH 4 #define HOP_SWITCH 5 #define SIMPLE_SLEW 6 #define COMPLEX_SLEW 7 /* PLL calibration limits. * The PLL hardware is capable of 384MHz to 1536MHz. The L_VALs * used for calibration should respect these limits. */ #define L_VAL_SCPLL_CAL_MIN 0x08 /* = 432 MHz with 27MHz source */ #if !defined(CONFIG_MSM_FORCE_MAX_CPU_TABLE) #define L_VAL_SCPLL_CAL_MAX 0x1C /* = 1512 MHz with 27MHz source */ #else #define L_VAL_SCPLL_CAL_MAX 0x21 #endif #if !defined(CONFIG_MSM_FORCE_MAX_CPU_TABLE) #define MAX_VDD_SC 1250000 /* uV */ #define MAX_VDD_MEM 1250000 /* uV */ #else #define MAX_VDD_SC 1350000 /* uV */ #define MAX_VDD_MEM 1350000 /* uV */ #endif #define MAX_VDD_DIG 1200000 /* uV */ #define MAX_AXI 310500 /* KHz */ #define SCPLL_LOW_VDD_FMAX 594000 /* KHz */ #define SCPLL_LOW_VDD 1000000 /* uV */ #define SCPLL_NOMINAL_VDD 1100000 /* uV */ /* SCPLL Modes. 
*/ #define SCPLL_POWER_DOWN 0 #define SCPLL_BYPASS 1 #define SCPLL_STANDBY 2 #define SCPLL_FULL_CAL 4 #define SCPLL_HALF_CAL 5 #define SCPLL_STEP_CAL 6 #define SCPLL_NORMAL 7 #define SCPLL_DEBUG_NONE 0 #define SCPLL_DEBUG_FULL 3 /* SCPLL registers offsets. */ #define SCPLL_DEBUG_OFFSET 0x0 #define SCPLL_CTL_OFFSET 0x4 #define SCPLL_CAL_OFFSET 0x8 #define SCPLL_STATUS_OFFSET 0x10 #define SCPLL_CFG_OFFSET 0x1C #define SCPLL_FSM_CTL_EXT_OFFSET 0x24 #define SCPLL_LUT_A_HW_MAX (0x38 + ((L_VAL_SCPLL_CAL_MAX / 4) * 4)) /* Clock registers. */ #define SPSS0_CLK_CTL_ADDR (MSM_ACC0_BASE + 0x04) #define SPSS0_CLK_SEL_ADDR (MSM_ACC0_BASE + 0x08) #define SPSS1_CLK_CTL_ADDR (MSM_ACC1_BASE + 0x04) #define SPSS1_CLK_SEL_ADDR (MSM_ACC1_BASE + 0x08) #define SPSS_L2_CLK_SEL_ADDR (MSM_GCC_BASE + 0x38) /* PTE EFUSE register. */ #define QFPROM_PTE_EFUSE_ADDR (MSM_QFPROM_BASE + 0x00C0) static const void * const clk_ctl_addr[] = {SPSS0_CLK_CTL_ADDR, SPSS1_CLK_CTL_ADDR}; static const void * const clk_sel_addr[] = {SPSS0_CLK_SEL_ADDR, SPSS1_CLK_SEL_ADDR, SPSS_L2_CLK_SEL_ADDR}; static const int rpm_vreg_voter[] = { RPM_VREG_VOTER1, RPM_VREG_VOTER2 }; static struct regulator *regulator_sc[NR_CPUS]; enum scplls { CPU0 = 0, CPU1, L2, }; static const void * const sc_pll_base[] = { [CPU0] = MSM_SCPLL_BASE + 0x200, [CPU1] = MSM_SCPLL_BASE + 0x300, [L2] = MSM_SCPLL_BASE + 0x400, }; enum sc_src { ACPU_AFAB, ACPU_PLL_8, ACPU_SCPLL, }; static struct clock_state { struct clkctl_acpu_speed *current_speed[NR_CPUS]; struct clkctl_l2_speed *current_l2_speed; spinlock_t l2_lock; struct mutex lock; } drv_state; struct clkctl_l2_speed { unsigned int khz; unsigned int src_sel; unsigned int l_val; unsigned int vdd_dig; unsigned int vdd_mem; unsigned int bw_level; }; static struct clkctl_l2_speed *l2_vote[NR_CPUS]; struct clkctl_acpu_speed { unsigned int use_for_scaling[2]; /* One for each CPU. 
*/ unsigned int acpuclk_khz; int pll; unsigned int acpuclk_src_sel; unsigned int acpuclk_src_div; unsigned int core_src_sel; unsigned int l_val; struct clkctl_l2_speed *l2_level; unsigned int vdd_sc; unsigned int avsdscr_setting; }; /* Instantaneous bandwidth requests in MB/s. */ #define BW_MBPS(_bw) \ { \ .vectors = &(struct msm_bus_vectors){ \ .src = MSM_BUS_MASTER_AMPSS_M0, \ .dst = MSM_BUS_SLAVE_EBI_CH0, \ .ib = (_bw) * 1000000ULL, \ .ab = 0, \ }, \ .num_paths = 1, \ } static struct msm_bus_paths bw_level_tbl[] = { [0] = BW_MBPS(824), /* At least 103 MHz on bus. */ [1] = BW_MBPS(1336), /* At least 167 MHz on bus. */ [2] = BW_MBPS(2008), /* At least 251 MHz on bus. */ [3] = BW_MBPS(2480), /* At least 310 MHz on bus. */ }; static struct msm_bus_scale_pdata bus_client_pdata = { .usecase = bw_level_tbl, .num_usecases = ARRAY_SIZE(bw_level_tbl), .active_only = 1, .name = "acpuclock", }; static uint32_t bus_perf_client; /* L2 frequencies = 2 * 27 MHz * L_VAL */ static struct clkctl_l2_speed l2_freq_tbl_v2[] = { [0] = { MAX_AXI, 0, 0, 1000000, 1100000, 0}, [1] = { 432000, 1, 0x08, 1000000, 1100000, 0}, [2] = { 486000, 1, 0x09, 1000000, 1100000, 0}, [3] = { 540000, 1, 0x0A, 1000000, 1100000, 0}, [4] = { 594000, 1, 0x0B, 1000000, 1100000, 0}, [5] = { 648000, 1, 0x0C, 1000000, 1100000, 1}, [6] = { 702000, 1, 0x0D, 1100000, 1100000, 1}, [7] = { 756000, 1, 0x0E, 1100000, 1100000, 1}, [8] = { 810000, 1, 0x0F, 1100000, 1100000, 1}, [9] = { 864000, 1, 0x10, 1100000, 1100000, 1}, [10] = { 918000, 1, 0x11, 1100000, 1100000, 2}, [11] = { 972000, 1, 0x12, 1100000, 1100000, 2}, [12] = {1026000, 1, 0x13, 1100000, 1100000, 2}, [13] = {1080000, 1, 0x14, 1100000, 1200000, 2}, [14] = {1134000, 1, 0x15, 1100000, 1200000, 2}, [15] = {1188000, 1, 0x16, 1200000, 1200000, 3}, [16] = {1242000, 1, 0x17, 1200000, 1212500, 3}, [17] = {1296000, 1, 0x18, 1200000, 1225000, 3}, [18] = {1350000, 1, 0x19, 1200000, 1225000, 3}, [19] = {1404000, 1, 0x1A, 1200000, 1250000, 3}, #if 
defined(CONFIG_MSM_FORCE_MAX_CPU_TABLE) [20] = {1458000, 1, 0x1B, 1200000, 1250000, 3}, [21] = {1512000, 1, 0x1C, 1200000, 1275000, 3}, #endif }; #define L2(x) (&l2_freq_tbl_v2[(x)]) /* SCPLL frequencies = 2 * 27 MHz * L_VAL */ static struct clkctl_acpu_speed acpu_freq_tbl_1188mhz[] = { { {1, 1}, 192000, ACPU_PLL_8, 3, 1, 0, 0, L2(1), 812500, 0x03006000}, /* MAX_AXI row is used to source CPU cores and L2 from the AFAB clock. */ { {0, 0}, MAX_AXI, ACPU_AFAB, 1, 0, 0, 0, L2(0), 875000, 0x03006000}, { {1, 1}, 384000, ACPU_PLL_8, 3, 0, 0, 0, L2(1), 875000, 0x03006000}, { {1, 1}, 432000, ACPU_SCPLL, 0, 0, 1, 0x08, L2(1), 887500, 0x03006000}, { {1, 1}, 486000, ACPU_SCPLL, 0, 0, 1, 0x09, L2(2), 912500, 0x03006000}, { {1, 1}, 540000, ACPU_SCPLL, 0, 0, 1, 0x0A, L2(3), 925000, 0x03006000}, { {1, 1}, 594000, ACPU_SCPLL, 0, 0, 1, 0x0B, L2(4), 937500, 0x03006000}, { {1, 1}, 648000, ACPU_SCPLL, 0, 0, 1, 0x0C, L2(5), 950000, 0x03006000}, { {1, 1}, 702000, ACPU_SCPLL, 0, 0, 1, 0x0D, L2(6), 975000, 0x03006000}, { {1, 1}, 756000, ACPU_SCPLL, 0, 0, 1, 0x0E, L2(7), 1000000, 0x03006000}, { {1, 1}, 810000, ACPU_SCPLL, 0, 0, 1, 0x0F, L2(8), 1012500, 0x03006000}, { {1, 1}, 864000, ACPU_SCPLL, 0, 0, 1, 0x10, L2(9), 1037500, 0x03006000}, { {1, 1}, 918000, ACPU_SCPLL, 0, 0, 1, 0x11, L2(10), 1062500, 0x03006000}, { {1, 1}, 972000, ACPU_SCPLL, 0, 0, 1, 0x12, L2(11), 1087500, 0x03006000}, { {1, 1}, 1026000, ACPU_SCPLL, 0, 0, 1, 0x13, L2(12), 1125000, 0x03006000}, { {1, 1}, 1080000, ACPU_SCPLL, 0, 0, 1, 0x14, L2(13), 1137500, 0x03006000}, { {1, 1}, 1134000, ACPU_SCPLL, 0, 0, 1, 0x15, L2(14), 1162500, 0x03006000}, { {1, 1}, 1188000, ACPU_SCPLL, 0, 0, 1, 0x16, L2(15), 1187500, 0x03006000}, { {0, 0}, 0 }, }; /* SCPLL frequencies = 2 * 27 MHz * L_VAL */ static struct clkctl_acpu_speed acpu_freq_tbl_slow[] = { { {1, 1}, 192000, ACPU_PLL_8, 3, 1, 0, 0, L2(1), 800000, 0x03006000}, /* MAX_AXI row is used to source CPU cores and L2 from the AFAB clock. 
*/ { {0, 0}, MAX_AXI, ACPU_AFAB, 1, 0, 0, 0, L2(0), 825000, 0x03006000}, { {1, 1}, 384000, ACPU_PLL_8, 3, 0, 0, 0, L2(1), 825000, 0x03006000}, { {1, 1}, 432000, ACPU_SCPLL, 0, 0, 1, 0x08, L2(1), 850000, 0x03006000}, { {1, 1}, 486000, ACPU_SCPLL, 0, 0, 1, 0x09, L2(2), 850000, 0x03006000}, { {1, 1}, 540000, ACPU_SCPLL, 0, 0, 1, 0x0A, L2(3), 875000, 0x03006000}, { {1, 1}, 594000, ACPU_SCPLL, 0, 0, 1, 0x0B, L2(4), 875000, 0x03006000}, { {1, 1}, 648000, ACPU_SCPLL, 0, 0, 1, 0x0C, L2(5), 900000, 0x03006000}, { {1, 1}, 702000, ACPU_SCPLL, 0, 0, 1, 0x0D, L2(6), 900000, 0x03006000}, { {1, 1}, 756000, ACPU_SCPLL, 0, 0, 1, 0x0E, L2(7), 925000, 0x03006000}, { {1, 1}, 810000, ACPU_SCPLL, 0, 0, 1, 0x0F, L2(8), 975000, 0x03006000}, { {1, 1}, 864000, ACPU_SCPLL, 0, 0, 1, 0x10, L2(9), 975000, 0x03006000}, { {1, 1}, 918000, ACPU_SCPLL, 0, 0, 1, 0x11, L2(10), 1000000, 0x03006000}, { {1, 1}, 972000, ACPU_SCPLL, 0, 0, 1, 0x12, L2(11), 1025000, 0x03006000}, { {1, 1}, 1026000, ACPU_SCPLL, 0, 0, 1, 0x13, L2(12), 1025000, 0x03006000}, { {1, 1}, 1080000, ACPU_SCPLL, 0, 0, 1, 0x14, L2(13), 1050000, 0x03006000}, { {1, 1}, 1134000, ACPU_SCPLL, 0, 0, 1, 0x15, L2(14), 1075000, 0x03006000}, { {1, 1}, 1188000, ACPU_SCPLL, 0, 0, 1, 0x16, L2(15), 1100000, 0x03006000}, { {1, 1}, 1242000, ACPU_SCPLL, 0, 0, 1, 0x17, L2(16), 1125000, 0x03006000}, { {1, 1}, 1296000, ACPU_SCPLL, 0, 0, 1, 0x18, L2(17), 1150000, 0x03006000}, { {1, 1}, 1350000, ACPU_SCPLL, 0, 0, 1, 0x19, L2(18), 1150000, 0x03006000}, { {1, 1}, 1404000, ACPU_SCPLL, 0, 0, 1, 0x1A, L2(19), 1175000, 0x03006000}, { {1, 1}, 1458000, ACPU_SCPLL, 0, 0, 1, 0x1B, L2(19), 1200000, 0x03006000}, { {1, 1}, 1512000, ACPU_SCPLL, 0, 0, 1, 0x1C, L2(19), 1225000, 0x03006000}, { {0, 0}, 0 }, }; /* SCPLL frequencies = 2 * 27 MHz * L_VAL */ static struct clkctl_acpu_speed acpu_freq_tbl_nom[] = { { {1, 1}, 192000, ACPU_PLL_8, 3, 1, 0, 0, L2(1), 800000, 0x03006000}, /* MAX_AXI row is used to source CPU cores and L2 from the AFAB clock. 
*/ { {0, 0}, MAX_AXI, ACPU_AFAB, 1, 0, 0, 0, L2(0), 825000, 0x03006000}, { {1, 1}, 384000, ACPU_PLL_8, 3, 0, 0, 0, L2(1), 825000, 0x03006000}, { {1, 1}, 432000, ACPU_SCPLL, 0, 0, 1, 0x08, L2(1), 850000, 0x03006000}, { {1, 1}, 486000, ACPU_SCPLL, 0, 0, 1, 0x09, L2(2), 850000, 0x03006000}, { {1, 1}, 540000, ACPU_SCPLL, 0, 0, 1, 0x0A, L2(3), 875000, 0x03006000}, { {1, 1}, 594000, ACPU_SCPLL, 0, 0, 1, 0x0B, L2(4), 875000, 0x03006000}, { {1, 1}, 648000, ACPU_SCPLL, 0, 0, 1, 0x0C, L2(5), 900000, 0x03006000}, { {1, 1}, 702000, ACPU_SCPLL, 0, 0, 1, 0x0D, L2(6), 900000, 0x03006000}, { {1, 1}, 756000, ACPU_SCPLL, 0, 0, 1, 0x0E, L2(7), 925000, 0x03006000}, { {1, 1}, 810000, ACPU_SCPLL, 0, 0, 1, 0x0F, L2(8), 950000, 0x03006000}, { {1, 1}, 864000, ACPU_SCPLL, 0, 0, 1, 0x10, L2(9), 975000, 0x03006000}, { {1, 1}, 918000, ACPU_SCPLL, 0, 0, 1, 0x11, L2(10), 975000, 0x03006000}, { {1, 1}, 972000, ACPU_SCPLL, 0, 0, 1, 0x12, L2(11), 1000000, 0x03006000}, { {1, 1}, 1026000, ACPU_SCPLL, 0, 0, 1, 0x13, L2(12), 1000000, 0x03006000}, { {1, 1}, 1080000, ACPU_SCPLL, 0, 0, 1, 0x14, L2(13), 1025000, 0x03006000}, { {1, 1}, 1134000, ACPU_SCPLL, 0, 0, 1, 0x15, L2(14), 1025000, 0x03006000}, { {1, 1}, 1188000, ACPU_SCPLL, 0, 0, 1, 0x16, L2(15), 1050000, 0x03006000}, { {1, 1}, 1242000, ACPU_SCPLL, 0, 0, 1, 0x17, L2(16), 1075000, 0x03006000}, { {1, 1}, 1296000, ACPU_SCPLL, 0, 0, 1, 0x18, L2(17), 1100000, 0x03006000}, { {1, 1}, 1350000, ACPU_SCPLL, 0, 0, 1, 0x19, L2(18), 1125000, 0x03006000}, { {1, 1}, 1404000, ACPU_SCPLL, 0, 0, 1, 0x1A, L2(19), 1150000, 0x03006000}, { {1, 1}, 1458000, ACPU_SCPLL, 0, 0, 1, 0x1B, L2(19), 1150000, 0x03006000}, { {1, 1}, 1512000, ACPU_SCPLL, 0, 0, 1, 0x1C, L2(19), 1175000, 0x03006000}, { {0, 0}, 0 }, }; /* SCPLL frequencies = 2 * 27 MHz * L_VAL */ static struct clkctl_acpu_speed acpu_freq_tbl_fast[] = { { {1, 1}, 192000, ACPU_PLL_8, 3, 1, 0, 0, L2(1), 800000, 0x03006000}, /* MAX_AXI row is used to source CPU cores and L2 from the AFAB clock. 
*/ { {0, 0}, MAX_AXI, ACPU_AFAB, 1, 0, 0, 0, L2(0), 825000, 0x03006000}, { {1, 1}, 384000, ACPU_PLL_8, 3, 0, 0, 0, L2(1), 825000, 0x03006000}, { {1, 1}, 432000, ACPU_SCPLL, 0, 0, 1, 0x08, L2(1), 850000, 0x03006000}, { {1, 1}, 486000, ACPU_SCPLL, 0, 0, 1, 0x09, L2(2), 850000, 0x03006000}, { {1, 1}, 540000, ACPU_SCPLL, 0, 0, 1, 0x0A, L2(3), 875000, 0x03006000}, { {1, 1}, 594000, ACPU_SCPLL, 0, 0, 1, 0x0B, L2(4), 875000, 0x03006000}, { {1, 1}, 648000, ACPU_SCPLL, 0, 0, 1, 0x0C, L2(5), 900000, 0x03006000}, { {1, 1}, 702000, ACPU_SCPLL, 0, 0, 1, 0x0D, L2(6), 900000, 0x03006000}, { {1, 1}, 756000, ACPU_SCPLL, 0, 0, 1, 0x0E, L2(7), 925000, 0x03006000}, { {1, 1}, 810000, ACPU_SCPLL, 0, 0, 1, 0x0F, L2(8), 925000, 0x03006000}, { {1, 1}, 864000, ACPU_SCPLL, 0, 0, 1, 0x10, L2(9), 950000, 0x03006000}, { {1, 1}, 918000, ACPU_SCPLL, 0, 0, 1, 0x11, L2(10), 950000, 0x03006000}, { {1, 1}, 972000, ACPU_SCPLL, 0, 0, 1, 0x12, L2(11), 950000, 0x03006000}, { {1, 1}, 1026000, ACPU_SCPLL, 0, 0, 1, 0x13, L2(12), 975000, 0x03006000}, { {1, 1}, 1080000, ACPU_SCPLL, 0, 0, 1, 0x14, L2(13), 1000000, 0x03006000}, { {1, 1}, 1134000, ACPU_SCPLL, 0, 0, 1, 0x15, L2(14), 1000000, 0x03006000}, { {1, 1}, 1188000, ACPU_SCPLL, 0, 0, 1, 0x16, L2(15), 1025000, 0x03006000}, { {1, 1}, 1242000, ACPU_SCPLL, 0, 0, 1, 0x17, L2(16), 1050000, 0x03006000}, { {1, 1}, 1296000, ACPU_SCPLL, 0, 0, 1, 0x18, L2(17), 1075000, 0x03006000}, { {1, 1}, 1350000, ACPU_SCPLL, 0, 0, 1, 0x19, L2(18), 1100000, 0x03006000}, { {1, 1}, 1404000, ACPU_SCPLL, 0, 0, 1, 0x1A, L2(19), 1100000, 0x03006000}, { {1, 1}, 1458000, ACPU_SCPLL, 0, 0, 1, 0x1B, L2(19), 1100000, 0x03006000}, { {1, 1}, 1512000, ACPU_SCPLL, 0, 0, 1, 0x1C, L2(19), 1125000, 0x03006000}, { {0, 0}, 0 }, }; #if defined(CONFIG_MSM_FORCE_MAX_CPU_TABLE) /* SCPLL frequencies = 2 * 27 MHz * L_VAL */ static struct clkctl_acpu_speed acpu_freq_tbl_max[] = { { {1, 1}, 192000, ACPU_PLL_8, 3, 1, 0, 0, L2(1), 812500, 0x03006000}, /* MAX_AXI row is used to source CPU cores and L2 from the 
AFAB clock. */ { {0, 0}, MAX_AXI, ACPU_AFAB, 1, 0, 0, 0, L2(0), 875000, 0x03006000}, { {1, 1}, 384000, ACPU_PLL_8, 3, 0, 0, 0, L2(1), 875000, 0x03006000}, { {1, 1}, 432000, ACPU_SCPLL, 0, 0, 1, 0x08, L2(1), 887500, 0x03006000}, { {1, 1}, 486000, ACPU_SCPLL, 0, 0, 1, 0x09, L2(2), 912500, 0x03006000}, { {1, 1}, 540000, ACPU_SCPLL, 0, 0, 1, 0x0A, L2(3), 925000, 0x03006000}, { {1, 1}, 594000, ACPU_SCPLL, 0, 0, 1, 0x0B, L2(4), 937500, 0x03006000}, { {1, 1}, 648000, ACPU_SCPLL, 0, 0, 1, 0x0C, L2(5), 950000, 0x03006000}, { {1, 1}, 702000, ACPU_SCPLL, 0, 0, 1, 0x0D, L2(6), 975000, 0x03006000}, { {1, 1}, 756000, ACPU_SCPLL, 0, 0, 1, 0x0E, L2(7), 1000000, 0x03006000}, { {1, 1}, 810000, ACPU_SCPLL, 0, 0, 1, 0x0F, L2(8), 1012500, 0x03006000}, { {1, 1}, 864000, ACPU_SCPLL, 0, 0, 1, 0x10, L2(9), 1037500, 0x03006000}, { {1, 1}, 918000, ACPU_SCPLL, 0, 0, 1, 0x11, L2(10), 1037500, 0x03006000}, { {1, 1}, 972000, ACPU_SCPLL, 0, 0, 1, 0x12, L2(11), 1037500, 0x03006000}, { {1, 1}, 1026000, ACPU_SCPLL, 0, 0, 1, 0x13, L2(12), 1050000, 0x03006000}, { {1, 1}, 1080000, ACPU_SCPLL, 0, 0, 1, 0x14, L2(13), 1062500, 0x03006000}, { {1, 1}, 1134000, ACPU_SCPLL, 0, 0, 1, 0x15, L2(14), 1075000, 0x03006000}, { {1, 1}, 1188000, ACPU_SCPLL, 0, 0, 1, 0x16, L2(15), 1087500, 0x03006000}, { {1, 1}, 1242000, ACPU_SCPLL, 0, 0, 1, 0x17, L2(16), 1100000, 0x03006000}, { {1, 1}, 1296000, ACPU_SCPLL, 0, 0, 1, 0x18, L2(17), 1125000, 0x03006000}, { {1, 1}, 1350000, ACPU_SCPLL, 0, 0, 1, 0x19, L2(18), 1150000, 0x03006000}, { {1, 1}, 1404000, ACPU_SCPLL, 0, 0, 1, 0x1A, L2(19), 1175000, 0x03006000}, { {1, 1}, 1458000, ACPU_SCPLL, 0, 0, 1, 0x1B, L2(19), 1187500, 0x03006000}, { {1, 1}, 1512000, ACPU_SCPLL, 0, 0, 1, 0x1C, L2(19), 1200000, 0x03006000}, { {1, 1}, 1566000, ACPU_SCPLL, 0, 0, 1, 0x1D, L2(20), 1225000, 0x03006000}, { {1, 1}, 1620000, ACPU_SCPLL, 0, 0, 1, 0x1E, L2(20), 1250000, 0x03006000}, { {1, 1}, 1674000, ACPU_SCPLL, 0, 0, 1, 0x1F, L2(20), 1275000, 0x03006000}, { {1, 1}, 1728000, ACPU_SCPLL, 0, 0, 1, 0x20, 
L2(21), 1300000, 0x03006000},
	{ {1, 1}, 1782000, ACPU_SCPLL, 0, 0, 1, 0x21, L2(21), 1325000, 0x03006000},
	{ {0, 0}, 0 },
};
#endif

/* acpu_freq_tbl row to use when reconfiguring SC/L2 PLLs. */
#define CAL_IDX 1

/* Selected at init time based on the speed-bin fuses; see acpu_freq_tbl_*. */
static struct clkctl_acpu_speed *acpu_freq_tbl;
static struct clkctl_l2_speed *l2_freq_tbl = l2_freq_tbl_v2;
static unsigned int l2_freq_tbl_size = ARRAY_SIZE(l2_freq_tbl_v2);

/* Return the current clock rate of the given CPU core, in KHz. */
static unsigned long acpuclk_8x60_get_rate(int cpu)
{
	return drv_state.current_speed[cpu]->acpuclk_khz;
}

/*
 * Program the core-source mux for a clock domain (CPU0, CPU1 or L2).
 * The 2-bit select field sits at bit 0 of the CLK_SEL register for the
 * L2 and at bit 1 for the Scorpion cores.
 */
static void select_core_source(unsigned int id, unsigned int src)
{
	uint32_t regval;
	int shift;

	shift = (id == L2) ? 0 : 1;
	regval = readl_relaxed(clk_sel_addr[id]);
	regval &= ~(0x3 << shift);
	regval |= (src << shift);
	writel_relaxed(regval, clk_sel_addr[id]);
}

/*
 * Program the non-SCPLL source and divider for a core using the banked
 * CLK_CTL register: write the inactive bank, then flip the bank-select
 * bit so the switch is glitch-free.  Only applies when the core source
 * select (core_src_sel) is 0, i.e. the divided-source path is in use.
 */
static void select_clk_source_div(unsigned int id, struct clkctl_acpu_speed *s)
{
	uint32_t reg_clksel, reg_clkctl, src_sel;

	/* Configure the PLL divider mux if we plan to use it. */
	if (s->core_src_sel == 0) {
		reg_clksel = readl_relaxed(clk_sel_addr[id]);

		/* CLK_SEL_SRC1N0 (bank) bit. */
		src_sel = reg_clksel & 1;

		/* Program clock source and divider. */
		reg_clkctl = readl_relaxed(clk_ctl_addr[id]);
		reg_clkctl &= ~(0xFF << (8 * src_sel));
		reg_clkctl |= s->acpuclk_src_sel << (4 + 8 * src_sel);
		reg_clkctl |= s->acpuclk_src_div << (0 + 8 * src_sel);
		writel_relaxed(reg_clkctl, clk_ctl_addr[id]);

		/* Toggle clock source. */
		reg_clksel ^= 1;

		/* Program clock source selection. */
		writel_relaxed(reg_clksel, clk_sel_addr[id]);
	}
}

/*
 * Bring an SCPLL out of power-down and lock it at the frequency given
 * by l_val (f = 2 * 27 MHz * l_val).  The udelay()s satisfy the PLL's
 * documented settling times — presumably per the SCPLL datasheet; the
 * exact values are hardware requirements, do not shorten them.
 */
static void scpll_enable(int sc_pll, uint32_t l_val)
{
	uint32_t regval;

	/* Power-up SCPLL into standby mode. */
	writel_relaxed(SCPLL_STANDBY, sc_pll_base[sc_pll] + SCPLL_CTL_OFFSET);
	mb();
	udelay(10);

	/* Shot-switch to target frequency. */
	regval = (l_val << 3) | SHOT_SWITCH;
	writel_relaxed(regval, sc_pll_base[sc_pll] + SCPLL_FSM_CTL_EXT_OFFSET);
	writel_relaxed(SCPLL_NORMAL, sc_pll_base[sc_pll] + SCPLL_CTL_OFFSET);
	mb();
	udelay(20);
}

static void scpll_disable(int sc_pll)
{
	/* Power down SCPLL.
 */
	writel_relaxed(SCPLL_POWER_DOWN,
		       sc_pll_base[sc_pll] + SCPLL_CTL_OFFSET);
}

/*
 * Slew an already-running SCPLL to a new l_val without dropping the
 * output clock.  Busy-waits (cpu_relax) until the hardware reports the
 * switch has both started and finished.
 */
static void scpll_change_freq(int sc_pll, uint32_t l_val)
{
	uint32_t regval;
	const void *base_addr = sc_pll_base[sc_pll];

	/* Complex-slew switch to target frequency. */
	regval = (l_val << 3) | COMPLEX_SLEW;
	writel_relaxed(regval, base_addr + SCPLL_FSM_CTL_EXT_OFFSET);
	writel_relaxed(SCPLL_NORMAL, base_addr + SCPLL_CTL_OFFSET);

	/* Wait for frequency switch to start. */
	while (((readl_relaxed(base_addr + SCPLL_CTL_OFFSET) >> 3) & 0x3F)
			!= l_val)
		cpu_relax();

	/* Wait for frequency switch to finish. */
	while (readl_relaxed(base_addr + SCPLL_STATUS_OFFSET) & 0x1)
		cpu_relax();
}

/* Vote for the L2 speed and return the speed that should be applied.
 * Records this CPU's vote in l2_vote[] and returns the maximum vote
 * across all present CPUs (the L2 must run fast enough for every core).
 * Caller must hold drv_state.l2_lock. */
static struct clkctl_l2_speed *compute_l2_speed(unsigned int voting_cpu,
						struct clkctl_l2_speed *tgt_s)
{
	struct clkctl_l2_speed *new_s;
	int cpu;

	/* Bounds check. */
	BUG_ON(tgt_s >= (l2_freq_tbl + l2_freq_tbl_size));

	/* Find max L2 speed vote. */
	l2_vote[voting_cpu] = tgt_s;
	new_s = l2_freq_tbl;
	for_each_present_cpu(cpu)
		new_s = max(new_s, l2_vote[cpu]);

	return new_s;
}

/* Set the L2's clock speed.
 * SCPLL-to-SCPLL changes slew in place; otherwise the L2 is moved
 * between the AFAB source and the SCPLL, enabling the PLL before
 * switching onto it and disabling it only after switching away.
 * Caller must hold drv_state.l2_lock. */
static void set_l2_speed(struct clkctl_l2_speed *tgt_s)
{
	if (tgt_s == drv_state.current_l2_speed)
		return;

	if (drv_state.current_l2_speed->src_sel == 1 && tgt_s->src_sel == 1)
		scpll_change_freq(L2, tgt_s->l_val);
	else {
		if (tgt_s->src_sel == 1) {
			scpll_enable(L2, tgt_s->l_val);
			mb();
			select_core_source(L2, tgt_s->src_sel);
		} else {
			select_core_source(L2, tgt_s->src_sel);
			mb();
			scpll_disable(L2);
		}
	}
	drv_state.current_l2_speed = tgt_s;
}

/* Update the bus bandwidth request.
 * bw indexes bw_level_tbl[]; out-of-range requests are logged and
 * dropped rather than propagated to the bus driver. */
static void set_bus_bw(unsigned int bw)
{
	int ret;

	/* Bounds check. */
	if (bw >= ARRAY_SIZE(bw_level_tbl)) {
		pr_err("%s: invalid bandwidth request (%d)\n", __func__, bw);
		return;
	}

	/* Update bandwidth if request has changed. This may sleep.
 */
	ret = msm_bus_scale_client_update_request(bus_perf_client, bw);
	if (ret)
		pr_err("%s: bandwidth request failed (%d)\n", __func__, ret);

	return;
}

/* Apply any per-cpu voltage increases.
 * Ordering is deliberate: vdd_mem is raised first because it must stay
 * >= both vdd_sc and vdd_dig at all times.  Returns 0 on success or the
 * first regulator error; earlier increases are NOT rolled back on
 * failure (a higher-than-needed rail is safe). */
static int increase_vdd(int cpu, unsigned int vdd_sc, unsigned int vdd_mem,
			unsigned int vdd_dig, enum setrate_reason reason)
{
	int rc = 0;

	/* Increase vdd_mem active-set before vdd_dig and vdd_sc.
	 * vdd_mem should be >= both vdd_sc and vdd_dig. */
	rc = rpm_vreg_set_voltage(RPM_VREG_ID_PM8058_S0, rpm_vreg_voter[cpu],
				  vdd_mem, MAX_VDD_MEM, 0);
	if (rc) {
		pr_err("%s: vdd_mem (cpu%d) increase failed (%d)\n",
			__func__, cpu, rc);
		return rc;
	}

	/* Increase vdd_dig active-set vote. */
	rc = rpm_vreg_set_voltage(RPM_VREG_ID_PM8058_S1, rpm_vreg_voter[cpu],
				  vdd_dig, MAX_VDD_DIG, 0);
	if (rc) {
		pr_err("%s: vdd_dig (cpu%d) increase failed (%d)\n",
			__func__, cpu, rc);
		return rc;
	}

	/* Don't update the Scorpion voltage in the hotplug path.  It should
	 * already be correct.  Attempting to set it is bad because we don't
	 * know what CPU we are running on at this point, but the Scorpion
	 * regulator API requires we call it from the affected CPU. */
	if (reason == SETRATE_HOTPLUG)
		return rc;

	/* Update per-core Scorpion voltage. */
	rc = regulator_set_voltage(regulator_sc[cpu], vdd_sc, MAX_VDD_SC);
	if (rc) {
		pr_err("%s: vdd_sc (cpu%d) increase failed (%d)\n",
			__func__, cpu, rc);
		return rc;
	}

	return rc;
}

/* Apply any per-cpu voltage decreases.
 * Mirror image of increase_vdd(): vdd_sc and vdd_dig drop before
 * vdd_mem.  Failures are logged and abort the remaining decreases,
 * leaving rails high — safe, just not power-optimal. */
static void decrease_vdd(int cpu, unsigned int vdd_sc, unsigned int vdd_mem,
			 unsigned int vdd_dig, enum setrate_reason reason)
{
	int ret;

	/* Update per-core Scorpion voltage.  This must be called on the CPU
	 * that's being affected.  Don't do this in the hotplug remove path,
	 * where the rail is off and we're executing on the other CPU. */
	if (reason != SETRATE_HOTPLUG) {
		ret = regulator_set_voltage(regulator_sc[cpu], vdd_sc,
					    MAX_VDD_SC);
		if (ret) {
			pr_err("%s: vdd_sc (cpu%d) decrease failed (%d)\n",
				__func__, cpu, ret);
			return;
		}
	}

	/* Decrease vdd_dig active-set vote.
*/ ret = rpm_vreg_set_voltage(RPM_VREG_ID_PM8058_S1, rpm_vreg_voter[cpu], vdd_dig, MAX_VDD_DIG, 0); if (ret) { pr_err("%s: vdd_dig (cpu%d) decrease failed (%d)\n", __func__, cpu, ret); return; } /* Decrease vdd_mem active-set after vdd_dig and vdd_sc. * vdd_mem should be >= both vdd_sc and vdd_dig. */ ret = rpm_vreg_set_voltage(RPM_VREG_ID_PM8058_S0, rpm_vreg_voter[cpu], vdd_mem, MAX_VDD_MEM, 0); if (ret) { pr_err("%s: vdd_mem (cpu%d) decrease failed (%d)\n", __func__, cpu, ret); return; } } static void switch_sc_speed(int cpu, struct clkctl_acpu_speed *tgt_s) { struct clkctl_acpu_speed *strt_s = drv_state.current_speed[cpu]; if (strt_s->pll != ACPU_SCPLL && tgt_s->pll != ACPU_SCPLL) { select_clk_source_div(cpu, tgt_s); /* Select core source because target may be AFAB. */ select_core_source(cpu, tgt_s->core_src_sel); } else if (strt_s->pll != ACPU_SCPLL && tgt_s->pll == ACPU_SCPLL) { scpll_enable(cpu, tgt_s->l_val); mb(); select_core_source(cpu, tgt_s->core_src_sel); } else if (strt_s->pll == ACPU_SCPLL && tgt_s->pll != ACPU_SCPLL) { select_clk_source_div(cpu, tgt_s); select_core_source(cpu, tgt_s->core_src_sel); /* Core source switch must complete before disabling SCPLL. */ mb(); udelay(1); scpll_disable(cpu); } else scpll_change_freq(cpu, tgt_s->l_val); /* Update the driver state with the new clock freq */ drv_state.current_speed[cpu] = tgt_s; } static int acpuclk_8x60_set_rate(int cpu, unsigned long rate, enum setrate_reason reason) { struct clkctl_acpu_speed *tgt_s, *strt_s; struct clkctl_l2_speed *tgt_l2; unsigned int vdd_mem, vdd_dig, pll_vdd_dig; unsigned long flags; int rc = 0; if (cpu > num_possible_cpus()) { rc = -EINVAL; goto out; } if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG) mutex_lock(&drv_state.lock); strt_s = drv_state.current_speed[cpu]; /* Return early if rate didn't change. */ if (rate == strt_s->acpuclk_khz) goto out; /* Find target frequency. 
*/ for (tgt_s = acpu_freq_tbl; tgt_s->acpuclk_khz != 0; tgt_s++) if (tgt_s->acpuclk_khz == rate) break; if (tgt_s->acpuclk_khz == 0) { rc = -EINVAL; goto out; } /* AVS needs SAW_VCTL to be intitialized correctly, before enable, * and is not initialized at acpuclk_init(). */ if (reason == SETRATE_CPUFREQ) AVS_DISABLE(cpu); /* Calculate vdd_mem and vdd_dig requirements. * vdd_mem must be >= vdd_sc */ vdd_mem = max(tgt_s->vdd_sc, tgt_s->l2_level->vdd_mem); /* Factor-in PLL vdd_dig requirements. */ if ((tgt_s->l2_level->khz > SCPLL_LOW_VDD_FMAX) || (tgt_s->pll == ACPU_SCPLL && tgt_s->acpuclk_khz > SCPLL_LOW_VDD_FMAX)) pll_vdd_dig = SCPLL_NOMINAL_VDD; else pll_vdd_dig = SCPLL_LOW_VDD; vdd_dig = max(tgt_s->l2_level->vdd_dig, pll_vdd_dig); /* Increase VDD levels if needed. */ if ((reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG || reason == SETRATE_INIT) && (tgt_s->acpuclk_khz > strt_s->acpuclk_khz)) { rc = increase_vdd(cpu, tgt_s->vdd_sc, vdd_mem, vdd_dig, reason); if (rc) goto out; } pr_debug("Switching from ACPU%d rate %u KHz -> %u KHz\n", cpu, strt_s->acpuclk_khz, tgt_s->acpuclk_khz); #ifdef CONFIG_SEC_DEBUG_DCVS_LOG sec_debug_dcvs_log(cpu, strt_s->acpuclk_khz, tgt_s->acpuclk_khz); #endif /* Switch CPU speed. */ switch_sc_speed(cpu, tgt_s); /* Update the L2 vote and apply the rate change. */ spin_lock_irqsave(&drv_state.l2_lock, flags); tgt_l2 = compute_l2_speed(cpu, tgt_s->l2_level); set_l2_speed(tgt_l2); spin_unlock_irqrestore(&drv_state.l2_lock, flags); /* Nothing else to do for SWFI. */ if (reason == SETRATE_SWFI) goto out; /* Nothing else to do for power collapse. */ if (reason == SETRATE_PC) goto out; /* Update bus bandwith request. */ set_bus_bw(tgt_l2->bw_level); /* Drop VDD levels if we can. 
*/ if (tgt_s->acpuclk_khz < strt_s->acpuclk_khz) decrease_vdd(cpu, tgt_s->vdd_sc, vdd_mem, vdd_dig, reason); pr_debug("ACPU%d speed change complete\n", cpu); /* Re-enable AVS */ if (reason == SETRATE_CPUFREQ) AVS_ENABLE(cpu, tgt_s->avsdscr_setting); out: if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG) mutex_unlock(&drv_state.lock); return rc; } static void __init scpll_init(int sc_pll) { uint32_t regval; pr_debug("Initializing SCPLL%d\n", sc_pll); /* Clear calibration LUT registers containing max frequency entry. * LUT registers are only writeable in debug mode. */ writel_relaxed(SCPLL_DEBUG_FULL, sc_pll_base[sc_pll] + SCPLL_DEBUG_OFFSET); writel_relaxed(0x0, sc_pll_base[sc_pll] + SCPLL_LUT_A_HW_MAX); writel_relaxed(SCPLL_DEBUG_NONE, sc_pll_base[sc_pll] + SCPLL_DEBUG_OFFSET); /* Power-up SCPLL into standby mode. */ writel_relaxed(SCPLL_STANDBY, sc_pll_base[sc_pll] + SCPLL_CTL_OFFSET); mb(); udelay(10); /* Calibrate the SCPLL to the maximum range supported by the h/w. We * might not use the full range of calibrated frequencies, but this * simplifies changes required for future increases in max CPU freq. */ regval = (L_VAL_SCPLL_CAL_MAX << 24) | (L_VAL_SCPLL_CAL_MIN << 16); writel_relaxed(regval, sc_pll_base[sc_pll] + SCPLL_CAL_OFFSET); /* Start calibration */ writel_relaxed(SCPLL_FULL_CAL, sc_pll_base[sc_pll] + SCPLL_CTL_OFFSET); /* Wait for proof that calibration has started before checking the * 'calibration done' bit in the status register. Waiting for the * LUT register we cleared to contain data accomplishes this. * This is required since the 'calibration done' bit takes time to * transition from 'done' to 'not done' when starting a calibration. */ while (readl_relaxed(sc_pll_base[sc_pll] + SCPLL_LUT_A_HW_MAX) == 0) cpu_relax(); /* Wait for calibration to complete. */ while (readl_relaxed(sc_pll_base[sc_pll] + SCPLL_STATUS_OFFSET) & 0x2) cpu_relax(); /* Power-down SCPLL. 
*/ scpll_disable(sc_pll); } /* Force ACPU core and L2 cache clocks to rates that don't require SCPLLs. */ static void __init unselect_scplls(void) { int cpu; /* Ensure CAL_IDX frequency uses AFAB sources for CPU cores and L2. */ BUG_ON(acpu_freq_tbl[CAL_IDX].core_src_sel != 0); BUG_ON(acpu_freq_tbl[CAL_IDX].l2_level->src_sel != 0); for_each_possible_cpu(cpu) { select_clk_source_div(cpu, &acpu_freq_tbl[CAL_IDX]); select_core_source(cpu, acpu_freq_tbl[CAL_IDX].core_src_sel); drv_state.current_speed[cpu] = &acpu_freq_tbl[CAL_IDX]; l2_vote[cpu] = acpu_freq_tbl[CAL_IDX].l2_level; } select_core_source(L2, acpu_freq_tbl[CAL_IDX].l2_level->src_sel); drv_state.current_l2_speed = acpu_freq_tbl[CAL_IDX].l2_level; } /* Ensure SCPLLs use the 27MHz PXO. */ static void __init scpll_set_refs(void) { int cpu; uint32_t regval; /* Bit 4 = 0:PXO, 1:MXO. */ for_each_possible_cpu(cpu) { regval = readl_relaxed(sc_pll_base[cpu] + SCPLL_CFG_OFFSET); regval &= ~BIT(4); writel_relaxed(regval, sc_pll_base[cpu] + SCPLL_CFG_OFFSET); } regval = readl_relaxed(sc_pll_base[L2] + SCPLL_CFG_OFFSET); regval &= ~BIT(4); writel_relaxed(regval, sc_pll_base[L2] + SCPLL_CFG_OFFSET); } /* Voltage regulator initialization. */ static void __init regulator_init(void) { struct clkctl_acpu_speed **freq = drv_state.current_speed; const char *regulator_sc_name[] = {"8901_s0", "8901_s1"}; int cpu, ret; for_each_possible_cpu(cpu) { /* VDD_SC0, VDD_SC1 */ regulator_sc[cpu] = regulator_get(NULL, regulator_sc_name[cpu]); if (IS_ERR(regulator_sc[cpu])) goto err; ret = regulator_set_voltage(regulator_sc[cpu], freq[cpu]->vdd_sc, MAX_VDD_SC); if (ret) goto err; ret = regulator_enable(regulator_sc[cpu]); if (ret) goto err; } return; err: pr_err("%s: Failed to initialize voltage regulators\n", __func__); BUG(); } /* Register with bus driver. 
*/ static void __init bus_init(void) { bus_perf_client = msm_bus_scale_register_client(&bus_client_pdata); if (!bus_perf_client) { pr_err("%s: unable register bus client\n", __func__); BUG(); } } #ifdef CONFIG_CPU_FREQ_MSM static struct cpufreq_frequency_table freq_table[NR_CPUS][30]; static void __init cpufreq_table_init(void) { int cpu; for_each_possible_cpu(cpu) { int i, freq_cnt = 0; /* Construct the freq_table tables from acpu_freq_tbl. */ for (i = 0; acpu_freq_tbl[i].acpuclk_khz != 0 && freq_cnt < ARRAY_SIZE(*freq_table); i++) { if (acpu_freq_tbl[i].use_for_scaling[cpu]) { freq_table[cpu][freq_cnt].index = freq_cnt; freq_table[cpu][freq_cnt].frequency = acpu_freq_tbl[i].acpuclk_khz; freq_cnt++; } } /* freq_table not big enough to store all usable freqs. */ BUG_ON(acpu_freq_tbl[i].acpuclk_khz != 0); freq_table[cpu][freq_cnt].index = freq_cnt; freq_table[cpu][freq_cnt].frequency = CPUFREQ_TABLE_END; pr_info("CPU%d: %d scaling frequencies supported.\n", cpu, freq_cnt); /* Register table with CPUFreq. */ cpufreq_frequency_table_get_attr(freq_table[cpu], cpu); } } #else static void __init cpufreq_table_init(void) {} #endif #define HOT_UNPLUG_KHZ MAX_AXI static int __cpuinit acpuclock_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { static int prev_khz[NR_CPUS]; int cpu = (int)hcpu; switch (action) { case CPU_DEAD: case CPU_DEAD_FROZEN: prev_khz[cpu] = acpuclk_8x60_get_rate(cpu); /* Fall through. 
*/ case CPU_UP_CANCELED: case CPU_UP_CANCELED_FROZEN: acpuclk_8x60_set_rate(cpu, HOT_UNPLUG_KHZ, SETRATE_HOTPLUG); break; case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: if (WARN_ON(!prev_khz[cpu])) return NOTIFY_BAD; acpuclk_8x60_set_rate(cpu, prev_khz[cpu], SETRATE_HOTPLUG); break; default: break; } return NOTIFY_OK; } static struct notifier_block __cpuinitdata acpuclock_cpu_notifier = { .notifier_call = acpuclock_cpu_callback, }; static unsigned int __init select_freq_plan(void) { uint32_t max_khz, initial_khz; struct clkctl_acpu_speed *f; #ifndef CONFIG_MSM_FORCE_MAX_CPU_TABLE uint32_t pte_efuse, speed_bin, pvs; pte_efuse = readl_relaxed(QFPROM_PTE_EFUSE_ADDR); speed_bin = pte_efuse & 0xF; if (speed_bin == 0xF) speed_bin = (pte_efuse >> 4) & 0xF; if (speed_bin == 0x1) { max_khz = MAX_FREQ_LIMIT; pvs = (pte_efuse >> 10) & 0x7; if (pvs == 0x7) pvs = (pte_efuse >> 13) & 0x7; switch (pvs) { case 0x0: case 0x7: acpu_freq_tbl = acpu_freq_tbl_slow; pr_info("ACPU PVS: Slow\n"); break; case 0x1: acpu_freq_tbl = acpu_freq_tbl_nom; pr_info("ACPU PVS: Nominal\n"); break; case 0x3: acpu_freq_tbl = acpu_freq_tbl_fast; pr_info("ACPU PVS: Fast\n"); break; default: acpu_freq_tbl = acpu_freq_tbl_slow; pr_warn("ACPU PVS: Unknown. Defaulting to slow.\n"); break; } } else if (speed_bin == 0x0 ) { max_khz = MAX_FREQ_LIMIT; pvs = (pte_efuse >> 10) & 0x7; if (pvs == 0x7) pvs = (pte_efuse >> 13) & 0x7; switch (pvs) { case 0x0: acpu_freq_tbl = acpu_freq_tbl_nom; pr_info("ACPU PVS: Nominal\n"); break; default: acpu_freq_tbl = acpu_freq_tbl_slow; pr_warn("ACPU PVS: Unknown. Defaulting to slow.\n"); break; } } else { max_khz = 1188000; acpu_freq_tbl = acpu_freq_tbl_1188mhz; } #else max_khz = 1782000; acpu_freq_tbl = acpu_freq_tbl_max; initial_khz = 1512000; // acpu_freq_tbl = acpu_freq_tbl_fast; #endif /* Truncate the table based to max_khz. 
*/ for (f = acpu_freq_tbl; f->acpuclk_khz != 0; f++) { if (f->acpuclk_khz > max_khz) { f->acpuclk_khz = 0; break; } } f--; pr_info("Max ACPU freq: %u KHz\n", f->acpuclk_khz); /* set initial khz */ for (f = acpu_freq_tbl; f->acpuclk_khz != 0; f++) { if (f->acpuclk_khz > initial_khz) { break; } } f--; pr_info("Initial ACPU freq: %u KHz\n", f->acpuclk_khz); return f->acpuclk_khz; } static struct acpuclk_data acpuclk_8x60_data = { .set_rate = acpuclk_8x60_set_rate, .get_rate = acpuclk_8x60_get_rate, .power_collapse_khz = MAX_AXI, .wait_for_irq_khz = MAX_AXI, }; static int __init acpuclk_8x60_init(struct acpuclk_soc_data *soc_data) { unsigned int max_cpu_khz; int cpu; mutex_init(&drv_state.lock); spin_lock_init(&drv_state.l2_lock); /* Configure hardware. */ max_cpu_khz = select_freq_plan(); unselect_scplls(); scpll_set_refs(); for_each_possible_cpu(cpu) scpll_init(cpu); scpll_init(L2); regulator_init(); bus_init(); /* Improve boot time by ramping up CPUs immediately. */ for_each_online_cpu(cpu) acpuclk_8x60_set_rate(cpu, max_cpu_khz, SETRATE_INIT); acpuclk_register(&acpuclk_8x60_data); cpufreq_table_init(); register_hotcpu_notifier(&acpuclock_cpu_notifier); return 0; } struct acpuclk_soc_data acpuclk_8x60_soc_data __initdata = { .init = acpuclk_8x60_init, };
TLOIN-3X-WIP/android_kernel_samsung_msm8660-common
arch/arm/mach-msm/acpuclock-8x60.c
C
gpl-2.0
35,500
/* * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ package com.oracle.truffle.api.instrument; /** * Program element "tags" such as <code>"STATEMENT"</code>, presumed to be singletons (best * implemented as enums) that define user-visible behavior for debugging and other * {@linkplain Instrumenter Instrumentation-based} tools. These categories should correspond to * program structures, for example "statement" and "assignment", that are meaningful * ("human-sensible") to guest language programmers. * <p> * An untagged Truffle node should be understood as an artifact of the guest language implementation * and should not be visible to guest language programmers. Nodes may also have more than one tag, * for example a variable assignment that is also a statement. 
Finally, the assignment of tags to * nodes could depending on the use-case of whatever tool is using them. * * @see Probe * @see StandardSyntaxTag * @since 0.8 or earlier */ @Deprecated public interface SyntaxTag { /** * Human-friendly name of guest language program elements belonging to the category, e.g. * "statement". * * @since 0.8 or earlier */ String name(); /** * Criteria and example uses for the tag. * * @since 0.8 or earlier */ String getDescription(); }
entlicher/truffle
truffle/com.oracle.truffle.api/src/com/oracle/truffle/api/instrument/SyntaxTag.java
Java
gpl-2.0
2,478
/*************************************************************************** test_template.cpp -------------------------------------- Date : Sun Sep 16 12:22:23 AKDT 2007 Copyright : (C) 2007 by Gary E. Sherman Email : sherman at mrcc dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ #include <QtTest> #include <QObject> #include <QString> #include <QObject> #include <qgsgeometry.h> #include <qgsspatialindex.h> #if QT_VERSION < 0x40701 // See http://hub.qgis.org/issues/4284 Q_DECLARE_METATYPE( QVariant ) #endif static QgsFeature _pointFeature( QgsFeatureId id, qreal x, qreal y ) { QgsFeature f( id ); f.setGeometry( QgsGeometry::fromPoint( QgsPoint( x, y ) ) ); return f; } static QList<QgsFeature> _pointFeatures() { /* * 2 | 1 * | * -----+----- * | * 3 | 4 */ QList<QgsFeature> feats; feats << _pointFeature( 1, 1, 1 ) << _pointFeature( 2, -1, 1 ) << _pointFeature( 3, -1, -1 ) << _pointFeature( 4, 1, -1 ); return feats; } class TestQgsSpatialIndex : public QObject { Q_OBJECT private slots: void testQuery() { QgsSpatialIndex index; foreach ( const QgsFeature& f, _pointFeatures() ) index.insertFeature( f ); QList<QgsFeatureId> fids = index.intersects( QgsRectangle( 0, 0, 10, 10 ) ); QVERIFY( fids.count() == 1 ); QVERIFY( fids[0] == 1 ); QList<QgsFeatureId> fids2 = index.intersects( QgsRectangle( -10, -10, 0, 10 ) ); QVERIFY( fids2.count() == 2 ); QVERIFY( fids2.contains( 2 ) ); QVERIFY( fids2.contains( 3 ) ); } void testCopy() { QgsSpatialIndex* index = new QgsSpatialIndex; foreach ( const QgsFeature& f, _pointFeatures() ) index->insertFeature( f ); // create copy of the index QgsSpatialIndex indexCopy( 
*index ); QVERIFY( index->refs() == 2 ); QVERIFY( indexCopy.refs() == 2 ); // test that copied index works QList<QgsFeatureId> fids1 = indexCopy.intersects( QgsRectangle( 0, 0, 10, 10 ) ); QVERIFY( fids1.count() == 1 ); QVERIFY( fids1[0] == 1 ); // check that the index is still shared QVERIFY( index->refs() == 2 ); QVERIFY( indexCopy.refs() == 2 ); // do a modification QgsFeature f2( _pointFeatures()[1] ); indexCopy.deleteFeature( f2 ); // check that the index is not shared anymore QVERIFY( index->refs() == 1 ); QVERIFY( indexCopy.refs() == 1 ); delete index; // test that copied index still works QList<QgsFeatureId> fids = indexCopy.intersects( QgsRectangle( 0, 0, 10, 10 ) ); QVERIFY( fids.count() == 1 ); QVERIFY( fids[0] == 1 ); } void benchmarkIntersect() { // add 50K features to the index QgsSpatialIndex index; for ( int i = 0; i < 100; ++i ) { for ( int k = 0; k < 500; ++k ) { QgsFeature f( i*1000 + k ); f.setGeometry( QgsGeometry::fromPoint( QgsPoint( i / 10, i % 10 ) ) ); index.insertFeature( f ); } } QBENCHMARK { for ( int i = 0; i < 100; ++i ) index.intersects( QgsRectangle( i / 10, i % 10, i / 10 + 1, i % 10 + 1 ) ); } } }; QTEST_MAIN( TestQgsSpatialIndex ) #include "moc_testqgsspatialindex.cxx"
mweisman/QGIS
tests/src/core/testqgsspatialindex.cpp
C++
gpl-2.0
3,954
<?php N2Loader::import('libraries.form.element.imagelistfromfolder'); class N2ElementImageListFromFolderValue extends N2ElementImageListFromFolder { function generateOptions(&$xml) { $this->values = array(); $html = ''; foreach ($xml->option AS $option) { $v = N2XmlHelper::getAttribute($option, 'value'); $image = N2Uri::pathToUri($v); $selected = $this->isSelected($this->parseValue($v)); if ($v != -1) { $this->values[] = $this->parseValue($image); $html .= N2Html::openTag("div", array("class" => "n2-radio-option n2-imagelist-option" . ($selected ? ' n2-active' : ''))); $html .= N2Html::image($image, (string)$option); $html .= N2Html::closeTag("div"); } else { $this->values[] = -1; $html .= N2Html::tag("div", array("class" => "n2-radio-option" . ($selected ? ' n2-active' : '')), ((string)$option)); } } return $html; } function parseValue($image) { return pathinfo($image, PATHINFO_FILENAME); } }
WordBenchNagoya/WordFes2016
wp/wp-content/plugins/smart-slider-3/nextend/library/libraries/form/element/imagelistfromfoldervalue.php
PHP
gpl-2.0
1,156
<?php global $w_this_lang; if($w_this_lang['code']=='all'){ $main_language['native_name'] = __('All languages', 'sitepress'); } if(empty($main_language)){ $main_language['native_name'] = $w_this_lang['display_name']; $main_language['translated_name'] = $w_this_lang['display_name']; $main_language['language_code'] = $w_this_lang['code']; if( $this->settings['icl_lso_flags'] || $icl_language_switcher_preview){ $flag = $this->get_flag($w_this_lang['code']); if($flag->from_template){ $wp_upload_dir = wp_upload_dir(); $main_language['country_flag_url'] = $wp_upload_dir['baseurl'] . '/flags/' . $flag->flag; }else{ $main_language['country_flag_url'] = ICL_PLUGIN_URL . '/res/flags/'.$flag->flag; } } } ?> <div id="lang_sel"<?php if ($this->settings['icl_lang_sel_type'] == 'list') echo ' style="display:none;"';?> <?php if($this->is_rtl()): ?>class="icl_rtl"<?php endif; ?> > <ul> <li><a href="#" class="lang_sel_sel icl-<?php echo $w_this_lang['code'] ?>"> <?php if( $this->settings['icl_lso_flags'] || $icl_language_switcher_preview):?> <img <?php if( !$this->settings['icl_lso_flags'] ):?>style="display:none"<?php endif?> class="iclflag" src="<?php echo $main_language['country_flag_url'] ?>" alt="<?php echo $main_language['language_code'] ?>" title="<?php echo $this->settings['icl_lso_native_lang'] ? 
esc_attr($main_language['native_name']) : esc_attr($main_language['translated_name']) ; ?>" /> &nbsp;<?php endif; if($icl_language_switcher_preview){ $lang_native = $main_language['native_name']; if($this->settings['icl_lso_native_lang']){ $lang_native_hidden = false; }else{ $lang_native_hidden = true; } $lang_translated = $main_language['translated_name']; if($this->settings['icl_lso_display_lang']){ $lang_translated_hidden = false; }else{ $lang_translated_hidden = true; } }else{ if($this->settings['icl_lso_native_lang']){ $lang_native = $main_language['native_name']; }else{ $lang_native = false; } if($this->settings['icl_lso_display_lang']){ $lang_translated = $main_language['translated_name']; }else{ $lang_translated = false; } $lang_native_hidden = false; $lang_translated_hidden = false; } echo icl_disp_language($lang_native, $lang_translated, $lang_native_hidden, $lang_translated_hidden); if(!isset($ie_ver) || $ie_ver > 6): ?></a><?php endif; ?> <?php if(!empty($active_languages)): ?> <?php if(isset($ie_ver) && $ie_ver <= 6): ?><table><tr><td><?php endif ?> <ul> <?php foreach($active_languages as $lang): ?> <li class="icl-<?php echo $lang['language_code'] ?>"> <a rel="alternate" hreflang="<?php echo $lang['language_code'] ?>" href="<?php echo apply_filters('WPML_filter_link', $lang['url'], $lang)?>"> <?php if( $this->settings['icl_lso_flags'] || $icl_language_switcher_preview):?> <img <?php if( !$this->settings['icl_lso_flags'] ):?>style="display:none"<?php endif?> class="iclflag" src="<?php echo $lang['country_flag_url'] ?>" alt="<?php echo $lang['language_code'] ?>" title="<?php echo $this->settings['icl_lso_native_lang'] ? 
esc_attr($lang['native_name']) : esc_attr($lang['translated_name']) ; ?>" />&nbsp; <?php endif; ?> <?php if($icl_language_switcher_preview){ $lang_native = $lang['native_name']; if($this->settings['icl_lso_native_lang']){ $lang_native_hidden = false; }else{ $lang_native_hidden = true; } $lang_translated = $lang['translated_name']; if($this->settings['icl_lso_display_lang']){ $lang_translated_hidden = false; }else{ $lang_translated_hidden = true; } }else{ if($this->settings['icl_lso_native_lang']){ $lang_native = $lang['native_name']; }else{ $lang_native = false; } if($this->settings['icl_lso_display_lang']){ $lang_translated = $lang['translated_name']; }else{ $lang_translated = false; } } echo icl_disp_language($lang_native, $lang_translated, $lang_native_hidden, $lang_translated_hidden); ?> </a> </li> <?php endforeach; ?> </ul> <?php if(isset($ie_ver) && $ie_ver <= 6): ?></td></tr></table></a><?php endif ?> <?php endif; ?> </li> </ul> </div>
sariha/wp-mmx
wp-content/plugins/sitepress-multilingual-cms/menu/language-selector.php
PHP
gpl-2.0
5,937
<?php /** * Elgg add action * * @package Elgg * @subpackage Core */ elgg_make_sticky_form('useradd'); // Get variables $username = get_input('username'); $password = get_input('password', null, false); $password2 = get_input('password2', null, false); $email = get_input('email'); $name = get_input('name'); // This param is not included in the useradd form by default, // but it allows sites to easily add the feature if necessary. $language = get_input('language', elgg_get_config('language')); $admin = get_input('admin'); if (is_array($admin)) { $admin = $admin[0]; } // no blank fields if ($username == '' || $password == '' || $password2 == '' || $email == '' || $name == '') { register_error(elgg_echo('register:fields')); forward(REFERER); } if (strcmp($password, $password2) != 0) { register_error(elgg_echo('RegistrationException:PasswordMismatch')); forward(REFERER); } // For now, just try and register the user try { $guid = register_user($username, $password, $name, $email, TRUE); if ($guid) { $new_user = get_entity($guid); if ($new_user && $admin && elgg_is_admin_logged_in()) { $new_user->makeAdmin(); } elgg_clear_sticky_form('useradd'); $new_user->admin_created = TRUE; // @todo ugh, saving a guid as metadata! $new_user->created_by_guid = elgg_get_logged_in_user_guid(); // The user language is set also by register_user(), but it defaults to // language of the current user (admin), so we need to fix it here. $new_user->language = $language; $subject = elgg_echo('useradd:subject', array(), $new_user->language); $body = elgg_echo('useradd:body', array( $name, elgg_get_site_entity()->name, elgg_get_site_entity()->url, $username, $password, ), $new_user->language); notify_user($new_user->guid, elgg_get_site_entity()->guid, $subject, $body); system_message(elgg_echo("adduser:ok", array(elgg_get_site_entity()->name))); } else { register_error(elgg_echo("adduser:bad")); } } catch (RegistrationException $r) { register_error($r->getMessage()); } forward(REFERER);
brusMX/elgg-iis
actions/useradd.php
PHP
gpl-2.0
2,069
#include "CNWSScriptVarTable.h" int CNWSScriptVarTable::DeleteIndex(CExoString &, unsigned long) { asm("leave"); asm("mov $0x081f3f5c, %eax"); asm("jmp *%eax"); } void CNWSScriptVarTable::DestroyFloat(CExoString &) { asm("leave"); asm("mov $0x081f4320, %eax"); asm("jmp *%eax"); } void CNWSScriptVarTable::DestroyInt(CExoString &) { asm("leave"); asm("mov $0x081f362c, %eax"); asm("jmp *%eax"); } void CNWSScriptVarTable::DestroyLocation(CExoString &) { asm("leave"); asm("mov $0x081f4458, %eax"); asm("jmp *%eax"); } void CNWSScriptVarTable::DestroyObject(CExoString &) { asm("leave"); asm("mov $0x081f43f0, %eax"); asm("jmp *%eax"); } void CNWSScriptVarTable::DestroyString(CExoString &) { asm("leave"); asm("mov $0x081f4388, %eax"); asm("jmp *%eax"); } float CNWSScriptVarTable::GetFloat(CExoString &) { asm("leave"); asm("mov $0x081f3ff0, %eax"); asm("jmp *%eax"); } int CNWSScriptVarTable::GetInt(CExoString &) { asm("leave"); asm("mov $0x081f3fc8, %eax"); asm("jmp *%eax"); } CScriptLocation CNWSScriptVarTable::GetLocation(CExoString &) { asm("leave"); asm("mov $0x081f40e8, %eax"); asm("jmp *%eax"); } unsigned long CNWSScriptVarTable::GetObject(CExoString &) { asm("leave"); asm("mov $0x081f40bc, %eax"); asm("jmp *%eax"); } CExoString CNWSScriptVarTable::GetString(CExoString &) { asm("leave"); asm("mov $0x081f4020, %eax"); asm("jmp *%eax"); } void CNWSScriptVarTable::LoadVarTable(CResGFF *, CResStruct *) { asm("leave"); asm("mov $0x081f3740, %eax"); asm("jmp *%eax"); } CNWSScriptVar * CNWSScriptVarTable::MatchIndex(CExoString &, unsigned long, int) { asm("leave"); asm("mov $0x081f32b4, %eax"); asm("jmp *%eax"); } void CNWSScriptVarTable::SaveVarTable(CResGFF *, CResStruct *) { asm("leave"); asm("mov $0x081f3b3c, %eax"); asm("jmp *%eax"); } void CNWSScriptVarTable::SetFloat(CExoString &, float) { asm("leave"); asm("mov $0x081f4190, %eax"); asm("jmp *%eax"); } void CNWSScriptVarTable::SetInt(CExoString &, int, int) { asm("leave"); asm("mov $0x081f3454, %eax"); asm("jmp 
*%eax"); } void CNWSScriptVarTable::SetLocation(CExoString &, CScriptLocation) { asm("leave"); asm("mov $0x081f4204, %eax"); asm("jmp *%eax"); } void CNWSScriptVarTable::SetObject(CExoString &, unsigned long) { asm("leave"); asm("mov $0x081f41e0, %eax"); asm("jmp *%eax"); } void CNWSScriptVarTable::SetString(CExoString &, CExoString &) { asm("leave"); asm("mov $0x081f41b4, %eax"); asm("jmp *%eax"); }
niv/nwnx2-linux
api/CNWSScriptVarTable.cpp
C++
gpl-2.0
2,644
/***************************************************************************** * m3u.c : M3U playlist format import ***************************************************************************** * Copyright (C) 2004 the VideoLAN team * $Id: 388e8a9bc8e6841236ae040b851d67810ef34dfb $ * * Authors: Clément Stenac <zorglub@videolan.org> * Sigmund Augdal Helberg <dnumgis@videolan.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA. 
*****************************************************************************/ /***************************************************************************** * Preamble *****************************************************************************/ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include <vlc_common.h> #include <vlc_demux.h> #include <vlc_charset.h> #include "playlist.h" struct demux_sys_t { char *psz_prefix; char *(*pf_dup) (const char *); }; /***************************************************************************** * Local prototypes *****************************************************************************/ static int Demux( demux_t *p_demux); static int Control( demux_t *p_demux, int i_query, va_list args ); static void parseEXTINF( char *psz_string, char **ppsz_artist, char **ppsz_name, int *pi_duration ); static bool ContainsURL( demux_t *p_demux ); static bool CheckContentType( stream_t * p_stream, const char * psz_ctype ); static char *GuessEncoding (const char *str) { return IsUTF8 (str) ? strdup (str) : FromLatin1 (str); } static char *CheckUnicode (const char *str) { return IsUTF8 (str) ? 
strdup (str): NULL; } /***************************************************************************** * Import_M3U: main import function *****************************************************************************/ int Import_M3U( vlc_object_t *p_this ) { demux_t *p_demux = (demux_t *)p_this; const uint8_t *p_peek; CHECK_PEEK( p_peek, 8 ); char *(*pf_dup) (const char *); if( POKE( p_peek, "RTSPtext", 8 ) /* QuickTime */ || POKE( p_peek, "\xef\xbb\xbf" "#EXTM3U", 10) /* BOM at start */ || demux_IsPathExtension( p_demux, ".m3u8" ) || demux_IsForced( p_demux, "m3u8" ) || CheckContentType( p_demux->s, "application/vnd.apple.mpegurl" ) ) pf_dup = CheckUnicode; /* UTF-8 */ else if( POKE( p_peek, "#EXTM3U", 7 ) || demux_IsPathExtension( p_demux, ".m3u" ) || demux_IsPathExtension( p_demux, ".vlc" ) || demux_IsForced( p_demux, "m3u" ) || ContainsURL( p_demux ) || CheckContentType( p_demux->s, "audio/x-mpegurl") ) pf_dup = GuessEncoding; else return VLC_EGENERIC; STANDARD_DEMUX_INIT_MSG( "found valid M3U playlist" ); p_demux->p_sys->psz_prefix = FindPrefix( p_demux ); p_demux->p_sys->pf_dup = pf_dup; return VLC_SUCCESS; } static bool ContainsURL( demux_t *p_demux ) { const uint8_t *p_peek, *p_peek_end; int i_peek; i_peek = stream_Peek( p_demux->s, &p_peek, 1024 ); if( i_peek <= 0 ) return false; p_peek_end = p_peek + i_peek; while( p_peek + sizeof( "https://" ) < p_peek_end ) { /* One line starting with a URL is enough */ if( !strncasecmp( (const char *)p_peek, "http://", 7 ) || !strncasecmp( (const char *)p_peek, "mms://", 6 ) || !strncasecmp( (const char *)p_peek, "rtsp://", 7 ) || !strncasecmp( (const char *)p_peek, "https://", 8 ) || !strncasecmp( (const char *)p_peek, "ftp://", 6 ) ) { return true; } /* Comments and blank lines are ignored */ else if( *p_peek != '#' && *p_peek != '\n' && *p_peek != '\r') { return false; } while( p_peek < p_peek_end && *p_peek != '\n' ) p_peek++; if ( *p_peek == '\n' ) p_peek++; } return false; } static bool CheckContentType( stream_t * 
p_stream, const char * psz_ctype ) { char *psz_check = stream_ContentType( p_stream ); if( !psz_check ) return false; int i_res = strncasecmp( psz_check, psz_ctype, strlen( psz_check ) ); free( psz_check ); return ( i_res == 0 ) ? true : false; } /***************************************************************************** * Deactivate: frees unused data *****************************************************************************/ void Close_M3U( vlc_object_t *p_this ) { demux_t *p_demux = (demux_t *)p_this; free( p_demux->p_sys->psz_prefix ); free( p_demux->p_sys ); } static int Demux( demux_t *p_demux ) { char *psz_line; char *psz_name = NULL; char *psz_artist = NULL; char *psz_album_art = NULL; int i_parsed_duration = 0; mtime_t i_duration = -1; const char**ppsz_options = NULL; char * (*pf_dup) (const char *) = p_demux->p_sys->pf_dup; int i_options = 0; bool b_cleanup = false; input_item_t *p_input; input_item_t *p_current_input = GetCurrentItem(p_demux); input_item_node_t *p_subitems = input_item_node_Create( p_current_input ); psz_line = stream_ReadLine( p_demux->s ); while( psz_line ) { char *psz_parse = psz_line; /* Skip leading tabs and spaces */ while( *psz_parse == ' ' || *psz_parse == '\t' || *psz_parse == '\n' || *psz_parse == '\r' ) psz_parse++; if( *psz_parse == '#' ) { /* Parse extra info */ /* Skip leading tabs and spaces */ while( *psz_parse == ' ' || *psz_parse == '\t' || *psz_parse == '\n' || *psz_parse == '\r' || *psz_parse == '#' ) psz_parse++; if( !*psz_parse ) goto error; if( !strncasecmp( psz_parse, "EXTINF:", sizeof("EXTINF:") -1 ) ) { /* Extended info */ psz_parse += sizeof("EXTINF:") - 1; free(psz_name); free(psz_artist); parseEXTINF( psz_parse, &psz_artist, &psz_name, &i_parsed_duration ); if( i_parsed_duration >= 0 ) i_duration = i_parsed_duration * INT64_C(1000000); if( psz_name ) psz_name = pf_dup( psz_name ); if( psz_artist ) psz_artist = pf_dup( psz_artist ); } else if( !strncasecmp( psz_parse, "EXTVLCOPT:", sizeof("EXTVLCOPT:") 
-1 ) ) { /* VLC Option */ char *psz_option; psz_parse += sizeof("EXTVLCOPT:") -1; if( !*psz_parse ) goto error; psz_option = pf_dup( psz_parse ); if( psz_option ) INSERT_ELEM( ppsz_options, i_options, i_options, psz_option ); } /* Special case for jamendo which provide the albumart */ else if( !strncasecmp( psz_parse, "EXTALBUMARTURL:", sizeof( "EXTALBUMARTURL:" ) -1 ) ) { psz_parse += sizeof( "EXTALBUMARTURL:" ) - 1; free( psz_album_art ); psz_album_art = pf_dup( psz_parse ); } } else if( !strncasecmp( psz_parse, "RTSPtext", sizeof("RTSPtext") -1 ) ) { ;/* special case to handle QuickTime RTSPtext redirect files */ } else if( *psz_parse ) { char *psz_mrl; psz_parse = pf_dup( psz_parse ); if( !psz_name && psz_parse ) /* Use filename as name for relative entries */ psz_name = strdup( psz_parse ); psz_mrl = ProcessMRL( psz_parse, p_demux->p_sys->psz_prefix ); b_cleanup = true; if( !psz_mrl ) { free( psz_parse ); goto error; } p_input = input_item_NewExt( psz_mrl, psz_name, i_options, ppsz_options, 0, i_duration ); free( psz_parse ); free( psz_mrl ); if ( !EMPTY_STR(psz_artist) ) input_item_SetArtist( p_input, psz_artist ); if( psz_name ) input_item_SetTitle( p_input, psz_name ); if( !EMPTY_STR(psz_album_art) ) input_item_SetArtURL( p_input, psz_album_art ); input_item_node_AppendItem( p_subitems, p_input ); vlc_gc_decref( p_input ); } error: /* Fetch another line */ free( psz_line ); psz_line = stream_ReadLine( p_demux->s ); if( !psz_line ) b_cleanup = true; if( b_cleanup ) { /* Cleanup state */ while( i_options-- ) free( (char*)ppsz_options[i_options] ); FREENULL( ppsz_options ); i_options = 0; FREENULL( psz_name ); FREENULL( psz_artist ); FREENULL( psz_album_art ); i_parsed_duration = 0; i_duration = -1; b_cleanup = false; } } input_item_node_PostAndDelete( p_subitems ); vlc_gc_decref(p_current_input); var_Destroy( p_demux, "m3u-extvlcopt" ); return 0; /* Needed for correct operation of go back */ } static int Control( demux_t *p_demux, int i_query, va_list args ) 
{ VLC_UNUSED(p_demux); VLC_UNUSED(i_query); VLC_UNUSED(args); return VLC_EGENERIC; } static void parseEXTINF(char *psz_string, char **ppsz_artist, char **ppsz_name, int *pi_duration) { char *end = NULL; char *psz_item = NULL; end = psz_string + strlen( psz_string ); /* ignore whitespaces */ for (; psz_string < end && ( *psz_string == '\t' || *psz_string == ' ' ); psz_string++ ); /* duration: read to next comma */ psz_item = psz_string; psz_string = strchr( psz_string, ',' ); if ( psz_string ) { *psz_string = '\0'; *pi_duration = atoi( psz_item ); } else { return; } if ( psz_string < end ) /* continue parsing if possible */ psz_string++; /* analyse the remaining string */ psz_item = strstr( psz_string, " - " ); /* here we have the 0.8.2+ format with artist */ if ( psz_item ) { /* *** "EXTINF:time,artist - name" */ *psz_item = '\0'; *ppsz_artist = psz_string; *ppsz_name = psz_item + 3; /* points directly after ' - ' */ return; } /* reaching this point means: 0.8.1- with artist or something without artist */ if ( *psz_string == ',' ) { /* *** "EXTINF:time,,name" */ psz_string++; *ppsz_name = psz_string; return; } psz_item = psz_string; psz_string = strchr( psz_string, ',' ); if ( psz_string ) { /* *** "EXTINF:time,artist,name" */ *psz_string = '\0'; *ppsz_artist = psz_item; *ppsz_name = psz_string+1; } else { /* *** "EXTINF:time,name" */ *ppsz_name = psz_item; } return; }
Jobq/vlc-mdc
modules/demux/playlist/m3u.c
C
gpl-2.0
11,841
/* * Copyright (c) 1998-2012 Caucho Technology -- all rights reserved * * This file is part of Resin(R) Open Source * * Each copy or derived work must preserve the copyright notice and this * notice unmodified. * * Resin Open Source is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Resin Open Source is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, or any warranty * of NON-INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with Resin Open Source; if not, write to the * Free SoftwareFoundation, Inc. * 59 Temple Place, Suite 330 * Boston, MA 02111-1307 USA * * @author Scott Ferguson */ package com.caucho.sql.spy; import com.caucho.util.Alarm; import com.caucho.util.CurrentTime; import com.caucho.util.L10N; import javax.sql.ConnectionPoolDataSource; import javax.sql.PooledConnection; import java.io.PrintWriter; import java.sql.SQLException; import java.util.concurrent.atomic.AtomicInteger; import java.util.logging.Logger; /** * Spying on a driver. */ public class SpyConnectionPoolDataSource implements ConnectionPoolDataSource { protected final static Logger log = Logger.getLogger(SpyConnectionPoolDataSource.class.getName()); protected final static L10N L = new L10N(SpyConnectionPoolDataSource.class); private String _id; private AtomicInteger _connCount = new AtomicInteger(); // The underlying data source private ConnectionPoolDataSource _dataSource; /** * Creates a new SpyDriver. */ public SpyConnectionPoolDataSource(ConnectionPoolDataSource dataSource, String id) { _dataSource = dataSource; _id = id; } /** * Returns the pooled connection. 
*/ @Override public PooledConnection getPooledConnection() throws SQLException { long start = start(); try { PooledConnection conn = _dataSource.getPooledConnection(); String connId = _id + "." + _connCount.getAndIncrement(); log(start, "getConnectionPool() -> " + connId + ":" + conn); return new SpyPooledConnection(conn, connId); } catch (SQLException e) { log(start, "exn-connect(" + e + ")"); throw e; } } /** * Returns the XAConnection. */ @Override public PooledConnection getPooledConnection(String user, String password) throws SQLException { long start = start(); try { PooledConnection conn = _dataSource.getPooledConnection(user, password); String connId = _id + "." + _connCount.getAndIncrement(); log(start, "getPooledConnection(" + user + ") -> " + connId + ":" + conn); return new SpyPooledConnection(conn, connId); } catch (SQLException e) { log(start, "exn-connect(" + e + ")"); throw e; } } /** * Returns the login timeout. */ @Override public int getLoginTimeout() throws SQLException { return _dataSource.getLoginTimeout(); } /** * Sets the login timeout. */ @Override public void setLoginTimeout(int timeout) throws SQLException { _dataSource.setLoginTimeout(timeout); } /** * Returns the log writer */ @Override public PrintWriter getLogWriter() throws SQLException { return _dataSource.getLogWriter(); } /** * Sets the log writer. */ @Override public void setLogWriter(PrintWriter log) throws SQLException { _dataSource.setLogWriter(log); } public Logger getParentLogger() { return null; } protected long start() { return CurrentTime.getExactTime(); } protected void log(long start, String msg) { long delta = CurrentTime.getExactTime() - start; log.fine("[" + delta + "ms] " + _id + ":" + msg); } public String toString() { return getClass().getSimpleName() + "[id=" + _id + ",data-source=" + _dataSource + "]"; } }
WelcomeHUME/svn-caucho-com-resin
modules/resin/src/com/caucho/sql/spy/SpyConnectionPoolDataSource.java
Java
gpl-2.0
4,259
/* * Copyright (c) 1998-2012 Caucho Technology -- all rights reserved * * This file is part of Resin(R) Open Source * * Each copy or derived work must preserve the copyright notice and this * notice unmodified. * * Resin Open Source is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Resin Open Source is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, or any warranty * of NON-INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with Resin Open Source; if not, write to the * Free Software Foundation, Inc. * 59 Temple Place, Suite 330 * Boston, MA 02111-1307 USA * * @author Scott Ferguson */ package com.caucho.amber.query; import com.caucho.amber.entity.Entity; /** * Callback when an entity is created, destroyed or modified. */ abstract public class CacheUpdate { private CachedQuery _query; protected CacheUpdate(CachedQuery query) { _query = query; } /** * Called when an entity is added. */ public void add(Entity obj) { update(); } /** * Called when an entity is deleted. */ public void delete(Object key) { update(); } /** * Called when an entity is deleted. */ public void update(Entity object) { update(); } /** * Update the query. */ protected void update() { _query.update(); } }
am-immanuel/quercus
modules/resin/src/com/caucho/amber/query/CacheUpdate.java
Java
gpl-2.0
1,726
<?php namespace Illuminate\Support\Facades; /** * @method static bool matchesType(string $actual, string $type) * @method static bool isJson() * @method static bool expectsJson() * @method static bool wantsJson() * @method static bool accepts(string|array $contentTypes) * @method static bool prefers(string|array $contentTypes) * @method static bool acceptsAnyContentType() * @method static bool acceptsJson() * @method static bool acceptsHtml() * @method static string format($default = 'html') * @method static string|array old(string|null $key = null, string|array|null $default = null) * @method static void flash() * @method static void flashOnly(array|mixed $keys) * @method static void flashExcept(array|mixed $keys) * @method static void flush() * @method static string|array|null server(string|null $key = null, string|array|null $default = null) * @method static bool hasHeader(string $key) * @method static string|array|null header(string|null $key = null, string|array|null $default = null) * @method static string|null bearerToken() * @method static bool exists(string|array $key) * @method static bool has(string|array $key) * @method static bool hasAny(string|array $key) * @method static bool filled(string|array $key) * @method static bool anyFilled(string|array $key) * @method static array keys() * @method static array all(array|mixed|null $keys = null) * @method static string|array|null input(string|null $key = null, string|array|null $default = null) * @method static array only(array|mixed $keys) * @method static array except(array|mixed $keys) * @method static string|array|null query(string|null $key = null, string|array|null $default = null) * @method static string|array|null post(string|null $key = null, string|array|null $default = null) * @method static bool hasCookie(string $key) * @method static string|array|null cookie(string|null $key = null, string|array|null $default = null) * @method static array allFiles() * @method static bool hasFile(string $key) * 
@method static \Illuminate\Http\UploadedFile|\Illuminate\Http\UploadedFile[]|array|null file(string|null $key = null, mixed $default = null) * @method static \Illuminate\Http\Request capture() * @method static \Illuminate\Http\Request instance() * @method static string method() * @method static string root() * @method static string url() * @method static string fullUrl() * @method static string fullUrlWithQuery(array $query) * @method static string path() * @method static string decodedPath() * @method static string|null segment(int $index, string|null $default = null) * @method static array segments() * @method static bool is(mixed ...$patterns) * @method static bool routeIs(mixed ...$patterns) * @method static bool fullUrlIs(mixed ...$patterns) * @method static bool ajax() * @method static bool pjax() * @method static bool prefetch() * @method static bool secure() * @method static string|null ip() * @method static array ips() * @method static string userAgent() * @method static \Illuminate\Http\Request merge(array $input) * @method static \Illuminate\Http\Request replace(array $input) * @method static \Symfony\Component\HttpFoundation\ParameterBag|mixed json(string|null $key = null, mixed $default = null) * @method static \Illuminate\Http\Request createFrom(\Illuminate\Http\Request $from, \Illuminate\Http\Request|null $to = null) * @method static \Illuminate\Http\Request createFromBase(\Symfony\Component\HttpFoundation\Request $request) * @method static \Illuminate\Http\Request duplicate(array|null $query = null, array|null $request = null, array|null $attributes = null, array|null $cookies = null, array|null $files = null, array|null $server = null) * @method static mixed filterFiles(mixed $files) * @method static \Illuminate\Session\Store session() * @method static \Illuminate\Session\Store|null getSession() * @method static void setLaravelSession(\Illuminate\Contracts\Session\Session $session) * @method static mixed user(string|null $guard = null) * @method 
static \Illuminate\Routing\Route|object|string route(string|null $param = null, string|null $default = null) * @method static string fingerprint() * @method static \Illuminate\Http\Request setJson(\Symfony\Component\HttpFoundation\ParameterBag $json) * @method static \Closure getUserResolver() * @method static \Illuminate\Http\Request setUserResolver(\Closure $callback) * @method static \Closure getRouteResolver() * @method static \Illuminate\Http\Request setRouteResolver(\Closure $callback) * @method static array toArray() * @method static bool offsetExists(string $offset) * @method static mixed offsetGet(string $offset) * @method static void offsetSet(string $offset, mixed $value) * @method static void offsetUnset(string $offset) * * @see \Illuminate\Http\Request */ class Request extends Facade { /** * Get the registered name of the component. * * @return string */ protected static function getFacadeAccessor() { return 'request'; } }
matrix-msu/Kora3
vendor/laravel/framework/src/Illuminate/Support/Facades/Request.php
PHP
gpl-2.0
5,075
<?xml version="1.0" encoding="utf-8"?> <!DOCTYPE TS> <TS version="2.0" language="fr_FR"> <context> <name>BigList</name> <message> <location filename="ui_list.ui" line="14"/> <source>Form</source> <translation>Formularz</translation> </message> <message> <location filename="ui_list.ui" line="51"/> <source>{field Name}</source> <translation>{nazwa pola}</translation> </message> <message> <location filename="ui_list.ui" line="81"/> <source>...</source> <translation>...</translation> </message> </context> <context> <name>CaptureAction</name> <message> <location filename="maptools.py" line="23"/> <source>Capture</source> <translation>Przechwyć</translation> </message> </context> <context> <name>DeleteFeatureDialog</name> <message> <location filename="ui_deletefeature.ui" line="20"/> <source>Dialog</source> <translation>Okno</translation> </message> <message> <location filename="ui_deletefeature.ui" line="80"/> <source>Do you really want to delete this feature?</source> <translation>Czy na pewno chcesz usunąć ten obiekt?</translation> </message> <message> <location filename="ui_deletefeature.ui" line="92"/> <source>Cancel</source> <translation>Anuluj</translation> </message> <message> <location filename="ui_deletefeature.ui" line="99"/> <source>Delete</source> <translation>Usuń</translation> </message> <message> <location filename="ui_deletefeature.ui" line="120"/> <source>Delete feature?</source> <translation>Usunąć obiekt?</translation> </message> </context> <context> <name>Dialog</name> <message> <location filename="ui_errorlist.ui" line="14"/> <source>Please corrent the following errors</source> <translation>Proszę poprawić następujące błędy</translation> </message> <message> <location filename="ui_errorlist.ui" line="31"/> <source>Please correct the following errors</source> <translation>Proszę poprawić następujące błędy</translation> </message> <message> <location filename="ui_errorlist.ui" line="98"/> <source>OK</source> <translation>OK</translation> </message> 
</context> <context> <name>DrawingWindow</name> <message> <location filename="ui_drawingpad.ui" line="14"/> <source>Dialog</source> <translation>Okno</translation> </message> <message> <location filename="ui_drawingpad.ui" line="260"/> <source>Clear Drawing</source> <translation>Wyczyść rysunek</translation> </message> <message> <location filename="ui_drawingpad.ui" line="166"/> <source>...</source> <translation>...</translation> </message> <message> <location filename="ui_drawingpad.ui" line="91"/> <source>Red</source> <translation>Czerwony</translation> </message> <message> <location filename="ui_drawingpad.ui" line="181"/> <source>buttonGroup</source> <translation>buttonGroup</translation> </message> <message> <location filename="ui_drawingpad.ui" line="116"/> <source>Blue</source> <translation>Niebieski</translation> </message> <message> <location filename="ui_drawingpad.ui" line="141"/> <source>Black</source> <translation>Czarny</translation> </message> <message> <location filename="ui_drawingpad.ui" line="317"/> <source>Save</source> <translation>Zapisz</translation> </message> <message> <location filename="ui_drawingpad.ui" line="329"/> <source>Cancel</source> <translation>Anuluj</translation> </message> <message> <location filename="ui_drawingpad.ui" line="263"/> <source>Clear the current drawing</source> <translation>Wyczyść aktualny rysunek</translation> </message> <message> <location filename="ui_drawingpad.ui" line="275"/> <source>Red Pen</source> <translation>Czerwone pióro</translation> </message> <message> <location filename="ui_drawingpad.ui" line="278"/> <source>Change pen colour to red</source> <translation>Zmień kolor pióra na czerwony</translation> </message> <message> <location filename="ui_drawingpad.ui" line="290"/> <source>Blue Pen</source> <translation>Niebieskie pióro</translation> </message> <message> <location filename="ui_drawingpad.ui" line="293"/> <source>Change pen colour to blue</source> <translation>Zmień kolor pióra na 
niebieski</translation> </message> <message> <location filename="ui_drawingpad.ui" line="305"/> <source>Black Pen</source> <translation>Pióro czarne</translation> </message> <message> <location filename="ui_drawingpad.ui" line="308"/> <source>Change pen colour to black</source> <translation>Zmień kolor pióra na czarny</translation> </message> <message> <location filename="ui_drawingpad.ui" line="320"/> <source>Save the current image</source> <translation>Zapisz aktualny obraz</translation> </message> <message> <location filename="ui_drawingpad.ui" line="332"/> <source>Cancel the current image</source> <translation>Anuluj aktualny obraz</translation> </message> <message> <location filename="ui_drawingpad.ui" line="341"/> <source>Take Map Snapshot</source> <translation>Zrób zrzut ekranu mapy</translation> </message> <message> <location filename="ui_drawingpad.ui" line="353"/> <source>Eraser</source> <translation>Gumka</translation> </message> </context> <context> <name>EndCaptureAction</name> <message> <location filename="maptools.py" line="13"/> <source>End Capture</source> <translation>Zakończ przechwytywanie</translation> </message> </context> <context> <name>Form</name> <message> <location filename="ui_sync.ui" line="14"/> <source>Form</source> <translation>Formularz</translation> </message> <message> <location filename="ui_dataentrywidget.ui" line="73"/> <source>Save</source> <translation>Zapisz</translation> </message> <message> <location filename="ui_dataentrywidget.ui" line="85"/> <source>Cancel</source> <translation>Anuluj</translation> </message> <message> <location filename="ui_dataentrywidget.ui" line="97"/> <source>Delete</source> <translation>Usuń</translation> </message> <message> <location filename="ui_infodock.ui" line="129"/> <source>about:blank</source> <translation>about:blank</translation> </message> <message> <location filename="ui_infodock.ui" line="231"/> <source>&lt;</source> <translation>&lt;</translation> </message> <message> <location 
filename="ui_infodock.ui" line="257"/> <source>X of Y</source> <translation>X of Y</translation> </message> <message> <location filename="ui_infodock.ui" line="264"/> <source>&gt;</source> <translation>&gt;</translation> </message> <message> <location filename="ui_projectwidget.ui" line="74"/> <source>Image</source> <translation>Obraz</translation> </message> <message> <location filename="ui_projectwidget.ui" line="95"/> <source>Name</source> <translation>Nazwa</translation> </message> <message> <location filename="ui_projectwidget.ui" line="114"/> <source>Description</source> <translation>Opis</translation> </message> <message> <location filename="ui_projectwidget.ui" line="155"/> <source>Version</source> <translation>Wersja</translation> </message> <message> <location filename="ui_sync.ui" line="29"/> <source>Sync All</source> <translation>Synchronizuj wszystko</translation> </message> <message> <location filename="ui_sync.ui" line="69"/> <source>...</source> <translation>...</translation> </message> <message> <location filename="ui_sync.ui" line="141"/> <source>1</source> <translation>1</translation> </message> </context> <context> <name>GPSAction</name> <message> <location filename="gps_action.py" line="26"/> <source>Enable GPS</source> <translation>Włącz GPS</translation> </message> <message> <location filename="gps_action.py" line="55"/> <source>Connecting</source> <translation>Łączenie</translation> </message> </context> <context> <name>HelpViewer</name> <message> <location filename="ui_helpviewer.ui" line="14"/> <source>Help Viewer</source> <translation>Okno pomocy</translation> </message> <message> <location filename="ui_helpviewer.ui" line="24"/> <source>about:blank</source> <translation>about:blank</translation> </message> <message> <location filename="ui_helpviewer.ui" line="38"/> <source>Cancel</source> <translation>Anuluj</translation> </message> </context> <context> <name>ListModules</name> <message> <location filename="ui_listmodules.ui" line="17"/> 
<source>Select form to open</source> <translation>Wybierz formularz do otwarcia</translation> </message> </context> <context> <name>MainWindow</name> <message> <location filename="ui_mainwindow.ui" line="20"/> <source>IntraMaps Roam: Mobile Data Collection</source> <translation>IntraMaps Roam: Pozyskiwanie Mobilnych Danych</translation> </message> <message> <location filename="ui_mainwindow.ui" line="189"/> <source>Project {} Loading</source> <translation>Ładowanie projektu {}</translation> </message> <message> <location filename="ui_mainwindow.ui" line="207"/> <source>Please wait....</source> <translation>Proszę czekać...</translation> </message> <message> <location filename="ui_mainwindow.ui" line="257"/> <source>sidetoolbar</source> <translation>sidetoolbar</translation> </message> <message> <location filename="ui_mainwindow.ui" line="315"/> <source>projecttoolbar</source> <translation>projecttoolbar</translation> </message> <message> <location filename="ui_mainwindow.ui" line="367"/> <source>Map</source> <translation>Mapa</translation> </message> <message> <location filename="ui_mainwindow.ui" line="370"/> <source>Map </source> <translation>Mapa </translation> </message> <message> <location filename="ui_mainwindow.ui" line="385"/> <source>Projects</source> <translation>Projekty</translation> </message> <message> <location filename="ui_mainwindow.ui" line="388"/> <source>Projects </source> <translation>Projekty </translation> </message> <message> <location filename="ui_mainwindow.ui" line="403"/> <source>Settings</source> <translation>Opcje</translation> </message> <message> <location filename="ui_mainwindow.ui" line="406"/> <source>Settings </source> <translation>Opcje </translation> </message> <message> <location filename="ui_mainwindow.ui" line="418"/> <source>Quit</source> <translation>Zamknij</translation> </message> <message> <location filename="ui_mainwindow.ui" line="421"/> <source>Quit </source> <translation>Zamknij </translation> </message> <message> 
<location filename="ui_mainwindow.ui" line="433"/> <source>Home View</source> <translation>Ekran początkowy</translation> </message> <message> <location filename="ui_mainwindow.ui" line="445"/> <source>Pan</source> <translation>Przesuń</translation> </message> <message> <location filename="ui_mainwindow.ui" line="457"/> <source>Zoom In</source> <translation>Zbliż</translation> </message> <message> <location filename="ui_mainwindow.ui" line="469"/> <source>Zoom Out</source> <translation>Oddal</translation> </message> <message> <location filename="ui_mainwindow.ui" line="478"/> <source>Aerial Photo</source> <translation>Zdjęcie lotnicze</translation> </message> <message> <location filename="ui_mainwindow.ui" line="490"/> <source>Edit Tools</source> <translation>Narzędzia edycji</translation> </message> <message> <location filename="ui_mainwindow.ui" line="502"/> <source>Edit Attributes</source> <translation>Edytuj atrybuty</translation> </message> <message> <location filename="ui_mainwindow.ui" line="511"/> <source>Enable GPS</source> <translation>Włącz GPS</translation> </message> <message> <location filename="ui_mainwindow.ui" line="523"/> <source>Sync </source> <translation>Synchronizacja </translation> </message> <message> <location filename="ui_mainwindow.ui" line="526"/> <source>Sync </source> <translation>Synchronizacja </translation> </message> <message> <location filename="ui_mainwindow.ui" line="541"/> <source>Move Feature</source> <translation>Przesuń obiekt</translation> </message> <message> <location filename="ui_mainwindow.ui" line="553"/> <source>Capture at GPS</source> <translation>Przechwyć z GPS</translation> </message> <message> <location filename="ui_mainwindow.ui" line="568"/> <source>Data Entry</source> <translation>Wprowadzanie danych</translation> </message> <message> <location filename="ui_mainwindow.ui" line="583"/> <source>Select</source> <translation>Zaznacz</translation> </message> </context> <context> <name>MainWindowPy</name> <message> 
<location filename="mainwindow.py" line="391"/> <source>Capture</source> <translation>Przechwyć</translation> </message> <message> <location filename="mainwindow.py" line="450"/> <source>Form errors</source> <translation>Błędy formularza</translation> </message> <message> <location filename="mainwindow.py" line="450"/> <source>Looks like some forms couldn&apos;t be loaded</source> <translation>Wygląda na to, że niektóre formuły nie załadowały się</translation> </message> <message> <location filename="mainwindow.py" line="322"/> <source>Seems something has gone wrong. Press for more details</source> <translation>Coś poszło nie tak. Kliknij, by dowiedzieć się więcej</translation> </message> </context> <context> <name>MessageBarItems</name> <message> <location filename="messagebaritems.py" line="40"/> <source>Dismiss</source> <translation>Odrzuć</translation> </message> <message> <location filename="messagebaritems.py" line="52"/> <source>Oops</source> <translation>Ups</translation> </message> </context> <context> <name>apphelpwidget</name> <message> <location filename="ui_helppage.ui" line="14"/> <source>Form</source> <translation>Formularz</translation> </message> <message> <location filename="ui_helppage.ui" line="90"/> <source>about:blank</source> <translation>about:blank</translation> </message> </context> <context> <name>datatimerpicker</name> <message> <location filename="ui_datatimerpicker.ui" line="20"/> <source>Form</source> <translation>Formularz</translation> </message> <message> <location filename="ui_datatimerpicker.ui" line="76"/> <source>Set as current date and time</source> <translation>Ustaw jako obecną datę i czas</translation> </message> <message> <location filename="ui_datatimerpicker.ui" line="143"/> <source>1</source> <translation>1</translation> </message> <message> <location filename="ui_datatimerpicker.ui" line="148"/> <source>2</source> <translation>2</translation> </message> <message> <location filename="ui_datatimerpicker.ui" line="153"/> 
<source>3</source> <translation>3</translation> </message> <message> <location filename="ui_datatimerpicker.ui" line="158"/> <source>4</source> <translation>4</translation> </message> <message> <location filename="ui_datatimerpicker.ui" line="227"/> <source>5</source> <translation>5</translation> </message> <message> <location filename="ui_datatimerpicker.ui" line="168"/> <source>6</source> <translation>6</translation> </message> <message> <location filename="ui_datatimerpicker.ui" line="173"/> <source>7</source> <translation>7</translation> </message> <message> <location filename="ui_datatimerpicker.ui" line="178"/> <source>8</source> <translation>8</translation> </message> <message> <location filename="ui_datatimerpicker.ui" line="183"/> <source>9</source> <translation>9</translation> </message> <message> <location filename="ui_datatimerpicker.ui" line="232"/> <source>10</source> <translation>10</translation> </message> <message> <location filename="ui_datatimerpicker.ui" line="193"/> <source>11</source> <translation>11</translation> </message> <message> <location filename="ui_datatimerpicker.ui" line="198"/> <source>12</source> <translation>12</translation> </message> <message> <location filename="ui_datatimerpicker.ui" line="222"/> <source>0</source> <translation>0</translation> </message> <message> <location filename="ui_datatimerpicker.ui" line="237"/> <source>15</source> <translation>15</translation> </message> <message> <location filename="ui_datatimerpicker.ui" line="242"/> <source>20</source> <translation>20</translation> </message> <message> <location filename="ui_datatimerpicker.ui" line="247"/> <source>25</source> <translation>25</translation> </message> <message> <location filename="ui_datatimerpicker.ui" line="252"/> <source>30</source> <translation>30</translation> </message> <message> <location filename="ui_datatimerpicker.ui" line="257"/> <source>35</source> <translation>35</translation> </message> <message> <location 
filename="ui_datatimerpicker.ui" line="262"/> <source>40</source> <translation>40</translation> </message> <message> <location filename="ui_datatimerpicker.ui" line="267"/> <source>45</source> <translation>45</translation> </message> <message> <location filename="ui_datatimerpicker.ui" line="272"/> <source>50</source> <translation>50</translation> </message> <message> <location filename="ui_datatimerpicker.ui" line="277"/> <source>55</source> <translation>55</translation> </message> <message> <location filename="ui_datatimerpicker.ui" line="312"/> <source>AM</source> <translation>AM</translation> </message> <message> <location filename="ui_datatimerpicker.ui" line="334"/> <source>PM</source> <translation>PM</translation> </message> </context> <context> <name>imageviewer</name> <message> <location filename="ui_imageviewer.ui" line="14"/> <source>Form</source> <translation>Formularz</translation> </message> <message> <location filename="ui_imageviewer.ui" line="65"/> <source>Tap image to close</source> <translation>Dotknij obraz, by zamknąć</translation> </message> <message> <location filename="ui_imageviewer.ui" line="99"/> <source>TextLabel</source> <translation>TextLabel</translation> </message> </context> <context> <name>settingsWidget</name> <message> <location filename="ui_settings.ui" line="14"/> <source>Form</source> <translation>Formularz</translation> </message> <message> <location filename="ui_settings.ui" line="45"/> <source>Application</source> <translation>Applikacja</translation> </message> <message> <location filename="ui_settings.ui" line="54"/> <source>Fullscreen</source> <translation>Pełny ekran</translation> </message> <message> <location filename="ui_settings.ui" line="61"/> <source>Use popout keyboard</source> <translation>Użyj klawiatury ekranowej</translation> </message> <message> <location filename="ui_settings.ui" line="74"/> <source>GPS</source> <translation>GPS</translation> </message> <message> <location filename="ui_settings.ui" 
line="85"/> <source>Connect via</source> <translation>Połącz przez</translation> </message> <message> <location filename="ui_settings.ui" line="95"/> <source>Refresh List</source> <translation>Odśwież listę</translation> </message> <message> <location filename="ui_settings.ui" line="104"/> <source> Zoom to my location when GPS enabled</source> <translation> Zbliż do mojej lokalizacji, gdy GPS jest włączony</translation> </message> <message> <location filename="ui_settings.ui" line="130"/> <source>About</source> <translation>O programie</translation> </message> <message utf8="true"> <location filename="ui_settings.ui" line="164"/> <source>&lt;!DOCTYPE HTML PUBLIC &quot;-//W3C//DTD HTML 4.0//EN&quot; &quot;http://www.w3.org/TR/REC-html40/strict.dtd&quot;&gt; &lt;html&gt;&lt;head&gt;&lt;meta name=&quot;qrichtext&quot; content=&quot;1&quot; /&gt;&lt;style type=&quot;text/css&quot;&gt; p, li { white-space: pre-wrap; } &lt;/style&gt;&lt;/head&gt;&lt;body style=&quot; font-family:&apos;Segoe UI&apos;; font-weight:400; font-style:normal;&quot;&gt; &lt;p style=&quot; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;&quot;&gt;&lt;span style=&quot; font-family:&apos;MS Shell Dlg 2&apos;; font-size:8pt;&quot;&gt;Created By &lt;/span&gt;&lt;a href=&quot;mapsolutions.com.au&quot;&gt;&lt;span style=&quot; font-family:&apos;MS Shell Dlg 2&apos;; font-size:8pt; text-decoration: underline; color:#0000ff;&quot;&gt;DMS Australia&lt;/span&gt;&lt;/a&gt;&lt;/p&gt; &lt;p style=&quot; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;&quot;&gt;&lt;span style=&quot; font-family:&apos;MS Shell Dlg 2&apos;; font-size:8pt;&quot;&gt;© 2013 Digital Mapping Solutions&lt;/span&gt;&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</source> <translation>&lt;!DOCTYPE HTML PUBLIC &quot;-//W3C//DTD HTML 4.0//EN&quot; &quot;http://www.w3.org/TR/REC-html40/strict.dtd&quot;&gt; 
&lt;html&gt;&lt;head&gt;&lt;meta name=&quot;qrichtext&quot; content=&quot;1&quot; /&gt;&lt;style type=&quot;text/css&quot;&gt; p, li { white-space: pre-wrap; } &lt;/style&gt;&lt;/head&gt;&lt;body style=&quot; font-family:&apos;Segoe UI&apos;; font-weight:400; font-style:normal;&quot;&gt; &lt;p style=&quot; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;&quot;&gt;&lt;span style=&quot; font-family:&apos;MS Shell Dlg 2&apos;; font-size:8pt;&quot;&gt;Stworzone przez &lt;/span&gt;&lt;a href=&quot;mapsolutions.com.au&quot;&gt;&lt;span style=&quot; font-family:&apos;MS Shell Dlg 2&apos;; font-size:8pt; text-decoration: underline; color:#0000ff;&quot;&gt;DMS Australia&lt;/span&gt;&lt;/a&gt;&lt;/p&gt; &lt;p style=&quot; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;&quot;&gt;&lt;span style=&quot; font-family:&apos;MS Shell Dlg 2&apos;; font-size:8pt;&quot;&gt;© 2013 Digital Mapping Solutions&lt;/span&gt;&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</translation> </message> <message> <location filename="ui_settings.ui" line="184"/> <source>that&apos;s running on QGIS API:</source> <translation>Program wykorzystuje API QGIS:</translation> </message> <message> <location filename="ui_settings.ui" line="191"/> <source>{API}</source> <translation>{API}</translation> </message> <message> <location filename="ui_settings.ui" line="208"/> <source>You are running Roam version:</source> <translation>Używasz wersji Roam:</translation> </message> <message> <location filename="ui_settings.ui" line="218"/> <source>TextLabel</source> <translation>TextLabel</translation> </message> </context> </TS>
lydonchandra/Roam
src/roam/i18n/pl_PL.ts
TypeScript
gpl-2.0
25,990
/* * Translated default messages for the jQuery validation plugin. * Locale: DE (German, Deutsch) */ $.extend($.validator.messages, { required: "Dieses Feld ist ein Pflichtfeld.", maxlength: $.validator.format("Geben Sie bitte maximal {0} Zeichen ein."), minlength: $.validator.format("Geben Sie bitte mindestens {0} Zeichen ein."), rangelength: $.validator.format("Geben Sie bitte mindestens {0} und maximal {1} Zeichen ein."), email: "Geben Sie bitte eine gültige E-Mail Adresse ein.", url: "Geben Sie bitte eine gültige URL ein.", date: "Bitte geben Sie ein gültiges Datum ein.", number: "Geben Sie bitte eine Nummer ein.", digits: "Geben Sie bitte nur Ziffern ein.", equalTo: "Bitte denselben Wert wiederholen.", range: $.validator.format("Geben Sie bitte einen Wert zwischen {0} und {1} ein."), max: $.validator.format("Geben Sie bitte einen Wert kleiner oder gleich {0} ein."), min: $.validator.format("Geben Sie bitte einen Wert größer oder gleich {0} ein."), creditcard: "Geben Sie bitte eine gültige Kreditkarten-Nummer ein." });
acappellamaniac/eva_website
sites/all/modules/civicrm/bower_components/jquery-validation/src/localization/messages_de.js
JavaScript
gpl-2.0
1,082
/* * Copyright (c) 2013 Google Inc. * Copyright (C) 2015 Intel Corp. * * See file CREDITS for list of people who contributed to this * project. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but without any warranty; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc. */ /* This file is derived from the flashrom project. */ #include <arch/io.h> #include <bootstate.h> #include <console/console.h> #include <delay.h> #include <device/pci_ids.h> #include <rules.h> #include <soc/lpc.h> #include <soc/pci_devs.h> #include <spi_flash.h> #include <stdint.h> #include <stdlib.h> #include <string.h> #if ENV_SMM #define pci_read_config_byte(dev, reg, targ)\ *(targ) = pci_read_config8(dev, reg) #define pci_read_config_word(dev, reg, targ)\ *(targ) = pci_read_config16(dev, reg) #define pci_read_config_dword(dev, reg, targ)\ *(targ) = pci_read_config32(dev, reg) #define pci_write_config_byte(dev, reg, val)\ pci_write_config8(dev, reg, val) #define pci_write_config_word(dev, reg, val)\ pci_write_config16(dev, reg, val) #define pci_write_config_dword(dev, reg, val)\ pci_write_config32(dev, reg, val) #else /* ENV_SMM */ #include <device/device.h> #include <device/pci.h> #define pci_read_config_byte(dev, reg, targ)\ *(targ) = pci_read_config8(dev, reg) #define pci_read_config_word(dev, reg, targ)\ *(targ) = pci_read_config16(dev, reg) #define pci_read_config_dword(dev, reg, targ)\ *(targ) = pci_read_config32(dev, reg) #define pci_write_config_byte(dev, reg, val)\ 
pci_write_config8(dev, reg, val) #define pci_write_config_word(dev, reg, val)\ pci_write_config16(dev, reg, val) #define pci_write_config_dword(dev, reg, val)\ pci_write_config32(dev, reg, val) #endif /* ENV_SMM */ typedef struct spi_slave ich_spi_slave; static int ichspi_lock = 0; typedef struct ich9_spi_regs { uint32_t bfpr; uint16_t hsfs; uint16_t hsfc; uint32_t faddr; uint32_t _reserved0; uint32_t fdata[16]; uint32_t frap; uint32_t freg[5]; uint32_t _reserved1[3]; uint32_t pr[5]; uint32_t _reserved2[2]; uint8_t ssfs; uint8_t ssfc[3]; uint16_t preop; uint16_t optype; uint8_t opmenu[8]; } __attribute__((packed)) ich9_spi_regs; typedef struct ich_spi_controller { int locked; uint8_t *opmenu; int menubytes; uint16_t *preop; uint16_t *optype; uint32_t *addr; uint8_t *data; unsigned databytes; uint8_t *status; uint16_t *control; } ich_spi_controller; static ich_spi_controller cntlr; enum { SPIS_SCIP = 0x0001, SPIS_GRANT = 0x0002, SPIS_CDS = 0x0004, SPIS_FCERR = 0x0008, SSFS_AEL = 0x0010, SPIS_LOCK = 0x8000, SPIS_RESERVED_MASK = 0x7ff0, SSFS_RESERVED_MASK = 0x7fe2 }; enum { SPIC_SCGO = 0x000002, SPIC_ACS = 0x000004, SPIC_SPOP = 0x000008, SPIC_DBC = 0x003f00, SPIC_DS = 0x004000, SPIC_SME = 0x008000, SSFC_SCF_MASK = 0x070000, SSFC_RESERVED = 0xf80000 }; enum { HSFS_FDONE = 0x0001, HSFS_FCERR = 0x0002, HSFS_AEL = 0x0004, HSFS_BERASE_MASK = 0x0018, HSFS_BERASE_SHIFT = 3, HSFS_SCIP = 0x0020, HSFS_FDOPSS = 0x2000, HSFS_FDV = 0x4000, HSFS_FLOCKDN = 0x8000 }; enum { HSFC_FGO = 0x0001, HSFC_FCYCLE_MASK = 0x0006, HSFC_FCYCLE_SHIFT = 1, HSFC_FDBC_MASK = 0x3f00, HSFC_FDBC_SHIFT = 8, HSFC_FSMIE = 0x8000 }; enum { SPI_OPCODE_TYPE_READ_NO_ADDRESS = 0, SPI_OPCODE_TYPE_WRITE_NO_ADDRESS = 1, SPI_OPCODE_TYPE_READ_WITH_ADDRESS = 2, SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS = 3 }; #if IS_ENABLED(CONFIG_DEBUG_SPI_FLASH) static u8 readb_(void *addr) { u8 v = read8(addr); printk(BIOS_DEBUG, "0x%p --> 0x%2.2x\n", addr, v); return v; } static u16 readw_(void *addr) { u16 v = read16(addr); 
printk(BIOS_DEBUG, "0x%p --> 0x%4.4x\n", addr, v); return v; } static u32 readl_(void *addr) { u32 v = read32(addr); printk(BIOS_DEBUG, "0x%p --> 0x%8.8x\n", addr, v); return v; } static void writeb_(u8 b, void *addr) { printk(BIOS_DEBUG, "0x%p <-- 0x%2.2x\n", addr, b); write8(addr, b); } static void writew_(u16 b, void *addr) { printk(BIOS_DEBUG, "0x%p <-- 0x%4.4x\n", addr, b); write16(addr, b); } static void writel_(u32 b, void *addr) { printk(BIOS_DEBUG, "0x%p <-- 0x%8.8x\n", addr, b); write32(addr, b); } #else /* CONFIG_DEBUG_SPI_FLASH ^^^ enabled vvv NOT enabled */ #define readb_(a) read8(a) #define readw_(a) read16(a) #define readl_(a) read32(a) #define writeb_(val, addr) write8(addr, val) #define writew_(val, addr) write16(addr, val) #define writel_(val, addr) write32(addr, val) #endif /* CONFIG_DEBUG_SPI_FLASH ^^^ NOT enabled */ static void write_reg(const void *value, void *dest, uint32_t size) { const uint8_t *bvalue = value; uint8_t *bdest = dest; while (size >= 4) { writel_(*(const uint32_t *)bvalue, bdest); bdest += 4; bvalue += 4; size -= 4; } while (size) { writeb_(*bvalue, bdest); bdest++; bvalue++; size--; } } static void read_reg(void *src, void *value, uint32_t size) { uint8_t *bsrc = src; uint8_t *bvalue = value; while (size >= 4) { *(uint32_t *)bvalue = readl_(bsrc); bsrc += 4; bvalue += 4; size -= 4; } while (size) { *bvalue = readb_(bsrc); bsrc++; bvalue++; size--; } } struct spi_slave *spi_setup_slave(unsigned int bus, unsigned int cs) { ich_spi_slave *slave = malloc(sizeof(*slave)); if (!slave) { printk(BIOS_ERR, "ICH SPI: Bad allocation\n"); return NULL; } memset(slave, 0, sizeof(*slave)); slave->bus = bus; slave->cs = cs; return slave; } static ich9_spi_regs *spi_regs(void) { device_t dev; uint32_t sbase; #if ENV_SMM dev = PCI_DEV(0, LPC_DEV, LPC_FUNC); #else dev = dev_find_slot(0, PCI_DEVFN(LPC_DEV, LPC_FUNC)); #endif if (!dev) { printk(BIOS_ERR, "%s: PCI device not found", __func__); return NULL; } pci_read_config_dword(dev, SBASE, 
&sbase); sbase &= ~0x1ff; return (void *)sbase; } void spi_init(void) { ich9_spi_regs *ich9_spi; ich9_spi = spi_regs(); if (!ich9_spi) { printk(BIOS_ERR, "Not initialising spi as %s returned NULL\n", __func__); return; } ichspi_lock = readw_(&ich9_spi->hsfs) & HSFS_FLOCKDN; cntlr.opmenu = ich9_spi->opmenu; cntlr.menubytes = sizeof(ich9_spi->opmenu); cntlr.optype = &ich9_spi->optype; cntlr.addr = &ich9_spi->faddr; cntlr.data = (uint8_t *)ich9_spi->fdata; cntlr.databytes = sizeof(ich9_spi->fdata); cntlr.status = &ich9_spi->ssfs; cntlr.control = (uint16_t *)ich9_spi->ssfc; cntlr.preop = &ich9_spi->preop; } #if ENV_RAMSTAGE static void spi_init_cb(void *unused) { spi_init(); } BOOT_STATE_INIT_ENTRY(BS_DEV_INIT, BS_ON_ENTRY, spi_init_cb, NULL); #endif /* ENV_RAMSTAGE */ int spi_claim_bus(struct spi_slave *slave) { /* Handled by ICH automatically. */ return 0; } void spi_release_bus(struct spi_slave *slave) { /* Handled by ICH automatically. */ } typedef struct spi_transaction { const uint8_t *out; uint32_t bytesout; uint8_t *in; uint32_t bytesin; uint8_t type; uint8_t opcode; uint32_t offset; } spi_transaction; static inline void spi_use_out(spi_transaction *trans, unsigned bytes) { trans->out += bytes; trans->bytesout -= bytes; } static inline void spi_use_in(spi_transaction *trans, unsigned bytes) { trans->in += bytes; trans->bytesin -= bytes; } static void spi_setup_type(spi_transaction *trans) { trans->type = 0xFF; /* Try to guess spi type from read/write sizes. */ if (trans->bytesin == 0) { if (trans->bytesout > 4) /* * If bytesin = 0 and bytesout > 4, we presume this is * a write data operation, which is accompanied by an * address. 
*/ trans->type = SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS; else trans->type = SPI_OPCODE_TYPE_WRITE_NO_ADDRESS; return; } if (trans->bytesout == 1) { /* and bytesin is > 0 */ trans->type = SPI_OPCODE_TYPE_READ_NO_ADDRESS; return; } if (trans->bytesout == 4) { /* and bytesin is > 0 */ trans->type = SPI_OPCODE_TYPE_READ_WITH_ADDRESS; } /* Fast read command is called with 5 bytes instead of 4 */ if (trans->out[0] == SPI_OPCODE_FAST_READ && trans->bytesout == 5) { trans->type = SPI_OPCODE_TYPE_READ_WITH_ADDRESS; --trans->bytesout; } } static int spi_setup_opcode(spi_transaction *trans) { uint16_t optypes; uint8_t opmenu[cntlr.menubytes]; trans->opcode = trans->out[0]; spi_use_out(trans, 1); if (!ichspi_lock) { /* The lock is off, so just use index 0. */ writeb_(trans->opcode, cntlr.opmenu); optypes = readw_(cntlr.optype); optypes = (optypes & 0xfffc) | (trans->type & 0x3); writew_(optypes, cntlr.optype); return 0; } else { /* The lock is on. See if what we need is on the menu. */ uint8_t optype; uint16_t opcode_index; /* Write Enable is handled as atomic prefix */ if (trans->opcode == SPI_OPCODE_WREN) return 0; read_reg(cntlr.opmenu, opmenu, sizeof(opmenu)); for (opcode_index = 0; opcode_index < cntlr.menubytes; opcode_index++) { if (opmenu[opcode_index] == trans->opcode) break; } if (opcode_index == cntlr.menubytes) { printk(BIOS_DEBUG, "ICH SPI: Opcode %x not found\n", trans->opcode); return -1; } optypes = readw_(cntlr.optype); optype = (optypes >> (opcode_index * 2)) & 0x3; if (trans->type == SPI_OPCODE_TYPE_WRITE_NO_ADDRESS && optype == SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS && trans->bytesout >= 3) { /* We guessed wrong earlier. Fix it up. */ trans->type = optype; } if (optype != trans->type) { printk(BIOS_DEBUG, "ICH SPI: Transaction doesn't fit type %d\n", optype); return -1; } return opcode_index; } } static int spi_setup_offset(spi_transaction *trans) { /* Separate the SPI address and data. 
*/ switch (trans->type) { case SPI_OPCODE_TYPE_READ_NO_ADDRESS: case SPI_OPCODE_TYPE_WRITE_NO_ADDRESS: return 0; case SPI_OPCODE_TYPE_READ_WITH_ADDRESS: case SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS: trans->offset = ((uint32_t)trans->out[0] << 16) | ((uint32_t)trans->out[1] << 8) | ((uint32_t)trans->out[2] << 0); spi_use_out(trans, 3); return 1; default: printk(BIOS_DEBUG, "Unrecognized SPI transaction type %#x\n", trans->type); return -1; } } /* * Wait for up to 400ms til status register bit(s) turn 1 (in case wait_til_set * below is True) or 0. In case the wait was for the bit(s) to set - write * those bits back, which would cause resetting them. * * Return the last read status value on success or -1 on failure. */ static int ich_status_poll(u16 bitmask, int wait_til_set) { int timeout = 40000; /* This will result in 400 ms */ u16 status = 0; wait_til_set &= 1; while (timeout--) { status = readw_(cntlr.status); if (wait_til_set ^ ((status & bitmask) == 0)) { if (wait_til_set) writew_((status & bitmask), cntlr.status); return status; } udelay(10); } printk(BIOS_ERR, "ICH SPI: SCIP timeout, read %x, expected %x\n", status, bitmask); return -1; } unsigned int spi_crop_chunk(unsigned int cmd_len, unsigned int buf_len) { return min(cntlr.databytes, buf_len); } int spi_xfer(struct spi_slave *slave, const void *dout, unsigned int bytesout, void *din, unsigned int bytesin) { uint16_t control; int16_t opcode_index; int with_address; int status; spi_transaction trans = { dout, bytesout, din, bytesin, 0xff, 0xff, 0 }; /* There has to always at least be an opcode. */ if (!bytesout || !dout) { printk(BIOS_DEBUG, "ICH SPI: No opcode for transfer\n"); return -1; } /* Make sure if we read something we have a place to put it. 
*/ if (bytesin != 0 && !din) { printk(BIOS_DEBUG, "ICH SPI: Read but no target buffer\n"); return -1; } if (ich_status_poll(SPIS_SCIP, 0) == -1) return -1; writew_(SPIS_CDS | SPIS_FCERR, cntlr.status); spi_setup_type(&trans); opcode_index = spi_setup_opcode(&trans); if (opcode_index < 0) return -1; with_address = spi_setup_offset(&trans); if (with_address < 0) return -1; if (trans.opcode == SPI_OPCODE_WREN) { /* * Treat Write Enable as Atomic Pre-Op if possible * in order to prevent the Management Engine from * issuing a transaction between WREN and DATA. */ if (!ichspi_lock) writew_(trans.opcode, cntlr.preop); return 0; } /* Preset control fields */ control = SPIC_SCGO | ((opcode_index & 0x07) << 4); /* Issue atomic preop cycle if needed */ if (readw_(cntlr.preop)) control |= SPIC_ACS; if (!trans.bytesout && !trans.bytesin) { /* SPI addresses are 24 bit only */ if (with_address) writel_(trans.offset & 0x00FFFFFF, cntlr.addr); /* * This is a 'no data' command (like Write Enable), its * bytesout size was 1, decremented to zero while executing * spi_setup_opcode() above. Tell the chip to send the * command. */ writew_(control, cntlr.control); /* wait for the result */ status = ich_status_poll(SPIS_CDS | SPIS_FCERR, 1); if (status == -1) return -1; if (status & SPIS_FCERR) { printk(BIOS_ERR, "ICH SPI: Command transaction error\n"); return -1; } return 0; } /* * Check if this is a write command attempting to transfer more bytes * than the controller can handle. Iterations for writes are not * supported here because each SPI write command needs to be preceded * and followed by other SPI commands, and this sequence is controlled * by the SPI chip driver. */ if (trans.bytesout > cntlr.databytes) { printk(BIOS_DEBUG, "ICH SPI: Too much to write. Does your SPI chip driver use" " CONTROLLER_PAGE_LIMIT?\n"); return -1; } /* * Read or write up to databytes bytes at a time until everything has * been sent. 
*/ while (trans.bytesout || trans.bytesin) { uint32_t data_length; /* SPI addresses are 24 bit only */ writel_(trans.offset & 0x00FFFFFF, cntlr.addr); if (trans.bytesout) data_length = min(trans.bytesout, cntlr.databytes); else data_length = min(trans.bytesin, cntlr.databytes); /* Program data into FDATA0 to N */ if (trans.bytesout) { write_reg(trans.out, cntlr.data, data_length); spi_use_out(&trans, data_length); if (with_address) trans.offset += data_length; } /* Add proper control fields' values */ control &= ~((cntlr.databytes - 1) << 8); control |= SPIC_DS; control |= (data_length - 1) << 8; /* write it */ writew_(control, cntlr.control); /* Wait for Cycle Done Status or Flash Cycle Error. */ status = ich_status_poll(SPIS_CDS | SPIS_FCERR, 1); if (status == -1) return -1; if (status & SPIS_FCERR) { printk(BIOS_ERR, "ICH SPI: Data transaction error\n"); return -1; } if (trans.bytesin) { read_reg(cntlr.data, trans.in, data_length); spi_use_in(&trans, data_length); if (with_address) trans.offset += data_length; } } /* Clear atomic preop now that xfer is done */ writew_(0, cntlr.preop); return 0; }
mytbk/coreboot-cubietruck
src/soc/intel/braswell/spi.c
C
gpl-2.0
15,100
##### # FCKeditor - The text editor for Internet - http://www.fckeditor.net # Copyright (C) 2003-2007 Frederico Caldeira Knabben # # == BEGIN LICENSE == # # Licensed under the terms of any of the following licenses at your # choice: # # - GNU General Public License Version 2 or later (the "GPL") # http://www.gnu.org/licenses/gpl.html # # - GNU Lesser General Public License Version 2.1 or later (the "LGPL") # http://www.gnu.org/licenses/lgpl.html # # - Mozilla Public License Version 1.1 or later (the "MPL") # http://www.mozilla.org/MPL/MPL-1.1.html # # == END LICENSE == # # This is the File Manager Connector for Perl. ##### sub RemoveFromStart { local($sourceString, $charToRemove) = @_; $sPattern = '^' . $charToRemove . '+' ; $sourceString =~ s/^$charToRemove+//g; return $sourceString; } sub RemoveFromEnd { local($sourceString, $charToRemove) = @_; $sPattern = $charToRemove . '+$' ; $sourceString =~ s/$charToRemove+$//g; return $sourceString; } sub ConvertToXmlAttribute { local($value) = @_; return $value; # return utf8_encode(htmlspecialchars($value)); } sub specialchar_cnv { local($ch) = @_; $ch =~ s/&/&amp;/g; # & $ch =~ s/\"/&quot;/g; #" $ch =~ s/\'/&#39;/g; # ' $ch =~ s/</&lt;/g; # < $ch =~ s/>/&gt;/g; # > return($ch); } sub JS_cnv { local($ch) = @_; $ch =~ s/\"/\\\"/g; #" return($ch); } 1;
digital-one/kempston_parkes
build/assets/httpdocs/admin_/classes/components/formattedTextArea/fckeditor/editor/filemanager/connectors/perl/util.pl
Perl
gpl-2.0
1,373
/* * kernel/ksysfs.c - sysfs attributes in /sys/kernel, which * are not related to any other subsystem * * Copyright (C) 2004 Kay Sievers <kay.sievers@vrfy.org> * * This file is release under the GPLv2 * */ #include <linux/kobject.h> #include <linux/string.h> #include <linux/sysfs.h> #include <linux/export.h> #include <linux/init.h> #include <linux/kexec.h> #include <linux/profile.h> #include <linux/stat.h> #include <linux/sched.h> #include <linux/capability.h> #define KERNEL_ATTR_RO(_name) \ static struct kobj_attribute _name##_attr = __ATTR_RO(_name) #define KERNEL_ATTR_RW(_name) \ static struct kobj_attribute _name##_attr = \ __ATTR(_name, 0644, _name##_show, _name##_store) #define KERNEL_ATTR_READ_ONLY(_name) \ static struct kobj_attribute _name##_attr = \ __ATTR(_name, 0444, _name##_show, _name##_store) #if defined(CONFIG_HOTPLUG) /* current uevent sequence number */ static ssize_t uevent_seqnum_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%llu\n", (unsigned long long)uevent_seqnum); } KERNEL_ATTR_RO(uevent_seqnum); /* uevent helper program, used during early boot */ static ssize_t uevent_helper_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%s\n", uevent_helper); } static ssize_t uevent_helper_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { if (count+1 > UEVENT_HELPER_PATH_LEN) return -ENOENT; memcpy(uevent_helper, buf, count); uevent_helper[count] = '\0'; if (count && uevent_helper[count-1] == '\n') uevent_helper[count-1] = '\0'; return count; } KERNEL_ATTR_RW(uevent_helper); #endif #ifdef CONFIG_PROFILING static ssize_t profiling_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%d\n", prof_on); } static ssize_t profiling_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int ret; if (prof_on) return -EEXIST; /* * This eventually calls into 
get_option() which * has a ton of callers and is not const. It is * easiest to cast it away here. */ profile_setup((char *)buf); ret = profile_init(); if (ret) return ret; ret = create_proc_profile(); if (ret) return ret; return count; } KERNEL_ATTR_RW(profiling); #endif #ifdef CONFIG_KEXEC static ssize_t kexec_loaded_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%d\n", !!kexec_image); } KERNEL_ATTR_RO(kexec_loaded); static ssize_t kexec_crash_loaded_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%d\n", !!kexec_crash_image); } KERNEL_ATTR_RO(kexec_crash_loaded); static ssize_t kexec_crash_size_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%zu\n", crash_get_memory_size()); } static ssize_t kexec_crash_size_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned long cnt; int ret; if (strict_strtoul(buf, 0, &cnt)) return -EINVAL; ret = crash_shrink_memory(cnt); return ret < 0 ? ret : count; } KERNEL_ATTR_RW(kexec_crash_size); static ssize_t vmcoreinfo_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%lx %x\n", paddr_vmcoreinfo_note(), (unsigned int)vmcoreinfo_max_size); } KERNEL_ATTR_RO(vmcoreinfo); #endif /* CONFIG_KEXEC */ /* whether file capabilities are enabled */ static ssize_t fscaps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%d\n", file_caps_enabled); } KERNEL_ATTR_RO(fscaps); /* * Make /sys/kernel/notes give the raw contents of our kernel .notes section. 
*/ extern const void __start_notes __attribute__((weak)); extern const void __stop_notes __attribute__((weak)); #define notes_size (&__stop_notes - &__start_notes) static ssize_t notes_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { memcpy(buf, &__start_notes + off, count); return count; } static struct bin_attribute notes_attr = { .attr = { .name = "notes", .mode = S_IRUGO, }, .read = &notes_read, }; struct kobject *kernel_kobj; EXPORT_SYMBOL_GPL(kernel_kobj); static struct attribute * kernel_attrs[] = { &fscaps_attr.attr, #if defined(CONFIG_HOTPLUG) &uevent_seqnum_attr.attr, &uevent_helper_attr.attr, #endif #ifdef CONFIG_PROFILING &profiling_attr.attr, #endif #ifdef CONFIG_KEXEC &kexec_loaded_attr.attr, &kexec_crash_loaded_attr.attr, &kexec_crash_size_attr.attr, &vmcoreinfo_attr.attr, #endif NULL }; static struct attribute_group kernel_attr_group = { .attrs = kernel_attrs, }; static unsigned int Lgentle_fair_sleepers = 0; static unsigned int Larch_power = 0; extern void relay_gfs(unsigned int gfs); extern void relay_ap(unsigned int ap); static ssize_t gentle_fair_sleepers_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%u\n", Lgentle_fair_sleepers); } static ssize_t gentle_fair_sleepers_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned int input; int ret; ret = sscanf(buf, "%u", &input); if (input != 0 && input != 1) input = 0; Lgentle_fair_sleepers = input; relay_gfs(Lgentle_fair_sleepers); return count; } KERNEL_ATTR_RW(gentle_fair_sleepers); static ssize_t arch_power_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%u\n", Larch_power); } static ssize_t arch_power_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned int input; int ret; ret = sscanf(buf, "%u", &input); if (input != 0 && input != 1) input = 0; 
Larch_power = input; relay_ap(Larch_power); return count; } KERNEL_ATTR_RW(arch_power); static struct attribute * sched_features_attrs[] = { &gentle_fair_sleepers_attr.attr, &arch_power_attr.attr, NULL }; static struct attribute_group sched_features_attr_group = { .attrs = sched_features_attrs, }; /* Initialize fast charge sysfs folder */ static struct kobject *sched_features_kobj; static int __init ksysfs_init(void) { int error; kernel_kobj = kobject_create_and_add("kernel", NULL); if (!kernel_kobj) { error = -ENOMEM; goto exit; } error = sysfs_create_group(kernel_kobj, &kernel_attr_group); if (error) goto kset_exit; sched_features_kobj = kobject_create_and_add("sched", kernel_kobj); error = sysfs_create_group(sched_features_kobj, &sched_features_attr_group); if (error) kobject_put(sched_features_kobj); if (notes_size > 0) { notes_attr.size = notes_size; error = sysfs_create_bin_file(kernel_kobj, &notes_attr); if (error) goto group_exit; } return 0; group_exit: sysfs_remove_group(kernel_kobj, &kernel_attr_group); kset_exit: kobject_put(kernel_kobj); exit: return error; } core_initcall(ksysfs_init);
willizambrano01/Evolution_victara
kernel/ksysfs.c
C
gpl-2.0
7,055
/******************************************************************************* Copyright (C) Marvell International Ltd. and its affiliates This software file (the "File") is owned and distributed by Marvell International Ltd. and/or its affiliates ("Marvell") under the following alternative licensing terms. Once you have made an election to distribute the File under one of the following license alternatives, please (i) delete this introductory statement regarding license alternatives, (ii) delete the two license alternatives that you have not elected to use and (iii) preserve the Marvell copyright notice above. ******************************************************************************** Marvell GPL License Option If you received this File from Marvell, you may opt to use, redistribute and/or modify this File in accordance with the terms and conditions of the General Public License Version 2, June 1991 (the "GPL License"), a copy of which is available along with the File in the license.txt file or by writing to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or on the worldwide web at http://www.gnu.org/licenses/gpl.txt. THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY DISCLAIMED. The GPL License provides additional details about this warranty disclaimer. *******************************************************************************/ /******************************************************************************* * file_name - mvLinuxIalHt.c * * DESCRIPTION: implementation for Linux IAL. 
* * DEPENDENCIES: * mvLinuxIalHt.h * mvLinuxIalLib.h * Linux Os header files * Core driver header files * * *******************************************************************************/ /* includes */ #ifndef LINUX_VERSION_CODE #include <linux/version.h> #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) #error "This driver works only with kernel 2.4.0 or higher!" #endif #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))) #error "This driver does not support kernel 2.5!" #endif #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/kdev_t.h> #include <linux/hdreg.h> #ifdef CONFIG_MV_INCLUDE_INTEG_SATA #include "ctrlEnv/mvCtrlEnvLib.h" #include "mvSysSataApi.h" #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) #include <linux/blk.h> #include "scsi.h" #include "hosts.h" #else #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #endif #include <linux/timer.h> #include <linux/spinlock.h> #include <asm/dma.h> #include <asm/system.h> #include <asm/io.h> #include "mvLinuxIalHt.h" #include "mvRegs.h" #include "mvIALCommon.h" #include "mvLinuxIalSmart.h" extern Scsi_Host_Template driver_template; static int ncq_disable = 0; static int pm_ncq_disable = 0; static void mv_ial_init_log(void); static char mv_ial_proc_version[]="Version_1_1"; extern void release_ata_mem(struct mv_comp_info * pInfo); extern MV_BOOLEAN IALCompletion(struct mvSataAdapter *pSataAdapter, MV_SATA_SCSI_CMD_BLOCK *pCmdBlock); static struct pci_device_id mvSata_pci_table[] = { {MV_SATA_VENDOR_ID, MV_SATA_DEVICE_ID_5080, PCI_ANY_ID, PCI_ANY_ID, 0, 0}, {MV_SATA_VENDOR_ID, MV_SATA_DEVICE_ID_5081, PCI_ANY_ID, PCI_ANY_ID, 0, 0}, 
{MV_SATA_VENDOR_ID, MV_SATA_DEVICE_ID_5040, PCI_ANY_ID, PCI_ANY_ID, 0, 0}, {MV_SATA_VENDOR_ID, MV_SATA_DEVICE_ID_5041, PCI_ANY_ID, PCI_ANY_ID, 0, 0}, {MV_SATA_VENDOR_ID, MV_SATA_DEVICE_ID_6081, PCI_ANY_ID, PCI_ANY_ID, 0, 0}, {MV_SATA_VENDOR_ID, MV_SATA_DEVICE_ID_6041, PCI_ANY_ID, PCI_ANY_ID, 0, 0}, {MV_SATA_VENDOR_ID, MV_SATA_DEVICE_ID_6042, PCI_ANY_ID, PCI_ANY_ID, 0, 0}, {MV_SATA_VENDOR_ID, MV_SATA_DEVICE_ID_7042, PCI_ANY_ID, PCI_ANY_ID, 0, 0}, {0,} }; int adapterId = 0; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) #ifndef __devexit_p #define __devexit_p(x) x #endif static void mv_ial_ht_select_queue_depths (struct Scsi_Host* pHost, struct scsi_device* pDevs); static inline struct Scsi_Host *scsi_host_alloc(Scsi_Host_Template *t, size_t s) { return scsi_register(t, s); } static inline void scsi_host_put(struct Scsi_Host *h) { scsi_unregister(h); } #define scsi_scan_host(x...) #define scsi_remove_host(x...) #else static int mv_ial_ht_slave_configure (struct scsi_device* pDevs); static int __devinit mv_ial_probe_device(struct pci_dev *pci_dev, const struct pci_device_id *ent); static void __devexit mv_ial_remove_device(struct pci_dev *pci_dev); MODULE_DEVICE_TABLE(pci, mvSata_pci_table); static char mv_hot_plug_name[] = "mvSata"; static struct pci_driver mv_ial_pci_driver = { .name = mv_hot_plug_name, .id_table = mvSata_pci_table, .probe = mv_ial_probe_device, .remove = __devexit_p(mv_ial_remove_device), }; #ifdef CONFIG_MV_INCLUDE_INTEG_SATA static int __devinit mv_ial_init_soc_sata(void); #endif IAL_ADAPTER_T *pSocAdapter = NULL; static int __init mv_ial_init(void) { mv_ial_init_log(); mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG, "mvSata init.\n"); driver_template.module = THIS_MODULE; if (ncq_disable) disableNCQ = MV_TRUE; if (pm_ncq_disable) disablePM_NCQ = MV_TRUE; #ifdef CONFIG_MV_INCLUDE_INTEG_SATA if (MV_FALSE == mvCtrlPwrClckGet(SATA_UNIT_ID, 0)) { printk("\nWarning Sata is Powered Off\n"); } else { printk("Integrated Sata device found\n"); mv_ial_init_soc_sata(); } 
#endif return (int)pci_register_driver(&mv_ial_pci_driver); } static void __exit mv_ial_exit(void) { #ifdef CONFIG_MV_INCLUDE_INTEG_SATA mv_ial_remove_device(NULL); #endif pci_unregister_driver(&mv_ial_pci_driver); mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG, "mvSata exit.\n"); } module_init(mv_ial_init); module_exit(mv_ial_exit); #endif static void mv_ial_init_log(void) { #ifdef MV_LOGGER char *szModules[] = {"Core Driver", "SAL", "Common IAL", "Linux IAL" }; #if defined (MV_LOG_DEBUG) mvLogRegisterModule(MV_CORE_DRIVER_LOG_ID, MV_DEBUG_ENABLE_ALL, szModules[MV_CORE_DRIVER_LOG_ID]); mvLogRegisterModule(MV_SAL_LOG_ID, MV_DEBUG_ENABLE_ALL, szModules[MV_SAL_LOG_ID]); mvLogRegisterModule(MV_IAL_COMMON_LOG_ID, MV_DEBUG_ENABLE_ALL, szModules[MV_IAL_COMMON_LOG_ID]); mvLogRegisterModule(MV_IAL_LOG_ID, MV_DEBUG_ENABLE_ALL, szModules[MV_IAL_LOG_ID]); #elif defined (MV_LOG_ERROR) mvLogRegisterModule(MV_CORE_DRIVER_LOG_ID, MV_DEBUG_FATAL_ERROR | MV_DEBUG_ERROR | MV_DEBUG_INFO, szModules[MV_CORE_DRIVER_LOG_ID]); mvLogRegisterModule(MV_SAL_LOG_ID, MV_DEBUG_FATAL_ERROR | MV_DEBUG_ERROR | MV_DEBUG_INFO, szModules[MV_SAL_LOG_ID]); mvLogRegisterModule(MV_IAL_COMMON_LOG_ID, MV_DEBUG_FATAL_ERROR | MV_DEBUG_ERROR | MV_DEBUG_INFO, szModules[MV_IAL_COMMON_LOG_ID]); mvLogRegisterModule(MV_IAL_LOG_ID, MV_DEBUG_FATAL_ERROR | MV_DEBUG_ERROR | MV_DEBUG_INFO, szModules[MV_IAL_LOG_ID]); #endif #endif } /**************************************************************** * Name: set_device_regs * * Description: initialize the device registers. * * Parameters: pMvSataAdapter, pointer to the Device data structure. * pcidev, pointer to the pci device data structure. * * Returns: =0 ->success, < 0 ->failure. 
* ****************************************************************/ static int set_device_regs(MV_SATA_ADAPTER *pMvSataAdapter, struct pci_dev *pcidev) { pMvSataAdapter->intCoalThre[0]= MV_IAL_HT_SACOALT_DEFAULT; pMvSataAdapter->intCoalThre[1]= MV_IAL_HT_SACOALT_DEFAULT; pMvSataAdapter->intTimeThre[0] = MV_IAL_HT_SAITMTH_DEFAULT; pMvSataAdapter->intTimeThre[1] = MV_IAL_HT_SAITMTH_DEFAULT; pMvSataAdapter->pciCommand = MV_PCI_COMMAND_REG_DEFAULT; pMvSataAdapter->pciSerrMask = MV_PCI_SERR_MASK_REG_ENABLE_ALL; pMvSataAdapter->pciInterruptMask = MV_PCI_INTERRUPT_MASK_REG_ENABLE_ALL; pMvSataAdapter->mvSataEventNotify = mv_ial_lib_event_notify; return 0; } static int mv_ial_get_num_of_ports(const struct pci_device_id *id) { switch(id->device) { case MV_SATA_DEVICE_ID_5080: case MV_SATA_DEVICE_ID_5081: case MV_SATA_DEVICE_ID_6081: return 8; case MV_SATA_DEVICE_ID_5040: case MV_SATA_DEVICE_ID_5041: case MV_SATA_DEVICE_ID_6041: case MV_SATA_DEVICE_ID_6042: case MV_SATA_DEVICE_ID_7042: return 4; default: mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_FATAL_ERROR, "getMaxNumberOfPorts() Unknown device ID.\n"); return 0; } } static void mv_ial_free_scsi_hosts(IAL_ADAPTER_T *pAdapter, MV_BOOLEAN freeAdapter) { int i; for (i = 0; i < pAdapter->maxHosts; i++) { if (pAdapter->host[i] != NULL) { mv_ial_lib_prd_destroy(pAdapter->host[i]); scsi_host_put(pAdapter->host[i]->scsihost); pAdapter->host[i] = NULL; } } pAdapter->activeHosts = 0; if (MV_TRUE == freeAdapter) { mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG, "[%d] freeing Adapter data structure.\n", pAdapter->mvSataAdapter.adapterId); kfree(pAdapter); } } static int __devinit mv_ial_probe_device(struct pci_dev *pcidev, const struct pci_device_id *id) { MV_SATA_ADAPTER *pMvSataAdapter; IAL_ADAPTER_T *pAdapter; MV_U8 i; pci_set_drvdata(pcidev, NULL); if (pci_enable_device(pcidev)) { mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR, "pci_enable_device() failed\n"); return -ENODEV; } pci_set_master(pcidev); if (0 == pci_set_dma_mask(pcidev, 0xffffffffffffffffULL)) { 
mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG,"using 64-bit DMA.\n"); } else if (0 == pci_set_dma_mask(pcidev, 0xffffffffUL)) { mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG, "using 32-bit DMA.\n"); } else { mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR, "DMA 32-bit not supported" " in the system\n"); pci_disable_device(pcidev); return -ENODEV; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) if (pci_request_regions(pcidev, mv_hot_plug_name) != 0) { mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR, "pci_request_regions() failed\n"); pci_disable_device(pcidev); return -ENOMEM; } #endif pAdapter = (IAL_ADAPTER_T*)kmalloc(sizeof(IAL_ADAPTER_T), GFP_ATOMIC); if (pAdapter == NULL) { mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR, "IAL Adapter allocation failed\n"); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) pci_release_regions(pcidev); #endif pci_disable_device(pcidev); return -ENOMEM; } memset(pAdapter, 0, sizeof(IAL_ADAPTER_T)); pAdapter->activeHosts = 0; pAdapter->maxHosts = mv_ial_get_num_of_ports(id); if (pAdapter->maxHosts == 0) { mv_ial_free_scsi_hosts(pAdapter, MV_TRUE); mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR, "mv_ial_get_num_of_ports() failed\n"); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) pci_release_regions(pcidev); #endif pci_disable_device(pcidev); return -ENOMEM; } for (i = 0; i < pAdapter->maxHosts; i++) { struct Scsi_Host *pshost = scsi_host_alloc(&driver_template, sizeof(IAL_HOST_T)); if (pshost == NULL) { mv_ial_free_scsi_hosts(pAdapter, MV_TRUE); mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR, "Scsi_Host allocation failed\n"); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) pci_release_regions(pcidev); #endif pci_disable_device(pcidev); return -ENOMEM; } pAdapter->host[i] = HOSTDATA(pshost); memset(pAdapter->host[i], 0, sizeof(IAL_HOST_T)); pAdapter->host[i]->scsihost = pshost; pAdapter->host[i]->pAdapter = pAdapter; pAdapter->host[i]->channelIndex = (MV_U8)i; pAdapter->activeHosts |= (1 << i); } pAdapter->pcidev = pcidev; pMvSataAdapter = &(pAdapter->mvSataAdapter); pMvSataAdapter->IALData = 
pAdapter; spin_lock_init (&pAdapter->adapter_lock); for (i = 0; i < pAdapter->maxHosts; i++) { pAdapter->host[i]->scsi_cmnd_done_head = NULL; pAdapter->host[i]->scsi_cmnd_done_tail = NULL; } pAdapter->host[0]->scsihost->base = pci_resource_start(pcidev, 0); for (i = 1; i < pAdapter->maxHosts; i++) { if (pAdapter->host[i] != NULL) pAdapter->host[i]->scsihost->base = pAdapter->host[0]->scsihost->base; } pMvSataAdapter->adapterIoBaseAddress = (MV_BUS_ADDR_T)ioremap(pAdapter->host[0]->scsihost->base, pci_resource_len(pcidev, 0)); if (!pMvSataAdapter->adapterIoBaseAddress) { mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR, "Failed to remap memory io spcae\n"); mv_ial_free_scsi_hosts(pAdapter, MV_TRUE); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) pci_release_regions(pcidev); #endif pci_disable_device(pcidev); return -ENOMEM; } else { mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG, "io base address 0x%08lx\n", (ulong)pMvSataAdapter->adapterIoBaseAddress); } pMvSataAdapter->adapterId = adapterId++; /* get the revision ID */ if (pci_read_config_byte(pcidev, PCI_REVISION_ID, &pAdapter->rev_id)) { printk(KERN_WARNING "mvSata: Failed to get revision id.\n"); iounmap(pMvSataAdapter->adapterIoBaseAddress); mv_ial_free_scsi_hosts(pAdapter, MV_TRUE); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) pci_release_regions(pcidev); #endif pci_disable_device(pcidev); return -ENOMEM; } pMvSataAdapter->pciConfigRevisionId = pAdapter->rev_id; pMvSataAdapter->pciConfigDeviceId = id->device; if (set_device_regs(pMvSataAdapter, pcidev)) { iounmap(pMvSataAdapter->adapterIoBaseAddress); mv_ial_free_scsi_hosts(pAdapter, MV_TRUE); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) pci_release_regions(pcidev); #endif pci_disable_device(pcidev); return -ENOMEM; } /*Do not allow hotplug handler to work*/ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) init_MUTEX(&pAdapter->rescan_mutex); atomic_set(&pAdapter->stopped, 1); #endif if (mvSataInitAdapter(pMvSataAdapter) == MV_FALSE) { mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR, 
"[%d]: core failed to initialize the adapter\n", pMvSataAdapter->adapterId); iounmap(pMvSataAdapter->adapterIoBaseAddress); mv_ial_free_scsi_hosts(pAdapter, MV_TRUE); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) pci_release_regions(pcidev); #endif pci_disable_device(pcidev); return -ENOMEM; } if (mv_ial_lib_allocate_edma_queues(pAdapter)) { mvLogMsg(MV_IAL_LOG_ID,MV_DEBUG_ERROR, "Failed to allocate memory for EDMA queues\n"); iounmap(pMvSataAdapter->adapterIoBaseAddress); mv_ial_free_scsi_hosts(pAdapter, MV_TRUE); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) pci_release_regions(pcidev); #endif pci_disable_device(pcidev); return -ENOMEM; } for (i = 0; i < pAdapter->maxHosts; i++) { if ((pAdapter->activeHosts & (1 << i)) == 0) { continue; } if (mv_ial_lib_prd_init(pAdapter->host[i])) { mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR, "Failed to init PRD memory manager - host %d\n", i); iounmap(pMvSataAdapter->adapterIoBaseAddress); mv_ial_lib_free_edma_queues(pAdapter); mv_ial_free_scsi_hosts(pAdapter, MV_TRUE); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) pci_release_regions(pcidev); #endif pci_disable_device(pcidev); return -ENOMEM; } } pAdapter->ataScsiAdapterExt = (MV_SAL_ADAPTER_EXTENSION*)kmalloc(sizeof(MV_SAL_ADAPTER_EXTENSION), GFP_ATOMIC); if (pAdapter->ataScsiAdapterExt == NULL) { mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR,"[%d]: out of memory, failed to allocate MV_SAL_ADAPTER_EXTENSION\n", pAdapter->mvSataAdapter.adapterId); iounmap(pMvSataAdapter->adapterIoBaseAddress); mv_ial_lib_free_edma_queues(pAdapter); mv_ial_free_scsi_hosts(pAdapter, MV_TRUE); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) pci_release_regions(pcidev); #endif pci_disable_device(pcidev); return -ENOMEM; } mvSataScsiInitAdapterExt(pAdapter->ataScsiAdapterExt, pMvSataAdapter); /* let SAL report only the BUS RESET UA event*/ pAdapter->ataScsiAdapterExt->UAMask = MV_BIT0; /* enable device interrupts even if no storage devices connected now*/ #ifdef MV_SUPPORT_MSI { int err; if ((err = 
pci_enable_msi(pcidev))) { mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR, "[%d]: Unable to allocate MSI interrupt Error: %d\n", pMvSataAdapter->adapterId, err); } } #endif if (request_irq(pcidev->irq, mv_ial_lib_int_handler, (IRQF_DISABLED | IRQF_SAMPLE_RANDOM | IRQF_SHARED), "mvSata", pAdapter) < 0) { mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR, "[%d]: unable to allocate IRQ for controler\n", pMvSataAdapter->adapterId); #ifdef MV_SUPPORT_MSI pci_disable_msi(pAdapter->pcidev); #endif kfree(pAdapter->ataScsiAdapterExt); iounmap(pMvSataAdapter->adapterIoBaseAddress); mv_ial_lib_free_edma_queues(pAdapter); mv_ial_free_scsi_hosts(pAdapter, MV_TRUE); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) pci_release_regions(pcidev); #endif pci_disable_device(pcidev); return -ENOMEM; } for (i = 0; i < pAdapter->maxHosts; i++) { if ((pAdapter->activeHosts & (1 << i)) == 0) { continue; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) scsi_set_device(pAdapter->host[i]->scsihost, &pcidev->dev); #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) scsi_set_pci_device(pAdapter->host[i]->scsihost, pcidev); #endif pAdapter->host[i]->scsihost->irq = pcidev->irq; /* each SATA channel will emulate scsi host !!!*/ if (pMvSataAdapter->sataAdapterGeneration == MV_SATA_GEN_I) { pAdapter->host[i]->scsihost->max_id = 1; } else { pAdapter->host[i]->scsihost->max_id = MV_SATA_PM_MAX_PORTS; } pAdapter->host[i]->scsihost->max_lun = 1; pAdapter->host[i]->scsihost->max_channel = 0; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) pAdapter->host[i]->scsihost->select_queue_depths = mv_ial_ht_select_queue_depths; #endif } if (MV_FALSE == mvAdapterStartInitialization(pMvSataAdapter, &pAdapter->ialCommonExt, pAdapter->ataScsiAdapterExt)) { mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR, "[%d]: mvAdapterStartInitialization" " Failed\n", pMvSataAdapter->adapterId); free_irq (pcidev->irq, pMvSataAdapter); #ifdef MV_SUPPORT_MSI pci_disable_msi(pAdapter->pcidev); #endif 
kfree(pAdapter->ataScsiAdapterExt); iounmap(pMvSataAdapter->adapterIoBaseAddress); mv_ial_lib_free_edma_queues(pAdapter); mv_ial_free_scsi_hosts(pAdapter, MV_TRUE); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) pci_release_regions(pcidev); #endif pci_disable_device(pcidev); return -ENOMEM; } pci_set_drvdata(pcidev, pAdapter); for (i = 0; i < pAdapter->maxHosts; i++) { if ((pAdapter->activeHosts & (1 << i)) == 0) { continue; } mv_ial_block_requests(pAdapter, i); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) if (scsi_add_host(pAdapter->host[i]->scsihost, &pcidev->dev) != 0) { mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR, "[%d]: scsi_add_host() failed.\n" , pMvSataAdapter->adapterId); free_irq (pcidev->irq, pMvSataAdapter); #ifdef MV_SUPPORT_MSI pci_disable_msi(pAdapter->pcidev); #endif kfree(pAdapter->ataScsiAdapterExt); iounmap(pMvSataAdapter->adapterIoBaseAddress); mv_ial_lib_free_edma_queues(pAdapter); mv_ial_free_scsi_hosts(pAdapter, MV_TRUE); pci_release_regions(pcidev); pci_disable_device(pcidev); return -ENODEV; } #endif } pAdapter->stopAsyncTimer = MV_FALSE; init_timer(&pAdapter->asyncStartTimer); pAdapter->asyncStartTimer.data = (unsigned long)pAdapter; pAdapter->asyncStartTimer.function = asyncStartTimerFunction; pAdapter->asyncStartTimer.expires = jiffies + MV_LINUX_ASYNC_TIMER_PERIOD; add_timer (&pAdapter->asyncStartTimer); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) for (i = 0; i < pAdapter->maxHosts; i++) { if ((pAdapter->activeHosts & (1 << i)) != 0) { scsi_scan_host(pAdapter->host[i]->scsihost); } } /*Enable hotplug handler*/ atomic_set(&pAdapter->stopped, 0); #endif return 0; } #ifdef CONFIG_MV_INCLUDE_INTEG_SATA static int __devinit mv_ial_init_soc_sata(void) { MV_SATA_ADAPTER *pMvSataAdapter; IAL_ADAPTER_T *pAdapter; MV_U8 i; #if defined(CONFIG_MV78200) || defined(CONFIG_MV632X) if (MV_FALSE == mvSocUnitIsMappedToThisCpu(SATA)) { printk(KERN_INFO"Integrated SATA is not mapped to this CPU\n"); return -ENODEV; } #endif mvSysSataWinInit(); pAdapter 
= (IAL_ADAPTER_T*)kmalloc(sizeof(IAL_ADAPTER_T), GFP_ATOMIC); if (pAdapter == NULL) { mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR, "IAL Adapter allocation failed\n"); return -ENOMEM; } pSocAdapter = pAdapter; memset(pAdapter, 0, sizeof(IAL_ADAPTER_T)); pAdapter->activeHosts = 0; if(MV_5182_DEV_ID == mvCtrlModelGet()) pAdapter->maxHosts = MV_SATA_5182_PORT_NUM; else if(MV_5082_DEV_ID == mvCtrlModelGet()) pAdapter->maxHosts = MV_SATA_5082_PORT_NUM; else if(MV_6082_DEV_ID == mvCtrlModelGet()) pAdapter->maxHosts = MV_SATA_6082_PORT_NUM; #ifdef MV88F6281 else if(MV_6281_DEV_ID == mvCtrlModelGet()) pAdapter->maxHosts = MV_SATA_6281_PORT_NUM; else if(MV_6192_DEV_ID == mvCtrlModelGet()) pAdapter->maxHosts = MV_SATA_6192_PORT_NUM; else if(MV_6190_DEV_ID == mvCtrlModelGet()) pAdapter->maxHosts = MV_SATA_6190_PORT_NUM; else if(MV_6282_DEV_ID == mvCtrlModelGet()) pAdapter->maxHosts = MV_SATA_6282_PORT_NUM; #endif else if ((mvCtrlModelGet() == MV_78100_DEV_ID) || (mvCtrlModelGet() == MV_78200_DEV_ID) || (mvCtrlModelGet() == MV_78XX0_DEV_ID)) pAdapter->maxHosts = MV_SATA_78XX0_PORT_NUM; else if (mvCtrlModelGet() == MV_76100_DEV_ID) pAdapter->maxHosts = MV_SATA_76100_PORT_NUM; else if (mvCtrlModelGet() == MV_6323_DEV_ID) pAdapter->maxHosts = MV_SATA_6323_PORT_NUM; else if ((mvCtrlModelGet() == MV_6510_DEV_ID) || (mvCtrlModelGet() == MV_6530_DEV_ID) || (mvCtrlModelGet() == MV_6550_DEV_ID) || (mvCtrlModelGet() == MV_6560_DEV_ID)) pAdapter->maxHosts = MV_SATA_65XX_PORT_NUM; else if ((mvCtrlModelGet() == MV_78460_DEV_ID) || (mvCtrlModelGet() == MV_78260_DEV_ID) || (mvCtrlModelGet() == MV_78230_DEV_ID) || (mvCtrlModelGet() == MV_78160_DEV_ID) || (mvCtrlModelGet() == MV_78130_DEV_ID)) pAdapter->maxHosts = MV_SATA_78460_PORT_NUM; else if (mvCtrlModelGet() == MV_6710_DEV_ID) pAdapter->maxHosts = MV_SATA_6710_PORT_NUM; for (i = 0; i < pAdapter->maxHosts; i++) { if (MV_FALSE == mvCtrlPwrClckGet(SATA_UNIT_ID, (MV_U32)i)) { printk("Warning: SATA %d is powered off\n", i); 
mv_ial_free_scsi_hosts(pAdapter, MV_TRUE); return -ENOMEM; } } for (i = 0; i < pAdapter->maxHosts; i++) { struct Scsi_Host *pshost = scsi_host_alloc(&driver_template, sizeof(IAL_HOST_T)); if (pshost == NULL) { mv_ial_free_scsi_hosts(pAdapter, MV_TRUE); mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR, "Scsi_Host allocation failed\n"); return -ENOMEM; } pAdapter->host[i] = HOSTDATA(pshost); memset(pAdapter->host[i], 0, sizeof(IAL_HOST_T)); pAdapter->host[i]->scsihost = pshost; pAdapter->host[i]->pAdapter = pAdapter; pAdapter->host[i]->channelIndex = (MV_U8)i; pAdapter->activeHosts |= (1 << i); } pAdapter->pcidev = NULL; pMvSataAdapter = &(pAdapter->mvSataAdapter); pMvSataAdapter->IALData = pAdapter; spin_lock_init (&pAdapter->adapter_lock); for (i = 0; i < pAdapter->maxHosts; i++) { pAdapter->host[i]->scsi_cmnd_done_head = NULL; pAdapter->host[i]->scsi_cmnd_done_tail = NULL; } pAdapter->host[0]->scsihost->base = 0/*pci_resource_start(pcidev, 0)*/; for (i = 1; i < pAdapter->maxHosts; i++) { if (pAdapter->host[i] != NULL) pAdapter->host[i]->scsihost->base = pAdapter->host[0]->scsihost->base; } pMvSataAdapter->adapterIoBaseAddress = (MV_BUS_ADDR_T)(INTER_REGS_BASE + MV_SATA_REGS_OFFSET - 0x20000); mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG, "io base address 0x%08lx\n", (ulong)pMvSataAdapter->adapterIoBaseAddress); pMvSataAdapter->adapterId = adapterId++; /* get the revision ID */ pMvSataAdapter->pciConfigRevisionId = 0; pMvSataAdapter->pciConfigDeviceId = mvCtrlModelGet(); if (set_device_regs(pMvSataAdapter, NULL)) { mv_ial_free_scsi_hosts(pAdapter, MV_TRUE); return -ENOMEM; } /*Do not allow hotplug handler to work*/ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) init_MUTEX(&pAdapter->rescan_mutex); atomic_set(&pAdapter->stopped, 1); #endif if (mvSataInitAdapter(pMvSataAdapter) == MV_FALSE) { mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR, "[%d]: core failed to initialize the adapter\n", pMvSataAdapter->adapterId); mv_ial_free_scsi_hosts(pAdapter, MV_TRUE); return -ENOMEM; } if 
(mv_ial_lib_allocate_edma_queues(pAdapter)) { mvLogMsg(MV_IAL_LOG_ID,MV_DEBUG_ERROR, "Failed to allocate memory for EDMA queues\n"); mv_ial_free_scsi_hosts(pAdapter, MV_TRUE); return -ENOMEM; } for (i = 0; i < pAdapter->maxHosts; i++) { if ((pAdapter->activeHosts & (1 << i)) == 0) { continue; } if (mv_ial_lib_prd_init(pAdapter->host[i])) { mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR, "Failed to init PRD memory manager - host %d\n", i); mv_ial_lib_free_edma_queues(pAdapter); mv_ial_free_scsi_hosts(pAdapter, MV_TRUE); return -ENOMEM; } } pAdapter->ataScsiAdapterExt = (MV_SAL_ADAPTER_EXTENSION*)kmalloc(sizeof(MV_SAL_ADAPTER_EXTENSION), GFP_ATOMIC); if (pAdapter->ataScsiAdapterExt == NULL) { mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR,"[%d]: out of memory, failed to allocate MV_SAL_ADAPTER_EXTENSION\n", pAdapter->mvSataAdapter.adapterId); mv_ial_lib_free_edma_queues(pAdapter); mv_ial_free_scsi_hosts(pAdapter, MV_TRUE); return -ENOMEM; } mvSataScsiInitAdapterExt(pAdapter->ataScsiAdapterExt, pMvSataAdapter); /* let SAL report only the BUS RESET UA event*/ pAdapter->ataScsiAdapterExt->UAMask = MV_BIT0; /* enable device interrupts even if no storage devices connected now*/ if (request_irq(SATA_IRQ_NUM, mv_ial_lib_int_handler, (IRQF_DISABLED | IRQF_SAMPLE_RANDOM | IRQF_SHARED), "mvSata", pAdapter) < 0) { mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR, "[%d]: unable to allocate IRQ for controler\n", pMvSataAdapter->adapterId); kfree(pAdapter->ataScsiAdapterExt); mv_ial_lib_free_edma_queues(pAdapter); mv_ial_free_scsi_hosts(pAdapter, MV_TRUE); return -ENOMEM; } for (i = 0; i < pAdapter->maxHosts; i++) { if ((pAdapter->activeHosts & (1 << i)) == 0) { continue; } pAdapter->host[i]->scsihost->irq = SATA_IRQ_NUM; pAdapter->host[i]->scsihost->max_id = MV_SATA_PM_MAX_PORTS; pAdapter->host[i]->scsihost->max_lun = 1; pAdapter->host[i]->scsihost->max_channel = 0; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) pAdapter->host[i]->scsihost->select_queue_depths = mv_ial_ht_select_queue_depths; #endif } if 
(MV_FALSE == mvAdapterStartInitialization(pMvSataAdapter, &pAdapter->ialCommonExt, pAdapter->ataScsiAdapterExt)) { mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR, "[%d]: mvAdapterStartInitialization" " Failed\n", pMvSataAdapter->adapterId); free_irq (SATA_IRQ_NUM, pMvSataAdapter); kfree(pAdapter->ataScsiAdapterExt); mv_ial_lib_free_edma_queues(pAdapter); mv_ial_free_scsi_hosts(pAdapter, MV_TRUE); return -ENOMEM; } for (i = 0; i < pAdapter->maxHosts; i++) { if ((pAdapter->activeHosts & (1 << i)) == 0) { continue; } mv_ial_block_requests(pAdapter, i); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) if (scsi_add_host(pAdapter->host[i]->scsihost, NULL) != 0) { mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR, "[%d]: scsi_add_host() failed.\n" , pMvSataAdapter->adapterId); free_irq (SATA_IRQ_NUM , pMvSataAdapter); kfree(pAdapter->ataScsiAdapterExt); mv_ial_lib_free_edma_queues(pAdapter); mv_ial_free_scsi_hosts(pAdapter, MV_TRUE); return -ENODEV; } #endif } pAdapter->stopAsyncTimer = MV_FALSE; init_timer(&pAdapter->asyncStartTimer); pAdapter->asyncStartTimer.data = (unsigned long)pAdapter; pAdapter->asyncStartTimer.function = asyncStartTimerFunction; pAdapter->asyncStartTimer.expires = jiffies + MV_LINUX_ASYNC_TIMER_PERIOD; add_timer (&pAdapter->asyncStartTimer); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) for (i = 0; i < pAdapter->maxHosts; i++) { if ((pAdapter->activeHosts & (1 << i)) != 0) { scsi_scan_host(pAdapter->host[i]->scsihost); } } /*Enable hotplug handler*/ atomic_set(&pAdapter->stopped, 0); #endif return 0; } #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) /**************************************************************** * Name: mv_ial_ht_detect * * Description: Detect and initialize our boards. * * Parameters: tpnt - Pointer to SCSI host template structure. * * Returns: Number of adapters installed. 
* ****************************************************************/ int mv_ial_ht_detect (Scsi_Host_Template *tpnt) { int num_hosts=0; struct pci_dev *pcidev = NULL; int index; struct pci_device_id *id = &mvSata_pci_table[0]; mv_ial_init_log(); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) if (!pci_present()) { printk ("mvSata: PCI BIOS not present\n"); return 0; } #endif if (sizeof(struct mv_comp_info) > sizeof(Scsi_Pointer)) { mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR, "WARNING mv_comp_info must be " "re-defined - its too big"); return -1; } index = 0; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) spin_unlock_irq (&io_request_lock); #endif while (1) { if (id[index].device == 0) { break; } pcidev = NULL; while ((pcidev = pci_find_device (MV_SATA_VENDOR_ID, id[index].device, pcidev)) != NULL) { mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG, "PCI device found, DeviceId 0x%x " "BAR0=%lx\n", id[index].device, pci_resource_start(pcidev,0)); if (mv_ial_probe_device(pcidev, &id[index]) == 0) { IAL_ADAPTER_T *pAdapter = pci_get_drvdata(pcidev); num_hosts += pAdapter->maxHosts; } } index ++; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) spin_lock_irq (&io_request_lock); #endif return num_hosts; } #endif /**************************************************************** * Name: mv_ial_ht_release * * Description: release scsi host * * Parameters: SCpnt - Pointer to SCSI host structure. * * Returns: 0 on success, otherwise of failure. 
* ****************************************************************/ int mv_ial_ht_release (struct Scsi_Host *pHost) { IAL_ADAPTER_T *pAdapter = MV_IAL_ADAPTER (pHost); MV_U8 channel; MV_SATA_ADAPTER * pMvSataAdapter = &pAdapter->mvSataAdapter; unsigned long lock_flags; struct scsi_cmnd *cmnds_done_list = NULL; IAL_HOST_T *ial_host = HOSTDATA(pHost); channel = ial_host->channelIndex; pAdapter->activeHosts &= ~ (1 << channel); mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG, ": release host %d\n", pHost->host_no); spin_lock_irqsave (&pAdapter->adapter_lock, lock_flags); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) if (pAdapter->stopAsyncTimer != MV_TRUE) { /* Delete any pending timers */ pAdapter->stopAsyncTimer = MV_TRUE; del_timer_sync(&pAdapter->asyncStartTimer); } #endif if (pMvSataAdapter->sataChannel[channel]) { mvSataDisableChannelDma(pMvSataAdapter, channel); mvSataFlushDmaQueue(pMvSataAdapter, channel, MV_FLUSH_TYPE_CALLBACK); mv_ial_lib_free_channel(pAdapter, channel); } /* Check if there are commands in the done queue to be completed */ cmnds_done_list = mv_ial_lib_get_first_cmnd (pAdapter, channel); if (cmnds_done_list) { unsigned long flags_io_request_lock; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) spin_lock_irqsave(&io_request_lock, flags_io_request_lock); #else spin_lock_irqsave(ial_host->scsihost->host_lock, flags_io_request_lock); #endif mv_ial_lib_do_done(cmnds_done_list); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) spin_unlock_irqrestore(&io_request_lock, flags_io_request_lock); #else spin_unlock_irqrestore(ial_host->scsihost->host_lock, flags_io_request_lock); #endif } if (0 == pAdapter->activeHosts) { mvSataShutdownAdapter(pMvSataAdapter); } pAdapter->host[channel] = NULL; mv_ial_lib_prd_destroy(ial_host); spin_unlock_irqrestore (&pAdapter->adapter_lock, lock_flags); scsi_remove_host(pHost); scsi_host_put(pHost); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) if (0 == pAdapter->activeHosts) { struct pci_dev *dev = pAdapter->pcidev; 
mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG, "[%d] freeing Adapter resources.\n", pAdapter->mvSataAdapter.adapterId); free_irq (pAdapter->pcidev->irq, pMvSataAdapter); #ifdef MV_SUPPORT_MSI pci_disable_msi(pAdapter->pcidev); #endif kfree(pAdapter->ataScsiAdapterExt); iounmap(pMvSataAdapter->adapterIoBaseAddress); mv_ial_lib_free_edma_queues(pAdapter); kfree(pAdapter); pci_disable_device(dev); } #endif return 0; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) static void __devexit mv_ial_remove_device(struct pci_dev *pdev) { IAL_ADAPTER_T *pAdapter = (pdev != NULL) ? pci_get_drvdata(pdev) : pSocAdapter; int numhosts; int i; unsigned long lock_flags; if (pAdapter == NULL) { mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_FATAL_ERROR, "mv_ial_remove_device() No valid Adapter IAL structure found.\n"); return; } numhosts = pAdapter->maxHosts; /*flush hotplug rescan worker*/ atomic_inc(&pAdapter->stopped); flush_scheduled_work(); /* Delete any pending timers */ spin_lock_irqsave (&pAdapter->adapter_lock, lock_flags); pAdapter->stopAsyncTimer = MV_TRUE; del_timer_sync(&pAdapter->asyncStartTimer); spin_unlock_irqrestore(&pAdapter->adapter_lock, lock_flags); for (i = 0; i < numhosts; i++) { if (pAdapter->host[i] != NULL) { mv_ial_ht_release (pAdapter->host[i]->scsihost); } } if (pdev != NULL) /* pci device */ { free_irq (pAdapter->pcidev->irq, &pAdapter->mvSataAdapter); #ifdef MV_SUPPORT_MSI pci_disable_msi(pAdapter->pcidev); #endif kfree(pAdapter->ataScsiAdapterExt); iounmap(pAdapter->mvSataAdapter.adapterIoBaseAddress); mv_ial_lib_free_edma_queues(pAdapter); kfree(pAdapter); pci_release_regions(pdev); pci_disable_device(pdev); } #ifdef CONFIG_MV_INCLUDE_INTEG_SATA else /* Soc sata*/ { free_irq (SATA_IRQ_NUM, &pAdapter->mvSataAdapter); kfree(pAdapter->ataScsiAdapterExt); mv_ial_lib_free_edma_queues(pAdapter); kfree(pAdapter); } #endif } #endif /**************************************************************** * Name: mv_ial_ht_ata_cmd * * Description: handles mv_sata ata IOCTL special drive 
command (HDIO_DRIVE_CMD) * * Parameters: scsidev - Device to which we are issuing command * arg - User provided data for issuing command * * Returns: 0 on success, otherwise of failure. * ****************************************************************/ static int mv_ial_ht_ata_cmd(struct scsi_device *scsidev, void __user *arg) { int rc = 0; u8 scsi_cmd[MAX_COMMAND_SIZE]; u8 args[4], *argbuf = NULL, *sensebuf = NULL; int argsize = 0; enum dma_data_direction data_dir; int cmd_result; if (arg == NULL) return -EINVAL; if (copy_from_user(args, arg, sizeof(args))) return -EFAULT; sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO); if (!sensebuf) return -ENOMEM; memset(scsi_cmd, 0, sizeof(scsi_cmd)); if (args[3]) { argsize = SECTOR_SIZE * args[3]; argbuf = kmalloc(argsize, GFP_KERNEL); if (argbuf == NULL) { rc = -ENOMEM; goto error; } scsi_cmd[1] = (4 << 1); /* PIO Data-in */ scsi_cmd[2] = 0x0e; /* no off.line or cc, read from dev, block count in sector count field */ data_dir = DMA_FROM_DEVICE; } else { scsi_cmd[1] = (3 << 1); /* Non-data */ scsi_cmd[2] = 0x20; /* cc but no off.line or data xfer */ data_dir = DMA_NONE; } scsi_cmd[0] = ATA_16; scsi_cmd[4] = args[2]; if (args[0] == WIN_SMART) { /* hack -- ide driver does this too... */ scsi_cmd[6] = args[3]; scsi_cmd[8] = args[1]; scsi_cmd[10] = 0x4f; scsi_cmd[12] = 0xc2; } else { scsi_cmd[6] = args[1]; } scsi_cmd[14] = args[0]; /* Good values for timeout and retries? Values below from scsi_ioctl_send_command() for default case... 
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
    cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
                              sensebuf, (10*HZ), 5, 0);
#else
    /* 2.6.26+ scsi_execute() grew a trailing residual-length pointer. */
    cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
                              sensebuf, (10*HZ), 5, 0, NULL);
#endif

    if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
        u8 *desc = sensebuf + 8;
        cmd_result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */

        /* If we set cc then ATA pass-through will cause a
         * check condition even if no error. Filter that. */
        if (cmd_result & SAM_STAT_CHECK_CONDITION) {
            struct scsi_sense_hdr sshdr;
            scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
                                 &sshdr);
            if (sshdr.sense_key==0 && sshdr.asc==0 && sshdr.ascq==0)
                cmd_result &= ~SAM_STAT_CHECK_CONDITION;
        }

        /* Send userspace a few ATA registers (same as drivers/ide) */
        if (sensebuf[0] == 0x72 &&      /* format is "descriptor" */
            desc[0] == 0x09 ) {         /* code is "ATA Descriptor" */
            args[0] = desc[13];         /* status */
            args[1] = desc[3];          /* error */
            args[2] = desc[5];          /* sector count (0:7) */
            if (copy_to_user(arg, args, sizeof(args)))
                rc = -EFAULT;
        }
    }

    if (cmd_result) {
        rc = -EIO;
        goto error;
    }

    /* Data read from the device goes to userspace right after the
     * 4-byte taskfile register area at the start of the user buffer. */
    if ((argbuf) && copy_to_user(arg + sizeof(args), argbuf, argsize))
        rc = -EFAULT;
error:
    /* kfree(NULL) is a no-op; the guards are redundant but harmless. */
    if (sensebuf) kfree(sensebuf);
    if (argbuf) kfree(argbuf);
    return rc;
}

/****************************************************************
 * Name: mv_ial_ht_ioctl
 *
 * Description: mv_sata scsi ioctl
 *
 * Parameters:  scsidev - Device to which we are issuing command
 *              cmd     - ioctl command
 *              arg     - User provided data for issuing command
 *
 * Returns:     0 on success, otherwise of failure.
 *
 ****************************************************************/
int mv_ial_ht_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
{
    int rc = -ENOTTY;

    /* No idea how this happens.... */
    if (!scsidev)
        return -ENXIO;

    if (arg == NULL)
        return -EINVAL;

    switch (cmd) {
    case HDIO_DRIVE_CMD:
        /* Raw taskfile passthrough: require both admin and raw-io caps. */
        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
            return -EACCES;
        rc = mv_ial_ht_ata_cmd(scsidev, arg);
        break;
    default:
        rc = -ENOTTY;
    }

    return rc;
}

/****************************************************************
 * Name: mv_ial_ht_queuecommand
 *
 * Description: Process a queued command from the SCSI manager.
 *
 * Parameters:  SCpnt - Pointer to SCSI command structure.
 *              done  - Pointer to done function to call.
 *
 * Returns:     Status code.
 *
 ****************************************************************/
int mv_ial_ht_queuecommand (struct scsi_cmnd * SCpnt,
                            void (*done) (struct scsi_cmnd *))
{
    IAL_ADAPTER_T   *pAdapter = MV_IAL_ADAPTER(SCpnt->device->host);
    MV_SATA_ADAPTER *pMvSataAdapter;
    IAL_HOST_T      *pHost = HOSTDATA(SCpnt->device->host);
    MV_U8           channel = pHost->channelIndex;
    int             build_prd_table = 0;
    unchar          *cmd = (unchar *) SCpnt->cmnd;
    struct mv_comp_info *completion_info;
    unsigned long   lock_flags;
    struct scsi_cmnd    *cmnds_done_list = NULL;

    mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG, " :queuecommand host=%d, bus=%d, channel=%d\n",
             SCpnt->device->host->host_no, SCpnt->device->channel, channel);

    if (done == NULL)
    {
        mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR, ": in queuecommand, done function can't be NULL\n");
        return 0;
    }
    if ((pAdapter == NULL) || (channel >= MV_SATA_CHANNELS_NUM)|| (pAdapter->host[channel] == NULL))
    {
        /* Host already torn down; complete immediately with no-connect. */
        mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_FATAL_ERROR,": in queuecommand, "
                 "command queued for released host!!\n");
        SCpnt->result = DID_NO_CONNECT << 16;
        done(SCpnt);
        return 0;
    }
    /* Swap the mid-layer host lock for the per-adapter lock. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
    spin_unlock_irq (&io_request_lock);
#else
    spin_unlock_irq(pHost->scsihost->host_lock);
#endif
    spin_lock_irqsave (&pAdapter->adapter_lock, lock_flags);
    if (SCpnt->retries > 0)
    {
        mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR,": retry command host=%d, bus=%d"
                 " SCpnt = %p\n", SCpnt->device->host->host_no, channel, SCpnt);
    }
    if (MV_TRUE == pAdapter->host[channel]->hostBlocked)
    {
        /* NOTE(review): the SCSI_MLQUEUE_HOST_BUSY path below is compiled
         * out (#if 0) -- blocked hosts currently fall through and queue
         * anyway. Confirm whether that is intentional. */
        mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR,": command received for "
                 "blocked host=%d, bus=%d, channel=%d, SCpnt = %p\n",
                 SCpnt->device->host->host_no, SCpnt->device->channel,
                 channel, SCpnt);
#if 0
        spin_unlock_irqrestore (&pAdapter->adapter_lock, lock_flags);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
        spin_lock_irq (&io_request_lock);
#else
        spin_lock_irq(pHost->scsihost->host_lock);
#endif
        return SCSI_MLQUEUE_HOST_BUSY;
#endif
    }
    pMvSataAdapter = &pAdapter->mvSataAdapter;
    SCpnt->result = DID_ERROR << 16;
    SCpnt->scsi_done = done;
    /* Per-command state lives in the mid-layer scratch area (SCp). */
    completion_info = ( struct mv_comp_info *) &(SCpnt->SCp);
    completion_info->pSALBlock =
        (MV_SATA_SCSI_CMD_BLOCK *) kmalloc(sizeof(MV_SATA_SCSI_CMD_BLOCK), GFP_ATOMIC);
    if (completion_info->pSALBlock == NULL)
    {
        mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR,
                 "in queuecommand: Failed to allocate SAL Block\n");
        spin_unlock_irqrestore (&pAdapter->adapter_lock, lock_flags);
        done(SCpnt);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
        spin_lock_irq (&io_request_lock);
#else
        spin_lock_irq(pHost->scsihost->host_lock);
#endif
        return -1;
    }
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
    completion_info->kmap_buffer = 0;
#endif
    /* prepare the SAL Block parameters */
    if ((*cmd == READ_6) || (*cmd == READ_10) || (*cmd == READ_16) ||
        (*cmd == WRITE_6) || (*cmd == WRITE_10) || (*cmd == WRITE_16))
    {
        build_prd_table = 1;
    }
    else if((pAdapter->ataScsiAdapterExt->ataDriveData[channel][SCpnt->device->id].identifyInfo.deviceType ==
             MV_SATA_DEVICE_TYPE_ATAPI_DEVICE) && use_sg(SCpnt))
    {
        /* for the 60x1 devices don't use DMA for control commands as the
         * BMDMA will not write data to DRAM in case of underrun. */
        if(!(pAdapter->mvSataAdapter.sataAdapterGeneration == MV_SATA_GEN_II)){
            mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG,
                     "in queuecommand: PRD for non data command for ATAPI device\n");
            build_prd_table = 1;
        }
    }
    if((pAdapter->ataScsiAdapterExt->ataDriveData[channel][SCpnt->device->id].identifyInfo.deviceType ==
        MV_SATA_DEVICE_TYPE_ATAPI_DEVICE))
    {
        /* ATAPI CDBs must be 16-bit aligned; only the low bit matters here. */
        BUG_ON(((unsigned int)SCpnt->cmnd) & 0x1);
    }
    completion_info->pSALBlock->singleDataRegion = MV_FALSE;
    /* prepare the SAL Block parameters */
    if(build_prd_table)
    {
        if(pAdapter->ataScsiAdapterExt->ataDriveData[channel][SCpnt->device->id].identifyInfo.deviceType ==
           MV_SATA_DEVICE_TYPE_ATAPI_DEVICE)
        {
            mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG,
                     "in queuecommand: Data command for ATAPI device\n");
        }
        else
        {
            mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG,
                     "in queuecommand: Data command for ATA device\n");
        }
        if (mv_ial_lib_generate_prd(pMvSataAdapter, SCpnt, completion_info))
        {
            mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR,
                     "in queuecommand: illegal requested buffer\n");
            spin_unlock_irqrestore (&pAdapter->adapter_lock, lock_flags);
            done(SCpnt);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
            spin_lock_irq (&io_request_lock);
#else
            spin_lock_irq(pHost->scsihost->host_lock);
#endif
            return -1;
        }
        completion_info->pSALBlock->pDataBuffer = NULL;
    }
    else
    {
        completion_info->cpu_PRDpnt = NULL;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
        /* No PRD: scatter/gather data will be bounced via a kmap'd
         * temporary buffer (see below). */
        if (use_sg(SCpnt))
        {
            completion_info->kmap_buffer = 1;
        }
#else
        completion_info->pSALBlock->pDataBuffer = SCpnt->request_buffer;
#endif
    }
    completion_info->SCpnt = SCpnt;
    completion_info->pSALBlock->bus = channel;
    completion_info->pSALBlock->target = SCpnt->device->id;
    completion_info->pSALBlock->lun = SCpnt->device->lun;
    completion_info->pSALBlock->pSalAdapterExtension = pAdapter->ataScsiAdapterExt;
    completion_info->pSALBlock->pIalAdapterExtension = &pAdapter->ialCommonExt;
    completion_info->pSALBlock->completionCallBack = IALCompletion;
    completion_info->pSALBlock->IALData = SCpnt;
    completion_info->pSALBlock->dataBufferLength = scsi_bufflen(SCpnt);
    completion_info->pSALBlock->pSenseBuffer = SCpnt->sense_buffer;
    completion_info->pSALBlock->ScsiCdb = SCpnt->cmnd;
    completion_info->pSALBlock->ScsiCdbLength = SCpnt->cmd_len;
    completion_info->pSALBlock->senseBufferLength = SCSI_SENSE_BUFFERSIZE;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
    if (completion_info->kmap_buffer)
    {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
        struct scatterlist *sg;
        sg = (struct scatterlist *) SCpnt->request_buffer;
#endif
        mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG, "SCpnt %p, cmd %x need to use"
                 " temp data buffer.lengh %d \n", SCpnt, *cmd , scsi_bufflen(SCpnt));
        completion_info->pSALBlock->pDataBuffer = kmalloc(scsi_bufflen(SCpnt), GFP_ATOMIC);
        if (completion_info->pSALBlock->pDataBuffer == NULL)
        {
            /* NOTE(review): pSALBlock allocated above is not freed on this
             * path -- looks like a leak; confirm against completion path. */
            mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR,
                     "in queuecommand: Failed to allocate temp buffer for kmap\n");
            spin_unlock_irqrestore (&pAdapter->adapter_lock, lock_flags);
            done(SCpnt);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
            spin_lock_irq (&io_request_lock);
#else
            spin_lock_irq(pHost->scsihost->host_lock);
#endif
            return -1;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
        completion_info->pSALBlock->dataBufferLength = sg->length;
#else
        completion_info->pSALBlock->dataBufferLength = scsi_bufflen(SCpnt);
#endif
        if( SCpnt->sc_data_direction == DMA_TO_DEVICE)
        {
            /* Writes: copy caller data into the bounce buffer up front. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
            struct scatterlist *sg;
            MV_U8* pBuffer;
            sg = (struct scatterlist *) SCpnt->request_buffer;
            mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG,
                     "SCpnt %p, cmd %x kmap temp data buffer and copy data.lengh %d \n",
                     SCpnt, *cmd ,sg->length);
            pBuffer = kmap_atomic(sg->page, KM_USER0) + sg->offset;
            memcpy(completion_info->pSALBlock->pDataBuffer, pBuffer , sg->length);
            kunmap_atomic(pBuffer - sg->offset, KM_USER0);
#else
            scsi_sg_copy_to_buffer(SCpnt,
                                   completion_info->pSALBlock->pDataBuffer,
                                   scsi_bufflen(SCpnt));
#endif
        }
    }
#endif
    switch(SCpnt->sc_data_direction)
    {
    case DMA_FROM_DEVICE:
        completion_info->pSALBlock->dataDirection = MV_SCSI_COMMAND_DATA_DIRECTION_IN;
        break;
    case DMA_TO_DEVICE:
        completion_info->pSALBlock->dataDirection = MV_SCSI_COMMAND_DATA_DIRECTION_OUT;
        break;
    default:
        completion_info->pSALBlock->dataDirection = MV_SCSI_COMMAND_DATA_DIRECTION_NON;
    }
    if (*cmd != SCSI_OPCODE_MVSATA_SMART)
    {
        mvExecuteScsiCommand(completion_info->pSALBlock, MV_TRUE);
    }
    else
    {
        mvScsiAtaSendSmartCommand(pMvSataAdapter, completion_info->pSALBlock);
    }
    /*
     * Check if there is valid commands to be completed. This is usually
     * an immediate completed commands such as INQUIRY etc...
     */
    cmnds_done_list = mv_ial_lib_get_first_cmnd(pAdapter, channel);
    spin_unlock_irqrestore(&pAdapter->adapter_lock, lock_flags);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
    spin_lock_irq (&io_request_lock);
#else
    spin_lock_irq(pHost->scsihost->host_lock);
#endif
    if (cmnds_done_list)
    {
        mv_ial_lib_do_done(cmnds_done_list);
    }
    return 0;
}

/****************************************************************
 * Name: mv_ial_ht_bus_reset
 *
 * Description: reset given device, all pending commands will be aborted
 *              with status DID_RESET.
 *
 * Parameters:  SCpnt - Pointer to SCSI command structure.
 *
 * Returns:     Status code.
* ****************************************************************/
int mv_ial_ht_bus_reset (struct scsi_cmnd *SCpnt)
{
    IAL_ADAPTER_T   *pAdapter = MV_IAL_ADAPTER(SCpnt->device->host);
    MV_SATA_ADAPTER *pMvSataAdapter = &pAdapter->mvSataAdapter;
    IAL_HOST_T      *pHost = HOSTDATA(SCpnt->device->host);
    MV_U8           channel = pHost->channelIndex;
    unsigned long   lock_flags;

    /* Before 2.6.13 EH callbacks run with the host lock held; drop it
     * while taking the per-adapter lock. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
    spin_unlock_irq (&io_request_lock);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
    spin_unlock_irq(pHost->scsihost->host_lock);
#endif
    spin_lock_irqsave (&pAdapter->adapter_lock, lock_flags);
    mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR,
             "Bus Reset: host=%d, channel=%d, target=%d\n",
             SCpnt->device->host->host_no, SCpnt->device->channel,
             SCpnt->device->id);
    if (pMvSataAdapter->sataChannel[channel] == NULL)
    {
        mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR,
                 "trying to reset disabled channel, host=%d, channel=%d\n",
                 SCpnt->device->host->host_no, channel);
        spin_unlock_irqrestore (&pAdapter->adapter_lock, lock_flags);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
        spin_lock_irq(&io_request_lock);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
        spin_lock_irq(pHost->scsihost->host_lock);
#endif
        return FAILED;
    }
    mvSataDisableChannelDma(pMvSataAdapter, channel);
    /* Flush pending commands */
    mvSataFlushDmaQueue (pMvSataAdapter, channel, MV_FLUSH_TYPE_CALLBACK);
    /* Hardware reset channel */
    mvSataChannelHardReset(pMvSataAdapter, channel);
    if (pMvSataAdapter->sataChannel[channel])
    {
        mvRestartChannel(&pAdapter->ialCommonExt, channel,
                         pAdapter->ataScsiAdapterExt, MV_TRUE);
        mv_ial_block_requests(pAdapter, channel);
    }
    /* don't call scsi done for the commands on this channel*/
    mv_ial_lib_get_first_cmnd(pAdapter, channel);
    spin_unlock_irqrestore(&pAdapter->adapter_lock, lock_flags);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
    spin_lock_irq(&io_request_lock);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
    spin_lock_irq(pHost->scsihost->host_lock);
#endif
    return SUCCESS;
}

/* Byte-swap IDENTIFY-string words from little-endian to host order,
 * in place.
 * NOTE(review): only 'target' is read and written; 'source' is unused.
 * The one caller passes the same pointer for both, so this works, but
 * confirm before relying on 'source' being honoured. */
static MV_VOID
mvAta2HostString(IN MV_U16 *source,
                 OUT MV_U16 *target,
                 IN MV_U32 wordsCount
                )
{
    MV_U32 i;
    for (i=0 ; i < wordsCount; i++)
    {
        target[i] = MV_LE16_TO_CPU(target[i]);
    }
}

/****************************************************************
 * Name: mv_ial_ht_proc_info
 *
 * Description: /proc file
 *
 * Parameters:
 *
 * Returns:
 *
 ****************************************************************/
int mv_ial_ht_proc_info(struct Scsi_Host *pshost, char *buffer, char **start,
                        off_t offset, int length, int inout)
{
    int len = 0, temp, pmPort;
    IAL_ADAPTER_T   *pAdapter;
    MV_SATA_ADAPTER *pMvSataAdapter;
    IAL_HOST_T      *pHost = HOSTDATA(pshost);
    unsigned long   lock_flags;

    pAdapter = MV_IAL_ADAPTER(pshost);
    pMvSataAdapter = &pAdapter->mvSataAdapter;
    temp = pHost->channelIndex;

    spin_lock_irqsave (&pAdapter->adapter_lock, lock_flags);
    if (inout == 1)
    {
        /* Writing to file */
        /* The format is 'int_coal <sata unit> <coal_threshold> <timeout>' */
        int i;

        /* Check signature 'int_coal' at start of buffer */
        if (!strncmp (buffer, "int_coal", strlen ("int_coal")))
        {
            int sata_unit;
            u32 time_thre, coal_thre;
            i = sscanf (buffer + strlen ("int_coal"), "%d %d %d\n",
                        &sata_unit, &coal_thre, &time_thre);
            if (i == 3)
            {
                /* Three matched inputs */
                mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG,
                         "[%d]: Modifying interrupt coalescing of unit %d to %d threshold and %d timer\n",pMvSataAdapter->adapterId,
                         sata_unit, coal_thre, time_thre);
                mvSataSetIntCoalParams (pMvSataAdapter, sata_unit,
                                        coal_thre, time_thre);
            }
            else
            {
                mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG,
                         "[%d]: Error in interrupt coalescing parameters\n",
                         pMvSataAdapter->adapterId);
            }
        }
        /* Check signature 'sata_phy_shutdown' at start of buffer */
        else if (!strncmp (buffer, "sata_phy_shutdown",
                           strlen ("sata_phy_shutdown")))
        {
            int sata_phy;
            i = sscanf (buffer + strlen ("sata_phy_shutdown"), "%d\n",
                        &sata_phy);
            if (i == 1)
            {
                /* One matched input */
                if (mvSataIsStorageDeviceConnected (pMvSataAdapter,
                                                    sata_phy, NULL) == MV_TRUE)
                {
                    mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG,
                             "[%d,%d]: Warning - shutting down a phy that is connected to a storage device\n",
                             pMvSataAdapter->adapterId, sata_phy);
                }
                if (mvSataChannelPhyShutdown (pMvSataAdapter,
                                              sata_phy) == MV_TRUE)
                {
                    mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG,
                             "[%d,%d]: Shutting down SATA phy\n",
                             pMvSataAdapter->adapterId, sata_phy);
                }
            }
            else
            {
                mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG,
                         "[%d]: Error in shutting down SATA phy parameters\n",
                         pMvSataAdapter->adapterId);
            }
        }
        else if (!strncmp (buffer, "sata_phy_powerup",
                           strlen ("sata_phy_powerup")))
        {
            int sata_phy;
            i = sscanf (buffer + strlen ("sata_phy_powerup"), "%d\n",
                        &sata_phy);
            if (i == 1)
            {
                /* One matched input */
                if (mvSataChannelPhyPowerOn (pMvSataAdapter,
                                             sata_phy) == MV_TRUE)
                {
                    mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG,
                             "[%d,%d]: Turning on SATA phy\n",
                             pMvSataAdapter->adapterId, sata_phy);
                }
            }
            else
            {
                mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG,"[%d]: Error in powering up SATA phy parameters\n",
                         pMvSataAdapter->adapterId);
            }
        }
        spin_unlock_irqrestore (&pAdapter->adapter_lock, lock_flags);
        return length;
    }
    else
    {
        /* Reading from file */
        int i;

        /*
         * Write to the file the time stamp which is difference between last
         * jiffies and current one. Next to it write the HZ parameter which
         * indicates how many time jiffies parameter is incremented in a
         * second.
         */
        len += snprintf (buffer + len,length - len, "%s\n",
                         mv_ial_proc_version);
        if (len >= length)
        {
            goto out;
        }
        len += snprintf (buffer + len,length - len, "\nTimeStamp :\n%ld\t%d\n",
                         jiffies, HZ);
        if (len >= length)
        {
            goto out;
        }
        /* Write the number of interrupts this adapter generated within the
         * sampling time.
         */
        len += snprintf (buffer + len,length - len,
                         "\nNumber of interrupts generated by the adapter is : \n%d\n",
                         pAdapter->procNumOfInterrupts);
        if (len >= length)
        {
            goto out;
        }
        if (pAdapter->pcidev)
        {
            len += snprintf (buffer + len, length - len,
                             "\nPCI location: Bus %d, Slot %d\n",
                             pAdapter->pcidev->bus->number,
                             PCI_SLOT(pAdapter->pcidev->devfn));
            if (len >= length)
            {
                goto out;
            }
            len += snprintf (buffer + len, length - len,
                             "DeviceID: %x, Rev %x," " adapterId %d, channel %d \n",
                             pAdapter->mvSataAdapter.pciConfigDeviceId,
                             pAdapter->mvSataAdapter.pciConfigRevisionId,
                             pAdapter->mvSataAdapter.adapterId,
                             pHost->channelIndex);
            if (len >= length)
            {
                goto out;
            }
        }
        else /*integrated sata*/
        {
            len += snprintf (buffer + len, length - len,
                             "\nIntegrated Sata adapterId %d, "
                             "channel %d\n",pAdapter->mvSataAdapter.adapterId,
                             pHost->channelIndex);
            if (len >= length)
            {
                goto out;
            }
        }
        if (pMvSataAdapter->sataChannel[temp])
        {
            if (pMvSataAdapter->sataChannel[temp]->deviceType ==
                MV_SATA_DEVICE_TYPE_PM)
            {
                len += snprintf (buffer + len, length - len,
                                 "Port Multiplier connected, switching mode: %s\n",
                                 (pMvSataAdapter->sataChannel[temp]->FBSEnabled ==
                                  MV_TRUE) ? "FBS":"CBS");
                if (len >= length)
                {
                    goto out;
                }
            }
        }
        /*
         * Check if channel connected.
         * If not connected write -1 on a line
         * If connected, write a line that has -
         * 1.. Adapter number
         * 2.. SCSI channel number (equivalent to SATA channel number).
         * 3.. ID
         * 4.. LUN (always 0)
         * 5.. vendor name
         * 6.. number of outstanding commands accumulated
         * 7.. total sampling of outstanding commands
         * 8.. total sectors transferred
         * 9.. flag if queued / non-queued (1/0)
         * 10. flag if LBA 48 or not (1/0)
         * 11. flag if the storage device can be removed or not
         *     (1 means can't be removed / 0 can be removed).
         */
        len += snprintf (buffer + len,length - len,
                         "\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t\t%s\t%s\n",
                         "Adapter", "Channel", "Id", "LUN", "TO", "TS",
                         "Vendor", "Mode", "LBA48");
        if (len >= length)
        {
            goto out;
        }
        if ((len + 100) >= length)
        {
            goto out;
        }
        /* Horizontal separator line (80 dashes). */
        for (i = 0 ; i < 80 ; i++)
            buffer [len + i] = '-';
        len += i;
        len += snprintf (buffer + len,length - len, "\n");
        if (len >= length)
        {
            goto out;
        }
        if (pMvSataAdapter->sataChannel[temp])
        {
            for (pmPort = 0; pmPort < MV_SATA_PM_MAX_PORTS; pmPort++)
            {
                /* Only port 0 is valid unless a Port Multiplier is attached. */
                if (pmPort > 0 &&
                    (pMvSataAdapter->sataChannel[temp]->deviceType !=
                     MV_SATA_DEVICE_TYPE_PM))
                {
                    break;
                }
                if (pAdapter->ataScsiAdapterExt->ataDriveData[temp][pmPort].driveReady == MV_FALSE)
                {
                    continue;
                }
                len += snprintf (buffer + len,length - len,
                                 "%d\t%d\t%d\t%d\t%u\t%u\t",
                                 pAdapter->mvSataAdapter.adapterId, temp,
                                 pmPort, 0,
                                 pAdapter->ataScsiAdapterExt->ataDriveData[temp][pmPort].stats.totalIOs,
                                 pAdapter->ataScsiAdapterExt->ataDriveData[temp][pmPort].stats.totalSectorsTransferred);
                if (len >= length)
                {
                    goto out;
                }
                /*
                 * Copy first 10 characters of the vendor name from the IDENTIFY
                 * DEVICE ATA command result buffer
                 */
                if ((len+10) >= length)
                {
                    goto out;
                }
                memcpy (buffer+len,
                        pAdapter->ataScsiAdapterExt->ataDriveData[temp][pmPort].identifyInfo.model,
                        10);
                mvAta2HostString((MV_U16 *)(buffer+len),
                                 (MV_U16 *)(buffer+len), 5);
                /*
                 * Clean spaces in vendor name and swap odd and even characters.
                 * The swap is due to the format of the IDENTIFY DEVICE command
                 */
                for (i=0 ; i<10 ; i+=2)
                {
                    char ch = buffer[len + i];
                    buffer[len + i] = buffer[len+1 + i];
                    buffer[len+1 + i] = ch;
                    if (buffer[len + i] == ' ')
                    {
                        buffer[len + i + 1] = ' ';
                        break;
                    }
                    if (buffer[len+1 + i] == ' ')
                    {
                        break;
                    }
                }
                if ((len + 10) >= length)
                {
                    goto out;
                }
                /* Pad the remainder of the 10-char vendor field with spaces. */
                for (; i < 10; i++)
                {
                    buffer[len + i] = ' ';
                }
                len += 10;
                len += snprintf (buffer + len,length - len, "\t%s \t%d\n",
                                 (pMvSataAdapter->sataChannel[temp]->queuedDMA ==
                                  MV_EDMA_MODE_QUEUED) ? "TCQ" :
                                 (pMvSataAdapter->sataChannel[temp]->queuedDMA ==
                                  MV_EDMA_MODE_NATIVE_QUEUING) ? "NCQ":"Normal",
                                 (pAdapter->ataScsiAdapterExt->ataDriveData[temp][pmPort].identifyInfo.LBA48Supported ==
                                  MV_TRUE) ? 1 : 0);
                if (len >= length)
                {
                    goto out;
                }
            }
        }
        if ((!pMvSataAdapter->sataChannel[temp]) &&
            (mvSataIsStorageDeviceConnected (pMvSataAdapter, temp, NULL) ==
             MV_TRUE))
            len += snprintf (buffer + len,length - len,
                             "Storage device connected to channel %d is malfunction\n",
                             temp);
        if (len >= length)
        {
            goto out;
        }
        /* Legend for the table columns above. */
        len += snprintf (buffer + len,length - len,"\n\n\nTO - Total Outstanding commands accumulated\n");
        if (len >= length)
        {
            goto out;
        }
        len += snprintf (buffer + len,length - len,"TSA - Total number of IOs accumulated\n");
        if (len >= length)
        {
            goto out;
        }
        len += snprintf (buffer + len,length - len,"TS - Total number of sectors transferred (both read/write)\n");
        if (len >= length)
        {
            goto out;
        }
        len += snprintf (buffer + len,length - len,"Mode - EDMA mode (TCQ|NCQ|Normal)\n");
        if (len >= length)
        {
            goto out;
        }
        len += snprintf (buffer + len,length - len,"LBA48 - Large Block Address 48 feature set enabled\n");
        if (len >= length)
        {
            goto out;
        }
    }
out:
    spin_unlock_irqrestore (&pAdapter->adapter_lock, lock_flags);
    return(len);
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
/****************************************************************
 * Name: mv_ial_ht_proc_info (kernel version < 2.6)
 *
 * Description: /proc file
 *
 * Parameters:
 *
 * Returns:
 *
 ****************************************************************/
int mv_ial_ht_proc_info24(char *buffer, char **start, off_t offset,
                          int length, int inode, int inout)
{
    struct Scsi_Host *pshost = 0;

    /* 2.4: look the host up by host number, then delegate to the
     * common handler above. */
    for (pshost = scsi_hostlist; pshost; pshost = pshost->next)
    {
        if (pshost->host_no == inode)
        {
            return mv_ial_ht_proc_info(pshost, buffer, start, offset,length,
                                       inout);
        }
    }
    return -EINVAL;
}
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
/* Per-device setup: sizes host and per-device queue depths, and applies
 * ATAPI-specific quirks. */
static int mv_ial_ht_slave_configure (struct scsi_device* pDevs)
{
    IAL_HOST_T *pHost = HOSTDATA (pDevs->host);
    struct Scsi_Host* scsiHost = pDevs->host;
    struct scsi_device* pDevice =
NULL;
    mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG, "[%d]: slave configure\n",
             pHost->pAdapter->mvSataAdapter.adapterId);
    if (pHost->use128Entries == MV_TRUE)
    {
        pHost->scsihost->can_queue = MV_SATA_GEN2E_SW_QUEUE_SIZE;
    }
    else
    {
        pHost->scsihost->can_queue = MV_SATA_SW_QUEUE_SIZE;
    }
    mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG, "[%d %d]: adjust host[channel] queue depth"
             " to %d\n", pHost->pAdapter->mvSataAdapter.adapterId,
             pHost->channelIndex,
             pHost->scsihost->can_queue);
    shost_for_each_device(pDevice, scsiHost)
    {
        int deviceQDepth = 2;
        if(pHost->pAdapter->ataScsiAdapterExt->ataDriveData[pHost->channelIndex][pDevice->id].identifyInfo.deviceType ==
           MV_SATA_DEVICE_TYPE_ATAPI_DEVICE)
        {
            /* ATAPI: untagged, single command, 10-byte CDB variants. */
            mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG, "[%d %d %d]: ATAPI device found\n",
                     pHost->pAdapter->mvSataAdapter.adapterId,
                     pHost->channelIndex,
                     pDevice->id);
            pDevice->use_10_for_rw = 1;
            pDevice->use_10_for_ms = 1;
            scsi_adjust_queue_depth(pDevice, 0, 1);
            // pHost->scsihost->max_cmd_len = 12;
            // blk_queue_max_sectors(pDevice->request_queue, 256);
            blk_queue_max_hw_sectors(pDevice->request_queue, 256);
        }
        else
        {
            if (pHost->mode != MV_EDMA_MODE_NOT_QUEUED)
            {
                deviceQDepth = 31;
                if (pHost->scsihost->can_queue >= 32)
                {
                    deviceQDepth = 32;
                }
            }
            mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG, "[%d %d %d]: adjust device queue "
                     "depth to %d\n", pHost->pAdapter->mvSataAdapter.adapterId,
                     pDevice->channel,
                     pDevice->id, deviceQDepth);
            scsi_adjust_queue_depth(pDevice, MSG_SIMPLE_TAG, deviceQDepth);
#ifdef MV_SUPPORT_1MBYTE_IOS
            if(pHost->pAdapter->ataScsiAdapterExt->ataDriveData[pHost->channelIndex][pDevice->id].identifyInfo.LBA48Supported ==
               MV_TRUE)
            {
                //blk_queue_max_sectors(pDevice->request_queue, 2048);
                blk_queue_max_hw_sectors(pDevice->request_queue, 2048);
                mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR,
                         "[%d %d %d]: set device max sectors to 2048 \n",
                         pHost->pAdapter->mvSataAdapter.adapterId,
                         pHost->channelIndex, pDevice->id);
            }
#endif
        }
    }
    scsiHost->max_cmd_len = 16;
    return 0;
}
#else
/* 2.4 equivalent of slave_configure: queue depth is not tunable per
 * device, so cap via cmd_per_lun/can_queue instead. */
static void mv_ial_ht_select_queue_depths (struct Scsi_Host* pHost,
                                           struct scsi_device* pDevs)
{
    IAL_HOST_T *ial_host = HOSTDATA (pHost);
    struct scsi_device* pDevice;

    if (ial_host != NULL)
    {
        /* linux 2.4 queue depth is not tunable, so we set the device queue */
        /* to the max value (MV_SATA_SW_QUEUE_SIZE), and limit the number queued */
        /* commands using the cmd_per_lun */
        /* set can_queue to the max number of queued commands per host (sata */
        /* channel). This may cause starvation if PortMultiplier is connected*/
        pHost->cmd_per_lun = 31;
        if (ial_host->mode != MV_EDMA_MODE_NOT_QUEUED)
        {
            if (ial_host->use128Entries == MV_TRUE)
            {
                pHost->can_queue = MV_SATA_GEN2E_SW_QUEUE_SIZE;
                pHost->cmd_per_lun = 32;
            }
            else
            {
                pHost->can_queue = MV_SATA_SW_QUEUE_SIZE;
            }
        }
        else
        {
            pHost->can_queue = MV_DEFAULT_QUEUE_DEPTH;
        }
        /*always allocate the max number of commands */
        for (pDevice = pDevs; pDevice; pDevice = pDevice->next)
        {
            if (pDevice->host == pHost)
            {
                pDevice->queue_depth = MV_SATA_SW_QUEUE_SIZE;
            }
        }
        mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG, "[%d %d]: adjust queue depth to %d\n",
                 ial_host->pAdapter->mvSataAdapter.adapterId,
                 ial_host->channelIndex,
                 pHost->can_queue);
    }
}
#endif

/* SCSI EH abort callback: restarts the whole channel (there is no
 * per-command abort), blocks new requests, and completes whatever the
 * library hands back. Returns SUCCESS only if commands were reaped. */
int mv_ial_ht_abort(struct scsi_cmnd *SCpnt)
{
    IAL_ADAPTER_T   *pAdapter;
    IAL_HOST_T      *pHost;
    MV_SATA_ADAPTER *pMvSataAdapter;
    MV_U8           channel;
    unsigned long   lock_flags;
    struct scsi_cmnd *cmnds_done_list = NULL;

    mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR, "abort command %p\n", SCpnt);
    if (SCpnt == NULL)
    {
        return FAILED;
    }
    pHost = HOSTDATA(SCpnt->device->host);
    channel = pHost->channelIndex;
    pAdapter = MV_IAL_ADAPTER(SCpnt->device->host);
    pMvSataAdapter = &pAdapter->mvSataAdapter;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
    if (SCpnt->serial_number != SCpnt->serial_number_at_timeout)
    {
        mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR, "[%d %d %d]: abort failed, "
                 "serial number mismatch\n",SCpnt->device->host->host_no,
                 channel, SCpnt->device->id);
        return FAILED;
    }
    spin_unlock_irq (&io_request_lock);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
    spin_unlock_irq (pHost->scsihost->host_lock);
#endif
    spin_lock_irqsave (&pAdapter->adapter_lock, lock_flags);
    mvRestartChannel(&pAdapter->ialCommonExt, channel,
                     pAdapter->ataScsiAdapterExt, MV_TRUE);
    mv_ial_block_requests(pAdapter, channel);
    cmnds_done_list = mv_ial_lib_get_first_cmnd(pAdapter, channel);
    spin_unlock_irqrestore (&pAdapter->adapter_lock, lock_flags);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
    spin_lock_irq(&io_request_lock);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
    spin_lock_irq (pHost->scsihost->host_lock);
#endif
    if (cmnds_done_list)
    {
        scsi_report_bus_reset(SCpnt->device->host, SCpnt->device->channel);
        mv_ial_lib_do_done(cmnds_done_list);
        return SUCCESS;
    }
    mvLogMsg(MV_IAL_LOG_ID, MV_DEBUG_ERROR, "[%d %d %d]: command abort failed\n",
             SCpnt->device->host->host_no, SCpnt->device->channel,
             SCpnt->device->id);
    return FAILED;
}

Scsi_Host_Template driver_template = mvSata;

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Marvell Serial ATA PCI-X Adapter");
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
#include "scsi_module.c"
#endif

/* NOTE(review): MODULE_PARM_DESC names below ("ncq", "pmncq") do not
 * match the declared parameter names ("noncq", "nopmncq"), so the
 * descriptions will not be associated with the parameters in modinfo.
 * The "nopmncq" description also looks copy-pasted (it should mention
 * Port Multiplier NCQ). Left as-is: fixing is a code change. */
module_param_named(noncq, ncq_disable, bool, 0444);
MODULE_PARM_DESC(ncq, "Disable use of NCQ (Default: false)");

module_param_named(nopmncq, pm_ncq_disable, bool, 0444);
MODULE_PARM_DESC(pmncq, "Disable use of NCQ (Default: false)");
sancome/linux-3.x
arch/arm/plat-armada/mv_drivers_lsp/mv_sata/mvLinuxIalHt.c
C
gpl-2.0
75,336
/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically executes their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that tries to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
#include "sched.h"

#include <linux/slab.h>

/* System-wide default bandwidth (period/runtime) for -deadline tasks. */
struct dl_bandwidth def_dl_bandwidth;

/* Map a -deadline scheduling entity back to its owning task_struct. */
static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
    return container_of(dl_se, struct task_struct, dl);
}

/* Map a dl runqueue back to the CPU runqueue embedding it. */
static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
    return container_of(dl_rq, struct rq, dl);
}

/* dl runqueue of the CPU an entity's task is currently assigned to. */
static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
    struct task_struct *p = dl_task_of(dl_se);
    struct rq *rq = task_rq(p);

    return &rq->dl;
}

/* An entity is enqueued on a dl runqueue iff its rb node is linked. */
static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
    return !RB_EMPTY_NODE(&dl_se->rb_node);
}

/* True if @p has the earliest deadline (leftmost node) on @dl_rq. */
static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
    struct sched_dl_entity *dl_se = &p->dl;

    return dl_rq->rb_leftmost == &dl_se->rb_node;
}

void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
    raw_spin_lock_init(&dl_b->dl_runtime_lock);
    dl_b->dl_period = period;
    dl_b->dl_runtime = runtime;
}

extern unsigned long to_ratio(u64 period, u64 runtime);

/* Initialize a dl_bw from the global RT bandwidth settings; bw == -1
 * encodes "unlimited" (RUNTIME_INF). */
void init_dl_bw(struct dl_bw *dl_b)
{
    raw_spin_lock_init(&dl_b->lock);
    /* Taken to serialize against changes of the default bandwidth. */
    raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
    if (global_rt_runtime() == RUNTIME_INF)
        dl_b->bw = -1;
    else
        dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
    raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
    dl_b->total_bw = 0;
}

void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq)
{
    dl_rq->rb_root = RB_ROOT;

#ifdef CONFIG_SMP
    /* zero means no -deadline tasks */
    dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

    dl_rq->dl_nr_migratory = 0;
    dl_rq->overloaded = 0;
    dl_rq->pushable_dl_tasks_root = RB_ROOT;
#else
    init_dl_bw(&dl_rq->dl_bw);
#endif
}

#ifdef CONFIG_SMP

/* Non-zero when any runqueue in this root domain is dl-overloaded. */
static inline int dl_overloaded(struct rq *rq)
{
    return atomic_read(&rq->rd->dlo_count);
}

static inline void dl_set_overload(struct rq *rq)
{
    if (!rq->online)
        return;

    cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
    /*
     * Must be visible before the overload count is
     * set (as in sched_rt.c).
     *
     * Matched by the barrier in pull_dl_task().
     */
    smp_wmb();
    atomic_inc(&rq->rd->dlo_count);
}

static inline void dl_clear_overload(struct rq *rq)
{
    if (!rq->online)
        return;

    atomic_dec(&rq->rd->dlo_count);
    cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}

/* A runqueue is "overloaded" when it has a migratable -deadline task
 * plus at least one more -deadline task queued. */
static void update_dl_migration(struct dl_rq *dl_rq)
{
    if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_total > 1) {
        if (!dl_rq->overloaded) {
            dl_set_overload(rq_of_dl_rq(dl_rq));
            dl_rq->overloaded = 1;
        }
    } else if (dl_rq->overloaded) {
        dl_clear_overload(rq_of_dl_rq(dl_rq));
        dl_rq->overloaded = 0;
    }
}

static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
    struct task_struct *p = dl_task_of(dl_se);
    /* Account against the task's current runqueue, not the one passed in. */
    dl_rq = &rq_of_dl_rq(dl_rq)->dl;

    dl_rq->dl_nr_total++;
    if (p->nr_cpus_allowed > 1)
        dl_rq->dl_nr_migratory++;

    update_dl_migration(dl_rq);
}

static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
    struct task_struct *p = dl_task_of(dl_se);
    dl_rq = &rq_of_dl_rq(dl_rq)->dl;

    dl_rq->dl_nr_total--;
    if (p->nr_cpus_allowed > 1)
        dl_rq->dl_nr_migratory--;

    update_dl_migration(dl_rq);
}

/*
 * The list of pushable -deadline task is not a plist, like in
 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
    struct dl_rq *dl_rq = &rq->dl;
    struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
    struct rb_node *parent = NULL;
    struct task_struct *entry;
    int leftmost = 1;

    BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

    while (*link) {
        parent = *link;
        entry = rb_entry(parent, struct task_struct,
                         pushable_dl_tasks);
        if (dl_entity_preempt(&p->dl, &entry->dl))
            link = &parent->rb_left;
        else {
            link = &parent->rb_right;
            leftmost = 0;
        }
    }

    if (leftmost)
        dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;

    rb_link_node(&p->pushable_dl_tasks, parent, link);
    rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
}

static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
    struct dl_rq *dl_rq = &rq->dl;

    if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
        return;

    /* Keep the cached leftmost pointer up to date before erasing. */
    if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
        struct rb_node *next_node;

        next_node = rb_next(&p->pushable_dl_tasks);
        dl_rq->pushable_dl_tasks_leftmost = next_node;
    }

    rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
    RB_CLEAR_NODE(&p->pushable_dl_tasks);
}

static inline int has_pushable_dl_tasks(struct rq *rq)
{
    return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
}

static int push_dl_task(struct rq *rq);

#else

/* !CONFIG_SMP: no task pushing/pulling, so these are all no-ops. */
static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

#endif /* CONFIG_SMP */

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
                                  int flags);

/*
 * We are being explicitly informed that a new
instance is starting, * and this means that: * - the absolute deadline of the entity has to be placed at * current time + relative deadline; * - the runtime of the entity has to be set to the maximum value. * * The capability of specifying such event is useful whenever a -deadline * entity wants to (try to!) synchronize its behaviour with the scheduler's * one, and to (try to!) reconcile itself with its own scheduling * parameters. */ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se, struct sched_dl_entity *pi_se) { struct dl_rq *dl_rq = dl_rq_of_se(dl_se); struct rq *rq = rq_of_dl_rq(dl_rq); WARN_ON(!dl_se->dl_new || dl_se->dl_throttled); /* * We use the regular wall clock time to set deadlines in the * future; in fact, we must consider execution overheads (time * spent on hardirq context, etc.). */ dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; dl_se->runtime = pi_se->dl_runtime; dl_se->dl_new = 0; } /* * Pure Earliest Deadline First (EDF) scheduling does not deal with the * possibility of a entity lasting more than what it declared, and thus * exhausting its runtime. * * Here we are interested in making runtime overrun possible, but we do * not want a entity which is misbehaving to affect the scheduling of all * other entities. * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS) * is used, in order to confine each entity within its own bandwidth. * * This function deals exactly with that, and ensures that when the runtime * of a entity is replenished, its deadline is also postponed. That ensures * the overrunning entity can't interfere with other entity in the system and * can't make them miss their deadlines. Reasons why this kind of overruns * could happen are, typically, a entity voluntarily trying to overcome its * runtime, or it just underestimated it during sched_setscheduler_ex(). 
*/ static void replenish_dl_entity(struct sched_dl_entity *dl_se, struct sched_dl_entity *pi_se) { struct dl_rq *dl_rq = dl_rq_of_se(dl_se); struct rq *rq = rq_of_dl_rq(dl_rq); BUG_ON(pi_se->dl_runtime <= 0); /* * This could be the case for a !-dl task that is boosted. * Just go with full inherited parameters. */ if (dl_se->dl_deadline == 0) { dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; dl_se->runtime = pi_se->dl_runtime; } /* * We keep moving the deadline away until we get some * available runtime for the entity. This ensures correct * handling of situations where the runtime overrun is * arbitrary large. */ while (dl_se->runtime <= 0) { dl_se->deadline += pi_se->dl_period; dl_se->runtime += pi_se->dl_runtime; } /* * At this point, the deadline really should be "in * the future" with respect to rq->clock. If it's * not, we are, for some reason, lagging too much! * Anyway, after having warn userspace abut that, * we still try to keep the things running by * resetting the deadline and the budget of the * entity. */ if (dl_time_before(dl_se->deadline, rq_clock(rq))) { static bool lag_once = false; if (!lag_once) { lag_once = true; printk_sched("sched: DL replenish lagged to much\n"); } dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; dl_se->runtime = pi_se->dl_runtime; } } /* * Here we check if --at time t-- an entity (which is probably being * [re]activated or, in general, enqueued) can use its remaining runtime * and its current deadline _without_ exceeding the bandwidth it is * assigned (function returns true if it can't). We are in fact applying * one of the CBS rules: when a task wakes up, if the residual runtime * over residual deadline fits within the allocated bandwidth, then we * can keep the current (absolute) deadline and residual budget without * disrupting the schedulability of the system. 
Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks.
 *
 * This function returns true if:
 *
 *     runtime / (deadline - t) > dl_runtime / dl_period ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the period. For
 * task with deadline equal to period this is the same of using
 * dl_deadline instead of dl_period in the equation above.
 */
static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
			       struct sched_dl_entity *pi_se, u64 t)
{
	u64 left, right;

	/*
	 * left and right are the two sides of the equation above,
	 * after a bit of shuffling to use multiplications instead
	 * of divisions.
	 *
	 * Note that none of the time values involved in the two
	 * multiplications are absolute: dl_deadline and dl_runtime
	 * are the relative deadline and the maximum runtime of each
	 * instance, runtime is the runtime left for the last instance
	 * and (deadline - t), since t is rq->clock, is the time left
	 * to the (absolute) deadline. Even if overflowing the u64 type
	 * is very unlikely to occur in both cases, here we scale down
	 * as we want to avoid that risk at all. Scaling down by 10
	 * means that we reduce granularity to 1us. We are fine with it,
	 * since this is only a true/false check and, anyway, thinking
	 * of anything below microseconds resolution is actually fiction
	 * (but still we want to give the user that illusion >;).
	 */
	left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
	right = ((dl_se->deadline - t) >> DL_SCALE) *
		(pi_se->dl_runtime >> DL_SCALE);

	return dl_time_before(right, left);
}

/*
 * When a -deadline entity is queued back on the runqueue, its runtime and
 * deadline might need updating.
 *
 * The policy here is that we update the deadline of the entity only if:
 *  - the current deadline is in the past,
 *  - using the remaining runtime with the current deadline would make
 *    the entity exceed its bandwidth.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se,
			     struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * The arrival of a new instance needs special treatment, i.e.,
	 * the actual scheduling parameters have to be "renewed".
	 */
	if (dl_se->dl_new) {
		setup_new_dl_entity(dl_se, pi_se);
		return;
	}

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}
}

/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth enforcement timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
static int start_dl_timer(struct sched_dl_entity *dl_se, bool boosted)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);
	ktime_t now, act;
	ktime_t soft, hard;
	unsigned long range;
	s64 delta;

	/* Boosted tasks run on inherited parameters: no enforcement timer. */
	if (boosted)
		return 0;
	/*
	 * We want the timer to fire at the deadline, but considering
	 * that it is actually coming from rq->clock and not from
	 * hrtimer's time base reading.
	 */
	act = ns_to_ktime(dl_se->deadline);
	now = hrtimer_cb_get_time(&dl_se->dl_timer);
	delta = ktime_to_ns(now) - rq_clock(rq);
	act = ktime_add_ns(act, delta);

	/*
	 * If the expiry time already passed, e.g., because the value
	 * chosen as the deadline is too small, don't even try to
	 * start the timer in the past!
	 */
	if (ktime_us_delta(act, now) < 0)
		return 0;

	hrtimer_set_expires(&dl_se->dl_timer, act);

	soft = hrtimer_get_softexpires(&dl_se->dl_timer);
	hard = hrtimer_get_expires(&dl_se->dl_timer);
	range = ktime_to_ns(ktime_sub(hard, soft));
	__hrtimer_start_range_ns(&dl_se->dl_timer, soft,
				 range, HRTIMER_MODE_ABS, 0);

	return hrtimer_active(&dl_se->dl_timer);
}

/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on the fact the task is active,
 * (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * do nothing but clearing dl_throttled, so that runtime and deadline
 * updating (and the queueing back to dl_rq) will be done by the
 * next call to enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     dl_timer);
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = task_rq(p);
	raw_spin_lock(&rq->lock);

	/*
	 * We need to take care of possible races here. In fact, the
	 * task might have changed its scheduling policy to something
	 * different from SCHED_DEADLINE or changed its reservation
	 * parameters (through sched_setscheduler()).
	 */
	if (!dl_task(p) || dl_se->dl_new)
		goto unlock;

	sched_clock_tick();
	update_rq_clock(rq);
	dl_se->dl_throttled = 0;
	if (p->on_rq) {
		enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
		if (task_has_dl_policy(rq->curr))
			check_preempt_curr_dl(rq, p, 0);
		else
			resched_task(rq->curr);
#ifdef CONFIG_SMP
		/*
		 * Queueing this task back might have overloaded rq,
		 * check if we need to kick someone away.
		 */
		if (has_pushable_dl_tasks(rq))
			push_dl_task(rq);
#endif
	}
unlock:
	raw_spin_unlock(&rq->lock);

	return HRTIMER_NORESTART;
}

/* Lazily (re)initialize the per-entity bandwidth enforcement hrtimer. */
void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->dl_timer;

	if (hrtimer_active(timer)) {
		hrtimer_try_to_cancel(timer);
		return;
	}

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = dl_task_timer;
}

/*
 * Returns non-zero (and charges any deadline-miss overrun to the budget)
 * when the entity either exhausted its runtime or missed its deadline.
 */
static
int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
{
	int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq));
	int rorun = dl_se->runtime <= 0;

	if (!rorun && !dmiss)
		return 0;

	/*
	 * If we are beyond our current deadline and we are still
	 * executing, then we have already used some of the runtime of
	 * the next instance. Thus, if we do not account that, we are
	 * stealing bandwidth from the system at each deadline miss!
	 */
	if (dmiss) {
		dl_se->runtime = rorun ? dl_se->runtime : 0;
		dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
	}

	return 1;
}

/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */
static void update_curr_dl(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_dl_entity *dl_se = &curr->dl;
	u64 delta_exec;

	if (!dl_task(curr) || !on_dl_rq(dl_se))
		return;

	/*
	 * Consumed budget is computed considering the time as
	 * observed by schedulable tasks (excluding time spent
	 * in hardirq context, etc.). Deadlines are instead
	 * computed using hard walltime. This seems to be the more
	 * natural solution, but the full ramifications of this
	 * approach need further study.
*/ delta_exec = rq_clock_task(rq) - curr->se.exec_start; if (unlikely((s64)delta_exec < 0)) delta_exec = 0; schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec)); curr->se.sum_exec_runtime += delta_exec; account_group_exec_runtime(curr, delta_exec); curr->se.exec_start = rq_clock_task(rq); cpuacct_charge(curr, delta_exec); sched_rt_avg_update(rq, delta_exec); dl_se->runtime -= delta_exec; if (dl_runtime_exceeded(rq, dl_se)) { __dequeue_task_dl(rq, curr, 0); if (likely(start_dl_timer(dl_se, curr->dl.dl_boosted))) dl_se->dl_throttled = 1; else enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH); if (!is_leftmost(curr, &rq->dl)) resched_task(curr); } /* * Because -- for now -- we share the rt bandwidth, we need to * account our runtime there too, otherwise actual rt tasks * would be able to exceed the shared quota. * * Account to the root rt group for now. * * The solution we're working towards is having the RT groups scheduled * using deadline servers -- however there's a few nasties to figure * out before that can happen. */ if (rt_bandwidth_enabled()) { struct rt_rq *rt_rq = &rq->rt; raw_spin_lock(&rt_rq->rt_runtime_lock); rt_rq->rt_time += delta_exec; /* * We'll let actual RT tasks worry about the overflow here, we * have our own CBS to keep us inline -- see above. 
*/ raw_spin_unlock(&rt_rq->rt_runtime_lock); } } #ifdef CONFIG_SMP static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu); static inline u64 next_deadline(struct rq *rq) { struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu); if (next && dl_prio(next->prio)) return next->dl.deadline; else return 0; } static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) { struct rq *rq = rq_of_dl_rq(dl_rq); if (dl_rq->earliest_dl.curr == 0 || dl_time_before(deadline, dl_rq->earliest_dl.curr)) { /* * If the dl_rq had no -deadline tasks, or if the new task * has shorter deadline than the current one on dl_rq, we * know that the previous earliest becomes our next earliest, * as the new task becomes the earliest itself. */ dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr; dl_rq->earliest_dl.curr = deadline; cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1); } else if (dl_rq->earliest_dl.next == 0 || dl_time_before(deadline, dl_rq->earliest_dl.next)) { /* * On the other hand, if the new -deadline task has a * a later deadline than the earliest one on dl_rq, but * it is earlier than the next (if any), we must * recompute the next-earliest. */ dl_rq->earliest_dl.next = next_deadline(rq); } } static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) { struct rq *rq = rq_of_dl_rq(dl_rq); /* * Since we may have removed our earliest (and/or next earliest) * task we must recompute them. 
*/ if (!dl_rq->dl_nr_running) { dl_rq->earliest_dl.curr = 0; dl_rq->earliest_dl.next = 0; cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0); } else { struct rb_node *leftmost = dl_rq->rb_leftmost; struct sched_dl_entity *entry; entry = rb_entry(leftmost, struct sched_dl_entity, rb_node); dl_rq->earliest_dl.curr = entry->deadline; dl_rq->earliest_dl.next = next_deadline(rq); cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1); } } #else static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {} static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {} #endif /* CONFIG_SMP */ static inline void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) { int prio = dl_task_of(dl_se)->prio; u64 deadline = dl_se->deadline; WARN_ON(!dl_prio(prio)); dl_rq->dl_nr_running++; inc_dl_deadline(dl_rq, deadline); inc_dl_migration(dl_se, dl_rq); } static inline void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) { int prio = dl_task_of(dl_se)->prio; WARN_ON(!dl_prio(prio)); WARN_ON(!dl_rq->dl_nr_running); dl_rq->dl_nr_running--; dec_dl_deadline(dl_rq, dl_se->deadline); dec_dl_migration(dl_se, dl_rq); } static void __enqueue_dl_entity(struct sched_dl_entity *dl_se) { struct dl_rq *dl_rq = dl_rq_of_se(dl_se); struct rb_node **link = &dl_rq->rb_root.rb_node; struct rb_node *parent = NULL; struct sched_dl_entity *entry; int leftmost = 1; BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node)); while (*link) { parent = *link; entry = rb_entry(parent, struct sched_dl_entity, rb_node); if (dl_time_before(dl_se->deadline, entry->deadline)) link = &parent->rb_left; else { link = &parent->rb_right; leftmost = 0; } } if (leftmost) dl_rq->rb_leftmost = &dl_se->rb_node; rb_link_node(&dl_se->rb_node, parent, link); rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root); inc_dl_tasks(dl_se, dl_rq); } static void __dequeue_dl_entity(struct sched_dl_entity *dl_se) { struct dl_rq *dl_rq = dl_rq_of_se(dl_se); if (RB_EMPTY_NODE(&dl_se->rb_node)) return; if 
(dl_rq->rb_leftmost == &dl_se->rb_node) { struct rb_node *next_node; next_node = rb_next(&dl_se->rb_node); dl_rq->rb_leftmost = next_node; } rb_erase(&dl_se->rb_node, &dl_rq->rb_root); RB_CLEAR_NODE(&dl_se->rb_node); dec_dl_tasks(dl_se, dl_rq); } static void enqueue_dl_entity(struct sched_dl_entity *dl_se, struct sched_dl_entity *pi_se, int flags) { BUG_ON(on_dl_rq(dl_se)); /* * If this is a wakeup or a new instance, the scheduling * parameters of the task might need updating. Otherwise, * we want a replenishment of its runtime. */ if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH) replenish_dl_entity(dl_se, pi_se); else update_dl_entity(dl_se, pi_se); __enqueue_dl_entity(dl_se); } static void dequeue_dl_entity(struct sched_dl_entity *dl_se) { __dequeue_dl_entity(dl_se); } static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) { struct task_struct *pi_task = rt_mutex_get_top_task(p); struct sched_dl_entity *pi_se = &p->dl; /* * Use the scheduling parameters of the top pi-waiter * task if we have one and its (relative) deadline is * smaller than our one... OTW we keep our runtime and * deadline. */ if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) pi_se = &pi_task->dl; /* * If p is throttled, we do nothing. In fact, if it exhausted * its budget it needs a replenishment and, since it now is on * its rq, the bandwidth timer callback (which clearly has not * run yet) will take care of this. 
*/ if (p->dl.dl_throttled) return; enqueue_dl_entity(&p->dl, pi_se, flags); if (!task_current(rq, p) && p->nr_cpus_allowed > 1) enqueue_pushable_dl_task(rq, p); inc_nr_running(rq); } static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) { dequeue_dl_entity(&p->dl); dequeue_pushable_dl_task(rq, p); } static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) { update_curr_dl(rq); __dequeue_task_dl(rq, p, flags); dec_nr_running(rq); } /* * Yield task semantic for -deadline tasks is: * * get off from the CPU until our next instance, with * a new runtime. This is of little use now, since we * don't have a bandwidth reclaiming mechanism. Anyway, * bandwidth reclaiming is planned for the future, and * yield_task_dl will indicate that some spare budget * is available for other task instances to use it. */ static void yield_task_dl(struct rq *rq) { struct task_struct *p = rq->curr; /* * We make the task go to sleep until its current deadline by * forcing its runtime to zero. This way, update_curr_dl() stops * it and the bandwidth timer will wake it up and will give it * new scheduling parameters (thanks to dl_new=1). */ if (p->dl.runtime > 0) { rq->curr->dl.dl_new = 1; p->dl.runtime = 0; } update_curr_dl(rq); } #ifdef CONFIG_SMP static int find_later_rq(struct task_struct *task); static int select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags) { struct task_struct *curr; struct rq *rq; if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK) goto out; rq = cpu_rq(cpu); rcu_read_lock(); curr = ACCESS_ONCE(rq->curr); /* unlocked access */ /* * If we are dealing with a -deadline task, we must * decide where to wake it up. * If it has a later deadline and the current task * on this rq can't move (provided the waking task * can!) we prefer to send it somewhere else. On the * other hand, if it has a shorter deadline, we * try to make it stay here, it might be important. 
*/ if (unlikely(dl_task(curr)) && (curr->nr_cpus_allowed < 2 || !dl_entity_preempt(&p->dl, &curr->dl)) && (p->nr_cpus_allowed > 1)) { int target = find_later_rq(p); if (target != -1) cpu = target; } rcu_read_unlock(); out: return cpu; } static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p) { /* * Current can't be migrated, useless to reschedule, * let's hope p can move out. */ if (rq->curr->nr_cpus_allowed == 1 || cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1) return; /* * p is migratable, so let's not schedule it and * see if it is pushed or pulled somewhere else. */ if (p->nr_cpus_allowed != 1 && cpudl_find(&rq->rd->cpudl, p, NULL) != -1) return; resched_task(rq->curr); } #endif /* CONFIG_SMP */ /* * Only called when both the current and waking task are -deadline * tasks. */ static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags) { if (dl_entity_preempt(&p->dl, &rq->curr->dl)) { resched_task(rq->curr); return; } #ifdef CONFIG_SMP /* * In the unlikely case current and p have the same deadline * let us try to decide what's the best thing to do... 
*/ if ((p->dl.deadline == rq->curr->dl.deadline) && !test_tsk_need_resched(rq->curr)) check_preempt_equal_dl(rq, p); #endif /* CONFIG_SMP */ } #ifdef CONFIG_SCHED_HRTICK static void start_hrtick_dl(struct rq *rq, struct task_struct *p) { s64 delta = p->dl.dl_runtime - p->dl.runtime; if (delta > 10000) hrtick_start(rq, p->dl.runtime); } #endif static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq, struct dl_rq *dl_rq) { struct rb_node *left = dl_rq->rb_leftmost; if (!left) return NULL; return rb_entry(left, struct sched_dl_entity, rb_node); } struct task_struct *pick_next_task_dl(struct rq *rq) { struct sched_dl_entity *dl_se; struct task_struct *p; struct dl_rq *dl_rq; dl_rq = &rq->dl; if (unlikely(!dl_rq->dl_nr_running)) return NULL; dl_se = pick_next_dl_entity(rq, dl_rq); BUG_ON(!dl_se); p = dl_task_of(dl_se); p->se.exec_start = rq_clock_task(rq); /* Running task will never be pushed. */ dequeue_pushable_dl_task(rq, p); #ifdef CONFIG_SCHED_HRTICK if (hrtick_enabled(rq)) start_hrtick_dl(rq, p); #endif #ifdef CONFIG_SMP rq->post_schedule = has_pushable_dl_tasks(rq); #endif /* CONFIG_SMP */ return p; } static void put_prev_task_dl(struct rq *rq, struct task_struct *p) { update_curr_dl(rq); if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1) enqueue_pushable_dl_task(rq, p); } static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued) { update_curr_dl(rq); #ifdef CONFIG_SCHED_HRTICK if (hrtick_enabled(rq) && queued && p->dl.runtime > 0) start_hrtick_dl(rq, p); #endif } static void task_fork_dl(struct task_struct *p) { /* * SCHED_DEADLINE tasks cannot fork and this is achieved through * sched_fork() */ } static void task_dead_dl(struct task_struct *p) { struct hrtimer *timer = &p->dl.dl_timer; struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); /* * Since we are TASK_DEAD we won't slip out of the domain! 
*/ raw_spin_lock_irq(&dl_b->lock); dl_b->total_bw -= p->dl.dl_bw; raw_spin_unlock_irq(&dl_b->lock); hrtimer_cancel(timer); } static void set_curr_task_dl(struct rq *rq) { struct task_struct *p = rq->curr; p->se.exec_start = rq_clock_task(rq); /* You can't push away the running task */ dequeue_pushable_dl_task(rq, p); } #ifdef CONFIG_SMP /* Only try algorithms three times */ #define DL_MAX_TRIES 3 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) { if (!task_running(rq, p) && (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) && (p->nr_cpus_allowed > 1)) return 1; return 0; } /* Returns the second earliest -deadline task, NULL otherwise */ static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu) { struct rb_node *next_node = rq->dl.rb_leftmost; struct sched_dl_entity *dl_se; struct task_struct *p = NULL; next_node: next_node = rb_next(next_node); if (next_node) { dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node); p = dl_task_of(dl_se); if (pick_dl_task(rq, p, cpu)) return p; goto next_node; } return NULL; } static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl); static int find_later_rq(struct task_struct *task) { struct sched_domain *sd; struct cpumask *later_mask = __get_cpu_var(local_cpu_mask_dl); int this_cpu = smp_processor_id(); int best_cpu, cpu = task_cpu(task); /* Make sure the mask is initialized first */ if (unlikely(!later_mask)) return -1; if (task->nr_cpus_allowed == 1) return -1; best_cpu = cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask); if (best_cpu == -1) return -1; /* * If we are here, some target has been found, * the most suitable of which is cached in best_cpu. * This is, among the runqueues where the current tasks * have later deadlines than the task's one, the rq * with the latest possible one. * * Now we check how well this matches with task's * affinity and system topology. * * The last cpu where the task run is our first * guess, since it is most likely cache-hot there. 
*/ if (cpumask_test_cpu(cpu, later_mask)) return cpu; /* * Check if this_cpu is to be skipped (i.e., it is * not in the mask) or not. */ if (!cpumask_test_cpu(this_cpu, later_mask)) this_cpu = -1; rcu_read_lock(); for_each_domain(cpu, sd) { if (sd->flags & SD_WAKE_AFFINE) { /* * If possible, preempting this_cpu is * cheaper than migrating. */ if (this_cpu != -1 && cpumask_test_cpu(this_cpu, sched_domain_span(sd))) { rcu_read_unlock(); return this_cpu; } /* * Last chance: if best_cpu is valid and is * in the mask, that becomes our choice. */ if (best_cpu < nr_cpu_ids && cpumask_test_cpu(best_cpu, sched_domain_span(sd))) { rcu_read_unlock(); return best_cpu; } } } rcu_read_unlock(); /* * At this point, all our guesses failed, we just return * 'something', and let the caller sort the things out. */ if (this_cpu != -1) return this_cpu; cpu = cpumask_any(later_mask); if (cpu < nr_cpu_ids) return cpu; return -1; } /* Locks the rq it finds */ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) { struct rq *later_rq = NULL; int tries; int cpu; for (tries = 0; tries < DL_MAX_TRIES; tries++) { cpu = find_later_rq(task); if ((cpu == -1) || (cpu == rq->cpu)) break; later_rq = cpu_rq(cpu); /* Retry if something changed. */ if (double_lock_balance(rq, later_rq)) { if (unlikely(task_rq(task) != rq || !cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed) || task_running(rq, task) || !task->on_rq)) { double_unlock_balance(rq, later_rq); later_rq = NULL; break; } } /* * If the rq we found has no -deadline task, or * its earliest one has a later deadline than our * task, the rq is a good one. */ if (!later_rq->dl.dl_nr_running || dl_time_before(task->dl.deadline, later_rq->dl.earliest_dl.curr)) break; /* Otherwise we try again. 
*/ double_unlock_balance(rq, later_rq); later_rq = NULL; } return later_rq; } static struct task_struct *pick_next_pushable_dl_task(struct rq *rq) { struct task_struct *p; if (!has_pushable_dl_tasks(rq)) return NULL; p = rb_entry(rq->dl.pushable_dl_tasks_leftmost, struct task_struct, pushable_dl_tasks); BUG_ON(rq->cpu != task_cpu(p)); BUG_ON(task_current(rq, p)); BUG_ON(p->nr_cpus_allowed <= 1); BUG_ON(!p->on_rq); BUG_ON(!dl_task(p)); return p; } /* * See if the non running -deadline tasks on this rq * can be sent to some other CPU where they can preempt * and start executing. */ static int push_dl_task(struct rq *rq) { struct task_struct *next_task; struct rq *later_rq; if (!rq->dl.overloaded) return 0; next_task = pick_next_pushable_dl_task(rq); if (!next_task) return 0; retry: if (unlikely(next_task == rq->curr)) { WARN_ON(1); return 0; } /* * If next_task preempts rq->curr, and rq->curr * can move away, it makes sense to just reschedule * without going further in pushing next_task. */ if (dl_task(rq->curr) && dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) && rq->curr->nr_cpus_allowed > 1) { resched_task(rq->curr); return 0; } /* We might release rq lock */ get_task_struct(next_task); /* Will lock the rq it'll find */ later_rq = find_lock_later_rq(next_task, rq); if (!later_rq) { struct task_struct *task; /* * We must check all this again, since * find_lock_later_rq releases rq->lock and it is * then possible that next_task has migrated. */ task = pick_next_pushable_dl_task(rq); if (task_cpu(next_task) == rq->cpu && task == next_task) { /* * The task is still there. We don't try * again, some other cpu will pull it when ready. 
*/ dequeue_pushable_dl_task(rq, next_task); goto out; } if (!task) /* No more tasks */ goto out; put_task_struct(next_task); next_task = task; goto retry; } deactivate_task(rq, next_task, 0); set_task_cpu(next_task, later_rq->cpu); activate_task(later_rq, next_task, 0); resched_task(later_rq->curr); double_unlock_balance(rq, later_rq); out: put_task_struct(next_task); return 1; } static void push_dl_tasks(struct rq *rq) { /* Terminates as it moves a -deadline task */ while (push_dl_task(rq)) ; } static int pull_dl_task(struct rq *this_rq) { int this_cpu = this_rq->cpu, ret = 0, cpu; struct task_struct *p; struct rq *src_rq; u64 dmin = LONG_MAX; if (likely(!dl_overloaded(this_rq))) return 0; /* * Match the barrier from dl_set_overloaded; this guarantees that if we * see overloaded we must also see the dlo_mask bit. */ smp_rmb(); for_each_cpu(cpu, this_rq->rd->dlo_mask) { if (this_cpu == cpu) continue; src_rq = cpu_rq(cpu); /* * It looks racy, abd it is! However, as in sched_rt.c, * we are fine with this. */ if (this_rq->dl.dl_nr_running && dl_time_before(this_rq->dl.earliest_dl.curr, src_rq->dl.earliest_dl.next)) continue; /* Might drop this_rq->lock */ double_lock_balance(this_rq, src_rq); /* * If there are no more pullable tasks on the * rq, we're done with it. */ if (src_rq->dl.dl_nr_running <= 1) goto skip; p = pick_next_earliest_dl_task(src_rq, this_cpu); /* * We found a task to be pulled if: * - it preempts our current (if there's one), * - it will preempt the last one we pulled (if any). */ if (p && dl_time_before(p->dl.deadline, dmin) && (!this_rq->dl.dl_nr_running || dl_time_before(p->dl.deadline, this_rq->dl.earliest_dl.curr))) { WARN_ON(p == src_rq->curr); WARN_ON(!p->on_rq); /* * Then we pull iff p has actually an earlier * deadline than the current task of its runqueue. 
*/ if (dl_time_before(p->dl.deadline, src_rq->curr->dl.deadline)) goto skip; ret = 1; deactivate_task(src_rq, p, 0); set_task_cpu(p, this_cpu); activate_task(this_rq, p, 0); dmin = p->dl.deadline; /* Is there any other task even earlier? */ } skip: double_unlock_balance(this_rq, src_rq); } return ret; } static void pre_schedule_dl(struct rq *rq, struct task_struct *prev) { /* Try to pull other tasks here */ if (dl_task(prev)) pull_dl_task(rq); } static void post_schedule_dl(struct rq *rq) { push_dl_tasks(rq); } /* * Since the task is not running and a reschedule is not going to happen * anytime soon on its runqueue, we try pushing it away now. */ static void task_woken_dl(struct rq *rq, struct task_struct *p) { if (!task_running(rq, p) && !test_tsk_need_resched(rq->curr) && has_pushable_dl_tasks(rq) && p->nr_cpus_allowed > 1 && dl_task(rq->curr) && (rq->curr->nr_cpus_allowed < 2 || dl_entity_preempt(&rq->curr->dl, &p->dl))) { push_dl_tasks(rq); } } static void set_cpus_allowed_dl(struct task_struct *p, const struct cpumask *new_mask) { struct rq *rq; int weight; BUG_ON(!dl_task(p)); /* * Update only if the task is actually running (i.e., * it is on the rq AND it is not throttled). */ if (!on_dl_rq(&p->dl)) return; weight = cpumask_weight(new_mask); /* * Only update if the process changes its state from whether it * can migrate or not. 
*/ if ((p->nr_cpus_allowed > 1) == (weight > 1)) return; rq = task_rq(p); /* * The process used to be able to migrate OR it can now migrate */ if (weight <= 1) { if (!task_current(rq, p)) dequeue_pushable_dl_task(rq, p); BUG_ON(!rq->dl.dl_nr_migratory); rq->dl.dl_nr_migratory--; } else { if (!task_current(rq, p)) enqueue_pushable_dl_task(rq, p); rq->dl.dl_nr_migratory++; } update_dl_migration(&rq->dl); } /* Assumes rq->lock is held */ static void rq_online_dl(struct rq *rq) { if (rq->dl.overloaded) dl_set_overload(rq); if (rq->dl.dl_nr_running > 0) cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1); } /* Assumes rq->lock is held */ static void rq_offline_dl(struct rq *rq) { if (rq->dl.overloaded) dl_clear_overload(rq); cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0); } void init_sched_dl_class(void) { unsigned int i; for_each_possible_cpu(i) zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i), GFP_KERNEL, cpu_to_node(i)); } #endif /* CONFIG_SMP */ static void switched_from_dl(struct rq *rq, struct task_struct *p) { if (hrtimer_active(&p->dl.dl_timer) && !dl_policy(p->policy)) hrtimer_try_to_cancel(&p->dl.dl_timer); #ifdef CONFIG_SMP /* * Since this might be the only -deadline task on the rq, * this is the right place to try to pull some other one * from an overloaded cpu, if any. */ if (!rq->dl.dl_nr_running) pull_dl_task(rq); #endif } /* * When switching to -deadline, we may overload the rq, then * we try to push someone off, if possible. */ static void switched_to_dl(struct rq *rq, struct task_struct *p) { int check_resched = 1; /* * If p is throttled, don't consider the possibility * of preempting rq->curr, the check will be done right * after its runtime will get replenished. 
*/ if (unlikely(p->dl.dl_throttled)) return; if (p->on_rq || rq->curr != p) { #ifdef CONFIG_SMP if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p)) /* Only reschedule if pushing failed */ check_resched = 0; #endif /* CONFIG_SMP */ if (check_resched && task_has_dl_policy(rq->curr)) check_preempt_curr_dl(rq, p, 0); } } /* * If the scheduling parameters of a -deadline task changed, * a push or pull operation might be needed. */ static void prio_changed_dl(struct rq *rq, struct task_struct *p, int oldprio) { if (p->on_rq || rq->curr == p) { #ifdef CONFIG_SMP /* * This might be too much, but unfortunately * we don't have the old deadline value, and * we can't argue if the task is increasing * or lowering its prio, so... */ if (!rq->dl.overloaded) pull_dl_task(rq); /* * If we now have a earlier deadline task than p, * then reschedule, provided p is still on this * runqueue. */ if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) && rq->curr == p) resched_task(p); #else /* * Again, we don't know if p has a earlier * or later deadline, so let's blindly set a * (maybe not needed) rescheduling point. */ resched_task(p); #endif /* CONFIG_SMP */ } else switched_to_dl(rq, p); } const struct sched_class dl_sched_class = { .next = &rt_sched_class, .enqueue_task = enqueue_task_dl, .dequeue_task = dequeue_task_dl, .yield_task = yield_task_dl, .check_preempt_curr = check_preempt_curr_dl, .pick_next_task = pick_next_task_dl, .put_prev_task = put_prev_task_dl, #ifdef CONFIG_SMP .select_task_rq = select_task_rq_dl, .set_cpus_allowed = set_cpus_allowed_dl, .rq_online = rq_online_dl, .rq_offline = rq_offline_dl, .pre_schedule = pre_schedule_dl, .post_schedule = post_schedule_dl, .task_woken = task_woken_dl, #endif .set_curr_task = set_curr_task_dl, .task_tick = task_tick_dl, .task_fork = task_fork_dl, .task_dead = task_dead_dl, .prio_changed = prio_changed_dl, .switched_from = switched_from_dl, .switched_to = switched_to_dl, };
wgoossens/linux-nios2
kernel/sched/deadline.c
C
gpl-2.0
42,412
/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include "msm_camera_eeprom.h"

/*
 * msm_camera_eeprom_read - read @num_byte bytes from @reg_addr into @data.
 *
 * When @convert_endian is non-zero, the data is read into a temporary
 * buffer and copied out with each byte pair swapped.
 * Returns 0 on success or the negative error from the I2C layer.
 *
 * NOTE(review): the temporary buffer is a VLA sized by the caller-supplied
 * @num_byte, so a large read request can exhaust the kernel stack; the
 * swap loop also assumes @num_byte is even — confirm all callers honor
 * both constraints.
 */
int32_t msm_camera_eeprom_read(struct msm_eeprom_ctrl_t *ectrl,
	uint32_t reg_addr, void *data, uint32_t num_byte,
	uint16_t convert_endian)
{
	int rc = 0;

	/* Give the sensor driver a chance to remap the register address. */
	if (ectrl->func_tbl.eeprom_set_dev_addr != NULL)
		ectrl->func_tbl.eeprom_set_dev_addr(ectrl, &reg_addr);

	if (!convert_endian) {
		rc = msm_camera_i2c_read_seq(
			&ectrl->i2c_client, reg_addr, data, num_byte);
	} else {
		unsigned char buf[num_byte];
		uint8_t *data_ptr = (uint8_t *) data;
		int i;
		rc = msm_camera_i2c_read_seq(
			&ectrl->i2c_client, reg_addr, buf, num_byte);
		/* Swap every byte pair to convert the endianness. */
		for (i = 0; i < num_byte; i += 2) {
			data_ptr[i] = buf[i+1];
			data_ptr[i+1] = buf[i];
		}
	}
	return rc;
}

/*
 * msm_camera_eeprom_read_tbl - execute a table of EEPROM read descriptors.
 *
 * Stops and returns the error of the first failing read; a NULL table is
 * treated as an empty one and returns 0.
 */
int32_t msm_camera_eeprom_read_tbl(struct msm_eeprom_ctrl_t *ectrl,
	struct msm_camera_eeprom_read_t *read_tbl, uint16_t tbl_size)
{
	int i, rc = 0;
	CDBG("%s: open\n", __func__);
	if (read_tbl == NULL)
		return rc;

	for (i = 0; i < tbl_size; i++) {
		rc = msm_camera_eeprom_read
			(ectrl, read_tbl[i].reg_addr,
			read_tbl[i].dest_ptr,
			read_tbl[i].num_byte,
			read_tbl[i].convert_endian);
		if (rc < 0) {
			pr_err("%s: read failed\n", __func__);
			return rc;
		}
	}
	CDBG("%s: done\n", __func__);
	return rc;
}

/*
 * msm_camera_eeprom_get_info - copy the controller's static info block
 * into @einfo (kernel-space copy; size comes from the controller).
 */
int32_t msm_camera_eeprom_get_info(struct msm_eeprom_ctrl_t *ectrl,
	struct msm_camera_eeprom_info_t *einfo)
{
	int rc = 0;
	CDBG("%s: open\n", __func__);
	memcpy(einfo, ectrl->info, ectrl->info_size);
	CDBG("%s: done =%d\n", __func__, rc);
	return rc;
}

/*
 * msm_camera_eeprom_get_data - copy one formatted data-table entry to
 * userspace. Returns -EFAULT on a bad index or a failed copy_to_user().
 */
int32_t msm_camera_eeprom_get_data(struct msm_eeprom_ctrl_t *ectrl,
	struct msm_eeprom_data_t *edata)
{
	int rc = 0;
	/* Bounds-check the userspace-chosen index before dereferencing. */
	if (edata->index >= ectrl->data_tbl_size)
		return -EFAULT;
	if (copy_to_user(edata->eeprom_data,
		ectrl->data_tbl[edata->index].data,
		ectrl->data_tbl[edata->index].size))
		rc = -EFAULT;
	return rc;
}

/*
 * msm_eeprom_config - ioctl backend dispatching CFG_GET_EEPROM_INFO and
 * CFG_GET_EEPROM_DATA requests, serialized by the controller mutex.
 * Unknown cfgtypes are silently accepted (rc = 0), preserving the
 * existing userspace contract.
 */
int32_t msm_eeprom_config(struct msm_eeprom_ctrl_t *e_ctrl,
	void __user *argp)
{
	struct msm_eeprom_cfg_data cdata;
	int32_t rc = 0;
	if (copy_from_user(&cdata,
		(void *)argp,
		sizeof(struct msm_eeprom_cfg_data)))
		return -EFAULT;
	mutex_lock(e_ctrl->eeprom_mutex);
	switch (cdata.cfgtype) {
	case CFG_GET_EEPROM_INFO:
		if (e_ctrl->func_tbl.eeprom_get_info == NULL) {
			rc = -EFAULT;
			break;
		}
		rc = e_ctrl->func_tbl.eeprom_get_info(e_ctrl,
			&cdata.cfg.get_info);
		/* Write the filled-in info back to the caller. */
		if (copy_to_user((void *)argp,
			&cdata,
			sizeof(struct msm_eeprom_cfg_data)))
			rc = -EFAULT;
		break;
	case CFG_GET_EEPROM_DATA:
		if (e_ctrl->func_tbl.eeprom_get_data == NULL) {
			rc = -EFAULT;
			break;
		}
		rc = e_ctrl->func_tbl.eeprom_get_data(e_ctrl,
			&cdata.cfg.get_data);
		if (copy_to_user((void *)argp,
			&cdata,
			sizeof(struct msm_eeprom_cfg_data)))
			rc = -EFAULT;
		break;
	default:
		break;
	}
	mutex_unlock(e_ctrl->eeprom_mutex);
	return rc;
}

/* Recover the controller from its embedded v4l2_subdev. */
struct msm_eeprom_ctrl_t *get_ectrl(struct v4l2_subdev *sd)
{
	return container_of(sd, struct msm_eeprom_ctrl_t, sdev);
}

/*
 * msm_eeprom_subdev_ioctl - v4l2 subdev ioctl entry point; only
 * VIDIOC_MSM_EEPROM_CFG is supported.
 */
long msm_eeprom_subdev_ioctl(struct v4l2_subdev *sd,
			unsigned int cmd, void *arg)
{
	struct msm_eeprom_ctrl_t *e_ctrl = get_ectrl(sd);
	void __user *argp = (void __user *)arg;
	switch (cmd) {
	case VIDIOC_MSM_EEPROM_CFG:
		return msm_eeprom_config(e_ctrl, argp);
	default:
		return -ENOIOCTLCMD;
	}
}

/*
 * msm_eeprom_i2c_probe - i2c probe: bind the client, run the optional
 * init hook, pre-read the EEPROM tables, format the data, and register
 * the v4l2 subdev.
 *
 * Returns 0 on success or a negative errno. The i2c core treats any
 * non-zero return as probe failure.
 */
int32_t msm_eeprom_i2c_probe(struct i2c_client *client,
	const struct i2c_device_id *id)
{
	int rc = 0;
	struct msm_eeprom_ctrl_t *e_ctrl_t = NULL;
	CDBG("%s called\n", __func__);

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
		pr_err("i2c_check_functionality failed\n");
		/*
		 * BUGFIX: rc was still 0 here, so a failed functionality
		 * check used to report probe *success* to the i2c core.
		 */
		rc = -ENODEV;
		goto probe_failure;
	}

	e_ctrl_t = (struct msm_eeprom_ctrl_t *)(id->driver_data);
	e_ctrl_t->i2c_client.client = client;

	/* Override the default slave address if the driver supplies one. */
	if (e_ctrl_t->i2c_addr != 0)
		e_ctrl_t->i2c_client.client->addr = e_ctrl_t->i2c_addr;
	/* BUGFIX: %p instead of %x — the old cast truncated the pointer
	 * on 64-bit builds and tripped -Wpointer-to-int-cast. */
	CDBG("%s client = %p\n", __func__, client);

	/* Assign name for sub device */
	snprintf(e_ctrl_t->sdev.name, sizeof(e_ctrl_t->sdev.name),
		"%s", e_ctrl_t->i2c_driver->driver.name);

	if (e_ctrl_t->func_tbl.eeprom_init != NULL) {
		rc = e_ctrl_t->func_tbl.eeprom_init(e_ctrl_t,
			e_ctrl_t->i2c_client.client->adapter);
	}
	msm_camera_eeprom_read_tbl(e_ctrl_t,
		e_ctrl_t->read_tbl,
		e_ctrl_t->read_tbl_size);

	if (e_ctrl_t->func_tbl.eeprom_format_data != NULL)
		e_ctrl_t->func_tbl.eeprom_format_data();

	if (e_ctrl_t->func_tbl.eeprom_release != NULL)
		rc = e_ctrl_t->func_tbl.eeprom_release(e_ctrl_t);

	/* Initialize sub device */
	v4l2_i2c_subdev_init(&e_ctrl_t->sdev,
		e_ctrl_t->i2c_client.client,
		e_ctrl_t->eeprom_v4l2_subdev_ops);
	CDBG("%s success result=%d\n", __func__, rc);
	return rc;

probe_failure:
	pr_err("%s failed! rc = %d\n", __func__, rc);
	return rc;
}
gp-b2g/gp-peak-kernel
drivers/media/video/msm/eeprom/msm_camera_eeprom.c
C
gpl-2.0
5,094
<!DOCTYPE html>
<html>
<head>
	<title>Number format</title>
	<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
	<meta http-equiv="X-UA-Compatible" content="IE=edge"/>
	<link rel="stylesheet" type="text/css" href="../../../codebase/dhtmlx.css"/>
	<script src="../../../codebase/dhtmlx.js"></script>
	<script>
		var myForm, myForm2;
		function doOnLoad() {
			// init from json
			// fix: declare formData with var instead of leaking an implicit global
			var formData = [
				{type: "settings", position: "label-left", labelWidth: 250, inputWidth: 120},
				{type: "fieldset", label: "Number Formats", inputWidth: "auto", list:[
					{type: "input", name: "t1", label: "Format: 0,000.00", value: "1234567.8987", numberFormat: "0,000.00"},
					{type: "input", name: "t2", label: "Format: @ 0,00 (before_change=false)", value: "123456.78", numberFormat: ["@ 0,00",":"]},
					{type: "input", name: "t3", label: "Format: $ 0.00", value: "125750.99", numberFormat: ["$ 0,000.00",",","."]},
					{type: "input", name: "t4", label: "Format: 0,000 Rub", value: "10250.45", numberFormat: ["0,000 Rub","'"]},
					{type: "input", name: "t5", label: "Format: 0,000.00 Kr (set with script)", value: "99570.45"}
				]}
			];
			myForm = new dhtmlXForm("myForm", formData);
			myForm.setNumberFormat("t5","0,000.00 Kr","'",",");
			myForm.attachEvent("onChange", function(){
				// console.log("onChange",arguments)
			});
			myForm.attachEvent("onBeforeChange", function(name){
				// console.log("onBeforeChange",arguments)
				return (name != "t2");
			});
			// xml
			myForm2 = new dhtmlXForm("myForm2");
			myForm2.loadStruct("../common/dhxform_numberformat.xml", function(){
				myForm2.setNumberFormat("t5","0,000.00 Kr","'",",");
			});
		}
	</script>
</head>
<body onload="doOnLoad();">
	<table border="0" cellspacing="2" cellpadding="2">
		<tr>
			<td align="left" valign="top"><div id="myForm"></div></td>
			<td align="left" valign="top"><div id="myForm2" style="margin-left: 30px;"></div></td>
		</tr>
	</table>
	<div style="padding-top: 10px;">
		<b>JSON</b> { numberFormat: "format" } or { numberFormat: ["format", "group_sep", "dec_sep"] }<br>
		<b>XML</b> &lt; numberFormat="format" groupSep="group_sep" decSep="dec_sep" &gt;<br>
		<br>
		"format" required, "group_sep" and "dec_sep" optional, "," and "." are defaults
	</div>
</body>
</html>
o-unity/lanio
old/backup/lanio_t/ext/dhtmlx/samples/dhtmlxForm/03_api/09_number_format.html
HTML
gpl-2.0
2,345
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin


class I18n(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
    """Internationalization
    """

    plugin_name = 'i18n'
    profiles = ('system',)

    def setup(self):
        # Collect input-method configuration plus the system-wide locale
        # file, then record the active locale environment via locale(1).
        specs = [
            "/etc/X11/xinit/xinput.d/*",
            "/etc/locale.conf"
        ]
        self.add_copy_spec(specs)
        self.add_cmd_output("locale")

# vim: et ts=4 sw=4
lmiccini/sos
sos/plugins/i18n.py
Python
gpl-2.0
1,102
var gulp = require('gulp');
var concat = require('gulp-concat');

// Foundation core first, then its utilities, then the remaining components.
var FOUNDATION = [
  'js/foundation.core.js',
  'js/foundation.util.*.js',
  'js/*.js'
];

// Third-party libraries bundled separately from Foundation itself.
var DEPS = [
  'node_modules/jquery/dist/jquery.js',
  'node_modules/motion-ui/dist/motion-ui.js',
  'node_modules/what-input/what-input.js'
];

// Scripts used only by the documentation pages.
var DOCS = [
  'node_modules/zeroclipboard/dist/ZeroClipboard.js',
  'node_modules/typeahead.js/dist/typeahead.bundle.js',
  'docs/assets/js/docs.*.js',
  'docs/assets/js/docs.js'
];

// Concatenate a list of sources into a single file under _build/assets/js.
function bundle(sources, outputName) {
  return gulp.src(sources)
    .pipe(concat(outputName))
    .pipe(gulp.dest('_build/assets/js'));
}

// Compiles JavaScript into a single file
gulp.task('javascript', ['javascript:foundation', 'javascript:deps', 'javascript:docs']);

gulp.task('javascript:foundation', function() {
  return bundle(FOUNDATION, 'foundation.js');
});

gulp.task('javascript:deps', function() {
  return bundle(DEPS, 'vendor.js');
});

gulp.task('javascript:docs', function() {
  return bundle(DOCS, 'docs.js');
});
esm-erika/eschoolmedia
wp-content/themes/eschoolmedia-redesign/vendor/foundation-sites/gulp/javascript.js
JavaScript
gpl-2.0
1,064
<?php
/**
 * Page-title area template (Bridge theme).
 *
 * Reads per-page meta ("qode_*" keys) with theme-option fallbacks taken from
 * $qode_options_proya, computes the title area's height and padding from the
 * header configuration, then renders the title markup unless the page opts
 * out via the "qode_show-page-title" meta field.
 *
 * NOTE(review): relies on $id being set by the including template — confirm
 * against callers.
 */
global $qode_options_proya;

/* Set id on -1 beacause archive page id can have same id as some post and settings is not good */
if(is_category() || is_tag() || is_author()){
	$archive_id = $id;
	$id = -1;
}

// Per-page meta wins over the global theme option for each setting below.
if(get_post_meta($id, "qode_responsive-title-image", true) != ""){
	$responsive_title_image = get_post_meta($id, "qode_responsive-title-image", true);
}else{
	$responsive_title_image = $qode_options_proya['responsive_title_image'];
}
if(get_post_meta($id, "qode_fixed-title-image", true) != ""){
	$fixed_title_image = get_post_meta($id, "qode_fixed-title-image", true);
}else{
	$fixed_title_image = $qode_options_proya['fixed_title_image'];
}
if(get_post_meta($id, "qode_title-image", true) != ""){
	$title_image = get_post_meta($id, "qode_title-image", true);
}else{
	$title_image = $qode_options_proya['title_image'];
}
$title_image_height = "";
$title_image_width = "";
// Resolve the title image's pixel size from its path under the docroot.
if(!empty($title_image)){
	$title_image_url_obj = parse_url($title_image);
	if (file_exists($_SERVER['DOCUMENT_ROOT'].$title_image_url_obj['path']))
		list($title_image_width, $title_image_height, $title_image_type, $title_image_attr) = getimagesize($_SERVER['DOCUMENT_ROOT'].$title_image_url_obj['path']);
}
if(get_post_meta($id, "qode_title-overlay-image", true) != ""){
	$title_overlay_image = get_post_meta($id, "qode_title-overlay-image", true);
}else{
	$title_overlay_image = $qode_options_proya['title_overlay_image'];
}
$header_height_padding = 0;
if (!empty($qode_options_proya['header_height'])) {
	$header_height = $qode_options_proya['header_height'];
} else {
	$header_height = 100;
}
if (isset($qode_options_proya['header_bottom_border_color']) && !empty($qode_options_proya['header_bottom_border_color'])) {
	$header_height = $header_height + 1;
}
if($qode_options_proya['header_bottom_appearance'] == 'stick menu_bottom'){
	// NOTE(review): '46' is a string; later "+ 22" coerces it numerically.
	$menu_bottom = '46';
	if(is_active_sidebar('header_fixed_right')){
		$menu_bottom = $menu_bottom + 22;
	}
} else {
	$menu_bottom = 0;
}
$nav_font_size = 7;
if(isset($qode_options_proya['menu_fontsize']) && $qode_options_proya['menu_fontsize'] != ""){
	$nav_font_size = $qode_options_proya['menu_fontsize'] / 2;
}
$header_top = 0;
if(isset($qode_options_proya['header_top_area']) && $qode_options_proya['header_top_area'] == "yes"){
	$header_top = 33;
}
$header_height_padding = $header_height + $menu_bottom + $header_top;
// Centered-logo and fixed-hiding headers add the logo's height to the padding.
if ((isset($qode_options_proya['center_logo_image']) && $qode_options_proya['center_logo_image'] == "yes") || $qode_options_proya['header_bottom_appearance'] == 'fixed_hiding') {
	if(isset($qode_options_proya['logo_image'])){
		$logo_width = 0;
		$logo_height = 0;
		if (!empty($qode_options_proya['logo_image'])) {
			$logo_url_obj = parse_url($qode_options_proya['logo_image']);
			list($logo_width, $logo_height, $logo_type, $logo_attr) = getimagesize($_SERVER['DOCUMENT_ROOT'].$logo_url_obj['path']);
		}
	}
	if($qode_options_proya['header_bottom_appearance'] == 'stick menu_bottom'){
		$header_height_padding = $logo_height + 30 + $menu_bottom + $header_top; // 30 is top and bottom margin of centered logo
	} else if($qode_options_proya['header_bottom_appearance'] == 'fixed_hiding'){
		$header_height_padding = $logo_height/2 + 40 + $header_height + $header_top; // 40 is top and bottom margin of centered logo
	} else {
		$header_height_padding = $logo_height + 30 + $header_height + $header_top; // 30 is top and bottom margin of centered logo
	}
}
$title_height = 100;
if(get_post_meta($id, "qode_title-height", true) != ""){
	$title_height = get_post_meta($id, "qode_title-height", true);
}else if($qode_options_proya['title_height'] != ''){
	$title_height = $qode_options_proya['title_height'];
}else {
	if (isset($qode_options_proya['center_logo_image']) && $qode_options_proya['center_logo_image'] == "yes") {
		if($qode_options_proya['header_bottom_appearance'] == 'stick menu_bottom'){
			$title_height = $title_height + $logo_height + 30 + $menu_bottom + $header_top; // 30 is top and bottom margin of centered logo
		} else {
			$title_height = $header_height + $title_height + $logo_height + 30 + $header_top; // 30 is top and bottom margin of centered logo
		}
	} else {
		$title_height = $title_height + $header_height + $menu_bottom + $header_top;
	}
}
// NOTE(review): $fixed_title_image was already resolved above; this repeats
// the same fallback and is redundant but harmless.
if(get_post_meta($id, "qode_fixed-title-image", true) != ""){
	$fixed_title_image = get_post_meta($id, "qode_fixed-title-image", true);
}else{
	$fixed_title_image = $qode_options_proya['fixed_title_image'];
}
$title_background_color = '';
if(get_post_meta($id, "qode_page-title-background-color", true) != ""){
	$title_background_color = get_post_meta($id, "qode_page-title-background-color", true);
}else{
	$title_background_color = $qode_options_proya['title_background_color'];
}
$show_title_image = true;
if(get_post_meta($id, "qode_show-page-title-image", true) == 'yes') {
	$show_title_image = false;
}
// NOTE(review): $qode_page_title_style is computed but not used in this file;
// presumably consumed by templates sharing this scope — confirm.
$qode_page_title_style = "standard";
if(get_post_meta($id, "qode_page_title_style", true) != ""){
	$qode_page_title_style = get_post_meta($id, "qode_page_title_style", true);
}else{
	if(isset($qode_options_proya['title_style'])) {
		$qode_page_title_style = $qode_options_proya['title_style'];
	} else {
		$qode_page_title_style = "standard";
	}
}
$animate_title_area = '';
if(get_post_meta($id, "qode_animate-page-title", true) != ""){
	$animate_title_area = get_post_meta($id, "qode_animate-page-title", true);
}else{
	$animate_title_area = $qode_options_proya['animate_title_area'];
}
if($animate_title_area == "text_right_left") {
	$animate_title_class = "animate_title_text";
} elseif($animate_title_area == "area_top_bottom"){
	$animate_title_class = "animate_title_area";
} else {
	$animate_title_class = "title_without_animation";
}
$page_title_fontsize = '';
if(get_post_meta($id, "qode_page_title_font_size", true) != ""){
	$page_title_fontsize = "title_size_" . get_post_meta($id, "qode_page_title_font_size", true);
}else{
	if(isset($qode_options_proya['predefined_title_sizes'])) {
		$page_title_fontsize = "title_size_" . $qode_options_proya['predefined_title_sizes'];
	}
}
//init variables
$title_subtitle_padding = '';
$header_transparency = '';
$is_header_transparent = false;
$transparent_values_array = array('0.00', '0');
$solid_values_array = array('', '1');
$header_bottom_border = '';
//is header transparent not set on current page?
if(get_post_meta($id, "qode_header_color_transparency_per_page", true) === "") {
	//take global value set in Qode Options
	$header_transparency = $qode_options_proya['header_background_transparency_initial'];
} else {
	//take value set for current page
	$header_transparency = get_post_meta($id, "qode_header_color_transparency_per_page", true);
}
//is border bottom color for header set in Qode Options?
if(isset($qode_options_proya['header_bottom_border_color']) && !empty($qode_options_proya['header_bottom_border_color'])) {
	$header_bottom_border = $qode_options_proya['header_bottom_border_color'];
}
//is header completely transparent?
$is_header_transparent = in_array($header_transparency, $transparent_values_array);
//is header solid?
$is_header_solid = in_array($header_transparency, $solid_values_array);
//is header solid?
if($is_header_solid) {
	$title_holder_height = 'style="padding-top:' . $header_height_padding . 'px;height:' . ($title_height - $header_height_padding) . 'px;"';
	$title_subtitle_padding = 'style="padding-top:' . $header_height_padding . 'px;"';
} else {
	//is border for header bottom set?
	if ($header_bottom_border != '') {
		//center title between header and end of title section
		$title_holder_height = 'style="padding-top:' . $header_height_padding . 'px;height:' . ($title_height - $header_height_padding) . 'px;"';
		$title_subtitle_padding = 'style="padding-top:' . $header_height_padding . 'px;"';
	} else {
		//is header semi-transparent?
		if(!$is_header_transparent) {
			//center title between border and end of title section
			$title_holder_height = 'style="padding-top:' . $header_height_padding . 'px;height:' . ($title_height - $header_height_padding) . 'px;"';
			$title_subtitle_padding = 'style="padding-top:' . $header_height_padding . 'px;"';
		} else {
			//header is transparent. Center it between main menu item's text beginning and end of title section
			$title_holder_height = 'style="padding-top:'.(($header_height/2 - $nav_font_size) + $header_top) .'px;height:' . ($title_height - ($header_height/2 - $nav_font_size + $header_top)) . 'px;"';
			$title_subtitle_padding = 'style="padding-top:'.(($header_height/2 - $nav_font_size) + $header_top) .'px;"';
		}
	}
}
//is vertical menu activated in Qode Options?
if(isset($qode_options_proya['vertical_area']) && $qode_options_proya['vertical_area'] =='yes'){
	$title_subtitle_padding = 0;
	$title_holder_height = 100;
	$title_height = 100;
	if(get_post_meta($id, "qode_title-height", true) != ""){
		$title_holder_height = get_post_meta($id, "qode_title-height", true);
		$title_height = get_post_meta($id, "qode_title-height", true);
	}else if($qode_options_proya['title_height'] != ''){
		$title_holder_height = $qode_options_proya['title_height'];
		$title_height = $qode_options_proya['title_height'];
	}
}
$page_title_position = 'left';
$separator_title_position = 'left';
if(get_post_meta($id, "qode_page_title_position", true) != ""){
	$page_title_position = " position_" . get_post_meta($id, "qode_page_title_position", true);
	$separator_title_position = get_post_meta($id, "qode_page_title_position", true);
}else{
	$page_title_position = " position_" . $qode_options_proya['page_title_position'];
	$separator_title_position = $qode_options_proya['page_title_position'];
}
$enable_breadcrumbs = 'no';
if(get_post_meta($id, "qode_enable_breadcrumbs", true) != ""){
	$enable_breadcrumbs = get_post_meta($id, "qode_enable_breadcrumbs", true);
}elseif(isset($qode_options_proya['enable_breadcrumbs'])){
	$enable_breadcrumbs = $qode_options_proya['enable_breadcrumbs'];
}
$title_text_shadow = '';
if(get_post_meta($id, "qode_title_text_shadow", true) != ""){
	if(get_post_meta($id, "qode_title_text_shadow", true) == "yes"){
		$title_text_shadow = ' title_text_shadow';
	}
}else{
	if($qode_options_proya['title_text_shadow'] == "yes"){
		$title_text_shadow = ' title_text_shadow';
	}
}
$subtitle_color ="";
if(get_post_meta($id, "qode_page_subtitle_color", true) != ""){
	$subtitle_color = " style='color:" . get_post_meta($id, "qode_page_subtitle_color", true) . "';";
} else {
	$subtitle_color = "";
}
$separator_color ="";
if(get_post_meta($id, "qode_title_separator_color", true) != ""){
	$separator_color = " style='background-color:" . get_post_meta($id, "qode_title_separator_color", true) . "';";
} else {
	$separator_color = "";
}
$title_separator = "yes";
if(get_post_meta($id, "qode_separator_bellow_title", true)){
	$title_separator = get_post_meta($id, "qode_separator_bellow_title", true);
} elseif(isset($qode_options_proya['title_separator'])) {
	$title_separator = $qode_options_proya['title_separator'];
}
// Render the title area only when the page has not hidden it.
if(get_post_meta($id, "qode_show-page-title", true) !== 'yes') { ?>
<div class="title_outer <?php echo $animate_title_class.$title_text_shadow; if($responsive_title_image == 'yes' && $show_title_image == true){ echo ' with_image'; }?>" <?php echo 'data-height="'.$title_height.'"'; if($title_height != '' && $animate_title_area == 'area_top_bottom'){ echo 'style="opacity:0;height:' . $header_height_padding .'px;"'; } ?>>
	<div class="title <?php echo $page_title_fontsize . " " . $page_title_position; if($responsive_title_image == 'no' && $title_image != "" && ($fixed_title_image == "yes" || $fixed_title_image == "yes_zoom") && $show_title_image == true){ echo ' has_fixed_background '; if($fixed_title_image == "yes_zoom"){ echo 'zoom_out '; } } if($responsive_title_image == 'no' && $title_image != "" && $fixed_title_image == "no" && $show_title_image == true){ echo ' has_background'; } ?>" style="<?php if($responsive_title_image == 'no' && $title_image != "" && $show_title_image == true){ if($title_image_width != ''){ echo 'background-size:'.$title_image_width.'px auto;'; } echo 'background-image:url('.$title_image.');'; } if($title_height != ''){ echo 'height:'.$title_height.'px;'; } if($title_background_color != ''){ echo 'background-color:'.$title_background_color.';'; } ?>">
		<div class="image <?php if($responsive_title_image == 'yes' && $title_image != "" && $show_title_image == true){ echo "responsive"; }else{ echo "not_responsive"; } ?>"><?php if($title_image != ""){ ?><img src="<?php echo $title_image; ?>" alt="&nbsp;" /> <?php } ?></div>
		<?php if($title_overlay_image != ""){ ?>
		<div class="title_overlay" style="background-image:url('<?php echo $title_overlay_image; ?>');"></div>
		<?php } ?>
		<?php if(get_post_meta($id, "qode_show-page-title-text", true) !== 'yes') { ?>
		<div class="title_holder" <?php if($responsive_title_image != 'yes' && get_post_meta($id, "qode_show-page-title-image", true) == ""){ echo $title_holder_height; }?>>
			<div class="container">
				<div class="container_inner clearfix">
					<div class="title_subtitle_holder" <?php if($responsive_title_image == 'yes' && $show_title_image == true){ echo $title_subtitle_padding; }?>>
						<?php if(($responsive_title_image == 'yes' && $show_title_image == true) || ($fixed_title_image == "yes" || $fixed_title_image == "yes_zoom") || ($responsive_title_image == 'no' && $title_image != "" && $fixed_title_image == "no" && $show_title_image == true)){ ?>
						<div class="title_subtitle_holder_inner">
						<?php } ?>
							<h1<?php if(get_post_meta($id, "qode_page-title-color", true)) { ?> style="color:<?php echo get_post_meta($id, "qode_page-title-color", true) ?>" <?php } ?>><span><?php qode_title_text(); ?></span></h1>
							<?php if($title_separator == "yes"){ ?>
							<span class="separator small <?php echo $separator_title_position; ?>" <?php echo $separator_color; ?>></span>
							<?php } ?>
							<?php if(get_post_meta($id, "qode_page_subtitle", true) != ""){ ?>
								<?php if(get_post_meta($id, "qode_page_title_font_size", true) == "large") { ?>
								<h4 class="subtitle" <?php echo $subtitle_color; ?>><?php echo get_post_meta($id, "qode_page_subtitle", true); ?></h4>
								<?php } else { ?>
								<span class="subtitle" <?php echo $subtitle_color; ?>><?php echo get_post_meta($id, "qode_page_subtitle", true); ?></span>
								<?php } ?>
							<?php } ?>
							<?php if (function_exists('qode_custom_breadcrumbs') && $enable_breadcrumbs == "yes") { ?>
							<div class="breadcrumb"> <?php qode_custom_breadcrumbs(); ?></div>
							<?php } ?>
						</div>
						<?php if(($responsive_title_image == 'yes' && $show_title_image == true) || ($fixed_title_image == "yes" || $fixed_title_image == "yes_zoom") || ($responsive_title_image == 'no' && $title_image != "" && $fixed_title_image == "no" && $show_title_image == true)){ ?>
						</div>
						<?php } ?>
					</div>
				</div>
			</div>
		</div>
		<?php } ?>
	</div>
</div>
<?php } ?>
<?php
/* Return id for archive pages */
if(is_category() || is_tag() || is_author()){
	$id = $archive_id;
}
?>
AnGlez/ver-con-las-manos
wp-content/themes/bridge/title.php
PHP
gpl-2.0
15,303
/*
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Setting up the clock on the MIPS boards.
 */
#include <linux/types.h>
#include <linux/i8253.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/mc146818rtc.h>

#include <asm/cpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/hardirq.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/setup.h>
#include <asm/time.h>
#include <asm/mc146818-time.h>
#include <asm/msc01_ic.h>
#include <asm/gic.h>

#include <asm/mips-boards/generic.h>
#include <asm/mips-boards/maltaint.h>

/* IRQ numbers resolved at setup time and used by the dispatch trampolines. */
static int mips_cpu_timer_irq;
static int mips_cpu_perf_irq;
extern int cp0_perfcount_irq;

/* Vectored-interrupt trampoline: forward the CPU timer vector to do_IRQ(). */
static void mips_timer_dispatch(void)
{
	do_IRQ(mips_cpu_timer_irq);
}

/* Vectored-interrupt trampoline for the performance-counter interrupt. */
static void mips_perf_dispatch(void)
{
	do_IRQ(mips_cpu_perf_irq);
}

/* Round @freq to the nearest multiple of (2 * @amount). */
static unsigned int freqround(unsigned int freq, unsigned int amount)
{
	freq += amount;
	freq -= freq % (amount*2);
	return freq;
}

/*
 * Estimate CPU and GIC frequencies.
 */
static void __init estimate_frequencies(void)
{
	unsigned long flags;
	unsigned int count, start;
#ifdef CONFIG_IRQ_GIC
	unsigned int giccount = 0, gicstart = 0;
#endif

#if defined (CONFIG_KVM_GUEST) && defined (CONFIG_KVM_HOST_FREQ)
	unsigned int prid = read_c0_prid() & (PRID_COMP_MASK | PRID_IMP_MASK);

	/*
	 * XXXKYMA: hardwire the CPU frequency to Host Freq/4
	 */
	count = (CONFIG_KVM_HOST_FREQ * 1000000) >> 3;
	if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
	    (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
		count *= 2;

	mips_hpt_frequency = count;
	return;
#endif

	/*
	 * Count CPU (and, when present, GIC) cycles between two consecutive
	 * edges of the RTC update-in-progress flag, i.e. over one RTC update
	 * interval (nominally one second on the mc146818 — confirm).
	 */
	local_irq_save(flags);

	/* Start counter exactly on falling edge of update flag. */
	while (CMOS_READ(RTC_REG_A) & RTC_UIP);
	while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));

	/* Initialize counters. */
	start = read_c0_count();
#ifdef CONFIG_IRQ_GIC
	if (gic_present)
		GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), gicstart);
#endif

	/* Read counter exactly on falling edge of update flag. */
	while (CMOS_READ(RTC_REG_A) & RTC_UIP);
	while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));

	count = read_c0_count();
#ifdef CONFIG_IRQ_GIC
	if (gic_present)
		GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), giccount);
#endif

	local_irq_restore(flags);

	count -= start;
	mips_hpt_frequency = count;

#ifdef CONFIG_IRQ_GIC
	if (gic_present) {
		giccount -= gicstart;
		gic_frequency = giccount;
	}
#endif
}

/* Boot-time wall clock, read from the board's CMOS RTC. */
void read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = mc146818_get_cmos_time();
	ts->tv_nsec = 0;
}

/*
 * Route the performance-counter interrupt: through the external interrupt
 * controller when vectored EIC mode is in use, otherwise through the CPU's
 * own interrupt lines.
 */
static void __init plat_perf_setup(void)
{
#ifdef MSC01E_INT_BASE
	if (cpu_has_veic) {
		set_vi_handler(MSC01E_INT_PERFCTR, mips_perf_dispatch);
		mips_cpu_perf_irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
	} else
#endif
	if (cp0_perfcount_irq >= 0) {
		if (cpu_has_vint)
			set_vi_handler(cp0_perfcount_irq, mips_perf_dispatch);
		mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
#ifdef CONFIG_SMP
		irq_set_handler(mips_cpu_perf_irq, handle_percpu_irq);
#endif
	}
}

/* Return the IRQ number used for the CP0 compare (clockevent) interrupt. */
unsigned int get_c0_compare_int(void)
{
#ifdef MSC01E_INT_BASE
	if (cpu_has_veic) {
		set_vi_handler(MSC01E_INT_CPUCTR, mips_timer_dispatch);
		mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR;
	} else
#endif
	{
		if (cpu_has_vint)
			set_vi_handler(cp0_compare_irq, mips_timer_dispatch);
		mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	}

	return mips_cpu_timer_irq;
}

/* Program the CMOS RTC for 24h mode on a 32KHz time base and start it. */
static void __init init_rtc(void)
{
	/* stop the clock whilst setting it up */
	CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL);

	/* 32KHz time base */
	CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);

	/* start the clock */
	CMOS_WRITE(RTC_24H, RTC_CONTROL);
}

/*
 * Board time init: set up the RTC, measure the CPU/GIC frequencies
 * (doubling for cores whose count register ticks at half the pipeline
 * rate — everything except 20Kc/25Kf), register the PIT/GIC clock
 * sources and hook up the performance counter interrupt.
 */
void __init plat_time_init(void)
{
	unsigned int prid = read_c0_prid() & (PRID_COMP_MASK | PRID_IMP_MASK);
	unsigned int freq;

	init_rtc();
	estimate_frequencies();

	freq = mips_hpt_frequency;
	if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
	    (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
		freq *= 2;
	freq = freqround(freq, 5000);
	printk("CPU frequency %d.%02d MHz\n", freq/1000000,
	       (freq%1000000)*100/1000000);

	mips_scroll_message();

#ifdef CONFIG_I8253
	/* Only Malta has a PIT. */
	setup_pit_timer();
#endif

#ifdef CONFIG_IRQ_GIC
	if (gic_present) {
		freq = freqround(gic_frequency, 5000);
		printk("GIC frequency %d.%02d MHz\n", freq/1000000,
		       (freq%1000000)*100/1000000);
#ifdef CONFIG_CSRC_GIC
		gic_clocksource_init(gic_frequency);
#endif
	}
#endif

	plat_perf_setup();
}
freddy77/linux
arch/mips/mti-malta/malta-time.c
C
gpl-2.0
5,250
/* * drivers/media/radio/si470x/radio-si470x-i2c.c * * I2C driver for radios with Silicon Labs Si470x FM Radio Receivers * * Copyright (c) 2009 Samsung Electronics Co.Ltd * Author: Joonyoung Shim <jy0922.shim@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * ToDo: * - RDS support */ /* driver definitions */ #define DRIVER_AUTHOR "Joonyoung Shim <jy0922.shim@samsung.com>"; #define DRIVER_KERNEL_VERSION KERNEL_VERSION(1, 0, 0) #define DRIVER_CARD "Silicon Labs Si470x FM Radio Receiver" #define DRIVER_DESC "I2C radio driver for Si470x FM Radio Receivers" #define DRIVER_VERSION "1.0.0" /* kernel includes */ #include <linux/i2c.h> #include <linux/delay.h> #include "radio-si470x.h" /* I2C Device ID List */ static const struct i2c_device_id si470x_i2c_id[] = { /* Generic Entry */ { "si470x", 0 }, /* Terminating entry */ { } }; MODULE_DEVICE_TABLE(i2c, si470x_i2c_id); /************************************************************************** * Module Parameters **************************************************************************/ /* Radio Nr */ static int radio_nr = -1; module_param(radio_nr, int, 0444); MODULE_PARM_DESC(radio_nr, "Radio Nr"); /************************************************************************** * I2C Definitions 
**************************************************************************/ /* Write starts with the upper byte of register 0x02 */ #define WRITE_REG_NUM 8 #define WRITE_INDEX(i) (i + 0x02) /* Read starts with the upper byte of register 0x0a */ #define READ_REG_NUM RADIO_REGISTER_NUM #define READ_INDEX(i) ((i + RADIO_REGISTER_NUM - 0x0a) % READ_REG_NUM) /************************************************************************** * General Driver Functions - REGISTERs **************************************************************************/ /* * si470x_get_register - read register */ int si470x_get_register(struct si470x_device *radio, int regnr) { u16 buf[READ_REG_NUM]; struct i2c_msg msgs[1] = { { radio->client->addr, I2C_M_RD, sizeof(u16) * READ_REG_NUM, (void *)buf }, }; if (i2c_transfer(radio->client->adapter, msgs, 1) != 1) return -EIO; radio->registers[regnr] = __be16_to_cpu(buf[READ_INDEX(regnr)]); return 0; } /* * si470x_set_register - write register */ int si470x_set_register(struct si470x_device *radio, int regnr) { int i; u16 buf[WRITE_REG_NUM]; struct i2c_msg msgs[1] = { { radio->client->addr, 0, sizeof(u16) * WRITE_REG_NUM, (void *)buf }, }; for (i = 0; i < WRITE_REG_NUM; i++) buf[i] = __cpu_to_be16(radio->registers[WRITE_INDEX(i)]); if (i2c_transfer(radio->client->adapter, msgs, 1) != 1) return -EIO; return 0; } /************************************************************************** * General Driver Functions - ENTIRE REGISTERS **************************************************************************/ /* * si470x_get_all_registers - read entire registers */ static int si470x_get_all_registers(struct si470x_device *radio) { int i; u16 buf[READ_REG_NUM]; struct i2c_msg msgs[1] = { { radio->client->addr, I2C_M_RD, sizeof(u16) * READ_REG_NUM, (void *)buf }, }; if (i2c_transfer(radio->client->adapter, msgs, 1) != 1) return -EIO; for (i = 0; i < READ_REG_NUM; i++) radio->registers[i] = __be16_to_cpu(buf[READ_INDEX(i)]); return 0; } 
/**************************************************************************
 * General Driver Functions - DISCONNECT_CHECK
 **************************************************************************/

/*
 * si470x_disconnect_check - check whether radio disconnects
 *
 * Stub for the I2C bus: always reports "still connected" (0).
 */
int si470x_disconnect_check(struct si470x_device *radio)
{
	return 0;
}



/**************************************************************************
 * File Operations Interface
 **************************************************************************/

/*
 * si470x_fops_open - file open
 *
 * Bumps the user count under radio->lock; the first opener powers the
 * radio up via si470x_start().  Returns 0 or the si470x_start() error.
 */
static int si470x_fops_open(struct file *file)
{
	struct si470x_device *radio = video_drvdata(file);
	int retval = 0;

	mutex_lock(&radio->lock);
	radio->users++;

	if (radio->users == 1)
		/* start radio */
		retval = si470x_start(radio);
	mutex_unlock(&radio->lock);

	return retval;
}


/*
 * si470x_fops_release - file release
 *
 * Drops the user count under radio->lock; the last closer stops the
 * radio via si470x_stop().  Returns 0 or the si470x_stop() error.
 */
static int si470x_fops_release(struct file *file)
{
	struct si470x_device *radio = video_drvdata(file);
	int retval = 0;

	/* safety check */
	if (!radio)
		return -ENODEV;

	mutex_lock(&radio->lock);
	radio->users--;
	if (radio->users == 0)
		/* stop radio */
		retval = si470x_stop(radio);
	mutex_unlock(&radio->lock);

	return retval;
}


/*
 * si470x_fops - file operations interface
 */
const struct v4l2_file_operations si470x_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= video_ioctl2,
	.open		= si470x_fops_open,
	.release	= si470x_fops_release,
};



/**************************************************************************
 * Video4Linux Interface
 **************************************************************************/

/*
 * si470x_vidioc_querycap - query device capabilities
 *
 * Fills in driver name, card name, version and the capability flags
 * (tuner + radio + hardware frequency seek).
 */
int si470x_vidioc_querycap(struct file *file, void *priv,
		struct v4l2_capability *capability)
{
	strlcpy(capability->driver, DRIVER_NAME, sizeof(capability->driver));
	strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card));
	capability->version = DRIVER_KERNEL_VERSION;
	capability->capabilities = V4L2_CAP_HW_FREQ_SEEK |
		V4L2_CAP_TUNER | V4L2_CAP_RADIO;

	return 0;
}



/**************************************************************************
 * I2C Interface
 **************************************************************************/

/*
 * si470x_i2c_probe - probe for the device
 *
 * Allocates driver state and a video device, powers the chip up (110 ms
 * settle time, see below), reads and sanity-checks the chip/firmware IDs,
 * tunes to an initial frequency and registers the radio with V4L2.  On
 * failure everything allocated so far is unwound via the goto chain
 * (err_all/err_video share a target: release videodev, then free state).
 */
static int __devinit si470x_i2c_probe(struct i2c_client *client,
		const struct i2c_device_id *id)
{
	struct si470x_device *radio;
	int retval = 0;
	unsigned char version_warning = 0;

	/* private data allocation and initialization */
	radio = kzalloc(sizeof(struct si470x_device), GFP_KERNEL);
	if (!radio) {
		retval = -ENOMEM;
		goto err_initial;
	}
	radio->users = 0;
	radio->client = client;
	mutex_init(&radio->lock);

	/* video device allocation and initialization */
	radio->videodev = video_device_alloc();
	if (!radio->videodev) {
		retval = -ENOMEM;
		goto err_radio;
	}
	memcpy(radio->videodev, &si470x_viddev_template,
			sizeof(si470x_viddev_template));
	video_set_drvdata(radio->videodev, radio);

	/* power up : need 110ms */
	radio->registers[POWERCFG] = POWERCFG_ENABLE;
	if (si470x_set_register(radio, POWERCFG) < 0) {
		retval = -EIO;
		goto err_all;
	}
	msleep(110);

	/* get device and chip versions */
	if (si470x_get_all_registers(radio) < 0) {
		retval = -EIO;
		goto err_video;
	}
	dev_info(&client->dev, "DeviceID=0x%4.4hx ChipID=0x%4.4hx\n",
			radio->registers[DEVICEID], radio->registers[CHIPID]);
	if ((radio->registers[CHIPID] & CHIPID_FIRMWARE) < RADIO_FW_VERSION) {
		dev_warn(&client->dev,
			"This driver is known to work with "
			"firmware version %hu,\n", RADIO_FW_VERSION);
		dev_warn(&client->dev,
			"but the device has firmware version %hu.\n",
			radio->registers[CHIPID] & CHIPID_FIRMWARE);
		version_warning = 1;
	}

	/* give out version warning */
	if (version_warning == 1) {
		dev_warn(&client->dev,
			"If you have some trouble using this driver,\n");
		dev_warn(&client->dev,
			"please report to V4L ML at "
			"linux-media@vger.kernel.org\n");
	}

	/* set initial frequency */
	si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */

	/* register video device */
	retval = video_register_device(radio->videodev, VFL_TYPE_RADIO,
			radio_nr);
	if (retval) {
		dev_warn(&client->dev, "Could not register video device\n");
		goto err_all;
	}
	i2c_set_clientdata(client, radio);

	return 0;
err_all:
err_video:
	video_device_release(radio->videodev);
err_radio:
	kfree(radio);
err_initial:
	return retval;
}


/*
 * si470x_i2c_remove - remove the device
 *
 * Unregisters the video device (which also releases it) and frees the
 * driver state.
 */
static __devexit int si470x_i2c_remove(struct i2c_client *client)
{
	struct si470x_device *radio = i2c_get_clientdata(client);

	video_unregister_device(radio->videodev);
	kfree(radio);
	i2c_set_clientdata(client, NULL);

	return 0;
}


/*
 * si470x_i2c_driver - i2c driver interface
 */
static struct i2c_driver si470x_i2c_driver = {
	.driver = {
		.name		= "si470x",
		.owner		= THIS_MODULE,
	},
	.probe			= si470x_i2c_probe,
	.remove			= __devexit_p(si470x_i2c_remove),
	.id_table		= si470x_i2c_id,
};



/**************************************************************************
 * Module Interface
 **************************************************************************/

/*
 * si470x_i2c_init - module init
 */
static int __init si470x_i2c_init(void)
{
	printk(KERN_INFO DRIVER_DESC ", Version " DRIVER_VERSION "\n");
	return i2c_add_driver(&si470x_i2c_driver);
}


/*
 * si470x_i2c_exit - module exit
 */
static void __exit si470x_i2c_exit(void)
{
	i2c_del_driver(&si470x_i2c_driver);
}


module_init(si470x_i2c_init);
module_exit(si470x_i2c_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_VERSION(DRIVER_VERSION);
gongwan33/hiveboard_linux_with_sonix291_uvcdriver
drivers/media/radio/si470x/radio-si470x-i2c.c
C
gpl-2.0
9,748
/* * NCR 5380 generic driver routines. These should make it *trivial* * to implement 5380 SCSI drivers under Linux with a non-trantor * architecture. * * Note that these routines also work with NR53c400 family chips. * * Copyright 1993, Drew Eckhardt * Visionary Computing * (Unix and Linux consulting and custom programming) * drew@colorado.edu * +1 (303) 666-5836 * * For more information, please consult * * NCR 5380 Family * SCSI Protocol Controller * Databook * * NCR Microelectronics * 1635 Aeroplaza Drive * Colorado Springs, CO 80916 * 1+ (719) 578-3400 * 1+ (800) 334-5454 */ /* * ++roman: To port the 5380 driver to the Atari, I had to do some changes in * this file, too: * * - Some of the debug statements were incorrect (undefined variables and the * like). I fixed that. * * - In information_transfer(), I think a #ifdef was wrong. Looking at the * possible DMA transfer size should also happen for REAL_DMA. I added this * in the #if statement. * * - When using real DMA, information_transfer() should return in a DATAOUT * phase after starting the DMA. It has nothing more to do. * * - The interrupt service routine should run main after end of DMA, too (not * only after RESELECTION interrupts). Additionally, it should _not_ test * for more interrupts after running main, since a DMA process may have * been started and interrupts are turned on now. The new int could happen * inside the execution of NCR5380_intr(), leading to recursive * calls. * * - I've added a function merge_contiguous_buffers() that tries to * merge scatter-gather buffers that are located at contiguous * physical addresses and can be processed with the same DMA setup. * Since most scatter-gather operations work on a page (4K) of * 4 buffers (1K), in more than 90% of all cases three interrupts and * DMA setup actions are saved. 
* * - I've deleted all the stuff for AUTOPROBE_IRQ, REAL_DMA_POLL, PSEUDO_DMA * and USLEEP, because these were messing up readability and will never be * needed for Atari SCSI. * * - I've revised the NCR5380_main() calling scheme (relax the 'main_running' * stuff), and 'main' is executed in a bottom half if awoken by an * interrupt. * * - The code was quite cluttered up by "#if (NDEBUG & NDEBUG_*) printk..." * constructs. In my eyes, this made the source rather unreadable, so I * finally replaced that by the *_PRINTK() macros. * */ /* * Further development / testing that should be done : * 1. Test linked command handling code after Eric is ready with * the high level code. */ /* Adapted for the sun3 by Sam Creasey. */ #include <scsi/scsi_dbg.h> #include <scsi/scsi_transport_spi.h> #if (NDEBUG & NDEBUG_LISTS) #define LIST(x, y) \ do { \ printk("LINE:%d Adding %p to %p\n", \ __LINE__, (void*)(x), (void*)(y)); \ if ((x) == (y)) \ udelay(5); \ } while (0) #define REMOVE(w, x, y, z) \ do { \ printk("LINE:%d Removing: %p->%p %p->%p \n", \ __LINE__, (void*)(w), (void*)(x), \ (void*)(y), (void*)(z)); \ if ((x) == (y)) \ udelay(5); \ } while (0) #else #define LIST(x,y) #define REMOVE(w,x,y,z) #endif #ifndef notyet #undef LINKED #endif /* * Design * * This is a generic 5380 driver. To use it on a different platform, * one simply writes appropriate system specific macros (ie, data * transfer - some PC's will use the I/O bus, 68K's must use * memory mapped) and drops this file in their 'C' wrapper. * * As far as command queueing, two queues are maintained for * each 5380 in the system - commands that haven't been issued yet, * and commands that are currently executing. This means that an * unlimited number of commands may be queued, letting * more commands propagate from the higher driver levels giving higher * throughput. 
Note that both I_T_L and I_T_L_Q nexuses are supported, * allowing multiple commands to propagate all the way to a SCSI-II device * while a command is already executing. * * * Issues specific to the NCR5380 : * * When used in a PIO or pseudo-dma mode, the NCR5380 is a braindead * piece of hardware that requires you to sit in a loop polling for * the REQ signal as long as you are connected. Some devices are * brain dead (ie, many TEXEL CD ROM drives) and won't disconnect * while doing long seek operations. * * The workaround for this is to keep track of devices that have * disconnected. If the device hasn't disconnected, for commands that * should disconnect, we do something like * * while (!REQ is asserted) { sleep for N usecs; poll for M usecs } * * Some tweaking of N and M needs to be done. An algorithm based * on "time to data" would give the best results as long as short time * to datas (ie, on the same track) were considered, however these * broken devices are the exception rather than the rule and I'd rather * spend my time optimizing for the normal case. * * Architecture : * * At the heart of the design is a coroutine, NCR5380_main, * which is started from a workqueue for each NCR5380 host in the * system. It attempts to establish I_T_L or I_T_L_Q nexuses by * removing the commands from the issue queue and calling * NCR5380_select() if a nexus is not established. * * Once a nexus is established, the NCR5380_information_transfer() * phase goes through the various phases as instructed by the target. * if the target goes into MSG IN and sends a DISCONNECT message, * the command structure is placed into the per instance disconnected * queue, and NCR5380_main tries to find more work. If the target is * idle for too long, the system will try to sleep. * * If a command has disconnected, eventually an interrupt will trigger, * calling NCR5380_intr() which will in turn call NCR5380_reselect * to reestablish a nexus. This will run main if necessary. 
* * On command termination, the done function will be called as * appropriate. * * SCSI pointers are maintained in the SCp field of SCSI command * structures, being initialized after the command is connected * in NCR5380_select, and set as appropriate in NCR5380_information_transfer. * Note that in violation of the standard, an implicit SAVE POINTERS operation * is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS. */ /* * Using this file : * This file a skeleton Linux SCSI driver for the NCR 5380 series * of chips. To use it, you write an architecture specific functions * and macros and include this file in your driver. * * These macros control options : * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically * for commands that return with a CHECK CONDITION status. * * DIFFERENTIAL - if defined, NCR53c81 chips will use external differential * transceivers. * * LINKED - if defined, linked commands are supported. * * REAL_DMA - if defined, REAL DMA is used during the data transfer phases. * * SUPPORT_TAGS - if defined, SCSI-2 tagged queuing is used where possible * * These macros MUST be defined : * * NCR5380_read(register) - read from the specified register * * NCR5380_write(register, value) - write to the specific register * * NCR5380_implementation_fields - additional fields needed for this * specific implementation of the NCR5380 * * Either real DMA *or* pseudo DMA may be implemented * REAL functions : * NCR5380_REAL_DMA should be defined if real DMA is to be used. * Note that the DMA setup functions should return the number of bytes * that they were able to program the controller for. * * Also note that generic i386/PC versions of these macros are * available as NCR5380_i386_dma_write_setup, * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual. 
* * NCR5380_dma_write_setup(instance, src, count) - initialize * NCR5380_dma_read_setup(instance, dst, count) - initialize * NCR5380_dma_residual(instance); - residual count * * PSEUDO functions : * NCR5380_pwrite(instance, src, count) * NCR5380_pread(instance, dst, count); * * The generic driver is initialized by calling NCR5380_init(instance), * after setting the appropriate host specific fields and ID. If the * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance, * possible) function may be used. */ /* Macros ease life... :-) */ #define SETUP_HOSTDATA(in) \ struct NCR5380_hostdata *hostdata = \ (struct NCR5380_hostdata *)(in)->hostdata #define HOSTDATA(in) ((struct NCR5380_hostdata *)(in)->hostdata) #define NEXT(cmd) ((struct scsi_cmnd *)(cmd)->host_scribble) #define SET_NEXT(cmd,next) ((cmd)->host_scribble = (void *)(next)) #define NEXTADDR(cmd) ((struct scsi_cmnd **)&(cmd)->host_scribble) #define HOSTNO instance->host_no #define H_NO(cmd) (cmd)->device->host->host_no #ifdef SUPPORT_TAGS /* * Functions for handling tagged queuing * ===================================== * * ++roman (01/96): Now I've implemented SCSI-2 tagged queuing. Some notes: * * Using consecutive numbers for the tags is no good idea in my eyes. There * could be wrong re-usings if the counter (8 bit!) wraps and some early * command has been preempted for a long time. My solution: a bitfield for * remembering used tags. * * There's also the problem that each target has a certain queue size, but we * cannot know it in advance :-( We just see a QUEUE_FULL status being * returned. So, in this case, the driver internal queue size assumption is * reduced to the number of active tags if QUEUE_FULL is returned by the * target. The command is returned to the mid-level, but with status changed * to BUSY, since --as I've seen-- the mid-level can't handle QUEUE_FULL * correctly. * * We're also not allowed running tagged commands as long as an untagged * command is active. 
And REQUEST SENSE commands after a contingent allegiance
 * condition _must_ be untagged. To keep track whether an untagged command has
 * been issued, the host->busy array is still employed, as it is without
 * support for tagged queuing.
 *
 * One could suspect that there are possible race conditions between
 * is_lun_busy(), cmd_get_tag() and cmd_free_tag(). But I think this isn't the
 * case: is_lun_busy() and cmd_get_tag() are both called from NCR5380_main(),
 * which already guaranteed to be running at most once. It is also the only
 * place where tags/LUNs are allocated. So no other allocation can slip
 * between that pair, there could only happen a reselection, which can free a
 * tag, but that doesn't hurt. Only the sequence in cmd_free_tag() becomes
 * important: the tag bit must be cleared before 'nr_allocated' is decreased.
 */

/*
 * init_tags - reset the per-target/per-LUN tag bookkeeping
 *
 * Clears every tag bitmap and allocation count and optimistically sets
 * each queue_size to MAX_TAGS (shrunk later when a target reports
 * QUEUE_FULL).  No-op unless FLAG_TAGGED_QUEUING is set.
 */
static void __init init_tags(struct NCR5380_hostdata *hostdata)
{
	int target, lun;
	struct tag_alloc *ta;

	if (!(hostdata->flags & FLAG_TAGGED_QUEUING))
		return;

	for (target = 0; target < 8; ++target) {
		for (lun = 0; lun < 8; ++lun) {
			ta = &hostdata->TagAlloc[target][lun];
			bitmap_zero(ta->allocated, MAX_TAGS);
			ta->nr_allocated = 0;
			/* At the beginning, assume the maximum queue size we could
			 * support (MAX_TAGS). This value will be decreased if the target
			 * returns QUEUE_FULL status.
			 */
			ta->queue_size = MAX_TAGS;
		}
	}
}


/* Check if we can issue a command to this LUN: First see if the LUN is marked
 * busy by an untagged command. If the command should use tagged queuing, also
 * check that there is a free tag and the target's queue won't overflow. This
 * function should be called with interrupts disabled to avoid race
 * conditions.  Returns 1 when the LUN cannot accept the command, 0 when it
 * can.
 */

static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged)
{
	u8 lun = cmd->device->lun;
	SETUP_HOSTDATA(cmd->device->host);

	/* LUN already reserved by an untagged command -> busy */
	if (hostdata->busy[cmd->device->id] & (1 << lun))
		return 1;
	/* untagged commands only need the reservation check above */
	if (!should_be_tagged ||
	    !(hostdata->flags & FLAG_TAGGED_QUEUING) ||
	    !cmd->device->tagged_supported)
		return 0;
	/* tagged: refuse when the target's assumed queue depth is reached */
	if (hostdata->TagAlloc[scmd_id(cmd)][lun].nr_allocated >=
	    hostdata->TagAlloc[scmd_id(cmd)][lun].queue_size) {
		dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n",
			   H_NO(cmd), cmd->device->id, lun);
		return 1;
	}
	return 0;
}


/* Allocate a tag for a command (there are no checks anymore, check_lun_busy()
 * must be called before!), or reserve the LUN in 'busy' if the command is
 * untagged.  The chosen tag (or TAG_NONE) is stored in cmd->tag.
 */

static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged)
{
	u8 lun = cmd->device->lun;
	SETUP_HOSTDATA(cmd->device->host);

	/* If we or the target don't support tagged queuing, allocate the LUN for
	 * an untagged command.
	 */
	if (!should_be_tagged ||
	    !(hostdata->flags & FLAG_TAGGED_QUEUING) ||
	    !cmd->device->tagged_supported) {
		cmd->tag = TAG_NONE;
		hostdata->busy[cmd->device->id] |= (1 << lun);
		dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged "
			   "command\n", H_NO(cmd), cmd->device->id, lun);
	} else {
		struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun];

		/* a free bit is guaranteed by the earlier is_lun_busy() check */
		cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS);
		set_bit(cmd->tag, ta->allocated);
		ta->nr_allocated++;
		dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d "
			   "(now %d tags in use)\n",
			   H_NO(cmd), cmd->tag, cmd->device->id,
			   lun, ta->nr_allocated);
	}
}


/* Mark the tag of command 'cmd' as free, or in case of an untagged command,
 * unlock the LUN.
 */

static void cmd_free_tag(struct scsi_cmnd *cmd)
{
	u8 lun = cmd->device->lun;
	SETUP_HOSTDATA(cmd->device->host);

	if (cmd->tag == TAG_NONE) {
		hostdata->busy[cmd->device->id] &= ~(1 << lun);
		dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n",
			   H_NO(cmd), cmd->device->id, lun);
	} else if (cmd->tag >= MAX_TAGS) {
		printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n",
		       H_NO(cmd), cmd->tag);
	} else {
		struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun];
		/* ordering matters: clear the bit BEFORE decrementing
		 * nr_allocated (see the race discussion in the comment block
		 * at the top of this SUPPORT_TAGS section) */
		clear_bit(cmd->tag, ta->allocated);
		ta->nr_allocated--;
		dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n",
			   H_NO(cmd), cmd->tag, cmd->device->id, lun);
	}
}


/*
 * free_all_tags - forget every tag reservation for every target/LUN
 *
 * Same reset as init_tags() minus the queue_size re-initialisation.
 */
static void free_all_tags(struct NCR5380_hostdata *hostdata)
{
	int target, lun;
	struct tag_alloc *ta;

	if (!(hostdata->flags & FLAG_TAGGED_QUEUING))
		return;

	for (target = 0; target < 8; ++target) {
		for (lun = 0; lun < 8; ++lun) {
			ta = &hostdata->TagAlloc[target][lun];
			bitmap_zero(ta->allocated, MAX_TAGS);
			ta->nr_allocated = 0;
		}
	}
}

#endif /* SUPPORT_TAGS */


/*
 * Function: void merge_contiguous_buffers( struct scsi_cmnd *cmd )
 *
 * Purpose: Try to merge several scatter-gather requests into one DMA
 *    transfer. This is possible if the scatter buffers lie on
 *    physical contiguous addresses.
 *
 * Parameters: struct scsi_cmnd *cmd
 *    The command to work on. The first scatter buffer's data are
 *    assumed to be already transferred into ptr/this_residual.
 */

static void merge_contiguous_buffers(struct scsi_cmnd *cmd)
{
#if !defined(CONFIG_SUN3)
	unsigned long endaddr;
#if (NDEBUG & NDEBUG_MERGING)
	unsigned long oldlen = cmd->SCp.this_residual;
	int cnt = 1;
#endif

	/* absorb following sg entries as long as each one starts at the
	 * physical address where the previous one ended */
	for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1;
	     cmd->SCp.buffers_residual &&
	     virt_to_phys(sg_virt(&cmd->SCp.buffer[1])) == endaddr;) {
		dprintk(NDEBUG_MERGING, "VTOP(%p) == %08lx -> merging\n",
			   page_address(sg_page(&cmd->SCp.buffer[1])), endaddr);
#if (NDEBUG & NDEBUG_MERGING)
		++cnt;
#endif
		++cmd->SCp.buffer;
		--cmd->SCp.buffers_residual;
		cmd->SCp.this_residual += cmd->SCp.buffer->length;
		endaddr += cmd->SCp.buffer->length;
	}
#if (NDEBUG & NDEBUG_MERGING)
	if (oldlen != cmd->SCp.this_residual)
		dprintk(NDEBUG_MERGING, "merged %d buffers from %p, new length %08x\n",
			   cnt, cmd->SCp.ptr, cmd->SCp.this_residual);
#endif
#endif /* !defined(CONFIG_SUN3) */
}

/**
 * initialize_SCp - init the scsi pointer field
 * @cmd: command block to set up
 *
 * Set up the internal fields in the SCSI command.  Points SCp at the
 * first scatterlist entry (or NULLs everything for a zero-length
 * transfer), then tries to coalesce physically contiguous entries.
 */

static inline void initialize_SCp(struct scsi_cmnd *cmd)
{
	/*
	 * Initialize the Scsi Pointer field so that all of the commands in the
	 * various queues are valid.
	 */

	if (scsi_bufflen(cmd)) {
		cmd->SCp.buffer = scsi_sglist(cmd);
		cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
		cmd->SCp.this_residual = cmd->SCp.buffer->length;

		/* ++roman: Try to merge some scatter-buffers if they are at
		 * contiguous physical addresses.
		 */
		merge_contiguous_buffers(cmd);
	} else {
		cmd->SCp.buffer = NULL;
		cmd->SCp.buffers_residual = 0;
		cmd->SCp.ptr = NULL;
		cmd->SCp.this_residual = 0;
	}
}

#include <linux/delay.h>

#if NDEBUG
/* bit-mask -> name tables used by the register dumpers below;
 * each list is terminated by a {0, NULL} sentinel */
static struct {
	unsigned char mask;
	const char *name;
} signals[] = {
	{ SR_DBP, "PARITY"}, { SR_RST, "RST" }, { SR_BSY, "BSY" },
	{ SR_REQ, "REQ" }, { SR_MSG, "MSG" }, { SR_CD, "CD" }, { SR_IO, "IO" },
	{ SR_SEL, "SEL" }, {0, NULL}
}, basrs[] = {
	{BASR_ATN, "ATN"}, {BASR_ACK, "ACK"}, {0, NULL}
}, icrs[] = {
	{ICR_ASSERT_RST, "ASSERT RST"},{ICR_ASSERT_ACK, "ASSERT ACK"},
	{ICR_ASSERT_BSY, "ASSERT BSY"}, {ICR_ASSERT_SEL, "ASSERT SEL"},
	{ICR_ASSERT_ATN, "ASSERT ATN"}, {ICR_ASSERT_DATA, "ASSERT DATA"},
	{0, NULL}
}, mrs[] = {
	{MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, {MR_TARGET, "MODE TARGET"},
	{MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"}, {MR_ENABLE_PAR_INTR,
	"MODE PARITY INTR"}, {MR_ENABLE_EOP_INTR,"MODE EOP INTR"},
	{MR_MONITOR_BSY, "MODE MONITOR BSY"}, {MR_DMA_MODE, "MODE DMA"},
	{MR_ARBITRATE, "MODE ARBITRATION"}, {0, NULL}
};

/**
 * NCR5380_print - print scsi bus signals
 * @instance: adapter state to dump
 *
 * Print the SCSI bus signals for debugging purposes.  The five chip
 * registers are sampled with interrupts disabled so the snapshot is
 * consistent, then decoded via the tables above.
 */

static void NCR5380_print(struct Scsi_Host *instance)
{
	unsigned char status, data, basr, mr, icr, i;
	unsigned long flags;

	local_irq_save(flags);
	data = NCR5380_read(CURRENT_SCSI_DATA_REG);
	status = NCR5380_read(STATUS_REG);
	mr = NCR5380_read(MODE_REG);
	icr = NCR5380_read(INITIATOR_COMMAND_REG);
	basr = NCR5380_read(BUS_AND_STATUS_REG);
	local_irq_restore(flags);
	printk("STATUS_REG: %02x ", status);
	for (i = 0; signals[i].mask; ++i)
		if (status & signals[i].mask)
			printk(",%s", signals[i].name);
	printk("\nBASR: %02x ", basr);
	for (i = 0; basrs[i].mask; ++i)
		if (basr & basrs[i].mask)
			printk(",%s", basrs[i].name);
	printk("\nICR: %02x ", icr);
	for (i = 0; icrs[i].mask; ++i)
		if (icr & icrs[i].mask)
			printk(",%s", icrs[i].name);
	printk("\nMODE: %02x ", mr);
	for (i = 0; mrs[i].mask; ++i)
		if (mr & mrs[i].mask)
			printk(",%s", mrs[i].name);
	printk("\n");
}

/* phase-code -> name table; the PHASE_UNKNOWN entry doubles as sentinel */
static struct {
	unsigned char value;
	const char *name;
} phases[] = {
	{PHASE_DATAOUT, "DATAOUT"}, {PHASE_DATAIN, "DATAIN"},
	{PHASE_CMDOUT, "CMDOUT"}, {PHASE_STATIN, "STATIN"},
	{PHASE_MSGOUT, "MSGOUT"}, {PHASE_MSGIN, "MSGIN"},
	{PHASE_UNKNOWN, "UNKNOWN"}
};

/**
 * NCR5380_print_phase - show SCSI phase
 * @instance: adapter to dump
 *
 * Print the current SCSI phase for debugging purposes
 *
 * Locks: none
 */

static void NCR5380_print_phase(struct Scsi_Host *instance)
{
	unsigned char status;
	int i;

	status = NCR5380_read(STATUS_REG);
	if (!(status & SR_REQ))
		printk(KERN_DEBUG "scsi%d: REQ not asserted, phase unknown.\n", HOSTNO);
	else {
		/* scan to the matching phase name; loop always terminates at
		 * the PHASE_UNKNOWN sentinel */
		for (i = 0; (phases[i].value != PHASE_UNKNOWN) &&
		     (phases[i].value != (status & PHASE_MASK)); ++i)
			;
		printk(KERN_DEBUG "scsi%d: phase %s\n", HOSTNO, phases[i].name);
	}
}
#endif

/*
 * ++roman: New scheme of calling NCR5380_main()
 *
 * If we're not in an interrupt, we can call our main directly, it cannot be
 * already running. Else, we queue it on a task queue, if not 'main_running'
 * tells us that a lower level is already executing it. This way,
 * 'main_running' needs not be protected in a special way.
 *
 * queue_main() is a utility function for putting our main onto the task
 * queue, if main_running is false. It should be called only from a
 * interrupt or bottom half.
 */

#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>

static inline void queue_main(struct NCR5380_hostdata *hostdata)
{
	if (!hostdata->main_running) {
		/* If in interrupt and NCR5380_main() not already running,
		   queue it on the 'immediate' task queue, to be processed
		   immediately after the current interrupt processing has
		   finished. */
		schedule_work(&hostdata->main_task);
	}
	/* else: nothing to do: the running NCR5380_main() will pick up
	   any newly queued command. */
}

/**
 * NCR5380_info - report driver and host information
 * @instance: relevant scsi host instance
 *
 * For use as the host template info() handler.
 *
 * Locks: none
 */

static const char *NCR5380_info(struct Scsi_Host *instance)
{
	struct NCR5380_hostdata *hostdata = shost_priv(instance);

	/* returns the string pre-formatted by prepare_info() at init time */
	return hostdata->info;
}

/*
 * prepare_info - pre-format the description string handed out by
 * NCR5380_info().  Built once (called from NCR5380_init()) into
 * hostdata->info so the info() handler stays trivial.
 */
static void prepare_info(struct Scsi_Host *instance)
{
	struct NCR5380_hostdata *hostdata = shost_priv(instance);

	snprintf(hostdata->info, sizeof(hostdata->info),
	         "%s, io_port 0x%lx, n_io_port %d, "
	         "base 0x%lx, irq %d, "
	         "can_queue %d, cmd_per_lun %d, "
	         "sg_tablesize %d, this_id %d, "
	         "flags { %s}, "
	         "options { %s} ",
	         instance->hostt->name, instance->io_port, instance->n_io_port,
	         instance->base, instance->irq,
	         instance->can_queue, instance->cmd_per_lun,
	         instance->sg_tablesize, instance->this_id,
	         hostdata->flags & FLAG_TAGGED_QUEUING ? "TAGGED_QUEUING " : "",
	         /* the #ifdef'd literals below concatenate into the single
	          * argument for the second %s */
#ifdef DIFFERENTIAL
	         "DIFFERENTIAL "
#endif
#ifdef REAL_DMA
	         "REAL_DMA "
#endif
#ifdef PARITY
	         "PARITY "
#endif
#ifdef SUPPORT_TAGS
	         "SUPPORT_TAGS "
#endif
	         "");
}

/**
 * NCR5380_print_status - dump controller info
 * @instance: controller to dump
 *
 * Print commands in the various queues, called from NCR5380_abort
 * to aid debugging.
 */

/* printk one command's target/lun and CDB bytes */
static void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd)
{
	int i, s;
	unsigned char *command;

	printk("scsi%d: destination target %d, lun %llu\n",
		H_NO(cmd), cmd->device->id, cmd->device->lun);
	printk(KERN_CONT " command = ");
	command = cmd->cmnd;
	printk(KERN_CONT "%2d (0x%02x)", command[0], command[0]);
	for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
		printk(KERN_CONT " %02x", command[i]);
	printk("\n");
}

static void NCR5380_print_status(struct Scsi_Host *instance)
{
	struct NCR5380_hostdata *hostdata;
	struct scsi_cmnd *ptr;
	unsigned long flags;

	NCR5380_dprint(NDEBUG_ANY, instance);
	NCR5380_dprint_phase(NDEBUG_ANY, instance);

	hostdata = (struct NCR5380_hostdata *)instance->hostdata;

	/* walk the queues with interrupts off so the lists can't change
	 * underneath us */
	local_irq_save(flags);
	printk("NCR5380: coroutine is%s running.\n",
		hostdata->main_running ? "" : "n't");
	if (!hostdata->connected)
		printk("scsi%d: no currently connected command\n", HOSTNO);
	else
		lprint_Scsi_Cmnd((struct scsi_cmnd *) hostdata->connected);
	printk("scsi%d: issue_queue\n", HOSTNO);
	for (ptr = (struct scsi_cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr))
		lprint_Scsi_Cmnd(ptr);

	printk("scsi%d: disconnected_queue\n", HOSTNO);
	for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr;
	     ptr = NEXT(ptr))
		lprint_Scsi_Cmnd(ptr);

	local_irq_restore(flags);
	printk("\n");
}

/* seq_file twin of lprint_Scsi_Cmnd() for /proc output */
static void show_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m)
{
	int i, s;
	unsigned char *command;

	seq_printf(m, "scsi%d: destination target %d, lun %llu\n",
		H_NO(cmd), cmd->device->id, cmd->device->lun);
	seq_puts(m, " command = ");
	command = cmd->cmnd;
	seq_printf(m, "%2d (0x%02x)", command[0], command[0]);
	for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
		seq_printf(m, " %02x", command[i]);
	seq_putc(m, '\n');
}

/* seq_file twin of NCR5380_print_status() for the host's show_info hook */
static int __maybe_unused NCR5380_show_info(struct seq_file *m,
					    struct Scsi_Host *instance)
{
	struct NCR5380_hostdata *hostdata;
	struct scsi_cmnd *ptr;
	unsigned long flags;

	hostdata = (struct NCR5380_hostdata *)instance->hostdata;

	local_irq_save(flags);
	seq_printf(m, "NCR5380: coroutine is%s running.\n",
		hostdata->main_running ? "" : "n't");
	if (!hostdata->connected)
		seq_printf(m, "scsi%d: no currently connected command\n", HOSTNO);
	else
		show_Scsi_Cmnd((struct scsi_cmnd *) hostdata->connected, m);
	seq_printf(m, "scsi%d: issue_queue\n", HOSTNO);
	for (ptr = (struct scsi_cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr))
		show_Scsi_Cmnd(ptr, m);

	seq_printf(m, "scsi%d: disconnected_queue\n", HOSTNO);
	for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr;
	     ptr = NEXT(ptr))
		show_Scsi_Cmnd(ptr, m);

	local_irq_restore(flags);
	return 0;
}

/**
 * NCR5380_init - initialise an NCR5380
 * @instance: adapter to configure
 * @flags: control flags
 *
 * Initializes *instance and corresponding 5380 chip,
 * with flags OR'd into the initial flags value.
 *
 * Notes : I assume that the host, hostno, and id bits have been
 *	set correctly. I don't care about the irq and other fields.
 *
 * Returns 0 for success
 */

static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
{
	int i;
	SETUP_HOSTDATA(instance);

	hostdata->host = instance;
	hostdata->aborted = 0;
	hostdata->id_mask = 1 << instance->this_id;
	/* collect the bits of all IDs above ours (higher arbitration
	 * priority on the SCSI bus) */
	hostdata->id_higher_mask = 0;
	for (i = hostdata->id_mask; i <= 0x80; i <<= 1)
		if (i > hostdata->id_mask)
			hostdata->id_higher_mask |= i;
	for (i = 0; i < 8; ++i)
		hostdata->busy[i] = 0;
#ifdef SUPPORT_TAGS
	init_tags(hostdata);
#endif
#if defined (REAL_DMA)
	hostdata->dma_len = 0;
#endif
	hostdata->targets_present = 0;
	hostdata->connected = NULL;
	hostdata->issue_queue = NULL;
	hostdata->disconnected_queue = NULL;
	hostdata->flags = flags;

	INIT_WORK(&hostdata->main_task, NCR5380_main);

	prepare_info(instance);

	/* put the chip into a known quiescent state */
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
	NCR5380_write(MODE_REG, MR_BASE);
	NCR5380_write(TARGET_COMMAND_REG, 0);
	NCR5380_write(SELECT_ENABLE_REG, 0);

	return 0;
}

/**
 * NCR5380_exit - remove an NCR5380
 * @instance: adapter to remove
 *
 * Assumes that no more work can be queued (e.g. by NCR5380_intr).
 */

static void NCR5380_exit(struct Scsi_Host *instance)
{
	struct NCR5380_hostdata *hostdata = shost_priv(instance);

	cancel_work_sync(&hostdata->main_task);
}

/**
 * NCR5380_queue_command - queue a command
 * @instance: the relevant SCSI adapter
 * @cmd: SCSI command
 *
 * cmd is added to the per instance issue_queue, with minor
 * twiddling done to the host specific fields of cmd.  If the
 * main coroutine is not running, it is restarted.
 */

/*
 * NCR5380_queue_command - mid-layer entry point: queue a SCSI command
 * @instance: host adapter the command is destined for
 * @cmd: command to queue; completion is reported via cmd->scsi_done()
 *
 * Links @cmd onto hostdata->issue_queue (head for REQUEST SENSE, tail
 * otherwise) and kicks NCR5380_main() to start processing.  Returns 0 on
 * success or SCSI_MLQUEUE_HOST_BUSY if the ST-DMA lock/irq could not be
 * acquired.
 */
static int NCR5380_queue_command(struct Scsi_Host *instance,
				 struct scsi_cmnd *cmd)
{
	struct NCR5380_hostdata *hostdata = shost_priv(instance);
	struct scsi_cmnd *tmp;
	unsigned long flags;

#if (NDEBUG & NDEBUG_NO_WRITE)
	/* Debug aid: fail all WRITEs up front so test runs cannot corrupt media */
	switch (cmd->cmnd[0]) {
	case WRITE_6:
	case WRITE_10:
		printk(KERN_NOTICE "scsi%d: WRITE attempted with NO_WRITE debugging flag set\n",
		       H_NO(cmd));
		cmd->result = (DID_ERROR << 16);
		cmd->scsi_done(cmd);
		return 0;
	}
#endif /* (NDEBUG & NDEBUG_NO_WRITE) */

	/*
	 * We use the host_scribble field as a pointer to the next command
	 * in a queue
	 */

	SET_NEXT(cmd, NULL);
	cmd->result = 0;

	/* ++guenther: now that the issue queue is being set up, we can lock ST-DMA.
	 * Otherwise a running NCR5380_main may steal the lock.
	 * Lock before actually inserting due to fairness reasons explained in
	 * atari_scsi.c. If we insert first, then it's impossible for this driver
	 * to release the lock.
	 * Stop timer for this command while waiting for the lock, or timeouts
	 * may happen (and they really do), and it's no good if the command doesn't
	 * appear in any of the queues.
	 * ++roman: Just disabling the NCR interrupt isn't sufficient here,
	 * because also a timer int can trigger an abort or reset, which would
	 * alter queues and touch the lock.
	 */
	if (!NCR5380_acquire_dma_irq(instance))
		return SCSI_MLQUEUE_HOST_BUSY;

	local_irq_save(flags);

	/*
	 * Insert the cmd into the issue queue. Note that REQUEST SENSE
	 * commands are added to the head of the queue since any command will
	 * clear the contingent allegiance condition that exists and the
	 * sense data is only guaranteed to be valid while the condition exists.
	 */

	if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
		LIST(cmd, hostdata->issue_queue);
		SET_NEXT(cmd, hostdata->issue_queue);
		hostdata->issue_queue = cmd;
	} else {
		/* Walk the singly linked list to its tail and append there */
		for (tmp = (struct scsi_cmnd *)hostdata->issue_queue;
		     NEXT(tmp); tmp = NEXT(tmp))
			;
		LIST(cmd, tmp);
		SET_NEXT(tmp, cmd);
	}
	local_irq_restore(flags);

	dprintk(NDEBUG_QUEUES, "scsi%d: command added to %s of queue\n",
		H_NO(cmd), (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");

	/* If queue_command() is called from an interrupt (real one or bottom
	 * half), we let queue_main() do the job of taking care about main. If it
	 * is already running, this is a no-op, else main will be queued.
	 *
	 * If we're not in an interrupt, we can call NCR5380_main()
	 * unconditionally, because it cannot be already running.
	 */
	if (in_interrupt() || irqs_disabled())
		queue_main(hostdata);
	else
		NCR5380_main(&hostdata->main_task);
	return 0;
}

/*
 * maybe_release_dma_irq - drop the ST-DMA lock/irq when fully idle
 *
 * Releases the DMA irq only when no command is connected, queued or
 * disconnected and no one has pinned it via retain_dma_intr.
 */
static inline void maybe_release_dma_irq(struct Scsi_Host *instance)
{
	struct NCR5380_hostdata *hostdata = shost_priv(instance);

	/* Caller does the locking needed to set & test these data atomically */
	if (!hostdata->disconnected_queue &&
	    !hostdata->issue_queue &&
	    !hostdata->connected &&
	    !hostdata->retain_dma_intr)
		NCR5380_release_dma_irq(instance);
}

/**
 * NCR5380_main - NCR state machines
 *
 * NCR5380_main is a coroutine that runs as long as more work can
 * be done on the NCR5380 host adapters in a system.  Both
 * NCR5380_queue_command() and NCR5380_intr() will try to start it
 * in case it is not running.
 *
 * Locks: called as its own thread with no locks held.
 */
static void NCR5380_main(struct work_struct *work)
{
	struct NCR5380_hostdata *hostdata =
		container_of(work, struct NCR5380_hostdata, main_task);
	struct Scsi_Host *instance = hostdata->host;
	struct scsi_cmnd *tmp, *prev;
	int done;
	unsigned long flags;

	/*
	 * We run (with interrupts disabled) until we're sure that none of
	 * the host adapters have anything that can be done, at which point
	 * we set main_running to 0 and exit.
	 *
	 * Interrupts are enabled before doing various other internal
	 * instructions, after we've decided that we need to run through
	 * the loop again.
	 *
	 * this should prevent any race conditions.
	 *
	 * ++roman: Just disabling the NCR interrupt isn't sufficient here,
	 * because also a timer int can trigger an abort or reset, which can
	 * alter queues and touch the Falcon lock.
	 */

	/* Tell int handlers main() is now already executing.  Note that no
	   races are possible here. If an int comes in before 'main_running'
	   is set here, and queues/executes main via the task queue, it doesn't
	   do any harm, just this instance of main won't find any work left to
	   do. */
	if (hostdata->main_running)
		return;
	hostdata->main_running = 1;

	local_save_flags(flags);
	do {
		local_irq_disable();	/* Freeze request queues */
		done = 1;

		if (!hostdata->connected) {
			dprintk(NDEBUG_MAIN, "scsi%d: not connected\n", HOSTNO);
			/*
			 * Search through the issue_queue for a command destined
			 * for a target that's not busy.
			 */
#if (NDEBUG & NDEBUG_LISTS)
			/* Debug-only: detect a cycle in the issue queue */
			for (tmp = (struct scsi_cmnd *) hostdata->issue_queue, prev = NULL;
			     tmp && (tmp != prev); prev = tmp, tmp = NEXT(tmp))
				;
			/*printk("%p ", tmp);*/
			if ((tmp == prev) && tmp)
				printk(" LOOP\n");
			/* else printk("\n"); */
#endif
			for (tmp = (struct scsi_cmnd *) hostdata->issue_queue,
			     prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp)) {
				u8 lun = tmp->device->lun;

				dprintk(NDEBUG_LISTS,
					"MAIN tmp=%p target=%d busy=%d lun=%d\n",
					tmp, scmd_id(tmp), hostdata->busy[scmd_id(tmp)],
					lun);
				/* When we find one, remove it from the issue queue. */
				/* ++guenther: possible race with Falcon locking */
				if (
#ifdef SUPPORT_TAGS
				    !is_lun_busy( tmp, tmp->cmnd[0] != REQUEST_SENSE)
#else
				    !(hostdata->busy[tmp->device->id] & (1 << lun))
#endif
				    ) {
					/* ++guenther: just to be sure, this must be atomic */
					local_irq_disable();
					if (prev) {
						REMOVE(prev, NEXT(prev), tmp, NEXT(tmp));
						SET_NEXT(prev, NEXT(tmp));
					} else {
						REMOVE(-1, hostdata->issue_queue, tmp, NEXT(tmp));
						hostdata->issue_queue = NEXT(tmp);
					}
					SET_NEXT(tmp, NULL);
					/* Pin the DMA irq while we try to select */
					hostdata->retain_dma_intr++;

					/* reenable interrupts after finding one */
					local_irq_restore(flags);

					/*
					 * Attempt to establish an I_T_L nexus here.
					 * On success, instance->hostdata->connected is set.
					 * On failure, we must add the command back to the
					 * issue queue so we can keep trying.
					 */
					dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d "
						"lun %d removed from issue_queue\n",
						HOSTNO, tmp->device->id, lun);
					/*
					 * REQUEST SENSE commands are issued without tagged
					 * queueing, even on SCSI-II devices because the
					 * contingent allegiance condition exists for the
					 * entire unit.
					 */
					/* ++roman: ...and the standard also requires that
					 * REQUEST SENSE command are untagged.
					 */

#ifdef SUPPORT_TAGS
					cmd_get_tag(tmp, tmp->cmnd[0] != REQUEST_SENSE);
#endif
					if (!NCR5380_select(instance, tmp)) {
						local_irq_disable();
						hostdata->retain_dma_intr--;
						/* release if target did not response! */
						maybe_release_dma_irq(instance);
						local_irq_restore(flags);
						break;
					} else {
						/* Selection failed: requeue at the head */
						local_irq_disable();
						LIST(tmp, hostdata->issue_queue);
						SET_NEXT(tmp, hostdata->issue_queue);
						hostdata->issue_queue = tmp;
#ifdef SUPPORT_TAGS
						cmd_free_tag(tmp);
#endif
						hostdata->retain_dma_intr--;
						local_irq_restore(flags);
						dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, "
							"returned to issue_queue\n", HOSTNO);
						if (hostdata->connected)
							break;
					}
				} /* if target/lun/target queue is not busy */
			} /* for issue_queue */
		} /* if (!hostdata->connected) */

		if (hostdata->connected
#ifdef REAL_DMA
		    && !hostdata->dma_len
#endif
		    ) {
			local_irq_restore(flags);
			dprintk(NDEBUG_MAIN, "scsi%d: main: performing information transfer\n",
				HOSTNO);
			NCR5380_information_transfer(instance);
			dprintk(NDEBUG_MAIN, "scsi%d: main: done set false\n", HOSTNO);
			done = 0;
		}
	} while (!done);

	/* Better allow ints _after_ 'main_running' has been cleared, else an
	   interrupt could believe we'll pick up the work it left for us, but
	   we won't see it anymore here... */
	hostdata->main_running = 0;
	local_irq_restore(flags);
}

#ifdef REAL_DMA
/*
 * Function : void NCR5380_dma_complete (struct Scsi_Host *instance)
 *
 * Purpose : Called by interrupt handler when DMA finishes or a phase
 *	mismatch occurs (which would finish the DMA transfer).
 *
 * Inputs : instance - this instance of the NCR5380.
 *
 */
static void NCR5380_dma_complete(struct Scsi_Host *instance)
{
	SETUP_HOSTDATA(instance);
	int transferred;
	unsigned char **data;
	volatile int *count;
	int saved_data = 0, overrun = 0;
	unsigned char p;

	if (!hostdata->connected) {
		printk(KERN_WARNING "scsi%d: received end of DMA interrupt with "
		       "no connected cmd\n", HOSTNO);
		return;
	}

	if (hostdata->read_overruns) {
		p = hostdata->connected->SCp.phase;
		if (p & SR_IO) {
			udelay(10);
			/* If the chip still holds a handshaked byte, save it
			 * before the mode reset below discards it.
			 */
			if ((NCR5380_read(BUS_AND_STATUS_REG) &
			     (BASR_PHASE_MATCH|BASR_ACK)) ==
			    (BASR_PHASE_MATCH|BASR_ACK)) {
				saved_data = NCR5380_read(INPUT_DATA_REG);
				overrun = 1;
				dprintk(NDEBUG_DMA, "scsi%d: read overrun handled\n", HOSTNO);
			}
		}
	}

	dprintk(NDEBUG_DMA, "scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",
		HOSTNO, NCR5380_read(BUS_AND_STATUS_REG),
		NCR5380_read(STATUS_REG));

#if defined(CONFIG_SUN3)
	if ((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) {
		pr_err("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n",
		       instance->host_no);
		BUG();
	}

	/* make sure we're not stuck in a data phase */
	if ((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) ==
	    (BASR_PHASE_MATCH | BASR_ACK)) {
		pr_err("scsi%d: BASR %02x\n", instance->host_no,
		       NCR5380_read(BUS_AND_STATUS_REG));
		pr_err("scsi%d: bus stuck in data phase -- probably a single byte overrun!\n",
		       instance->host_no);
		BUG();
	}
#endif

	/* Reading RESET_PARITY_INTERRUPT_REG clears pending interrupt state */
	(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
	NCR5380_write(MODE_REG, MR_BASE);
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);

	transferred = hostdata->dma_len - NCR5380_dma_residual(instance);
	hostdata->dma_len = 0;

	/* Advance the connected command's scatter pointer/residual */
	data = (unsigned char **)&hostdata->connected->SCp.ptr;
	count = &hostdata->connected->SCp.this_residual;
	*data += transferred;
	*count -= transferred;

	if (hostdata->read_overruns) {
		int cnt, toPIO;

		if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) {
			/* Recover the withheld trailing bytes by PIO */
			cnt = toPIO = hostdata->read_overruns;
			if (overrun) {
				dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n");
				*(*data)++ = saved_data;
				(*count)--;
				cnt--;
				toPIO--;
			}
			dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data);
			NCR5380_transfer_pio(instance, &p, &cnt, data);
			*count -= toPIO - cnt;
		}
	}
}
#endif /* REAL_DMA */

/**
 * NCR5380_intr - generic NCR5380 irq handler
 * @irq: interrupt number
 * @dev_id: device info
 *
 * Handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses
 * from the disconnected queue, and restarting NCR5380_main()
 * as required.
 */
static irqreturn_t NCR5380_intr(int irq, void *dev_id)
{
	struct Scsi_Host *instance = dev_id;
	int done = 1, handled = 0;
	unsigned char basr;

	dprintk(NDEBUG_INTR, "scsi%d: NCR5380 irq triggered\n", HOSTNO);

	/* Look for pending interrupts */
	basr = NCR5380_read(BUS_AND_STATUS_REG);
	dprintk(NDEBUG_INTR, "scsi%d: BASR=%02x\n", HOSTNO, basr);
	/* dispatch to appropriate routine if found and done=0 */
	if (basr & BASR_IRQ) {
		NCR5380_dprint(NDEBUG_INTR, instance);
		if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) {
			/* Target reselected us */
			done = 0;
			dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO);
			NCR5380_reselect(instance);
			(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		} else if (basr & BASR_PARITY_ERROR) {
			dprintk(NDEBUG_INTR, "scsi%d: PARITY interrupt\n", HOSTNO);
			(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		} else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) {
			dprintk(NDEBUG_INTR, "scsi%d: RESET interrupt\n", HOSTNO);
			(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		} else {
			/*
			 * The rest of the interrupt conditions can occur only during a
			 * DMA transfer
			 */
#if defined(REAL_DMA)
			/*
			 * We should only get PHASE MISMATCH and EOP interrupts if we have
			 * DMA enabled, so do a sanity check based on the current setting
			 * of the MODE register.
			 */
			if ((NCR5380_read(MODE_REG) & MR_DMA_MODE) &&
			    ((basr & BASR_END_DMA_TRANSFER) ||
			     !(basr & BASR_PHASE_MATCH))) {
				dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);
				NCR5380_dma_complete( instance );
				done = 0;
			} else
#endif /* REAL_DMA */
			{
				/* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */
				if (basr & BASR_PHASE_MATCH)
					dprintk(NDEBUG_INTR, "scsi%d: unknown interrupt, "
						"BASR 0x%x, MR 0x%x, SR 0x%x\n",
						HOSTNO, basr, NCR5380_read(MODE_REG),
						NCR5380_read(STATUS_REG));
				(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
#ifdef SUN3_SCSI_VME
				dregs->csr |= CSR_DMA_ENABLE;
#endif
			}
		} /* if !(SELECTION || PARITY) */
		handled = 1;
	} /* BASR & IRQ */ else {
		printk(KERN_NOTICE "scsi%d: interrupt without IRQ bit set in BASR, "
		       "BASR 0x%X, MR 0x%X, SR 0x%x\n", HOSTNO, basr,
		       NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG));
		(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
#ifdef SUN3_SCSI_VME
		dregs->csr |= CSR_DMA_ENABLE;
#endif
	}

	if (!done) {
		dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO);
		/* Put a call to NCR5380_main() on the queue... */
		queue_main(shost_priv(instance));
	}
	return IRQ_RETVAL(handled);
}

/*
 * Function : int NCR5380_select(struct Scsi_Host *instance,
 *	struct scsi_cmnd *cmd)
 *
 * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
 *	including ARBITRATION, SELECTION, and initial message out for
 *	IDENTIFY and queue messages.
 *
 * Inputs : instance - instantiation of the 5380 driver on which this
 *	target lives, cmd - SCSI command to execute.
 *
 * Returns : -1 if selection could not execute for some reason,
 *	0 if selection succeeded or failed because the target
 *	did not respond.
 *
 * Side effects :
 *	If bus busy, arbitration failed, etc, NCR5380_select() will exit
 *		with registers as they should have been on entry - ie
 *		SELECT_ENABLE will be set appropriately, the NCR5380
 *		will cease to drive any SCSI bus signals.
 *
 *	If successful : I_T_L or I_T_L_Q nexus will be established,
 *		instance->connected will be set to cmd.
 *		SELECT interrupt will be disabled.
 *
 *	If failed (no target) : cmd->scsi_done() will be called, and the
 *		cmd->result host byte set to DID_BAD_TARGET.
 */
static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
{
	SETUP_HOSTDATA(instance);
	unsigned char tmp[3], phase;
	unsigned char *data;
	int len;
	unsigned long timeout;
	unsigned long flags;

	hostdata->restart_select = 0;
	NCR5380_dprint(NDEBUG_ARBITRATION, instance);
	dprintk(NDEBUG_ARBITRATION, "scsi%d: starting arbitration, id = %d\n", HOSTNO,
		instance->this_id);

	/*
	 * Set the phase bits to 0, otherwise the NCR5380 won't drive the
	 * data bus during SELECTION.
	 */

	local_irq_save(flags);
	if (hostdata->connected) {
		/* A reselection won the bus meanwhile; give up */
		local_irq_restore(flags);
		return -1;
	}
	NCR5380_write(TARGET_COMMAND_REG, 0);

	/*
	 * Start arbitration.
	 */

	NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
	NCR5380_write(MODE_REG, MR_ARBITRATE);

	local_irq_restore(flags);

	/* Wait for arbitration logic to complete */
#if defined(NCR_TIMEOUT)
	{
		unsigned long timeout = jiffies + 2*NCR_TIMEOUT;

		while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS) &&
		       time_before(jiffies, timeout) && !hostdata->connected)
			;
		if (time_after_eq(jiffies, timeout)) {
			printk("scsi : arbitration timeout at %d\n", __LINE__);
			NCR5380_write(MODE_REG, MR_BASE);
			NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
			return -1;
		}
	}
#else /* NCR_TIMEOUT */
	/* NOTE(review): without NCR_TIMEOUT this busy-wait is unbounded */
	while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS) &&
	       !hostdata->connected)
		;
#endif

	dprintk(NDEBUG_ARBITRATION, "scsi%d: arbitration complete\n", HOSTNO);

	if (hostdata->connected) {
		NCR5380_write(MODE_REG, MR_BASE);
		return -1;
	}
	/*
	 * The arbitration delay is 2.2us, but this is a minimum and there is
	 * no maximum so we can safely sleep for ceil(2.2) usecs to accommodate
	 * the integral nature of udelay().
	 *
	 */

	udelay(3);

	/* Check for lost arbitration */
	if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
	    (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) ||
	    (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
	    hostdata->connected) {
		NCR5380_write(MODE_REG, MR_BASE);
		dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting MR_ARBITRATE\n",
			HOSTNO);
		return -1;
	}

	/* after/during arbitration, BSY should be asserted.
	   IBM DPES-31080 Version S31Q works now */
	/* Tnx to Thomas_Roesch@m2.maus.de for finding this! (Roman) */
	NCR5380_write(INITIATOR_COMMAND_REG,
		      ICR_BASE | ICR_ASSERT_SEL | ICR_ASSERT_BSY);

	if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
	    hostdata->connected) {
		NCR5380_write(MODE_REG, MR_BASE);
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n",
			HOSTNO);
		return -1;
	}

	/*
	 * Again, bus clear + bus settle time is 1.2us, however, this is
	 * a minimum so we'll udelay ceil(1.2)
	 */

#ifdef CONFIG_ATARI_SCSI_TOSHIBA_DELAY
	/* ++roman: But some targets (see above :-) seem to need a bit more... */
	udelay(15);
#else
	udelay(2);
#endif

	if (hostdata->connected) {
		NCR5380_write(MODE_REG, MR_BASE);
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		return -1;
	}

	dprintk(NDEBUG_ARBITRATION, "scsi%d: won arbitration\n", HOSTNO);

	/*
	 * Now that we have won arbitration, start Selection process, asserting
	 * the host and target ID's on the SCSI bus.
	 */

	NCR5380_write(OUTPUT_DATA_REG, (hostdata->id_mask | (1 << cmd->device->id)));

	/*
	 * Raise ATN while SEL is true before BSY goes false from arbitration,
	 * since this is the only way to guarantee that we'll get a MESSAGE OUT
	 * phase immediately after selection.
	 */

	NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_BSY |
		      ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL));
	NCR5380_write(MODE_REG, MR_BASE);

	/*
	 * Reselect interrupts must be turned off prior to the dropping of BSY,
	 * otherwise we will trigger an interrupt.
	 */

	if (hostdata->connected) {
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		return -1;
	}

	NCR5380_write(SELECT_ENABLE_REG, 0);

	/*
	 * The initiator shall then wait at least two deskew delays and release
	 * the BSY signal.
	 */
	udelay(1);	/* wingel -- wait two bus deskew delay >2*45ns */

	/* Reset BSY */
	NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_DATA |
		      ICR_ASSERT_ATN | ICR_ASSERT_SEL));

	/*
	 * Something weird happens when we cease to drive BSY - looks
	 * like the board/chip is letting us do another read before the
	 * appropriate propagation delay has expired, and we're confusing
	 * a BSY signal from ourselves as the target's response to SELECTION.
	 *
	 * A small delay (the 'C++' frontend breaks the pipeline with an
	 * unnecessary jump, making it work on my 386-33/Trantor T128, the
	 * tighter 'C' code breaks and requires this) solves the problem -
	 * the 1 us delay is arbitrary, and only used because this delay will
	 * be the same on other platforms and since it works here, it should
	 * work there.
	 *
	 * wingel suggests that this could be due to failing to wait
	 * one deskew delay.
	 */

	udelay(1);

	dprintk(NDEBUG_SELECTION, "scsi%d: selecting target %d\n", HOSTNO, cmd->device->id);

	/*
	 * The SCSI specification calls for a 250 ms timeout for the actual
	 * selection.
	 */

	timeout = jiffies + msecs_to_jiffies(250);

	/*
	 * XXX very interesting - we're seeing a bounce where the BSY we
	 * asserted is being reflected / still asserted (propagation delay?)
	 * and it's detecting as true.  Sigh.
	 */

#if 0
	/* ++roman: If a target conformed to the SCSI standard, it wouldn't assert
	 * IO while SEL is true. But again, there are some disks out the in the
	 * world that do that nevertheless. (Somebody claimed that this announces
	 * reselection capability of the target.) So we better skip that test and
	 * only wait for BSY... (Famous german words: Der Klügere gibt nach :-)
	 */

	while (time_before(jiffies, timeout) &&
	       !(NCR5380_read(STATUS_REG) & (SR_BSY | SR_IO)))
		;

	if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) {
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		NCR5380_reselect(instance);
		printk(KERN_ERR "scsi%d: reselection after won arbitration?\n",
		       HOSTNO);
		NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
		return -1;
	}
#else
	while (time_before(jiffies, timeout) && !(NCR5380_read(STATUS_REG) & SR_BSY))
		;
#endif

	/*
	 * No less than two deskew delays after the initiator detects the
	 * BSY signal is true, it shall release the SEL signal and may
	 * change the DATA BUS.					-wingel
	 */

	udelay(1);

	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);

	if (!(NCR5380_read(STATUS_REG) & SR_BSY)) {
		/* Selection timed out: target did not assert BSY */
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		if (hostdata->targets_present & (1 << cmd->device->id)) {
			printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO);
			if (hostdata->restart_select)
				printk(KERN_NOTICE "\trestart select\n");
			NCR5380_dprint(NDEBUG_ANY, instance);
			NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
			return -1;
		}
		cmd->result = DID_BAD_TARGET << 16;
#ifdef SUPPORT_TAGS
		cmd_free_tag(cmd);
#endif
		cmd->scsi_done(cmd);
		NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
		dprintk(NDEBUG_SELECTION, "scsi%d: target did not respond within 250ms\n", HOSTNO);
		/* NOTE(review): SELECT_ENABLE_REG was already written with the
		 * same value a few lines above; this second write looks
		 * redundant (harmless, but a candidate for cleanup). */
		NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
		return 0;
	}

	hostdata->targets_present |= (1 << cmd->device->id);

	/*
	 * Since we followed the SCSI spec, and raised ATN while SEL
	 * was true but before BSY was false during selection, the information
	 * transfer phase should be a MESSAGE OUT phase so that we can send the
	 * IDENTIFY message.
	 *
	 * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG
	 * message (2 bytes) with a tag ID that we increment with every command
	 * until it wraps back to 0.
	 *
	 * XXX - it turns out that there are some broken SCSI-II devices,
	 * which claim to support tagged queuing but fail when more than
	 * some number of commands are issued at once.
	 */

	/* Wait for start of REQ/ACK handshake */
	/* NOTE(review): unbounded spin if the target never raises REQ */

	while (!(NCR5380_read(STATUS_REG) & SR_REQ))
		;

	dprintk(NDEBUG_SELECTION, "scsi%d: target %d selected, going into MESSAGE OUT phase.\n",
		HOSTNO, cmd->device->id);
	tmp[0] = IDENTIFY(1, cmd->device->lun);

#ifdef SUPPORT_TAGS
	if (cmd->tag != TAG_NONE) {
		tmp[1] = hostdata->last_message = SIMPLE_QUEUE_TAG;
		tmp[2] = cmd->tag;
		len = 3;
	} else
		len = 1;
#else
	len = 1;
	cmd->tag = 0;
#endif /* SUPPORT_TAGS */

	/* Send message(s) */
	data = tmp;
	phase = PHASE_MSGOUT;
	NCR5380_transfer_pio(instance, &phase, &len, &data);
	dprintk(NDEBUG_SELECTION, "scsi%d: nexus established.\n", HOSTNO);
	/* XXX need to handle errors here */
	hostdata->connected = cmd;
#ifndef SUPPORT_TAGS
	hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
#endif
#ifdef SUN3_SCSI_VME
	dregs->csr |= CSR_INTR;
#endif

	initialize_SCp(cmd);

	return 0;
}

/*
 * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance,
 *	unsigned char *phase, int *count, unsigned char **data)
 *
 * Purpose : transfers data in given phase using polled I/O
 *
 * Inputs : instance - instance of driver, *phase - pointer to
 *	what phase is expected, *count - pointer to number of
 *	bytes to transfer, **data - pointer to data pointer.
 *
 * Returns : -1 when different phase is entered without transferring
 *	maximum number of bytes, 0 if all bytes are transferred or exit
 *	is in same phase.
 *
 * Also, *phase, *count, *data are modified in place.
 *
 * XXX Note : handling for bus free may be useful.
 */

/*
 * Note : this code is not as quick as it could be, however it
 * IS 100% reliable, and for the actual data transfer where speed
 * counts, we will always do a pseudo DMA or DMA transfer.
 */
static int NCR5380_transfer_pio(struct Scsi_Host *instance,
				unsigned char *phase, int *count,
				unsigned char **data)
{
	register unsigned char p = *phase, tmp;
	register int c = *count;
	register unsigned char *d = *data;

	/*
	 * The NCR5380 chip will only drive the SCSI bus when the
	 * phase specified in the appropriate bits of the TARGET COMMAND
	 * REGISTER match the STATUS REGISTER
	 */

	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));

	do {
		/*
		 * Wait for assertion of REQ, after which the phase bits will be
		 * valid
		 */
		/* NOTE(review): unbounded spin if the target never raises REQ */
		while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ))
			;

		dprintk(NDEBUG_HANDSHAKE, "scsi%d: REQ detected\n", HOSTNO);

		/* Check for phase mismatch */
		if ((tmp & PHASE_MASK) != p) {
			dprintk(NDEBUG_PIO, "scsi%d: phase mismatch\n", HOSTNO);
			NCR5380_dprint_phase(NDEBUG_PIO, instance);
			break;
		}

		/* Do actual transfer from SCSI bus to / from memory */
		if (!(p & SR_IO))
			NCR5380_write(OUTPUT_DATA_REG, *d);
		else
			*d = NCR5380_read(CURRENT_SCSI_DATA_REG);

		++d;

		/*
		 * The SCSI standard suggests that in MSGOUT phase, the initiator
		 * should drop ATN on the last byte of the message phase
		 * after REQ has been asserted for the handshake but before
		 * the initiator raises ACK.
		 */

		if (!(p & SR_IO)) {
			if (!((p & SR_MSG) && c > 1)) {
				/* Last (or non-message) byte: ACK without ATN */
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
				NCR5380_dprint(NDEBUG_PIO, instance);
				NCR5380_write(INITIATOR_COMMAND_REG,
					      ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ACK);
			} else {
				/* More message bytes follow: keep ATN asserted */
				NCR5380_write(INITIATOR_COMMAND_REG,
					      ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN);
				NCR5380_dprint(NDEBUG_PIO, instance);
				NCR5380_write(INITIATOR_COMMAND_REG,
					      ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
			}
		} else {
			NCR5380_dprint(NDEBUG_PIO, instance);
			NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
		}

		/* Wait for the target to drop REQ, completing the handshake */
		while (NCR5380_read(STATUS_REG) & SR_REQ)
			;

		dprintk(NDEBUG_HANDSHAKE, "scsi%d: req false, handshake complete\n", HOSTNO);

		/*
		 * We have several special cases to consider during REQ/ACK handshaking :
		 * 1.  We were in MSGOUT phase, and we are on the last byte of the
		 *	message.  ATN must be dropped as ACK is dropped.
		 *
		 * 2.  We are in a MSGIN phase, and we are on the last byte of the
		 *	message.  We must exit with ACK asserted, so that the calling
		 *	code may raise ATN before dropping ACK to reject the message.
		 *
		 * 3.  ACK and ATN are clear and the target may proceed as normal.
		 */
		if (!(p == PHASE_MSGIN && c == 1)) {
			if (p == PHASE_MSGOUT && c > 1)
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
			else
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		}
	} while (--c);

	dprintk(NDEBUG_PIO, "scsi%d: residual %d\n", HOSTNO, c);

	*count = c;
	*data = d;
	tmp = NCR5380_read(STATUS_REG);
	/* The phase read from the bus is valid if either REQ is (already)
	 * asserted or if ACK hasn't been released yet. The latter is the case if
	 * we're in MSGIN and all wanted bytes have been received.
	 */
	if ((tmp & SR_REQ) || (p == PHASE_MSGIN && c == 0))
		*phase = tmp & PHASE_MASK;
	else
		*phase = PHASE_UNKNOWN;

	if (!c || (*phase == p))
		return 0;
	else
		return -1;
}

/*
 * Function : do_abort (Scsi_Host *host)
 *
 * Purpose : abort the currently established nexus.
 Should only be
 *	called from a routine which can drop into a
 *	(NOTE(review): this sentence is truncated in the original source)
 *
 * Returns : 0 on success, -1 on failure.
 */
static int do_abort(struct Scsi_Host *instance)
{
	unsigned char tmp, *msgptr, phase;
	int len;

	/* Request message out phase */
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);

	/*
	 * Wait for the target to indicate a valid phase by asserting
	 * REQ.  Once this happens, we'll have either a MSGOUT phase
	 * and can immediately send the ABORT message, or we'll have some
	 * other phase and will have to source/sink data.
	 *
	 * We really don't care what value was on the bus or what value
	 * the target sees, so we just handshake.
	 */

	while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ))
		;

	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));

	if ((tmp & PHASE_MASK) != PHASE_MSGOUT) {
		/* Handshake one byte of whatever phase this is to get
		 * the target into MSGOUT. */
		NCR5380_write(INITIATOR_COMMAND_REG,
			      ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
		while (NCR5380_read(STATUS_REG) & SR_REQ)
			;
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
	}

	tmp = ABORT;
	msgptr = &tmp;
	len = 1;
	phase = PHASE_MSGOUT;
	NCR5380_transfer_pio(instance, &phase, &len, &msgptr);

	/*
	 * If we got here, and the command completed successfully,
	 * we're about to go into bus free state.
	 */

	/* len is 0 only if the whole ABORT message was handshaked out */
	return len ? -1 : 0;
}

#if defined(REAL_DMA)
/*
 * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance,
 *	unsigned char *phase, int *count, unsigned char **data)
 *
 * Purpose : transfers data in given phase using either real
 *	or pseudo DMA.
 *
 * Inputs : instance - instance of driver, *phase - pointer to
 *	what phase is expected, *count - pointer to number of
 *	bytes to transfer, **data - pointer to data pointer.
 *
 * Returns : -1 when different phase is entered without transferring
 *	maximum number of bytes, 0 if all bytes or transferred or exit
 *	is in same phase.
 *
 * Also, *phase, *count, *data are modified in place.
 *
 */
static int NCR5380_transfer_dma(struct Scsi_Host *instance,
				unsigned char *phase, int *count,
				unsigned char **data)
{
	SETUP_HOSTDATA(instance);
	register int c = *count;
	register unsigned char p = *phase;
	unsigned long flags;

#if defined(CONFIG_SUN3)
	/* sanity check */
	if (!sun3_dma_setup_done) {
		pr_err("scsi%d: transfer_dma without setup!\n",
		       instance->host_no);
		BUG();
	}
	hostdata->dma_len = c;

	dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n",
		instance->host_no, (p & SR_IO) ? "reading" : "writing",
		c, (p & SR_IO) ? "to" : "from", *data);

	/* netbsd turns off ints here, why not be safe and do it too */
	local_irq_save(flags);

	/* send start chain */
	sun3scsi_dma_start(c, *data);

	if (p & SR_IO) {
		/* DMA read: program chip for initiator receive */
		NCR5380_write(TARGET_COMMAND_REG, 1);
		NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		NCR5380_write(INITIATOR_COMMAND_REG, 0);
		NCR5380_write(MODE_REG,
			      (NCR5380_read(MODE_REG) | MR_DMA_MODE | MR_ENABLE_EOP_INTR));
		NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
	} else {
		/* DMA write: program chip for initiator send */
		NCR5380_write(TARGET_COMMAND_REG, 0);
		NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_DATA);
		NCR5380_write(MODE_REG,
			      (NCR5380_read(MODE_REG) | MR_DMA_MODE | MR_ENABLE_EOP_INTR));
		NCR5380_write(START_DMA_SEND_REG, 0);
	}

#ifdef SUN3_SCSI_VME
	dregs->csr |= CSR_DMA_ENABLE;
#endif

	local_irq_restore(flags);

	sun3_dma_active = 1;

#else /* !defined(CONFIG_SUN3) */
	register unsigned char *d = *data;
	unsigned char tmp;

	if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) {
		/* Bus phase changed under us: report it back to the caller */
		*phase = tmp;
		return -1;
	}

	/* Withhold the trailing bytes on reads so a chip overrun can be
	 * recovered by PIO in NCR5380_dma_complete(). */
	if (hostdata->read_overruns && (p & SR_IO))
		c -= hostdata->read_overruns;

	dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n",
		HOSTNO, (p & SR_IO) ? "reading" : "writing",
		c, (p & SR_IO) ? "to" : "from", d);

	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));

#ifdef REAL_DMA
	NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_ENABLE_EOP_INTR | MR_MONITOR_BSY);
#endif /* def REAL_DMA */

	if (!(hostdata->flags & FLAG_LATE_DMA_SETUP)) {
		/* On the Medusa, it is a must to initialize the DMA before
		 * starting the NCR. This is also the cleaner way for the TT.
		 */
		local_irq_save(flags);
		hostdata->dma_len = (p & SR_IO) ?
			NCR5380_dma_read_setup(instance, d, c) :
			NCR5380_dma_write_setup(instance, d, c);
		local_irq_restore(flags);
	}

	if (p & SR_IO)
		NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
	else {
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
		NCR5380_write(START_DMA_SEND_REG, 0);
	}

	if (hostdata->flags & FLAG_LATE_DMA_SETUP) {
		/* On the Falcon, the DMA setup must be done after the last */
		/* NCR access, else the DMA setup gets trashed!
		 */
		local_irq_save(flags);
		hostdata->dma_len = (p & SR_IO) ?
			NCR5380_dma_read_setup(instance, d, c) :
			NCR5380_dma_write_setup(instance, d, c);
		local_irq_restore(flags);
	}
#endif /* !defined(CONFIG_SUN3) */

	return 0;
}
#endif /* defined(REAL_DMA) */

/*
 * Function : NCR5380_information_transfer (struct Scsi_Host *instance)
 *
 * Purpose : run through the various SCSI phases and do as the target
 *	directs us to.  Operates on the currently connected command,
 *	instance->connected.
 *
 * Inputs : instance, instance for which we are doing commands
 *
 * Side effects : SCSI things happen, the disconnected queue will be
 *	modified if a command disconnects, *instance->connected will
 *	change.
 *
 * XXX Note : we need to watch for bus free or a reset condition here
 *	to recover from an unexpected bus free condition.
*/ static void NCR5380_information_transfer(struct Scsi_Host *instance) { SETUP_HOSTDATA(instance); unsigned long flags; unsigned char msgout = NOP; int sink = 0; int len; #if defined(REAL_DMA) int transfersize; #endif unsigned char *data; unsigned char phase, tmp, extended_msg[10], old_phase = 0xff; struct scsi_cmnd *cmd = (struct scsi_cmnd *) hostdata->connected; #ifdef SUN3_SCSI_VME dregs->csr |= CSR_INTR; #endif while (1) { tmp = NCR5380_read(STATUS_REG); /* We only have a valid SCSI phase when REQ is asserted */ if (tmp & SR_REQ) { phase = (tmp & PHASE_MASK); if (phase != old_phase) { old_phase = phase; NCR5380_dprint_phase(NDEBUG_INFORMATION, instance); } #if defined(CONFIG_SUN3) if (phase == PHASE_CMDOUT) { #if defined(REAL_DMA) void *d; unsigned long count; if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) { count = cmd->SCp.buffer->length; d = sg_virt(cmd->SCp.buffer); } else { count = cmd->SCp.this_residual; d = cmd->SCp.ptr; } /* this command setup for dma yet? */ if ((count >= DMA_MIN_SIZE) && (sun3_dma_setup_done != cmd)) { if (cmd->request->cmd_type == REQ_TYPE_FS) { sun3scsi_dma_setup(d, count, rq_data_dir(cmd->request)); sun3_dma_setup_done = cmd; } } #endif #ifdef SUN3_SCSI_VME dregs->csr |= CSR_INTR; #endif } #endif /* CONFIG_SUN3 */ if (sink && (phase != PHASE_MSGOUT)) { NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK); while (NCR5380_read(STATUS_REG) & SR_REQ) ; NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); sink = 0; continue; } switch (phase) { case PHASE_DATAOUT: #if (NDEBUG & NDEBUG_NO_DATAOUT) printk("scsi%d: NDEBUG_NO_DATAOUT set, attempted DATAOUT " "aborted\n", HOSTNO); sink = 1; do_abort(instance); cmd->result = DID_ERROR << 16; cmd->scsi_done(cmd); return; #endif case PHASE_DATAIN: /* * If there is no room left in the current buffer in the * scatter-gather list, move onto the next one. 
*/ if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) { ++cmd->SCp.buffer; --cmd->SCp.buffers_residual; cmd->SCp.this_residual = cmd->SCp.buffer->length; cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); /* ++roman: Try to merge some scatter-buffers if * they are at contiguous physical addresses. */ merge_contiguous_buffers(cmd); dprintk(NDEBUG_INFORMATION, "scsi%d: %d bytes and %d buffers left\n", HOSTNO, cmd->SCp.this_residual, cmd->SCp.buffers_residual); } /* * The preferred transfer method is going to be * PSEUDO-DMA for systems that are strictly PIO, * since we can let the hardware do the handshaking. * * For this to work, we need to know the transfersize * ahead of time, since the pseudo-DMA code will sit * in an unconditional loop. */ /* ++roman: I suggest, this should be * #if def(REAL_DMA) * instead of leaving REAL_DMA out. */ #if defined(REAL_DMA) if ( #if !defined(CONFIG_SUN3) !cmd->device->borken && #endif (transfersize = NCR5380_dma_xfer_len(instance, cmd, phase)) >= DMA_MIN_SIZE) { len = transfersize; cmd->SCp.phase = phase; if (NCR5380_transfer_dma(instance, &phase, &len, (unsigned char **)&cmd->SCp.ptr)) { /* * If the watchdog timer fires, all future * accesses to this device will use the * polled-IO. */ scmd_printk(KERN_INFO, cmd, "switching to slow handshake\n"); cmd->device->borken = 1; NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); sink = 1; do_abort(instance); cmd->result = DID_ERROR << 16; cmd->scsi_done(cmd); /* XXX - need to source or sink data here, as appropriate */ } else { #ifdef REAL_DMA /* ++roman: When using real DMA, * information_transfer() should return after * starting DMA since it has nothing more to * do. 
*/ return; #else cmd->SCp.this_residual -= transfersize - len; #endif } } else #endif /* defined(REAL_DMA) */ NCR5380_transfer_pio(instance, &phase, (int *)&cmd->SCp.this_residual, (unsigned char **)&cmd->SCp.ptr); #if defined(CONFIG_SUN3) && defined(REAL_DMA) /* if we had intended to dma that command clear it */ if (sun3_dma_setup_done == cmd) sun3_dma_setup_done = NULL; #endif break; case PHASE_MSGIN: len = 1; data = &tmp; NCR5380_write(SELECT_ENABLE_REG, 0); /* disable reselects */ NCR5380_transfer_pio(instance, &phase, &len, &data); cmd->SCp.Message = tmp; switch (tmp) { /* * Linking lets us reduce the time required to get the * next command out to the device, hopefully this will * mean we don't waste another revolution due to the delays * required by ARBITRATION and another SELECTION. * * In the current implementation proposal, low level drivers * merely have to start the next command, pointed to by * next_link, done() is called as with unlinked commands. */ #ifdef LINKED case LINKED_CMD_COMPLETE: case LINKED_FLG_CMD_COMPLETE: /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked command " "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun); /* Enable reselect interrupts */ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); /* * Sanity check : A linked command should only terminate * with one of these messages if there are more linked * commands available. */ if (!cmd->next_link) { printk(KERN_NOTICE "scsi%d: target %d lun %llu " "linked command complete, no next_link\n", HOSTNO, cmd->device->id, cmd->device->lun); sink = 1; do_abort(instance); return; } initialize_SCp(cmd->next_link); /* The next command is still part of this process; copy it * and don't free it! 
*/ cmd->next_link->tag = cmd->tag; cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked request " "done, calling scsi_done().\n", HOSTNO, cmd->device->id, cmd->device->lun); cmd->scsi_done(cmd); cmd = hostdata->connected; break; #endif /* def LINKED */ case ABORT: case COMMAND_COMPLETE: /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %llu " "completed\n", HOSTNO, cmd->device->id, cmd->device->lun); local_irq_save(flags); hostdata->retain_dma_intr++; hostdata->connected = NULL; #ifdef SUPPORT_TAGS cmd_free_tag(cmd); if (status_byte(cmd->SCp.Status) == QUEUE_FULL) { /* Turn a QUEUE FULL status into BUSY, I think the * mid level cannot handle QUEUE FULL :-( (The * command is retried after BUSY). Also update our * queue size to the number of currently issued * commands now. */ /* ++Andreas: the mid level code knows about QUEUE_FULL now. */ struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][cmd->device->lun]; dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu returned " "QUEUE_FULL after %d commands\n", HOSTNO, cmd->device->id, cmd->device->lun, ta->nr_allocated); if (ta->queue_size > ta->nr_allocated) ta->nr_allocated = ta->queue_size; } #else hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); #endif /* Enable reselect interrupts */ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); /* * I'm not sure what the correct thing to do here is : * * If the command that just executed is NOT a request * sense, the obvious thing to do is to set the result * code to the values of the stored parameters. * * If it was a REQUEST SENSE command, we need some way to * differentiate between the failure code of the original * and the failure code of the REQUEST sense - the obvious * case is success, where we fall through and leave the * result code unchanged. 
* * The non-obvious place is where the REQUEST SENSE failed */ if (cmd->cmnd[0] != REQUEST_SENSE) cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); else if (status_byte(cmd->SCp.Status) != GOOD) cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16); if ((cmd->cmnd[0] == REQUEST_SENSE) && hostdata->ses.cmd_len) { scsi_eh_restore_cmnd(cmd, &hostdata->ses); hostdata->ses.cmd_len = 0 ; } if ((cmd->cmnd[0] != REQUEST_SENSE) && (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) { scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n", HOSTNO); LIST(cmd,hostdata->issue_queue); SET_NEXT(cmd, hostdata->issue_queue); hostdata->issue_queue = (struct scsi_cmnd *) cmd; dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of " "issue queue\n", H_NO(cmd)); } else { cmd->scsi_done(cmd); } local_irq_restore(flags); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); /* * Restore phase bits to 0 so an interrupted selection, * arbitration can resume. */ NCR5380_write(TARGET_COMMAND_REG, 0); while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected) barrier(); local_irq_save(flags); hostdata->retain_dma_intr--; /* ++roman: For Falcon SCSI, release the lock on the * ST-DMA here if no other commands are waiting on the * disconnected queue. */ maybe_release_dma_irq(instance); local_irq_restore(flags); return; case MESSAGE_REJECT: /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); /* Enable reselect interrupts */ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); switch (hostdata->last_message) { case HEAD_OF_QUEUE_TAG: case ORDERED_QUEUE_TAG: case SIMPLE_QUEUE_TAG: /* The target obviously doesn't support tagged * queuing, even though it announced this ability in * its INQUIRY data ?!? (maybe only this LUN?) Ok, * clear 'tagged_supported' and lock the LUN, since * the command is treated as untagged further on. 
*/ cmd->device->tagged_supported = 0; hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); cmd->tag = TAG_NONE; dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu rejected " "QUEUE_TAG message; tagged queuing " "disabled\n", HOSTNO, cmd->device->id, cmd->device->lun); break; } break; case DISCONNECT: /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); local_irq_save(flags); cmd->device->disconnect = 1; LIST(cmd,hostdata->disconnected_queue); SET_NEXT(cmd, hostdata->disconnected_queue); hostdata->connected = NULL; hostdata->disconnected_queue = cmd; local_irq_restore(flags); dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %llu was " "moved from connected to the " "disconnected_queue\n", HOSTNO, cmd->device->id, cmd->device->lun); /* * Restore phase bits to 0 so an interrupted selection, * arbitration can resume. */ NCR5380_write(TARGET_COMMAND_REG, 0); /* Enable reselect interrupts */ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); /* Wait for bus free to avoid nasty timeouts */ while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected) barrier(); #ifdef SUN3_SCSI_VME dregs->csr |= CSR_DMA_ENABLE; #endif return; /* * The SCSI data pointer is *IMPLICITLY* saved on a disconnect * operation, in violation of the SCSI spec so we can safely * ignore SAVE/RESTORE pointers calls. * * Unfortunately, some disks violate the SCSI spec and * don't issue the required SAVE_POINTERS message before * disconnecting, and we have to break spec to remain * compatible. 
*/ case SAVE_POINTERS: case RESTORE_POINTERS: /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); /* Enable reselect interrupts */ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); break; case EXTENDED_MESSAGE: /* * Extended messages are sent in the following format : * Byte * 0 EXTENDED_MESSAGE == 1 * 1 length (includes one byte for code, doesn't * include first two bytes) * 2 code * 3..length+1 arguments * * Start the extended message buffer with the EXTENDED_MESSAGE * byte, since spi_print_msg() wants the whole thing. */ extended_msg[0] = EXTENDED_MESSAGE; /* Accept first byte by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); dprintk(NDEBUG_EXTENDED, "scsi%d: receiving extended message\n", HOSTNO); len = 2; data = extended_msg + 1; phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data); dprintk(NDEBUG_EXTENDED, "scsi%d: length=%d, code=0x%02x\n", HOSTNO, (int)extended_msg[1], (int)extended_msg[2]); if (!len && extended_msg[1] <= (sizeof(extended_msg) - 1)) { /* Accept third byte by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); len = extended_msg[1] - 1; data = extended_msg + 3; phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data); dprintk(NDEBUG_EXTENDED, "scsi%d: message received, residual %d\n", HOSTNO, len); switch (extended_msg[2]) { case EXTENDED_SDTR: case EXTENDED_WDTR: case EXTENDED_MODIFY_DATA_POINTER: case EXTENDED_EXTENDED_IDENTIFY: tmp = 0; } } else if (len) { printk(KERN_NOTICE "scsi%d: error receiving " "extended message\n", HOSTNO); tmp = 0; } else { printk(KERN_NOTICE "scsi%d: extended message " "code %02x length %d is too long\n", HOSTNO, extended_msg[2], extended_msg[1]); tmp = 0; } /* Fall through to reject message */ /* * If we get something weird that we aren't expecting, * reject it. 
*/ default: if (!tmp) { printk(KERN_INFO "scsi%d: rejecting message ", instance->host_no); spi_print_msg(extended_msg); printk("\n"); } else if (tmp != EXTENDED_MESSAGE) scmd_printk(KERN_INFO, cmd, "rejecting unknown message %02x\n", tmp); else scmd_printk(KERN_INFO, cmd, "rejecting unknown extended message code %02x, length %d\n", extended_msg[1], extended_msg[0]); msgout = MESSAGE_REJECT; NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); break; } /* switch (tmp) */ break; case PHASE_MSGOUT: len = 1; data = &msgout; hostdata->last_message = msgout; NCR5380_transfer_pio(instance, &phase, &len, &data); if (msgout == ABORT) { local_irq_save(flags); #ifdef SUPPORT_TAGS cmd_free_tag(cmd); #else hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); #endif hostdata->connected = NULL; cmd->result = DID_ERROR << 16; NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); maybe_release_dma_irq(instance); local_irq_restore(flags); cmd->scsi_done(cmd); return; } msgout = NOP; break; case PHASE_CMDOUT: len = cmd->cmd_len; data = cmd->cmnd; /* * XXX for performance reasons, on machines with a * PSEUDO-DMA architecture we should probably * use the dma transfer function. */ NCR5380_transfer_pio(instance, &phase, &len, &data); break; case PHASE_STATIN: len = 1; data = &tmp; NCR5380_transfer_pio(instance, &phase, &len, &data); cmd->SCp.Status = tmp; break; default: printk("scsi%d: unknown phase\n", HOSTNO); NCR5380_dprint(NDEBUG_ANY, instance); } /* switch(phase) */ } /* if (tmp * SR_REQ) */ } /* while (1) */ } /* * Function : void NCR5380_reselect (struct Scsi_Host *instance) * * Purpose : does reselection, initializing the instance->connected * field to point to the scsi_cmnd for which the I_T_L or I_T_L_Q * nexus has been reestablished, * * Inputs : instance - this instance of the NCR5380. 
* */ /* it might eventually prove necessary to do a dma setup on reselection, but it doesn't seem to be needed now -- sam */ static void NCR5380_reselect(struct Scsi_Host *instance) { SETUP_HOSTDATA(instance); unsigned char target_mask; unsigned char lun; #ifdef SUPPORT_TAGS unsigned char tag; #endif unsigned char msg[3]; int __maybe_unused len; unsigned char __maybe_unused *data, __maybe_unused phase; struct scsi_cmnd *tmp = NULL, *prev; /* * Disable arbitration, etc. since the host adapter obviously * lost, and tell an interrupted NCR5380_select() to restart. */ NCR5380_write(MODE_REG, MR_BASE); hostdata->restart_select = 1; target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); dprintk(NDEBUG_RESELECTION, "scsi%d: reselect\n", HOSTNO); /* * At this point, we have detected that our SCSI ID is on the bus, * SEL is true and BSY was false for at least one bus settle delay * (400 ns). * * We must assert BSY ourselves, until the target drops the SEL * signal. */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY); while (NCR5380_read(STATUS_REG) & SR_SEL) ; NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); /* * Wait for target to go into MSGIN. */ while (!(NCR5380_read(STATUS_REG) & SR_REQ)) ; #if defined(CONFIG_SUN3) && defined(REAL_DMA) /* acknowledge toggle to MSGIN */ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(PHASE_MSGIN)); /* peek at the byte without really hitting the bus */ msg[0] = NCR5380_read(CURRENT_SCSI_DATA_REG); #else len = 1; data = msg; phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data); #endif if (!(msg[0] & 0x80)) { printk(KERN_DEBUG "scsi%d: expecting IDENTIFY message, got ", HOSTNO); spi_print_msg(msg); do_abort(instance); return; } lun = (msg[0] & 0x07); #if defined(SUPPORT_TAGS) && !defined(CONFIG_SUN3) /* If the phase is still MSGIN, the target wants to send some more * messages. In case it supports tagged queuing, this is probably a * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus. 
*/ tag = TAG_NONE; if (phase == PHASE_MSGIN && (hostdata->flags & FLAG_TAGGED_QUEUING)) { /* Accept previous IDENTIFY message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); len = 2; data = msg + 1; if (!NCR5380_transfer_pio(instance, &phase, &len, &data) && msg[1] == SIMPLE_QUEUE_TAG) tag = msg[2]; dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at " "reselection\n", HOSTNO, target_mask, lun, tag); } #endif /* * Find the command corresponding to the I_T_L or I_T_L_Q nexus we * just reestablished, and remove it from the disconnected queue. */ for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue, prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp)) { if ((target_mask == (1 << tmp->device->id)) && (lun == tmp->device->lun) #ifdef SUPPORT_TAGS && (tag == tmp->tag) #endif ) { if (prev) { REMOVE(prev, NEXT(prev), tmp, NEXT(tmp)); SET_NEXT(prev, NEXT(tmp)); } else { REMOVE(-1, hostdata->disconnected_queue, tmp, NEXT(tmp)); hostdata->disconnected_queue = NEXT(tmp); } SET_NEXT(tmp, NULL); break; } } if (!tmp) { printk(KERN_WARNING "scsi%d: warning: target bitmask %02x lun %d " #ifdef SUPPORT_TAGS "tag %d " #endif "not in disconnected_queue.\n", HOSTNO, target_mask, lun #ifdef SUPPORT_TAGS , tag #endif ); /* * Since we have an established nexus that we can't do anything * with, we must abort it. 
*/ do_abort(instance); return; } #if defined(CONFIG_SUN3) && defined(REAL_DMA) /* engage dma setup for the command we just saw */ { void *d; unsigned long count; if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) { count = tmp->SCp.buffer->length; d = sg_virt(tmp->SCp.buffer); } else { count = tmp->SCp.this_residual; d = tmp->SCp.ptr; } /* setup this command for dma if not already */ if ((count >= DMA_MIN_SIZE) && (sun3_dma_setup_done != tmp)) { sun3scsi_dma_setup(d, count, rq_data_dir(tmp->request)); sun3_dma_setup_done = tmp; } } NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); #endif /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); #if defined(SUPPORT_TAGS) && defined(CONFIG_SUN3) /* If the phase is still MSGIN, the target wants to send some more * messages. In case it supports tagged queuing, this is probably a * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus. */ tag = TAG_NONE; if (phase == PHASE_MSGIN && setup_use_tagged_queuing) { /* Accept previous IDENTIFY message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); len = 2; data = msg + 1; if (!NCR5380_transfer_pio(instance, &phase, &len, &data) && msg[1] == SIMPLE_QUEUE_TAG) tag = msg[2]; dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at reselection\n" HOSTNO, target_mask, lun, tag); } #endif hostdata->connected = tmp; dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %llu, tag = %d\n", HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag); } /* * Function : int NCR5380_abort (struct scsi_cmnd *cmd) * * Purpose : abort a command * * Inputs : cmd - the scsi_cmnd to abort, code - code to set the * host byte of the result field to, if zero DID_ABORTED is * used. * * Returns : SUCCESS - success, FAILED on failure. * * XXX - there is no way to abort the command that is currently * connected, you have to wait for it to complete. 
If this is
 *	a problem, we could implement longjmp() / setjmp(), setjmp()
 *	called where the loop started in NCR5380_main().
 */

static int NCR5380_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *instance = cmd->device->host;
	SETUP_HOSTDATA(instance);
	struct scsi_cmnd *tmp, **prev;
	unsigned long flags;

	scmd_printk(KERN_NOTICE, cmd, "aborting command\n");

	NCR5380_print_status(instance);

	/* Interrupts stay off while the driver queues are inspected/modified;
	 * each exit path below restores them before returning. */
	local_irq_save(flags);

	dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
		NCR5380_read(BUS_AND_STATUS_REG),
		NCR5380_read(STATUS_REG));

#if 1
	/*
	 * Case 1 : If the command is the currently executing command,
	 * we'll set the aborted flag and return control so that
	 * information transfer routine can exit cleanly.
	 */

	if (hostdata->connected == cmd) {
		dprintk(NDEBUG_ABORT, "scsi%d: aborting connected command\n", HOSTNO);
		/*
		 * We should perform BSY checking, and make sure we haven't slipped
		 * into BUS FREE.
		 */

		/* NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_ATN); */
		/*
		 * Since we can't change phases until we've completed the current
		 * handshake, we have to source or sink a byte of data if the current
		 * phase is not MSGOUT.
		 */

		/*
		 * Return control to the executing NCR drive so we can clear the
		 * aborted flag and get back into our main loop.
		 */

		if (do_abort(instance) == 0) {
			hostdata->aborted = 1;
			hostdata->connected = NULL;
			cmd->result = DID_ABORT << 16;
			/* Release the tag (tagged mode) or the per-LUN busy bit
			 * so the target can be selected again. */
#ifdef SUPPORT_TAGS
			cmd_free_tag(cmd);
#else
			hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
#endif
			maybe_release_dma_irq(instance);
			local_irq_restore(flags);
			cmd->scsi_done(cmd);
			return SUCCESS;
		} else {
			local_irq_restore(flags);
			printk("scsi%d: abort of connected command failed!\n", HOSTNO);
			return FAILED;
		}
	}
#endif

	/*
	 * Case 2 : If the command hasn't been issued yet, we simply remove it
	 * from the issue queue.
	 */
	for (prev = (struct scsi_cmnd **)&(hostdata->issue_queue),
	     tmp = (struct scsi_cmnd *)hostdata->issue_queue;
	     tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) {
		if (cmd == tmp) {
			REMOVE(5, *prev, tmp, NEXT(tmp));
			(*prev) = NEXT(tmp);
			SET_NEXT(tmp, NULL);
			tmp->result = DID_ABORT << 16;
			maybe_release_dma_irq(instance);
			local_irq_restore(flags);
			dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n",
				HOSTNO);
			/* Tagged queuing note: no tag to free here, hasn't been assigned
			 * yet... */
			tmp->scsi_done(tmp);
			return SUCCESS;
		}
	}

	/*
	 * Case 3 : If any commands are connected, we're going to fail the abort
	 *	    and let the high level SCSI driver retry at a later time or
	 *	    issue a reset.
	 *
	 *	    Timeouts, and therefore aborted commands, will be highly unlikely
	 *	    and handling them cleanly in this situation would make the common
	 *	    case of noresets less efficient, and would pollute our code. So,
	 *	    we fail.
	 */

	if (hostdata->connected) {
		local_irq_restore(flags);
		dprintk(NDEBUG_ABORT, "scsi%d: abort failed, command connected.\n", HOSTNO);
		return FAILED;
	}

	/*
	 * Case 4: If the command is currently disconnected from the bus, and
	 *	there are no connected commands, we reconnect the I_T_L or
	 *	I_T_L_Q nexus associated with it, go into message out, and send
	 *	an abort message.
	 *
	 * This case is especially ugly. In order to reestablish the nexus, we
	 * need to call NCR5380_select().  The easiest way to implement this
	 * function was to abort if the bus was busy, and let the interrupt
	 * handler triggered on the SEL for reselect take care of lost arbitrations
	 * where necessary, meaning interrupts need to be enabled.
	 *
	 * When interrupts are enabled, the queues may change - so we
	 * can't remove it from the disconnected queue before selecting it
	 * because that could cause a failure in hashing the nexus if that
	 * device reselected.
	 *
	 * Since the queues may change, we can't use the pointers from when we
	 * first locate it.
	 *
	 * So, we must first locate the command, and if NCR5380_select()
	 * succeeds, then issue the abort, relocate the command and remove
	 * it from the disconnected queue.
	 */

	for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; tmp;
	     tmp = NEXT(tmp)) {
		if (cmd == tmp) {
			/* NCR5380_select() needs interrupts enabled (see the
			 * Case 4 comment above), so drop the lock first. */
			local_irq_restore(flags);
			dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO);

			if (NCR5380_select(instance, cmd))
				return FAILED;

			dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO);

			do_abort(instance);

			/* Re-take the lock and re-locate the command: the
			 * queues may have changed while interrupts were on. */
			local_irq_save(flags);
			for (prev = (struct scsi_cmnd **)&(hostdata->disconnected_queue),
			     tmp = (struct scsi_cmnd *)hostdata->disconnected_queue;
			     tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) {
				if (cmd == tmp) {
					REMOVE(5, *prev, tmp, NEXT(tmp));
					*prev = NEXT(tmp);
					SET_NEXT(tmp, NULL);
					tmp->result = DID_ABORT << 16;
					/* We must unlock the tag/LUN immediately here, since the
					 * target goes to BUS FREE and doesn't send us another
					 * message (COMMAND_COMPLETE or the like)
					 */
#ifdef SUPPORT_TAGS
					cmd_free_tag(tmp);
#else
					hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
#endif
					maybe_release_dma_irq(instance);
					local_irq_restore(flags);
					tmp->scsi_done(tmp);
					return SUCCESS;
				}
			}
		}
	}

	/* Maybe it is sufficient just to release the ST-DMA lock... (if
	 * possible at all) At least, we should check if the lock could be
	 * released after the abort, in case it is kept due to some bug.
	 */
	maybe_release_dma_irq(instance);
	local_irq_restore(flags);

	/*
	 * Case 5 : If we reached this point, the command was not found in any of
	 *	    the queues.
	 *
	 * We probably reached this point because of an unlikely race condition
	 * between the command completing successfully and the abortion code,
	 * so we won't panic, but we will notify the user in case something really
	 * broke.
	 */

	printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO);

	return FAILED;
}

/*
 * Function : int NCR5380_reset (struct scsi_cmnd *cmd)
 *
 * Purpose : reset the SCSI bus.
 * Returns : SUCCESS or FAILURE
 *
 */

static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *instance = cmd->device->host;
	struct NCR5380_hostdata *hostdata = shost_priv(instance);
	int i;
	unsigned long flags;

	NCR5380_print_status(instance);

	/* get in phase */
	NCR5380_write(TARGET_COMMAND_REG,
		      PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG)));
	/* assert RST */
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST);
	udelay(40);
	/* reset NCR registers */
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
	NCR5380_write(MODE_REG, MR_BASE);
	NCR5380_write(TARGET_COMMAND_REG, 0);
	NCR5380_write(SELECT_ENABLE_REG, 0);

	/* ++roman: reset interrupt condition! otherwise no new interrupts
	 * would get through anymore ...
	 */
	(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);

	/* After the reset, there are no more connected or disconnected commands
	 * and no busy units; so clear the low-level status here to avoid
	 * conflicts when the mid-level code tries to wake up the affected
	 * commands!
	 */

	if (hostdata->issue_queue)
		dprintk(NDEBUG_ABORT, "scsi%d: reset aborted issued command(s)\n", H_NO(cmd));
	if (hostdata->connected)
		dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));
	if (hostdata->disconnected_queue)
		dprintk(NDEBUG_ABORT, "scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd));

	/* Wipe all per-host bookkeeping atomically w.r.t. the interrupt
	 * handler: queues, nexus, tags, busy bits and DMA residual. */
	local_irq_save(flags);
	hostdata->issue_queue = NULL;
	hostdata->connected = NULL;
	hostdata->disconnected_queue = NULL;
#ifdef SUPPORT_TAGS
	free_all_tags(hostdata);
#endif
	for (i = 0; i < 8; ++i)
		hostdata->busy[i] = 0;
#ifdef REAL_DMA
	hostdata->dma_len = 0;
#endif

	maybe_release_dma_irq(instance);
	local_irq_restore(flags);

	return SUCCESS;
}
francescosganga/remixos-kernel
drivers/scsi/atari_NCR5380.c
C
gpl-2.0
91,108
/* * Generic entry point for the idle threads */ #include <linux/sched.h> #include <linux/cpu.h> #include <linux/tick.h> #include <linux/mm.h> #include <linux/stackprotector.h> #include <asm/tlb.h> #include <trace/events/power.h> int __read_mostly cpu_idle_force_poll; void cpu_idle_poll_ctrl(bool enable) { if (enable) { cpu_idle_force_poll++; } else { cpu_idle_force_poll--; WARN_ON_ONCE(cpu_idle_force_poll < 0); } } #ifdef CONFIG_GENERIC_IDLE_POLL_SETUP static int __init cpu_idle_poll_setup(char *__unused) { cpu_idle_force_poll = 1; return 1; } __setup("nohlt", cpu_idle_poll_setup); static int __init cpu_idle_nopoll_setup(char *__unused) { cpu_idle_force_poll = 0; return 1; } __setup("hlt", cpu_idle_nopoll_setup); #endif static inline int cpu_idle_poll(void) { rcu_idle_enter(); trace_cpu_idle_rcuidle(0, smp_processor_id()); local_irq_enable(); while (!tif_need_resched()) cpu_relax(); trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); rcu_idle_exit(); return 1; } /* Weak implementations for optional arch specific functions */ void __weak arch_cpu_idle_prepare(void) { } void __weak arch_cpu_idle_enter(void) { } void __weak arch_cpu_idle_exit(void) { } void __weak arch_cpu_idle_dead(void) { } void __weak arch_cpu_idle(void) { cpu_idle_force_poll = 1; local_irq_enable(); } /* * Generic idle loop implementation */ static void cpu_idle_loop(void) { while (1) { tick_nohz_idle_enter(); while (!need_resched()) { check_pgt_cache(); rmb(); local_irq_disable(); arch_cpu_idle_enter(); /* * In poll mode we reenable interrupts and spin. 
* * Also if we detected in the wakeup from idle * path that the tick broadcast device expired * for us, we don't want to go deep idle as we * know that the IPI is going to arrive right * away */ if (cpu_idle_force_poll || tick_check_broadcast_expired()) { cpu_idle_poll(); } else { if (!current_clr_polling_and_test()) { stop_critical_timings(); rcu_idle_enter(); arch_cpu_idle(); WARN_ON_ONCE(irqs_disabled()); rcu_idle_exit(); start_critical_timings(); } else { local_irq_enable(); } __current_set_polling(); } arch_cpu_idle_exit(); } tick_nohz_idle_exit(); schedule_preempt_disabled(); if (cpu_is_offline(smp_processor_id())) arch_cpu_idle_dead(); } } void cpu_startup_entry(enum cpuhp_state state) { /* * This #ifdef needs to die, but it's too late in the cycle to * make this generic (arm and sh have never invoked the canary * init for the non boot cpus!). Will be fixed in 3.11 */ #ifdef CONFIG_X86 /* * If we're the non-boot CPU, nothing set the stack canary up * for us. The boot CPU already has it initialized but no harm * in doing it again. This is a good place for updating it, as * we wont ever return from this function (so the invalid * canaries already on the stack wont ever trigger). */ boot_init_stack_canary(); #endif __current_set_polling(); arch_cpu_idle_prepare(); cpu_idle_loop(); }
TeamExodus/kernel_moto_shamu
kernel/cpu/idle.c
C
gpl-2.0
3,064
/*
 *
 * linux/arch/sh/boards/se/7206/setup.c
 *
 * Copyright (C) 2006  Yoshinori Sato
 * Copyright (C) 2007 - 2008  Paul Mundt
 *
 * Hitachi 7206 SolutionEngine Support.
 */
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/smc91x.h>
#include <mach-se/mach/se7206.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/heartbeat.h>

/* SMC91x ethernet controller: register window at PA_SMSC + 0x300, IRQ 64. */
static struct resource smc91x_resources[] = {
	[0] = {
		.name		= "smc91x-regs",
		.start		= PA_SMSC + 0x300,
		.end		= PA_SMSC + 0x300 + 0x020 - 1,
		.flags		= IORESOURCE_MEM,
	},
	[1] = {
		.start		= 64,
		.end		= 64,
		.flags		= IORESOURCE_IRQ,
	},
};

static struct smc91x_platdata smc91x_info = {
	.flags	= SMC91X_USE_16BIT,
};

static struct platform_device smc91x_device = {
	.name		= "smc91x",
	.id		= -1,
	.dev	= {
		.dma_mask		= NULL,
		.coherent_dma_mask	= 0xffffffff,
		.platform_data	= &smc91x_info,
	},
	.num_resources	= ARRAY_SIZE(smc91x_resources),
	.resource	= smc91x_resources,
};

/* LED heartbeat: bits 8..15 of the 32-bit register at PA_LED. */
static unsigned char heartbeat_bit_pos[] = { 8, 9, 10, 11, 12, 13, 14, 15 };

static struct heartbeat_data heartbeat_data = {
	.bit_pos	= heartbeat_bit_pos,
	.nr_bits	= ARRAY_SIZE(heartbeat_bit_pos),
	.regsize	= 32,
};

static struct resource heartbeat_resources[] = {
	[0] = {
		.start	= PA_LED,
		.end	= PA_LED,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device heartbeat_device = {
	.name		= "heartbeat",
	.id		= -1,
	.dev	= {
		.platform_data	= &heartbeat_data,
	},
	.num_resources	= ARRAY_SIZE(heartbeat_resources),
	.resource	= heartbeat_resources,
};

static struct platform_device *se7206_devices[] __initdata = {
	&smc91x_device,
	&heartbeat_device,
};

/* Register the board's platform devices at boot. */
static int __init se7206_devices_setup(void)
{
	return platform_add_devices(se7206_devices, ARRAY_SIZE(se7206_devices));
}
__initcall(se7206_devices_setup);

/*
 * The Machine Vector
 */
static struct sh_machine_vector mv_se __initmv = {
	.mv_name		= "SolutionEngine",
	.mv_nr_irqs		= 256,
	.mv_inb			= se7206_inb,
	.mv_inw			= se7206_inw,
	.mv_outb		= se7206_outb,
	.mv_outw		= se7206_outw,
	.mv_inb_p		= se7206_inb_p,
	.mv_inw_p		= se7206_inw,
	.mv_outb_p		= se7206_outb_p,
	.mv_outw_p		= se7206_outw,
	.mv_insb		= se7206_insb,
	.mv_insw		= se7206_insw,
	.mv_outsb		= se7206_outsb,
	.mv_outsw		= se7206_outsw,
	.mv_init_irq		= init_se7206_IRQ,
};
qnhoang81/Intercept_Kernel
arch/sh/boards/mach-se/7206/setup.c
C
gpl-2.0
2,299
/* * NCR 5380 generic driver routines. These should make it *trivial* * to implement 5380 SCSI drivers under Linux with a non-trantor * architecture. * * Note that these routines also work with NR53c400 family chips. * * Copyright 1993, Drew Eckhardt * Visionary Computing * (Unix and Linux consulting and custom programming) * drew@colorado.edu * +1 (303) 666-5836 * * For more information, please consult * * NCR 5380 Family * SCSI Protocol Controller * Databook * * NCR Microelectronics * 1635 Aeroplaza Drive * Colorado Springs, CO 80916 * 1+ (719) 578-3400 * 1+ (800) 334-5454 */ /* * ++roman: To port the 5380 driver to the Atari, I had to do some changes in * this file, too: * * - Some of the debug statements were incorrect (undefined variables and the * like). I fixed that. * * - In information_transfer(), I think a #ifdef was wrong. Looking at the * possible DMA transfer size should also happen for REAL_DMA. I added this * in the #if statement. * * - When using real DMA, information_transfer() should return in a DATAOUT * phase after starting the DMA. It has nothing more to do. * * - The interrupt service routine should run main after end of DMA, too (not * only after RESELECTION interrupts). Additionally, it should _not_ test * for more interrupts after running main, since a DMA process may have * been started and interrupts are turned on now. The new int could happen * inside the execution of NCR5380_intr(), leading to recursive * calls. * * - I've added a function merge_contiguous_buffers() that tries to * merge scatter-gather buffers that are located at contiguous * physical addresses and can be processed with the same DMA setup. * Since most scatter-gather operations work on a page (4K) of * 4 buffers (1K), in more than 90% of all cases three interrupts and * DMA setup actions are saved. 
* * - I've deleted all the stuff for AUTOPROBE_IRQ, REAL_DMA_POLL, PSEUDO_DMA * and USLEEP, because these were messing up readability and will never be * needed for Atari SCSI. * * - I've revised the NCR5380_main() calling scheme (relax the 'main_running' * stuff), and 'main' is executed in a bottom half if awoken by an * interrupt. * * - The code was quite cluttered up by "#if (NDEBUG & NDEBUG_*) printk..." * constructs. In my eyes, this made the source rather unreadable, so I * finally replaced that by the *_PRINTK() macros. * */ /* * Further development / testing that should be done : * 1. Test linked command handling code after Eric is ready with * the high level code. */ /* Adapted for the sun3 by Sam Creasey. */ #include <scsi/scsi_dbg.h> #include <scsi/scsi_transport_spi.h> #if (NDEBUG & NDEBUG_LISTS) #define LIST(x, y) \ do { \ printk("LINE:%d Adding %p to %p\n", \ __LINE__, (void*)(x), (void*)(y)); \ if ((x) == (y)) \ udelay(5); \ } while (0) #define REMOVE(w, x, y, z) \ do { \ printk("LINE:%d Removing: %p->%p %p->%p \n", \ __LINE__, (void*)(w), (void*)(x), \ (void*)(y), (void*)(z)); \ if ((x) == (y)) \ udelay(5); \ } while (0) #else #define LIST(x,y) #define REMOVE(w,x,y,z) #endif #ifndef notyet #undef LINKED #endif /* * Design * * This is a generic 5380 driver. To use it on a different platform, * one simply writes appropriate system specific macros (ie, data * transfer - some PC's will use the I/O bus, 68K's must use * memory mapped) and drops this file in their 'C' wrapper. * * As far as command queueing, two queues are maintained for * each 5380 in the system - commands that haven't been issued yet, * and commands that are currently executing. This means that an * unlimited number of commands may be queued, letting * more commands propagate from the higher driver levels giving higher * throughput. 
Note that both I_T_L and I_T_L_Q nexuses are supported, * allowing multiple commands to propagate all the way to a SCSI-II device * while a command is already executing. * * * Issues specific to the NCR5380 : * * When used in a PIO or pseudo-dma mode, the NCR5380 is a braindead * piece of hardware that requires you to sit in a loop polling for * the REQ signal as long as you are connected. Some devices are * brain dead (ie, many TEXEL CD ROM drives) and won't disconnect * while doing long seek operations. * * The workaround for this is to keep track of devices that have * disconnected. If the device hasn't disconnected, for commands that * should disconnect, we do something like * * while (!REQ is asserted) { sleep for N usecs; poll for M usecs } * * Some tweaking of N and M needs to be done. An algorithm based * on "time to data" would give the best results as long as short time * to datas (ie, on the same track) were considered, however these * broken devices are the exception rather than the rule and I'd rather * spend my time optimizing for the normal case. * * Architecture : * * At the heart of the design is a coroutine, NCR5380_main, * which is started from a workqueue for each NCR5380 host in the * system. It attempts to establish I_T_L or I_T_L_Q nexuses by * removing the commands from the issue queue and calling * NCR5380_select() if a nexus is not established. * * Once a nexus is established, the NCR5380_information_transfer() * phase goes through the various phases as instructed by the target. * if the target goes into MSG IN and sends a DISCONNECT message, * the command structure is placed into the per instance disconnected * queue, and NCR5380_main tries to find more work. If the target is * idle for too long, the system will try to sleep. * * If a command has disconnected, eventually an interrupt will trigger, * calling NCR5380_intr() which will in turn call NCR5380_reselect * to reestablish a nexus. This will run main if necessary. 
 *
 * On command termination, the done function will be called as
 * appropriate.
 *
 * SCSI pointers are maintained in the SCp field of SCSI command
 * structures, being initialized after the command is connected
 * in NCR5380_select, and set as appropriate in NCR5380_information_transfer.
 * Note that in violation of the standard, an implicit SAVE POINTERS operation
 * is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS.
 */

/*
 * Using this file :
 * This file is a skeleton Linux SCSI driver for the NCR 5380 series
 * of chips.  To use it, you write architecture-specific functions
 * and macros and include this file in your driver.
 *
 * These macros control options :
 * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
 *	for commands that return with a CHECK CONDITION status.
 *
 * DIFFERENTIAL - if defined, NCR53c81 chips will use external differential
 *	transceivers.
 *
 * LINKED - if defined, linked commands are supported.
 *
 * REAL_DMA - if defined, REAL DMA is used during the data transfer phases.
 *
 * SUPPORT_TAGS - if defined, SCSI-2 tagged queuing is used where possible
 *
 * These macros MUST be defined :
 *
 * NCR5380_read(register)  - read from the specified register
 *
 * NCR5380_write(register, value) - write to the specific register
 *
 * NCR5380_implementation_fields  - additional fields needed for this
 *	specific implementation of the NCR5380
 *
 * Either real DMA *or* pseudo DMA may be implemented
 * REAL functions :
 * NCR5380_REAL_DMA should be defined if real DMA is to be used.
 * Note that the DMA setup functions should return the number of bytes
 *	that they were able to program the controller for.
 *
 * Also note that generic i386/PC versions of these macros are
 *	available as NCR5380_i386_dma_write_setup,
 *	NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual.
 *
 * NCR5380_dma_write_setup(instance, src, count) - initialize
 * NCR5380_dma_read_setup(instance, dst, count) - initialize
 * NCR5380_dma_residual(instance); - residual count
 *
 * PSEUDO functions :
 * NCR5380_pwrite(instance, src, count)
 * NCR5380_pread(instance, dst, count);
 *
 * The generic driver is initialized by calling NCR5380_init(instance),
 * after setting the appropriate host specific fields and ID.  If the
 * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance,
 * possible) function may be used.
 */

/* Macros ease life... :-) */

/* Declare and initialize a local 'hostdata' pointer from a Scsi_Host.
 * Used at the top of most functions in this file. */
#define	SETUP_HOSTDATA(in)				\
	struct NCR5380_hostdata *hostdata =		\
		(struct NCR5380_hostdata *)(in)->hostdata
/* Expression form of the same cast, for one-off accesses. */
#define	HOSTDATA(in)	((struct NCR5380_hostdata *)(in)->hostdata)

/* The driver chains scsi_cmnd structures into singly-linked queues
 * (issue/disconnected) through the mid-layer's host_scribble field. */
#define	NEXT(cmd)		((struct scsi_cmnd *)(cmd)->host_scribble)
#define	SET_NEXT(cmd,next)	((cmd)->host_scribble = (void *)(next))
#define	NEXTADDR(cmd)		((struct scsi_cmnd **)&(cmd)->host_scribble)

/* Shorthands for the host number used in log messages; HOSTNO assumes a
 * local variable named 'instance' is in scope. */
#define	HOSTNO		instance->host_no
#define	H_NO(cmd)	(cmd)->device->host->host_no

#ifdef SUPPORT_TAGS

/*
 * Functions for handling tagged queuing
 * =====================================
 *
 * ++roman (01/96): Now I've implemented SCSI-2 tagged queuing. Some notes:
 *
 * Using consecutive numbers for the tags is no good idea in my eyes. There
 * could be wrong re-usings if the counter (8 bit!) wraps and some early
 * command has been preempted for a long time. My solution: a bitfield for
 * remembering used tags.
 *
 * There's also the problem that each target has a certain queue size, but we
 * cannot know it in advance :-( We just see a QUEUE_FULL status being
 * returned. So, in this case, the driver internal queue size assumption is
 * reduced to the number of active tags if QUEUE_FULL is returned by the
 * target. The command is returned to the mid-level, but with status changed
 * to BUSY, since --as I've seen-- the mid-level can't handle QUEUE_FULL
 * correctly.
 *
 * We're also not allowed running tagged commands as long as an untagged
 * command is active.
 And REQUEST SENSE commands after a contingent allegiance
 * condition _must_ be untagged. To keep track whether an untagged command has
 * been issued, the host->busy array is still employed, as it is without
 * support for tagged queuing.
 *
 * One could suspect that there are possible race conditions between
 * is_lun_busy(), cmd_get_tag() and cmd_free_tag(). But I think this isn't the
 * case: is_lun_busy() and cmd_get_tag() are both called from NCR5380_main(),
 * which already guaranteed to be running at most once. It is also the only
 * place where tags/LUNs are allocated. So no other allocation can slip
 * between that pair, there could only happen a reselection, which can free a
 * tag, but that doesn't hurt. Only the sequence in cmd_free_tag() becomes
 * important: the tag bit must be cleared before 'nr_allocated' is decreased.
 */

/* Reset all per-target/per-LUN tag bookkeeping to "nothing allocated".
 * Called once from NCR5380_init(); a no-op unless the host was configured
 * with FLAG_TAGGED_QUEUING. */
static void __init init_tags(struct NCR5380_hostdata *hostdata)
{
	int target, lun;
	struct tag_alloc *ta;

	if (!(hostdata->flags & FLAG_TAGGED_QUEUING))
		return;

	/* 8 targets x 8 LUNs: the fixed addressing range of a narrow
	 * single-initiator SCSI bus, matching the TagAlloc array size. */
	for (target = 0; target < 8; ++target) {
		for (lun = 0; lun < 8; ++lun) {
			ta = &hostdata->TagAlloc[target][lun];
			bitmap_zero(ta->allocated, MAX_TAGS);
			ta->nr_allocated = 0;
			/* At the beginning, assume the maximum queue size we could
			 * support (MAX_TAGS). This value will be decreased if the target
			 * returns QUEUE_FULL status.
			 */
			ta->queue_size = MAX_TAGS;
		}
	}
}

/* Check if we can issue a command to this LUN: First see if the LUN is marked
 * busy by an untagged command. If the command should use tagged queuing, also
 * check that there is a free tag and the target's queue won't overflow. This
 * function should be called with interrupts disabled to avoid race
 * conditions.
 */

/* Returns non-zero if 'cmd' cannot be started on its target/LUN right now.
 * @should_be_tagged: non-zero if the caller wants to issue this command with
 * a tag (ignored when the host or device lacks tagged-queuing support).
 * Caller must hold interrupts disabled (see comment above). */
static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged)
{
	u8 lun = cmd->device->lun;
	SETUP_HOSTDATA(cmd->device->host);

	/* An untagged command owns the LUN exclusively. */
	if (hostdata->busy[cmd->device->id] & (1 << lun))
		return 1;
	/* Untagged path: LUN is free, so we may issue. */
	if (!should_be_tagged ||
	    !(hostdata->flags & FLAG_TAGGED_QUEUING) ||
	    !cmd->device->tagged_supported)
		return 0;
	/* Tagged path: busy when the (possibly QUEUE_FULL-reduced) per-LUN
	 * queue size is already reached. */
	if (hostdata->TagAlloc[scmd_id(cmd)][lun].nr_allocated >=
	    hostdata->TagAlloc[scmd_id(cmd)][lun].queue_size) {
		dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n",
			   H_NO(cmd), cmd->device->id, lun);
		return 1;
	}
	return 0;
}

/* Allocate a tag for a command (there are no checks anymore, is_lun_busy()
 * must be called before!), or reserve the LUN in 'busy' if the command is
 * untagged.  Sets cmd->tag (TAG_NONE for untagged commands). */
static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged)
{
	u8 lun = cmd->device->lun;
	SETUP_HOSTDATA(cmd->device->host);

	/* If we or the target don't support tagged queuing, allocate the LUN for
	 * an untagged command.
	 */
	if (!should_be_tagged ||
	    !(hostdata->flags & FLAG_TAGGED_QUEUING) ||
	    !cmd->device->tagged_supported) {
		cmd->tag = TAG_NONE;
		hostdata->busy[cmd->device->id] |= (1 << lun);
		dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged "
			   "command\n", H_NO(cmd), cmd->device->id, lun);
	} else {
		struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun];

		/* Lowest free tag number; is_lun_busy() guaranteed one exists. */
		cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS);
		set_bit(cmd->tag, ta->allocated);
		ta->nr_allocated++;
		dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d "
			   "(now %d tags in use)\n",
			   H_NO(cmd), cmd->tag, cmd->device->id,
			   lun, ta->nr_allocated);
	}
}

/* Mark the tag of command 'cmd' as free, or in case of an untagged command,
 * unlock the LUN.
 */

static void cmd_free_tag(struct scsi_cmnd *cmd)
{
	u8 lun = cmd->device->lun;
	SETUP_HOSTDATA(cmd->device->host);

	if (cmd->tag == TAG_NONE) {
		/* Untagged command: release the exclusive LUN reservation. */
		hostdata->busy[cmd->device->id] &= ~(1 << lun);
		dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n",
			   H_NO(cmd), cmd->device->id, lun);
	} else if (cmd->tag >= MAX_TAGS) {
		/* Defensive: a tag outside the bitmap would corrupt memory. */
		printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n",
		       H_NO(cmd), cmd->tag);
	} else {
		struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun];
		/* Ordering matters (see the race discussion above): the tag
		 * bit must be cleared before nr_allocated is decreased. */
		clear_bit(cmd->tag, ta->allocated);
		ta->nr_allocated--;
		dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n",
			   H_NO(cmd), cmd->tag, cmd->device->id, lun);
	}
}


/* Drop every allocated tag on every target/LUN (used on bus reset paths).
 * Unlike init_tags(), queue_size is preserved, keeping any QUEUE_FULL
 * reductions that were learned from the targets. */
static void free_all_tags(struct NCR5380_hostdata *hostdata)
{
	int target, lun;
	struct tag_alloc *ta;

	if (!(hostdata->flags & FLAG_TAGGED_QUEUING))
		return;

	for (target = 0; target < 8; ++target) {
		for (lun = 0; lun < 8; ++lun) {
			ta = &hostdata->TagAlloc[target][lun];
			bitmap_zero(ta->allocated, MAX_TAGS);
			ta->nr_allocated = 0;
		}
	}
}

#endif /* SUPPORT_TAGS */


/*
 * Function: void merge_contiguous_buffers( struct scsi_cmnd *cmd )
 *
 * Purpose: Try to merge several scatter-gather requests into one DMA
 *    transfer. This is possible if the scatter buffers lie on
 *    physical contiguous addresses.
 *
 * Parameters: struct scsi_cmnd *cmd
 *    The command to work on. The first scatter buffer's data are
 *    assumed to be already transferred into ptr/this_residual.
 */

static void merge_contiguous_buffers(struct scsi_cmnd *cmd)
{
/* Compiled out on sun3: this optimization is only used on the !SUN3 build. */
#if !defined(CONFIG_SUN3)
	unsigned long endaddr;
#if (NDEBUG & NDEBUG_MERGING)
	unsigned long oldlen = cmd->SCp.this_residual;
	int cnt = 1;
#endif

	/* endaddr = physical address one past the current buffer; as long as
	 * the next SG entry starts exactly there, fold it into this one by
	 * growing this_residual and consuming the entry. */
	for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1;
	     cmd->SCp.buffers_residual &&
	     virt_to_phys(sg_virt(&cmd->SCp.buffer[1])) == endaddr;) {
		dprintk(NDEBUG_MERGING, "VTOP(%p) == %08lx -> merging\n",
			   page_address(sg_page(&cmd->SCp.buffer[1])), endaddr);
#if (NDEBUG & NDEBUG_MERGING)
		++cnt;
#endif
		++cmd->SCp.buffer;
		--cmd->SCp.buffers_residual;
		cmd->SCp.this_residual += cmd->SCp.buffer->length;
		endaddr += cmd->SCp.buffer->length;
	}
#if (NDEBUG & NDEBUG_MERGING)
	if (oldlen != cmd->SCp.this_residual)
		dprintk(NDEBUG_MERGING, "merged %d buffers from %p, new length %08x\n",
			   cnt, cmd->SCp.ptr, cmd->SCp.this_residual);
#endif
#endif /* !defined(CONFIG_SUN3) */
}

/**
 * initialize_SCp	-	init the scsi pointer field
 * @cmd: command block to set up
 *
 * Set up the internal fields in the SCSI command: current SG entry,
 * remaining entry count, kernel virtual data pointer and residual byte
 * count.  Commands without a data buffer get all fields zeroed/NULLed.
 */
static inline void initialize_SCp(struct scsi_cmnd *cmd)
{
	/*
	 * Initialize the Scsi Pointer field so that all of the commands in the
	 * various queues are valid.
	 */

	if (scsi_bufflen(cmd)) {
		cmd->SCp.buffer = scsi_sglist(cmd);
		cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
		cmd->SCp.this_residual = cmd->SCp.buffer->length;

		/* ++roman: Try to merge some scatter-buffers if they are at
		 * contiguous physical addresses.
		 */
		merge_contiguous_buffers(cmd);
	} else {
		cmd->SCp.buffer = NULL;
		cmd->SCp.buffers_residual = 0;
		cmd->SCp.ptr = NULL;
		cmd->SCp.this_residual = 0;
	}
}

#include <linux/delay.h>

#if NDEBUG
/* Bit-name tables used by NCR5380_print() to decode the chip's status,
 * bus-and-status, initiator-command and mode registers. Each list is
 * terminated by a {0, NULL} sentinel. */
static struct {
	unsigned char mask;
	const char *name;
} signals[] = {
	{ SR_DBP, "PARITY"}, { SR_RST, "RST" }, { SR_BSY, "BSY" },
	{ SR_REQ, "REQ" }, { SR_MSG, "MSG" }, { SR_CD, "CD" }, { SR_IO, "IO" },
	{ SR_SEL, "SEL" }, {0, NULL}
}, basrs[] = {
	{BASR_ATN, "ATN"}, {BASR_ACK, "ACK"}, {0, NULL}
}, icrs[] = {
	{ICR_ASSERT_RST, "ASSERT RST"},{ICR_ASSERT_ACK, "ASSERT ACK"},
	{ICR_ASSERT_BSY, "ASSERT BSY"}, {ICR_ASSERT_SEL, "ASSERT SEL"},
	{ICR_ASSERT_ATN, "ASSERT ATN"}, {ICR_ASSERT_DATA, "ASSERT DATA"},
	{0, NULL}
}, mrs[] = {
	{MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, {MR_TARGET, "MODE TARGET"},
	{MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"}, {MR_ENABLE_PAR_INTR,
	"MODE PARITY INTR"}, {MR_ENABLE_EOP_INTR,"MODE EOP INTR"},
	{MR_MONITOR_BSY, "MODE MONITOR BSY"}, {MR_DMA_MODE, "MODE DMA"},
	{MR_ARBITRATE, "MODE ARBITRATION"}, {0, NULL}
};

/**
 * NCR5380_print	-	print scsi bus signals
 * @instance: adapter state to dump
 *
 * Print the SCSI bus signals for debugging purposes.  The registers are
 * sampled with interrupts disabled so the snapshot is consistent.
 */
static void NCR5380_print(struct Scsi_Host *instance)
{
	unsigned char status, data, basr, mr, icr, i;
	unsigned long flags;

	local_irq_save(flags);
	data = NCR5380_read(CURRENT_SCSI_DATA_REG);
	status = NCR5380_read(STATUS_REG);
	mr = NCR5380_read(MODE_REG);
	icr = NCR5380_read(INITIATOR_COMMAND_REG);
	basr = NCR5380_read(BUS_AND_STATUS_REG);
	local_irq_restore(flags);
	printk("STATUS_REG: %02x ", status);
	for (i = 0; signals[i].mask; ++i)
		if (status & signals[i].mask)
			printk(",%s", signals[i].name);
	printk("\nBASR: %02x ", basr);
	for (i = 0; basrs[i].mask; ++i)
		if (basr & basrs[i].mask)
			printk(",%s", basrs[i].name);
	printk("\nICR: %02x ", icr);
	for (i = 0; icrs[i].mask; ++i)
		if (icr & icrs[i].mask)
			printk(",%s", icrs[i].name);
	printk("\nMODE: %02x ", mr);
	for (i = 0; mrs[i].mask; ++i)
		if (mr & mrs[i].mask)
			printk(",%s", mrs[i].name);
	printk("\n");
}

/* Phase-code-to-name table; PHASE_UNKNOWN doubles as the sentinel in
 * NCR5380_print_phase()'s lookup loop. */
static struct {
	unsigned char value;
	const char *name;
} phases[] = {
	{PHASE_DATAOUT, "DATAOUT"}, {PHASE_DATAIN, "DATAIN"},
	{PHASE_CMDOUT, "CMDOUT"}, {PHASE_STATIN, "STATIN"},
	{PHASE_MSGOUT, "MSGOUT"}, {PHASE_MSGIN, "MSGIN"},
	{PHASE_UNKNOWN, "UNKNOWN"}
};

/**
 * NCR5380_print_phase	-	show SCSI phase
 * @instance: adapter to dump
 *
 * Print the current SCSI phase for debugging purposes.  The phase bits
 * are only meaningful while the target asserts REQ.
 *
 * Locks: none
 */
static void NCR5380_print_phase(struct Scsi_Host *instance)
{
	unsigned char status;
	int i;

	status = NCR5380_read(STATUS_REG);
	if (!(status & SR_REQ))
		printk(KERN_DEBUG "scsi%d: REQ not asserted, phase unknown.\n", HOSTNO);
	else {
		for (i = 0; (phases[i].value != PHASE_UNKNOWN) &&
		     (phases[i].value != (status & PHASE_MASK)); ++i)
			;
		printk(KERN_DEBUG "scsi%d: phase %s\n", HOSTNO, phases[i].name);
	}
}

#endif

/*
 * ++roman: New scheme of calling NCR5380_main()
 *
 * If we're not in an interrupt, we can call our main directly, it cannot be
 * already running. Else, we queue it on a task queue, if not 'main_running'
 * tells us that a lower level is already executing it. This way,
 * 'main_running' needs not be protected in a special way.
 *
 * queue_main() is a utility function for putting our main onto the task
 * queue, if main_running is false. It should be called only from a
 * interrupt or bottom half.
 */

#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>

/* Schedule NCR5380_main() to run from the workqueue unless it is already
 * executing; safe to call from interrupt context. */
static inline void queue_main(struct NCR5380_hostdata *hostdata)
{
	if (!hostdata->main_running) {
		/* If in interrupt and NCR5380_main() not already running,
		   queue it on the 'immediate' task queue, to be processed
		   immediately after the current interrupt processing has
		   finished. */
		schedule_work(&hostdata->main_task);
	}
	/* else: nothing to do: the running NCR5380_main() will pick up
	   any newly queued command. */
}

/**
 * NCR58380_info - report driver and host information
 * @instance: relevant scsi host instance
 *
 * For use as the host template info() handler.
 *
 * Returns the description string built once by prepare_info().
 *
 * Locks: none
 */
static const char *NCR5380_info(struct Scsi_Host *instance)
{
	struct NCR5380_hostdata *hostdata = shost_priv(instance);

	return hostdata->info;
}

/* Build the static host-description string returned by NCR5380_info().
 * The option list reflects the compile-time configuration (DIFFERENTIAL,
 * REAL_DMA, PARITY, SUPPORT_TAGS) plus the runtime TAGGED_QUEUING flag. */
static void prepare_info(struct Scsi_Host *instance)
{
	struct NCR5380_hostdata *hostdata = shost_priv(instance);

	snprintf(hostdata->info, sizeof(hostdata->info),
	         "%s, io_port 0x%lx, n_io_port %d, "
	         "base 0x%lx, irq %d, "
	         "can_queue %d, cmd_per_lun %d, "
	         "sg_tablesize %d, this_id %d, "
	         "flags { %s}, "
	         "options { %s} ",
	         instance->hostt->name, instance->io_port, instance->n_io_port,
	         instance->base, instance->irq,
	         instance->can_queue, instance->cmd_per_lun,
	         instance->sg_tablesize, instance->this_id,
	         hostdata->flags & FLAG_TAGGED_QUEUING ? "TAGGED_QUEUING " : "",
#ifdef DIFFERENTIAL
	         "DIFFERENTIAL "
#endif
#ifdef REAL_DMA
	         "REAL_DMA "
#endif
#ifdef PARITY
	         "PARITY "
#endif
#ifdef SUPPORT_TAGS
	         "SUPPORT_TAGS "
#endif
	         "");
}

/**
 * NCR5380_print_status	-	dump controller info
 * @instance: controller to dump
 *
 * Print commands in the various queues, called from NCR5380_abort
 * to aid debugging.
 */

/* printk-based dump of one command's target/LUN and CDB bytes
 * (console variant of show_Scsi_Cmnd() below). */
static void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd)
{
	int i, s;
	unsigned char *command;

	printk("scsi%d: destination target %d, lun %llu\n",
		H_NO(cmd), cmd->device->id, cmd->device->lun);
	printk(KERN_CONT " command = ");
	command = cmd->cmnd;
	printk(KERN_CONT "%2d (0x%02x)", command[0], command[0]);
	for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
		printk(KERN_CONT " %02x", command[i]);
	printk("\n");
}

static void NCR5380_print_status(struct Scsi_Host *instance)
{
	struct NCR5380_hostdata *hostdata;
	struct scsi_cmnd *ptr;
	unsigned long flags;

	NCR5380_dprint(NDEBUG_ANY, instance);
	NCR5380_dprint_phase(NDEBUG_ANY, instance);

	hostdata = (struct NCR5380_hostdata *)instance->hostdata;

	/* Walk the queues with interrupts off so the lists can't change
	 * underneath us. */
	local_irq_save(flags);
	printk("NCR5380: coroutine is%s running.\n",
		hostdata->main_running ? "" : "n't");
	if (!hostdata->connected)
		printk("scsi%d: no currently connected command\n", HOSTNO);
	else
		lprint_Scsi_Cmnd((struct scsi_cmnd *) hostdata->connected);
	printk("scsi%d: issue_queue\n", HOSTNO);
	for (ptr = (struct scsi_cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr))
		lprint_Scsi_Cmnd(ptr);

	printk("scsi%d: disconnected_queue\n", HOSTNO);
	for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr;
	     ptr = NEXT(ptr))
		lprint_Scsi_Cmnd(ptr);

	local_irq_restore(flags);
	printk("\n");
}

/* seq_file-based dump of one command, used by NCR5380_show_info(). */
static void show_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m)
{
	int i, s;
	unsigned char *command;

	seq_printf(m, "scsi%d: destination target %d, lun %llu\n",
		H_NO(cmd), cmd->device->id, cmd->device->lun);
	seq_puts(m, " command = ");
	command = cmd->cmnd;
	seq_printf(m, "%2d (0x%02x)", command[0], command[0]);
	for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
		seq_printf(m, " %02x", command[i]);
	seq_putc(m, '\n');
}

/* /proc interface: same queue dump as NCR5380_print_status() but written
 * to a seq_file.  Always returns 0. */
static int __maybe_unused NCR5380_show_info(struct seq_file *m,
					    struct Scsi_Host *instance)
{
	struct NCR5380_hostdata *hostdata;
	struct scsi_cmnd *ptr;
	unsigned long flags;

	hostdata = (struct NCR5380_hostdata *)instance->hostdata;

	local_irq_save(flags);
	seq_printf(m, "NCR5380: coroutine is%s running.\n",
		hostdata->main_running ? "" : "n't");
	if (!hostdata->connected)
		seq_printf(m, "scsi%d: no currently connected command\n", HOSTNO);
	else
		show_Scsi_Cmnd((struct scsi_cmnd *) hostdata->connected, m);
	seq_printf(m, "scsi%d: issue_queue\n", HOSTNO);
	for (ptr = (struct scsi_cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr))
		show_Scsi_Cmnd(ptr, m);

	seq_printf(m, "scsi%d: disconnected_queue\n", HOSTNO);
	for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr;
	     ptr = NEXT(ptr))
		show_Scsi_Cmnd(ptr, m);

	local_irq_restore(flags);
	return 0;
}

/**
 * NCR5380_init	-	initialise an NCR5380
 * @instance: adapter to configure
 * @flags: control flags
 *
 * Initializes *instance and corresponding 5380 chip,
 * with flags OR'd into the initial flags value.
 *
 * Notes : I assume that the host, hostno, and id bits have been
 *	set correctly. I don't care about the irq and other fields.
 *
 * Returns 0 for success
 */
static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
{
	int i;
	SETUP_HOSTDATA(instance);

	hostdata->host = instance;
	hostdata->aborted = 0;
	hostdata->id_mask = 1 << instance->this_id;
	/* id_higher_mask = all IDs above ours; used during arbitration to
	 * detect a higher-priority initiator on the data bus. */
	hostdata->id_higher_mask = 0;
	for (i = hostdata->id_mask; i <= 0x80; i <<= 1)
		if (i > hostdata->id_mask)
			hostdata->id_higher_mask |= i;
	for (i = 0; i < 8; ++i)
		hostdata->busy[i] = 0;
#ifdef SUPPORT_TAGS
	init_tags(hostdata);
#endif
#if defined (REAL_DMA)
	hostdata->dma_len = 0;
#endif
	hostdata->targets_present = 0;
	hostdata->connected = NULL;
	hostdata->issue_queue = NULL;
	hostdata->disconnected_queue = NULL;
	hostdata->flags = flags;

	INIT_WORK(&hostdata->main_task, NCR5380_main);

	prepare_info(instance);

	/* Put the chip into a known quiescent state: no asserted signals,
	 * no DMA mode, no target command phase, reselection disabled. */
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
	NCR5380_write(MODE_REG, MR_BASE);
	NCR5380_write(TARGET_COMMAND_REG, 0);
	NCR5380_write(SELECT_ENABLE_REG, 0);

	return 0;
}

/**
 * NCR5380_exit	-	remove an NCR5380
 * @instance: adapter to remove
 *
 * Assumes that no more work can be queued (e.g. by NCR5380_intr).
 * Waits for any in-flight NCR5380_main() work item to finish.
 */
static void NCR5380_exit(struct Scsi_Host *instance)
{
	struct NCR5380_hostdata *hostdata = shost_priv(instance);

	cancel_work_sync(&hostdata->main_task);
}

/**
 * NCR5380_queue_command - queue a command
 * @instance: the relevant SCSI adapter
 * @cmd: SCSI command
 *
 * cmd is added to the per instance issue_queue, with minor
 * twiddling done to the host specific fields of cmd.  If the
 * main coroutine is not running, it is restarted.
 *
 * Returns 0 on success, or SCSI_MLQUEUE_HOST_BUSY if the shared DMA
 * interrupt/lock could not be acquired.
 */
static int NCR5380_queue_command(struct Scsi_Host *instance,
				 struct scsi_cmnd *cmd)
{
	struct NCR5380_hostdata *hostdata = shost_priv(instance);
	struct scsi_cmnd *tmp;
	unsigned long flags;

#if (NDEBUG & NDEBUG_NO_WRITE)
	/* Debug build option: refuse all media writes outright. */
	switch (cmd->cmnd[0]) {
	case WRITE_6:
	case WRITE_10:
		printk(KERN_NOTICE "scsi%d: WRITE attempted with NO_WRITE debugging flag set\n",
		       H_NO(cmd));
		cmd->result = (DID_ERROR << 16);
		cmd->scsi_done(cmd);
		return 0;
	}
#endif /* (NDEBUG & NDEBUG_NO_WRITE) */

	/*
	 * We use the host_scribble field as a pointer to the next command
	 * in a queue
	 */

	SET_NEXT(cmd, NULL);
	cmd->result = 0;

	/*
	 * Insert the cmd into the issue queue. Note that REQUEST SENSE
	 * commands are added to the head of the queue since any command will
	 * clear the contingent allegiance condition that exists and the
	 * sense data is only guaranteed to be valid while the condition exists.
	 */

	/* ++guenther: now that the issue queue is being set up, we can lock ST-DMA.
	 * Otherwise a running NCR5380_main may steal the lock.
	 * Lock before actually inserting due to fairness reasons explained in
	 * atari_scsi.c. If we insert first, then it's impossible for this driver
	 * to release the lock.
	 * Stop timer for this command while waiting for the lock, or timeouts
	 * may happen (and they really do), and it's no good if the command doesn't
	 * appear in any of the queues.
	 * ++roman: Just disabling the NCR interrupt isn't sufficient here,
	 * because also a timer int can trigger an abort or reset, which would
	 * alter queues and touch the lock.
	 */
	if (!NCR5380_acquire_dma_irq(instance))
		return SCSI_MLQUEUE_HOST_BUSY;

	local_irq_save(flags);

	/* Head-insert REQUEST SENSE (and the very first command); everything
	 * else is appended at the tail to preserve ordering. */
	if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
		LIST(cmd, hostdata->issue_queue);
		SET_NEXT(cmd, hostdata->issue_queue);
		hostdata->issue_queue = cmd;
	} else {
		for (tmp = (struct scsi_cmnd *)hostdata->issue_queue;
		     NEXT(tmp); tmp = NEXT(tmp))
			;
		LIST(cmd, tmp);
		SET_NEXT(tmp, cmd);
	}
	local_irq_restore(flags);

	dprintk(NDEBUG_QUEUES, "scsi%d: command added to %s of queue\n", H_NO(cmd),
		  (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");

	/* If queue_command() is called from an interrupt (real one or bottom
	 * half), we let queue_main() do the job of taking care about main. If it
	 * is already running, this is a no-op, else main will be queued.
	 *
	 * If we're not in an interrupt, we can call NCR5380_main()
	 * unconditionally, because it cannot be already running.
	 */
	if (in_interrupt() || irqs_disabled())
		queue_main(hostdata);
	else
		NCR5380_main(&hostdata->main_task);
	return 0;
}

/* Give back the shared DMA interrupt/lock when the host is completely
 * idle (no connected command, empty queues, no hold via retain_dma_intr). */
static inline void maybe_release_dma_irq(struct Scsi_Host *instance)
{
	struct NCR5380_hostdata *hostdata = shost_priv(instance);

	/* Caller does the locking needed to set & test these data atomically */
	if (!hostdata->disconnected_queue &&
	    !hostdata->issue_queue &&
	    !hostdata->connected &&
	    !hostdata->retain_dma_intr)
		NCR5380_release_dma_irq(instance);
}

/**
 * NCR5380_main	-	NCR state machines
 *
 * NCR5380_main is a coroutine that runs as long as more work can
 * be done on the NCR5380 host adapters in a system.  Both
 * NCR5380_queue_command() and NCR5380_intr() will try to start it
 * in case it is not running.
 *
 * Locks: called as its own thread with no locks held.
 */
static void NCR5380_main(struct work_struct *work)
{
	struct NCR5380_hostdata *hostdata =
		container_of(work, struct NCR5380_hostdata, main_task);
	struct Scsi_Host *instance = hostdata->host;
	struct scsi_cmnd *tmp, *prev;
	int done;
	unsigned long flags;

	/*
	 * We run (with interrupts disabled) until we're sure that none of
	 * the host adapters have anything that can be done, at which point
	 * we set main_running to 0 and exit.
	 *
	 * Interrupts are enabled before doing various other internal
	 * instructions, after we've decided that we need to run through
	 * the loop again.
	 *
	 * this should prevent any race conditions.
	 *
	 * ++roman: Just disabling the NCR interrupt isn't sufficient here,
	 * because also a timer int can trigger an abort or reset, which can
	 * alter queues and touch the Falcon lock.
	 */

	/* Tell int handlers main() is now already executing.  Note that
	   no races are possible here. If an int comes in before
	   'main_running' is set here, and queues/executes main via the
	   task queue, it doesn't do any harm, just this instance of main
	   won't find any work left to do. */
	if (hostdata->main_running)
		return;
	hostdata->main_running = 1;

	local_save_flags(flags);
	do {
		local_irq_disable();	/* Freeze request queues */
		done = 1;

		if (!hostdata->connected) {
			dprintk(NDEBUG_MAIN, "scsi%d: not connected\n", HOSTNO);
			/*
			 * Search through the issue_queue for a command destined
			 * for a target that's not busy.
			 */
#if (NDEBUG & NDEBUG_LISTS)
			/* Debug-only sanity walk: detect a cycle in the list. */
			for (tmp = (struct scsi_cmnd *) hostdata->issue_queue, prev = NULL;
			     tmp && (tmp != prev); prev = tmp, tmp = NEXT(tmp))
				;
			/*printk("%p ", tmp);*/
			if ((tmp == prev) && tmp)
				printk(" LOOP\n");
			/* else printk("\n"); */
#endif
			for (tmp = (struct scsi_cmnd *) hostdata->issue_queue,
			     prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp)) {
				u8 lun = tmp->device->lun;

				dprintk(NDEBUG_LISTS,
					"MAIN tmp=%p target=%d busy=%d lun=%d\n",
					tmp, scmd_id(tmp), hostdata->busy[scmd_id(tmp)],
					lun);
				/* When we find one, remove it from the issue queue. */
				/* ++guenther: possible race with Falcon locking */
				if (
#ifdef SUPPORT_TAGS
				    !is_lun_busy( tmp, tmp->cmnd[0] != REQUEST_SENSE)
#else
				    !(hostdata->busy[tmp->device->id] & (1 << lun))
#endif
				    ) {
					/* ++guenther: just to be sure, this must be atomic */
					local_irq_disable();
					if (prev) {
						REMOVE(prev, NEXT(prev), tmp, NEXT(tmp));
						SET_NEXT(prev, NEXT(tmp));
					} else {
						REMOVE(-1, hostdata->issue_queue, tmp, NEXT(tmp));
						hostdata->issue_queue = NEXT(tmp);
					}
					SET_NEXT(tmp, NULL);
					/* Hold the DMA irq while selection is in
					 * flight and the queues look empty. */
					hostdata->retain_dma_intr++;

					/* reenable interrupts after finding one */
					local_irq_restore(flags);

					/*
					 * Attempt to establish an I_T_L nexus here.
					 * On success, instance->hostdata->connected is set.
					 * On failure, we must add the command back to the
					 * issue queue so we can keep trying.
					 */
					dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d "
						    "lun %d removed from issue_queue\n",
						    HOSTNO, tmp->device->id, lun);
					/*
					 * REQUEST SENSE commands are issued without tagged
					 * queueing, even on SCSI-II devices because the
					 * contingent allegiance condition exists for the
					 * entire unit.
					 */
					/* ++roman: ...and the standard also requires that
					 * REQUEST SENSE command are untagged.
					 */

#ifdef SUPPORT_TAGS
					cmd_get_tag(tmp, tmp->cmnd[0] != REQUEST_SENSE);
#endif
					if (!NCR5380_select(instance, tmp)) {
						local_irq_disable();
						hostdata->retain_dma_intr--;
						/* release if target did not response! */
						maybe_release_dma_irq(instance);
						local_irq_restore(flags);
						break;
					} else {
						/* Selection failed: put the command
						 * back on the head of issue_queue and
						 * free any tag we just took. */
						local_irq_disable();
						LIST(tmp, hostdata->issue_queue);
						SET_NEXT(tmp, hostdata->issue_queue);
						hostdata->issue_queue = tmp;
#ifdef SUPPORT_TAGS
						cmd_free_tag(tmp);
#endif
						hostdata->retain_dma_intr--;
						local_irq_restore(flags);
						dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, "
							    "returned to issue_queue\n", HOSTNO);
						if (hostdata->connected)
							break;
					}
				} /* if target/lun/target queue is not busy */
			} /* for issue_queue */
		} /* if (!hostdata->connected) */

		if (hostdata->connected
#ifdef REAL_DMA
		    && !hostdata->dma_len
#endif
		    ) {
			local_irq_restore(flags);
			dprintk(NDEBUG_MAIN, "scsi%d: main: performing information transfer\n",
				    HOSTNO);
			NCR5380_information_transfer(instance);
			dprintk(NDEBUG_MAIN, "scsi%d: main: done set false\n", HOSTNO);
			done = 0;
		}
	} while (!done);

	/* Better allow ints _after_ 'main_running' has been cleared, else
	   an interrupt could believe we'll pick up the work it left for
	   us, but we won't see it anymore here... */
	hostdata->main_running = 0;
	local_irq_restore(flags);
}


#ifdef REAL_DMA
/*
 * Function : void NCR5380_dma_complete (struct Scsi_Host *instance)
 *
 * Purpose : Called by interrupt handler when DMA finishes or a phase
 *	mismatch occurs (which would finish the DMA transfer).
 *
 * Inputs : instance - this instance of the NCR5380.
 *
 */
static void NCR5380_dma_complete(struct Scsi_Host *instance)
{
	SETUP_HOSTDATA(instance);
	int transferred;
	unsigned char **data;
	volatile int *count;
	int saved_data = 0, overrun = 0;
	unsigned char p;

	if (!hostdata->connected) {
		printk(KERN_WARNING "scsi%d: received end of DMA interrupt with "
		       "no connected cmd\n", HOSTNO);
		return;
	}

	/* Read-overrun workaround: if the chip already latched one more byte
	 * than the DMA engine consumed, save it now before resetting modes. */
	if (hostdata->read_overruns) {
		p = hostdata->connected->SCp.phase;
		if (p & SR_IO) {
			udelay(10);
			if ((NCR5380_read(BUS_AND_STATUS_REG) &
			     (BASR_PHASE_MATCH|BASR_ACK)) ==
			    (BASR_PHASE_MATCH|BASR_ACK)) {
				saved_data = NCR5380_read(INPUT_DATA_REG);
				overrun = 1;
				dprintk(NDEBUG_DMA, "scsi%d: read overrun handled\n", HOSTNO);
			}
		}
	}

	dprintk(NDEBUG_DMA, "scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",
		   HOSTNO, NCR5380_read(BUS_AND_STATUS_REG),
		   NCR5380_read(STATUS_REG));

#if defined(CONFIG_SUN3)
	if ((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) {
		pr_err("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n",
		       instance->host_no);
		BUG();
	}

	/* make sure we're not stuck in a data phase */
	if ((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) ==
	    (BASR_PHASE_MATCH | BASR_ACK)) {
		pr_err("scsi%d: BASR %02x\n", instance->host_no,
		       NCR5380_read(BUS_AND_STATUS_REG));
		pr_err("scsi%d: bus stuck in data phase -- probably a single byte overrun!\n",
		       instance->host_no);
		BUG();
	}
#endif

	/* Clear pending interrupt state and drop out of DMA mode. */
	(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
	NCR5380_write(MODE_REG, MR_BASE);
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);

	/* Advance the SCSI pointer past what DMA actually moved. */
	transferred = hostdata->dma_len - NCR5380_dma_residual(instance);
	hostdata->dma_len = 0;

	data = (unsigned char **)&hostdata->connected->SCp.ptr;
	count = &hostdata->connected->SCp.this_residual;
	*data += transferred;
	*count -= transferred;

	if (hostdata->read_overruns) {
		int cnt, toPIO;

		/* Still in the same input data phase: fetch the last
		 * 'read_overruns' bytes via PIO (including the saved one). */
		if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) {
			cnt = toPIO = hostdata->read_overruns;
			if (overrun) {
				dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n");
				*(*data)++ = saved_data;
				(*count)--;
				cnt--;
				toPIO--;
			}
			dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data);
			NCR5380_transfer_pio(instance, &p, &cnt, data);
			*count -= toPIO - cnt;
		}
	}
}
#endif /* REAL_DMA */


/**
 * NCR5380_intr - generic NCR5380 irq handler
 * @irq: interrupt number
 * @dev_id: device info
 *
 * Handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses
 * from the disconnected queue, and restarting NCR5380_main()
 * as required.
 */
static irqreturn_t NCR5380_intr(int irq, void *dev_id)
{
	struct Scsi_Host *instance = dev_id;
	int done = 1, handled = 0;
	unsigned char basr;

	dprintk(NDEBUG_INTR, "scsi%d: NCR5380 irq triggered\n", HOSTNO);

	/* Look for pending interrupts */
	basr = NCR5380_read(BUS_AND_STATUS_REG);
	dprintk(NDEBUG_INTR, "scsi%d: BASR=%02x\n", HOSTNO, basr);
	/* dispatch to appropriate routine if found and done=0 */
	if (basr & BASR_IRQ) {
		NCR5380_dprint(NDEBUG_INTR, instance);
		/* SEL+IO together mean a target is reselecting us. */
		if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) {
			done = 0;
			dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO);
			NCR5380_reselect(instance);
			(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		} else if (basr & BASR_PARITY_ERROR) {
			dprintk(NDEBUG_INTR, "scsi%d: PARITY interrupt\n", HOSTNO);
			(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		} else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) {
			dprintk(NDEBUG_INTR, "scsi%d: RESET interrupt\n", HOSTNO);
			(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		} else {
			/*
			 * The rest of the interrupt conditions can occur only during a
			 * DMA transfer
			 */

#if defined(REAL_DMA)
			/*
			 * We should only get PHASE MISMATCH and EOP interrupts if we have
			 * DMA enabled, so do a sanity check based on the current setting
			 * of the MODE register.
			 */
			if ((NCR5380_read(MODE_REG) & MR_DMA_MODE) &&
			    ((basr & BASR_END_DMA_TRANSFER) ||
			     !(basr & BASR_PHASE_MATCH))) {

				dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);
				NCR5380_dma_complete( instance );
				done = 0;
			} else
#endif /* REAL_DMA */
			{
/* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */
				if (basr & BASR_PHASE_MATCH)
					dprintk(NDEBUG_INTR, "scsi%d: unknown interrupt, "
						   "BASR 0x%x, MR 0x%x, SR 0x%x\n",
						   HOSTNO, basr, NCR5380_read(MODE_REG),
						   NCR5380_read(STATUS_REG));
				(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
#ifdef SUN3_SCSI_VME
				dregs->csr |= CSR_DMA_ENABLE;
#endif
			}
		} /* if !(SELECTION || PARITY) */
		handled = 1;
	} /* BASR & IRQ */ else {
		printk(KERN_NOTICE "scsi%d: interrupt without IRQ bit set in BASR, "
		       "BASR 0x%X, MR 0x%X, SR 0x%x\n", HOSTNO, basr,
		       NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG));
		(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
#ifdef SUN3_SCSI_VME
		dregs->csr |= CSR_DMA_ENABLE;
#endif
	}

	if (!done) {
		dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO);
		/* Put a call to NCR5380_main() on the queue... */
		queue_main(shost_priv(instance));
	}
	return IRQ_RETVAL(handled);
}

/*
 * Function : int NCR5380_select(struct Scsi_Host *instance,
 *	struct scsi_cmnd *cmd)
 *
 * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
 *	including ARBITRATION, SELECTION, and initial message out for
 *	IDENTIFY and queue messages.
 *
 * Inputs : instance - instantiation of the 5380 driver on which this
 *	target lives, cmd - SCSI command to execute.
 *
 * Returns : -1 if selection could not execute for some reason,
 *	0 if selection succeeded or failed because the target
 *	did not respond.
 *
 * Side effects :
 *	If bus busy, arbitration failed, etc, NCR5380_select() will exit
 *		with registers as they should have been on entry - ie
 *		SELECT_ENABLE will be set appropriately, the NCR5380
 *		will cease to drive any SCSI bus signals.
 *
 *	If successful : I_T_L or I_T_L_Q nexus will be established,
 *	instance->connected will be set to cmd.
 *	SELECT interrupt will be disabled.
 *
 *	If failed (no target) : cmd->scsi_done() will be called, and the
 *	cmd->result host byte set to DID_BAD_TARGET.
 */
static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
{
	SETUP_HOSTDATA(instance);
	unsigned char tmp[3], phase;	/* tmp[] holds IDENTIFY (+ optional tag msg) */
	unsigned char *data;
	int len;
	unsigned long timeout;
	unsigned long flags;

	hostdata->restart_select = 0;
	NCR5380_dprint(NDEBUG_ARBITRATION, instance);
	dprintk(NDEBUG_ARBITRATION, "scsi%d: starting arbitration, id = %d\n", HOSTNO,
		instance->this_id);

	/*
	 * Set the phase bits to 0, otherwise the NCR5380 won't drive the
	 * data bus during SELECTION.
	 */
	local_irq_save(flags);
	if (hostdata->connected) {
		/* A reselection beat us to the bus; caller must retry. */
		local_irq_restore(flags);
		return -1;
	}
	NCR5380_write(TARGET_COMMAND_REG, 0);

	/*
	 * Start arbitration: put our ID on the bus and enter ARBITRATE mode.
	 */
	NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
	NCR5380_write(MODE_REG, MR_ARBITRATE);

	local_irq_restore(flags);

	/* Wait for arbitration logic to complete */
#if defined(NCR_TIMEOUT)
	{
		/* Bounded busy-wait variant; shadows the outer 'timeout'. */
		unsigned long timeout = jiffies + 2*NCR_TIMEOUT;

		while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS) &&
		       time_before(jiffies, timeout) && !hostdata->connected)
			;
		if (time_after_eq(jiffies, timeout)) {
			printk("scsi : arbitration timeout at %d\n", __LINE__);
			NCR5380_write(MODE_REG, MR_BASE);
			NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
			return -1;
		}
	}
#else /* NCR_TIMEOUT */
	while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS) &&
	       !hostdata->connected)
		;
#endif

	dprintk(NDEBUG_ARBITRATION, "scsi%d: arbitration complete\n", HOSTNO);

	if (hostdata->connected) {
		/* Reselected while we were arbitrating; back off. */
		NCR5380_write(MODE_REG, MR_BASE);
		return -1;
	}
	/*
	 * The arbitration delay is 2.2us, but this is a minimum and there is
	 * no maximum so we can safely sleep for ceil(2.2) usecs to accommodate
	 * the integral nature of udelay().
	 */
	udelay(3);

	/* Check for lost arbitration (ICR read twice deliberately around the
	 * data-bus check, to close a race with a higher-priority initiator). */
	if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
	    (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) ||
	    (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
	    hostdata->connected) {
		NCR5380_write(MODE_REG, MR_BASE);
		dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting MR_ARBITRATE\n",
			HOSTNO);
		return -1;
	}

	/* After/during arbitration, BSY should be asserted.
	 * IBM DPES-31080 Version S31Q works now.
	 * Thanks to Thomas_Roesch@m2.maus.de for finding this! (Roman) */
	NCR5380_write(INITIATOR_COMMAND_REG,
		      ICR_BASE | ICR_ASSERT_SEL | ICR_ASSERT_BSY);

	if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
	    hostdata->connected) {
		NCR5380_write(MODE_REG, MR_BASE);
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n",
			HOSTNO);
		return -1;
	}

	/*
	 * Again, bus clear + bus settle time is 1.2us, however, this is
	 * a minimum so we'll udelay ceil(1.2)
	 */
#ifdef CONFIG_ATARI_SCSI_TOSHIBA_DELAY
	/* ++roman: But some targets (see above :-) seem to need a bit more... */
	udelay(15);
#else
	udelay(2);
#endif

	if (hostdata->connected) {
		NCR5380_write(MODE_REG, MR_BASE);
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		return -1;
	}

	dprintk(NDEBUG_ARBITRATION, "scsi%d: won arbitration\n", HOSTNO);

	/*
	 * Now that we have won arbitration, start Selection process, asserting
	 * the host and target ID's on the SCSI bus.
	 */
	NCR5380_write(OUTPUT_DATA_REG, (hostdata->id_mask | (1 << cmd->device->id)));

	/*
	 * Raise ATN while SEL is true before BSY goes false from arbitration,
	 * since this is the only way to guarantee that we'll get a MESSAGE OUT
	 * phase immediately after selection.
	 */
	NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_BSY |
		      ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL ));
	NCR5380_write(MODE_REG, MR_BASE);

	/*
	 * Reselect interrupts must be turned off prior to the dropping of BSY,
	 * otherwise we will trigger an interrupt.
	 */
	if (hostdata->connected) {
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		return -1;
	}

	NCR5380_write(SELECT_ENABLE_REG, 0);

	/*
	 * The initiator shall then wait at least two deskew delays and release
	 * the BSY signal.
	 */
	udelay(1);	/* wingel -- wait two bus deskew delay >2*45ns */

	/* Reset BSY */
	NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_DATA |
		      ICR_ASSERT_ATN | ICR_ASSERT_SEL));

	/*
	 * Something weird happens when we cease to drive BSY - looks
	 * like the board/chip is letting us do another read before the
	 * appropriate propagation delay has expired, and we're confusing
	 * a BSY signal from ourselves as the target's response to SELECTION.
	 *
	 * A small delay (the 'C++' frontend breaks the pipeline with an
	 * unnecessary jump, making it work on my 386-33/Trantor T128, the
	 * tighter 'C' code breaks and requires this) solves the problem -
	 * the 1 us delay is arbitrary, and only used because this delay will
	 * be the same on other platforms and since it works here, it should
	 * work there.
	 *
	 * wingel suggests that this could be due to failing to wait
	 * one deskew delay.
	 */
	udelay(1);

	dprintk(NDEBUG_SELECTION, "scsi%d: selecting target %d\n", HOSTNO, cmd->device->id);

	/*
	 * The SCSI specification calls for a 250 ms timeout for the actual
	 * selection.
	 */
	timeout = jiffies + msecs_to_jiffies(250);

	/*
	 * XXX very interesting - we're seeing a bounce where the BSY we
	 * asserted is being reflected / still asserted (propagation delay?)
	 * and it's detecting as true.  Sigh.
	 */
#if 0
	/* ++roman: If a target conformed to the SCSI standard, it wouldn't assert
	 * IO while SEL is true.  But again, there are some disks out there in the
	 * world that do that nevertheless.  (Somebody claimed that this announces
	 * reselection capability of the target.)  So we better skip that test and
	 * only wait for BSY... (Famous German saying: the wiser one gives in :-)
	 */
	while (time_before(jiffies, timeout) &&
	       !(NCR5380_read(STATUS_REG) & (SR_BSY | SR_IO)))
		;

	if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) {
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		NCR5380_reselect(instance);
		printk(KERN_ERR "scsi%d: reselection after won arbitration?\n",
		       HOSTNO);
		NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
		return -1;
	}
#else
	while (time_before(jiffies, timeout) && !(NCR5380_read(STATUS_REG) & SR_BSY))
		;
#endif

	/*
	 * No less than two deskew delays after the initiator detects the
	 * BSY signal is true, it shall release the SEL signal and may
	 * change the DATA BUS.					-wingel
	 */
	udelay(1);

	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);

	if (!(NCR5380_read(STATUS_REG) & SR_BSY)) {
		/* Target never responded within the selection timeout. */
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		if (hostdata->targets_present & (1 << cmd->device->id)) {
			/* A previously-seen target going silent is abnormal. */
			printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO);
			if (hostdata->restart_select)
				printk(KERN_NOTICE "\trestart select\n");
			NCR5380_dprint(NDEBUG_ANY, instance);
			NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
			return -1;
		}
		cmd->result = DID_BAD_TARGET << 16;
#ifdef SUPPORT_TAGS
		cmd_free_tag(cmd);
#endif
		cmd->scsi_done(cmd);
		NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
		dprintk(NDEBUG_SELECTION, "scsi%d: target did not respond within 250ms\n", HOSTNO);
		/* NOTE(review): SELECT_ENABLE_REG is written twice in this
		 * path (above and here); the second write looks redundant —
		 * confirm against upstream NCR5380.c before removing. */
		NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
		return 0;
	}
	hostdata->targets_present |= (1 << cmd->device->id);

	/*
	 * Since we followed the SCSI spec, and raised ATN while SEL
	 * was true but before BSY was false during selection, the information
	 * transfer phase should be a MESSAGE OUT phase so that we can send the
	 * IDENTIFY message.
	 *
	 * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG
	 * message (2 bytes) with a tag ID that we increment with every command
	 * until it wraps back to 0.
	 *
	 * XXX - it turns out that there are some broken SCSI-II devices,
	 * which claim to support tagged queuing but fail when more than
	 * some number of commands are issued at once.
	 */

	/* Wait for start of REQ/ACK handshake */
	while (!(NCR5380_read(STATUS_REG) & SR_REQ))
		;

	dprintk(NDEBUG_SELECTION, "scsi%d: target %d selected, going into MESSAGE OUT phase.\n",
		HOSTNO, cmd->device->id);
	tmp[0] = IDENTIFY(1, cmd->device->lun);

#ifdef SUPPORT_TAGS
	if (cmd->tag != TAG_NONE) {
		tmp[1] = hostdata->last_message = SIMPLE_QUEUE_TAG;
		tmp[2] = cmd->tag;
		len = 3;
	} else
		len = 1;
#else
	len = 1;
	cmd->tag = 0;
#endif /* SUPPORT_TAGS */

	/* Send message(s) */
	data = tmp;
	phase = PHASE_MSGOUT;
	NCR5380_transfer_pio(instance, &phase, &len, &data);
	dprintk(NDEBUG_SELECTION, "scsi%d: nexus established.\n", HOSTNO);
	/* XXX need to handle errors here */
	hostdata->connected = cmd;
#ifndef SUPPORT_TAGS
	hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
#endif
#ifdef SUN3_SCSI_VME
	dregs->csr |= CSR_INTR;
#endif

	initialize_SCp(cmd);

	return 0;
}

/*
 * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance,
 *	unsigned char *phase, int *count, unsigned char **data)
 *
 * Purpose : transfers data in given phase using polled I/O
 *
 * Inputs : instance - instance of driver, *phase - pointer to
 *	what phase is expected, *count - pointer to number of
 *	bytes to transfer, **data - pointer to data pointer.
 *
 * Returns : -1 when different phase is entered without transferring
 *	maximum number of bytes, 0 if all bytes are transferred or exit
 *	is in same phase.
 *
 *	Also, *phase, *count, *data are modified in place.
 *
 * XXX Note : handling for bus free may be useful.
 */

/*
 * Note : this code is not as quick as it could be, however it
 * IS 100% reliable, and for the actual data transfer where speed
 * counts, we will always do a pseudo DMA or DMA transfer.
 */
static int NCR5380_transfer_pio(struct Scsi_Host *instance,
				unsigned char *phase, int *count,
				unsigned char **data)
{
	register unsigned char p = *phase, tmp;
	register int c = *count;	/* bytes remaining; decremented per byte */
	register unsigned char *d = *data;

	/*
	 * The NCR5380 chip will only drive the SCSI bus when the
	 * phase specified in the appropriate bits of the TARGET COMMAND
	 * REGISTER match the STATUS REGISTER
	 */
	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));

	do {
		/*
		 * Wait for assertion of REQ, after which the phase bits will be
		 * valid
		 */
		while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ))
			;

		dprintk(NDEBUG_HANDSHAKE, "scsi%d: REQ detected\n", HOSTNO);

		/* Check for phase mismatch */
		if ((tmp & PHASE_MASK) != p) {
			dprintk(NDEBUG_PIO, "scsi%d: phase mismatch\n", HOSTNO);
			NCR5380_dprint_phase(NDEBUG_PIO, instance);
			break;
		}

		/* Do actual transfer from SCSI bus to / from memory */
		if (!(p & SR_IO))
			NCR5380_write(OUTPUT_DATA_REG, *d);
		else
			*d = NCR5380_read(CURRENT_SCSI_DATA_REG);

		++d;

		/*
		 * The SCSI standard suggests that in MSGOUT phase, the initiator
		 * should drop ATN on the last byte of the message phase
		 * after REQ has been asserted for the handshake but before
		 * the initiator raises ACK.
		 */
		if (!(p & SR_IO)) {
			if (!((p & SR_MSG) && c > 1)) {
				/* Last MSGOUT byte (or non-message phase):
				 * assert data, then ACK without ATN. */
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
					      ICR_ASSERT_DATA);
				NCR5380_dprint(NDEBUG_PIO, instance);
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
					      ICR_ASSERT_DATA | ICR_ASSERT_ACK);
			} else {
				/* More message bytes follow: keep ATN up. */
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
					      ICR_ASSERT_DATA | ICR_ASSERT_ATN);
				NCR5380_dprint(NDEBUG_PIO, instance);
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
					      ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
			}
		} else {
			NCR5380_dprint(NDEBUG_PIO, instance);
			NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
		}

		/* Wait for the target to drop REQ, completing the handshake. */
		while (NCR5380_read(STATUS_REG) & SR_REQ)
			;

		dprintk(NDEBUG_HANDSHAKE, "scsi%d: req false, handshake complete\n", HOSTNO);

		/*
		 * We have several special cases to consider during REQ/ACK handshaking :
		 * 1.  We were in MSGOUT phase, and we are on the last byte of the
		 *     message.  ATN must be dropped as ACK is dropped.
		 *
		 * 2.  We are in a MSGIN phase, and we are on the last byte of the
		 *     message.  We must exit with ACK asserted, so that the calling
		 *     code may raise ATN before dropping ACK to reject the message.
		 *
		 * 3.  ACK and ATN are clear and the target may proceed as normal.
		 */
		if (!(p == PHASE_MSGIN && c == 1)) {
			if (p == PHASE_MSGOUT && c > 1)
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
			else
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		}
	} while (--c);

	dprintk(NDEBUG_PIO, "scsi%d: residual %d\n", HOSTNO, c);

	*count = c;
	*data = d;
	tmp = NCR5380_read(STATUS_REG);
	/* The phase read from the bus is valid if either REQ is (already)
	 * asserted or if ACK hasn't been released yet. The latter is the case if
	 * we're in MSGIN and all wanted bytes have been received.
	 */
	if ((tmp & SR_REQ) || (p == PHASE_MSGIN && c == 0))
		*phase = tmp & PHASE_MASK;
	else
		*phase = PHASE_UNKNOWN;

	if (!c || (*phase == p))
		return 0;
	else
		return -1;
}

/*
 * Function : do_abort (Scsi_Host *host)
 *
 * Purpose : abort the currently established nexus.
Should only be * called from a routine which can drop into a * * Returns : 0 on success, -1 on failure. */ static int do_abort(struct Scsi_Host *instance) { unsigned char tmp, *msgptr, phase; int len; /* Request message out phase */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); /* * Wait for the target to indicate a valid phase by asserting * REQ. Once this happens, we'll have either a MSGOUT phase * and can immediately send the ABORT message, or we'll have some * other phase and will have to source/sink data. * * We really don't care what value was on the bus or what value * the target sees, so we just handshake. */ while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ)) ; NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); if ((tmp & PHASE_MASK) != PHASE_MSGOUT) { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK); while (NCR5380_read(STATUS_REG) & SR_REQ) ; NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); } tmp = ABORT; msgptr = &tmp; len = 1; phase = PHASE_MSGOUT; NCR5380_transfer_pio(instance, &phase, &len, &msgptr); /* * If we got here, and the command completed successfully, * we're about to go into bus free state. */ return len ? -1 : 0; } #if defined(REAL_DMA) /* * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance, * unsigned char *phase, int *count, unsigned char **data) * * Purpose : transfers data in given phase using either real * or pseudo DMA. * * Inputs : instance - instance of driver, *phase - pointer to * what phase is expected, *count - pointer to number of * bytes to transfer, **data - pointer to data pointer. * * Returns : -1 when different phase is entered without transferring * maximum number of bytes, 0 if all bytes or transferred or exit * is in same phase. * * Also, *phase, *count, *data are modified in place. 
 */

static int NCR5380_transfer_dma(struct Scsi_Host *instance,
				unsigned char *phase, int *count,
				unsigned char **data)
{
	SETUP_HOSTDATA(instance);
	register int c = *count;
	register unsigned char p = *phase;
	unsigned long flags;

#if defined(CONFIG_SUN3)
	/* sanity check: the caller must have run the DMA setup first */
	if (!sun3_dma_setup_done) {
		pr_err("scsi%d: transfer_dma without setup!\n",
		       instance->host_no);
		BUG();
	}
	hostdata->dma_len = c;

	dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n",
		instance->host_no, (p & SR_IO) ? "reading" : "writing",
		c, (p & SR_IO) ? "to" : "from", *data);

	/* netbsd turns off ints here, why not be safe and do it too */
	local_irq_save(flags);

	/* send start chain */
	sun3scsi_dma_start(c, *data);

	if (p & SR_IO) {
		/* Target -> initiator: program chip for DMA receive. */
		NCR5380_write(TARGET_COMMAND_REG, 1);
		NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		NCR5380_write(INITIATOR_COMMAND_REG, 0);
		NCR5380_write(MODE_REG,
			      (NCR5380_read(MODE_REG) | MR_DMA_MODE | MR_ENABLE_EOP_INTR));
		NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
	} else {
		/* Initiator -> target: program chip for DMA send. */
		NCR5380_write(TARGET_COMMAND_REG, 0);
		NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_DATA);
		NCR5380_write(MODE_REG,
			      (NCR5380_read(MODE_REG) | MR_DMA_MODE | MR_ENABLE_EOP_INTR));
		NCR5380_write(START_DMA_SEND_REG, 0);
	}

#ifdef SUN3_SCSI_VME
	dregs->csr |= CSR_DMA_ENABLE;
#endif

	local_irq_restore(flags);

	sun3_dma_active = 1;

#else /* !defined(CONFIG_SUN3) */
	register unsigned char *d = *data;
	unsigned char tmp;

	if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) {
		/* Bus is no longer in the expected phase; report it back. */
		*phase = tmp;
		return -1;
	}

	/* Reserve room for deliberate read overruns (see read_overruns). */
	if (hostdata->read_overruns && (p & SR_IO))
		c -= hostdata->read_overruns;

	dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n",
		HOSTNO, (p & SR_IO) ? "reading" : "writing",
		c, (p & SR_IO) ? "to" : "from", d);

	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));

#ifdef REAL_DMA
	NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_ENABLE_EOP_INTR | MR_MONITOR_BSY);
#endif /* def REAL_DMA */

	if (!(hostdata->flags & FLAG_LATE_DMA_SETUP)) {
		/* On the Medusa, it is a must to initialize the DMA before
		 * starting the NCR. This is also the cleaner way for the TT.
		 */
		local_irq_save(flags);
		hostdata->dma_len = (p & SR_IO) ?
			NCR5380_dma_read_setup(instance, d, c) :
			NCR5380_dma_write_setup(instance, d, c);
		local_irq_restore(flags);
	}

	if (p & SR_IO)
		NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
	else {
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
		NCR5380_write(START_DMA_SEND_REG, 0);
	}

	if (hostdata->flags & FLAG_LATE_DMA_SETUP) {
		/* On the Falcon, the DMA setup must be done after the last
		 * NCR access, else the DMA setup gets trashed!
		 */
		local_irq_save(flags);
		hostdata->dma_len = (p & SR_IO) ?
			NCR5380_dma_read_setup(instance, d, c) :
			NCR5380_dma_write_setup(instance, d, c);
		local_irq_restore(flags);
	}
#endif /* !defined(CONFIG_SUN3) */

	return 0;
}
#endif /* defined(REAL_DMA) */

/*
 * Function : NCR5380_information_transfer (struct Scsi_Host *instance)
 *
 * Purpose : run through the various SCSI phases and do as the target
 *	directs us to.  Operates on the currently connected command,
 *	instance->connected.
 *
 * Inputs : instance, instance for which we are doing commands
 *
 * Side effects : SCSI things happen, the disconnected queue will be
 *	modified if a command disconnects, *instance->connected will
 *	change.
 *
 * XXX Note : we need to watch for bus free or a reset condition here
 *	to recover from an unexpected bus free condition.
 */

static void NCR5380_information_transfer(struct Scsi_Host *instance)
{
	SETUP_HOSTDATA(instance);
	unsigned long flags;
	unsigned char msgout = NOP;	/* next MESSAGE OUT byte to send */
	int sink = 0;			/* set to discard data until MSGOUT */
	int len;
#if defined(REAL_DMA)
	int transfersize;
#endif
	unsigned char *data;
	unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
	struct scsi_cmnd *cmd = (struct scsi_cmnd *) hostdata->connected;

#ifdef SUN3_SCSI_VME
	dregs->csr |= CSR_INTR;
#endif

	while (1) {
		tmp = NCR5380_read(STATUS_REG);
		/* We only have a valid SCSI phase when REQ is asserted */
		if (tmp & SR_REQ) {
			phase = (tmp & PHASE_MASK);
			if (phase != old_phase) {
				old_phase = phase;
				NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);
			}
#if defined(CONFIG_SUN3)
			if (phase == PHASE_CMDOUT) {
#if defined(REAL_DMA)
				void *d;
				unsigned long count;

				if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
					count = cmd->SCp.buffer->length;
					d = sg_virt(cmd->SCp.buffer);
				} else {
					count = cmd->SCp.this_residual;
					d = cmd->SCp.ptr;
				}
				/* this command setup for dma yet? */
				if ((count >= DMA_MIN_SIZE) && (sun3_dma_setup_done != cmd)) {
					if (cmd->request->cmd_type == REQ_TYPE_FS) {
						sun3scsi_dma_setup(d, count,
								   rq_data_dir(cmd->request));
						sun3_dma_setup_done = cmd;
					}
				}
#endif
#ifdef SUN3_SCSI_VME
				dregs->csr |= CSR_INTR;
#endif
			}
#endif /* CONFIG_SUN3 */

			if (sink && (phase != PHASE_MSGOUT)) {
				/* Discard one handshake's worth of data while
				 * requesting MESSAGE OUT via ATN. */
				NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));

				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN |
					      ICR_ASSERT_ACK);
				while (NCR5380_read(STATUS_REG) & SR_REQ)
					;
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
					      ICR_ASSERT_ATN);
				sink = 0;
				continue;
			}

			switch (phase) {
			case PHASE_DATAOUT:
#if (NDEBUG & NDEBUG_NO_DATAOUT)
				printk("scsi%d: NDEBUG_NO_DATAOUT set, attempted DATAOUT "
				       "aborted\n", HOSTNO);
				sink = 1;
				do_abort(instance);
				cmd->result = DID_ERROR << 16;
				cmd->scsi_done(cmd);
				return;
#endif
				/* fall through to DATAIN handling */
			case PHASE_DATAIN:
				/*
				 * If there is no room left in the current buffer in the
				 * scatter-gather list, move onto the next one.
				 */
				if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
					++cmd->SCp.buffer;
					--cmd->SCp.buffers_residual;
					cmd->SCp.this_residual = cmd->SCp.buffer->length;
					cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
					/* ++roman: Try to merge some scatter-buffers if
					 * they are at contiguous physical addresses.
					 */
					merge_contiguous_buffers(cmd);
					dprintk(NDEBUG_INFORMATION, "scsi%d: %d bytes and %d buffers left\n",
						HOSTNO, cmd->SCp.this_residual,
						cmd->SCp.buffers_residual);
				}

				/*
				 * The preferred transfer method is going to be
				 * PSEUDO-DMA for systems that are strictly PIO,
				 * since we can let the hardware do the handshaking.
				 *
				 * For this to work, we need to know the transfersize
				 * ahead of time, since the pseudo-DMA code will sit
				 * in an unconditional loop.
				 */

				/* ++roman: I suggest, this should be
				 *   #if def(REAL_DMA)
				 * instead of leaving REAL_DMA out.
				 */
#if defined(REAL_DMA)
				if (
#if !defined(CONFIG_SUN3)
				    !cmd->device->borken &&
#endif
				    (transfersize = NCR5380_dma_xfer_len(instance, cmd, phase)) >= DMA_MIN_SIZE) {
					len = transfersize;
					cmd->SCp.phase = phase;
					if (NCR5380_transfer_dma(instance, &phase,
					    &len, (unsigned char **)&cmd->SCp.ptr)) {
						/*
						 * If the watchdog timer fires, all future
						 * accesses to this device will use the
						 * polled-IO.
						 */
						scmd_printk(KERN_INFO, cmd,
							    "switching to slow handshake\n");
						cmd->device->borken = 1;
						NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
							      ICR_ASSERT_ATN);
						sink = 1;
						do_abort(instance);
						cmd->result = DID_ERROR << 16;
						cmd->scsi_done(cmd);
						/* XXX - need to source or sink data here, as appropriate */
					} else {
#ifdef REAL_DMA
						/* ++roman: When using real DMA,
						 * information_transfer() should return after
						 * starting DMA since it has nothing more to
						 * do.
						 */
						return;
#else
						cmd->SCp.this_residual -= transfersize - len;
#endif
					}
				} else
#endif /* defined(REAL_DMA) */
					NCR5380_transfer_pio(instance, &phase,
							     (int *)&cmd->SCp.this_residual,
							     (unsigned char **)&cmd->SCp.ptr);
#if defined(CONFIG_SUN3) && defined(REAL_DMA)
				/* if we had intended to dma that command clear it */
				if (sun3_dma_setup_done == cmd)
					sun3_dma_setup_done = NULL;
#endif
				break;
			case PHASE_MSGIN:
				len = 1;
				data = &tmp;
				NCR5380_write(SELECT_ENABLE_REG, 0);	/* disable reselects */
				NCR5380_transfer_pio(instance, &phase, &len, &data);
				cmd->SCp.Message = tmp;

				switch (tmp) {
				/*
				 * Linking lets us reduce the time required to get the
				 * next command out to the device, hopefully this will
				 * mean we don't waste another revolution due to the delays
				 * required by ARBITRATION and another SELECTION.
				 *
				 * In the current implementation proposal, low level drivers
				 * merely have to start the next command, pointed to by
				 * next_link, done() is called as with unlinked commands.
				 */
#ifdef LINKED
				case LINKED_CMD_COMPLETE:
				case LINKED_FLG_CMD_COMPLETE:
					/* Accept message by clearing ACK */
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);

					dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked command "
						"complete.\n", HOSTNO, cmd->device->id, cmd->device->lun);

					/* Enable reselect interrupts */
					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);

					/*
					 * Sanity check : A linked command should only terminate
					 * with one of these messages if there are more linked
					 * commands available.
					 */
					if (!cmd->next_link) {
						printk(KERN_NOTICE "scsi%d: target %d lun %llu "
						       "linked command complete, no next_link\n",
						       HOSTNO, cmd->device->id, cmd->device->lun);
						sink = 1;
						do_abort(instance);
						return;
					}

					initialize_SCp(cmd->next_link);
					/* The next command is still part of this process; copy it
					 * and don't free it!
					 */
					cmd->next_link->tag = cmd->tag;
					cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
					dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked request "
						"done, calling scsi_done().\n",
						HOSTNO, cmd->device->id, cmd->device->lun);
					cmd->scsi_done(cmd);
					cmd = hostdata->connected;
					break;
#endif /* def LINKED */
				case ABORT:
				case COMMAND_COMPLETE:
					/* Accept message by clearing ACK */
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
					dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %llu "
						"completed\n", HOSTNO, cmd->device->id, cmd->device->lun);

					local_irq_save(flags);
					hostdata->retain_dma_intr++;
					hostdata->connected = NULL;
#ifdef SUPPORT_TAGS
					cmd_free_tag(cmd);
					if (status_byte(cmd->SCp.Status) == QUEUE_FULL) {
						/* Turn a QUEUE FULL status into BUSY, I think the
						 * mid level cannot handle QUEUE FULL :-( (The
						 * command is retried after BUSY). Also update our
						 * queue size to the number of currently issued
						 * commands now.
						 */
						/* ++Andreas: the mid level code knows about
						   QUEUE_FULL now. */
						struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][cmd->device->lun];

						dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu returned "
							"QUEUE_FULL after %d commands\n",
							HOSTNO, cmd->device->id, cmd->device->lun,
							ta->nr_allocated);
						if (ta->queue_size > ta->nr_allocated)
							ta->nr_allocated = ta->queue_size;
					}
#else
					hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
#endif
					/* Enable reselect interrupts */
					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);

					/*
					 * I'm not sure what the correct thing to do here is :
					 *
					 * If the command that just executed is NOT a request
					 * sense, the obvious thing to do is to set the result
					 * code to the values of the stored parameters.
					 *
					 * If it was a REQUEST SENSE command, we need some way to
					 * differentiate between the failure code of the original
					 * and the failure code of the REQUEST sense - the obvious
					 * case is success, where we fall through and leave the
					 * result code unchanged.
					 *
					 * The non-obvious place is where the REQUEST SENSE failed.
					 */
					if (cmd->cmnd[0] != REQUEST_SENSE)
						cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
					else if (status_byte(cmd->SCp.Status) != GOOD)
						cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);

					if ((cmd->cmnd[0] == REQUEST_SENSE) &&
					    hostdata->ses.cmd_len) {
						/* Autosense done: put the original command back. */
						scsi_eh_restore_cmnd(cmd, &hostdata->ses);
						hostdata->ses.cmd_len = 0;
					}

					if ((cmd->cmnd[0] != REQUEST_SENSE) &&
					    (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) {
						/* Queue an autosense REQUEST SENSE at the head. */
						scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0);

						dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n", HOSTNO);

						LIST(cmd,hostdata->issue_queue);
						SET_NEXT(cmd, hostdata->issue_queue);
						hostdata->issue_queue = (struct scsi_cmnd *) cmd;
						dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of "
							"issue queue\n", H_NO(cmd));
					} else {
						cmd->scsi_done(cmd);
					}

					local_irq_restore(flags);

					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
					/*
					 * Restore phase bits to 0 so an interrupted selection,
					 * arbitration can resume.
					 */
					NCR5380_write(TARGET_COMMAND_REG, 0);

					/* Wait for bus free (unless a reselection connected us). */
					while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
						barrier();

					local_irq_save(flags);
					hostdata->retain_dma_intr--;
					/* ++roman: For Falcon SCSI, release the lock on the
					 * ST-DMA here if no other commands are waiting on the
					 * disconnected queue.
					 */
					maybe_release_dma_irq(instance);
					local_irq_restore(flags);
					return;
				case MESSAGE_REJECT:
					/* Accept message by clearing ACK */
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
					/* Enable reselect interrupts */
					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
					switch (hostdata->last_message) {
					case HEAD_OF_QUEUE_TAG:
					case ORDERED_QUEUE_TAG:
					case SIMPLE_QUEUE_TAG:
						/* The target obviously doesn't support tagged
						 * queuing, even though it announced this ability in
						 * its INQUIRY data ?!? (maybe only this LUN?) Ok,
						 * clear 'tagged_supported' and lock the LUN, since
						 * the command is treated as untagged further on.
						 */
						cmd->device->tagged_supported = 0;
						hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
						cmd->tag = TAG_NONE;
						dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu rejected "
							"QUEUE_TAG message; tagged queuing "
							"disabled\n",
							HOSTNO, cmd->device->id, cmd->device->lun);
						break;
					}
					break;
				case DISCONNECT:
					/* Accept message by clearing ACK */
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
					local_irq_save(flags);
					cmd->device->disconnect = 1;
					LIST(cmd,hostdata->disconnected_queue);
					SET_NEXT(cmd, hostdata->disconnected_queue);
					hostdata->connected = NULL;
					hostdata->disconnected_queue = cmd;
					local_irq_restore(flags);
					dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %llu was "
						"moved from connected to the "
						"disconnected_queue\n", HOSTNO,
						cmd->device->id, cmd->device->lun);
					/*
					 * Restore phase bits to 0 so an interrupted selection,
					 * arbitration can resume.
					 */
					NCR5380_write(TARGET_COMMAND_REG, 0);

					/* Enable reselect interrupts */
					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
					/* Wait for bus free to avoid nasty timeouts */
					while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
						barrier();
#ifdef SUN3_SCSI_VME
					dregs->csr |= CSR_DMA_ENABLE;
#endif
					return;
					/*
					 * The SCSI data pointer is *IMPLICITLY* saved on a disconnect
					 * operation, in violation of the SCSI spec so we can safely
					 * ignore SAVE/RESTORE pointers calls.
					 *
					 * Unfortunately, some disks violate the SCSI spec and
					 * don't issue the required SAVE_POINTERS message before
					 * disconnecting, and we have to break spec to remain
					 * compatible.
					 */
				case SAVE_POINTERS:
				case RESTORE_POINTERS:
					/* Accept message by clearing ACK */
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
					/* Enable reselect interrupts */
					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
					break;
				case EXTENDED_MESSAGE:
					/*
					 * Extended messages are sent in the following format :
					 * Byte
					 * 0		EXTENDED_MESSAGE == 1
					 * 1		length (includes one byte for code, doesn't
					 *		include first two bytes)
					 * 2		code
					 * 3..length+1	arguments
					 *
					 * Start the extended message buffer with the EXTENDED_MESSAGE
					 * byte, since spi_print_msg() wants the whole thing.
					 */
					extended_msg[0] = EXTENDED_MESSAGE;
					/* Accept first byte by clearing ACK */
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);

					dprintk(NDEBUG_EXTENDED, "scsi%d: receiving extended message\n", HOSTNO);

					len = 2;
					data = extended_msg + 1;
					phase = PHASE_MSGIN;
					NCR5380_transfer_pio(instance, &phase, &len, &data);
					dprintk(NDEBUG_EXTENDED, "scsi%d: length=%d, code=0x%02x\n", HOSTNO,
						(int)extended_msg[1], (int)extended_msg[2]);

					if (!len && extended_msg[1] <=
					    (sizeof(extended_msg) - 1)) {
						/* Accept third byte by clearing ACK */
						NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
						len = extended_msg[1] - 1;
						data = extended_msg + 3;
						phase = PHASE_MSGIN;

						NCR5380_transfer_pio(instance, &phase, &len, &data);
						dprintk(NDEBUG_EXTENDED, "scsi%d: message received, residual %d\n",
							HOSTNO, len);

						switch (extended_msg[2]) {
						case EXTENDED_SDTR:
						case EXTENDED_WDTR:
						case EXTENDED_MODIFY_DATA_POINTER:
						case EXTENDED_EXTENDED_IDENTIFY:
							/* tmp != 0 falls through to rejection */
							tmp = 0;
						}
					} else if (len) {
						printk(KERN_NOTICE "scsi%d: error receiving "
						       "extended message\n", HOSTNO);
						tmp = 0;
					} else {
						printk(KERN_NOTICE "scsi%d: extended message "
						       "code %02x length %d is too long\n",
						       HOSTNO, extended_msg[2], extended_msg[1]);
						tmp = 0;
					}
					/* Fall through to reject message */

					/*
					 * If we get something weird that we aren't expecting,
					 * reject it.
					 */
				default:
					if (!tmp) {
						printk(KERN_INFO "scsi%d: rejecting message ",
						       instance->host_no);
						spi_print_msg(extended_msg);
						printk("\n");
					} else if (tmp != EXTENDED_MESSAGE)
						scmd_printk(KERN_INFO, cmd,
							    "rejecting unknown message %02x\n",
							    tmp);
					else
						/* NOTE(review): indices look off — by the layout
						 * above, code is extended_msg[2] and length is
						 * extended_msg[1]; here [1]/[0] are printed.
						 * Compare with upstream NCR5380.c before fixing. */
						scmd_printk(KERN_INFO, cmd,
							    "rejecting unknown extended message code %02x, length %d\n",
							    extended_msg[1], extended_msg[0]);

					msgout = MESSAGE_REJECT;
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
					break;
				} /* switch (tmp) */
				break;
			case PHASE_MSGOUT:
				len = 1;
				data = &msgout;
				hostdata->last_message = msgout;
				NCR5380_transfer_pio(instance, &phase, &len, &data);
				if (msgout == ABORT) {
					/* Target accepted our ABORT: tear down the nexus. */
					local_irq_save(flags);
#ifdef SUPPORT_TAGS
					cmd_free_tag(cmd);
#else
					hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
#endif
					hostdata->connected = NULL;
					cmd->result = DID_ERROR << 16;
					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
					maybe_release_dma_irq(instance);
					local_irq_restore(flags);
					cmd->scsi_done(cmd);
					return;
				}
				msgout = NOP;
				break;
			case PHASE_CMDOUT:
				len = cmd->cmd_len;
				data = cmd->cmnd;
				/*
				 * XXX for performance reasons, on machines with a
				 * PSEUDO-DMA architecture we should probably
				 * use the dma transfer function.
				 */
				NCR5380_transfer_pio(instance, &phase, &len, &data);
				break;
			case PHASE_STATIN:
				len = 1;
				data = &tmp;
				NCR5380_transfer_pio(instance, &phase, &len, &data);
				cmd->SCp.Status = tmp;
				break;
			default:
				printk("scsi%d: unknown phase\n", HOSTNO);
				NCR5380_dprint(NDEBUG_ANY, instance);
			} /* switch(phase) */
		} /* if (tmp & SR_REQ) */
	} /* while (1) */
}

/*
 * Function : void NCR5380_reselect (struct Scsi_Host *instance)
 *
 * Purpose : does reselection, initializing the instance->connected
 *	field to point to the scsi_cmnd for which the I_T_L or I_T_L_Q
 *	nexus has been reestablished,
 *
 * Inputs : instance - this instance of the NCR5380.
* */ /* it might eventually prove necessary to do a dma setup on reselection, but it doesn't seem to be needed now -- sam */ static void NCR5380_reselect(struct Scsi_Host *instance) { SETUP_HOSTDATA(instance); unsigned char target_mask; unsigned char lun; #ifdef SUPPORT_TAGS unsigned char tag; #endif unsigned char msg[3]; int __maybe_unused len; unsigned char __maybe_unused *data, __maybe_unused phase; struct scsi_cmnd *tmp = NULL, *prev; /* * Disable arbitration, etc. since the host adapter obviously * lost, and tell an interrupted NCR5380_select() to restart. */ NCR5380_write(MODE_REG, MR_BASE); hostdata->restart_select = 1; target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); dprintk(NDEBUG_RESELECTION, "scsi%d: reselect\n", HOSTNO); /* * At this point, we have detected that our SCSI ID is on the bus, * SEL is true and BSY was false for at least one bus settle delay * (400 ns). * * We must assert BSY ourselves, until the target drops the SEL * signal. */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY); while (NCR5380_read(STATUS_REG) & SR_SEL) ; NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); /* * Wait for target to go into MSGIN. */ while (!(NCR5380_read(STATUS_REG) & SR_REQ)) ; #if defined(CONFIG_SUN3) && defined(REAL_DMA) /* acknowledge toggle to MSGIN */ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(PHASE_MSGIN)); /* peek at the byte without really hitting the bus */ msg[0] = NCR5380_read(CURRENT_SCSI_DATA_REG); #else len = 1; data = msg; phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data); #endif if (!(msg[0] & 0x80)) { printk(KERN_DEBUG "scsi%d: expecting IDENTIFY message, got ", HOSTNO); spi_print_msg(msg); do_abort(instance); return; } lun = (msg[0] & 0x07); #if defined(SUPPORT_TAGS) && !defined(CONFIG_SUN3) /* If the phase is still MSGIN, the target wants to send some more * messages. In case it supports tagged queuing, this is probably a * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus. 
*/ tag = TAG_NONE; if (phase == PHASE_MSGIN && (hostdata->flags & FLAG_TAGGED_QUEUING)) { /* Accept previous IDENTIFY message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); len = 2; data = msg + 1; if (!NCR5380_transfer_pio(instance, &phase, &len, &data) && msg[1] == SIMPLE_QUEUE_TAG) tag = msg[2]; dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at " "reselection\n", HOSTNO, target_mask, lun, tag); } #endif /* * Find the command corresponding to the I_T_L or I_T_L_Q nexus we * just reestablished, and remove it from the disconnected queue. */ for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue, prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp)) { if ((target_mask == (1 << tmp->device->id)) && (lun == tmp->device->lun) #ifdef SUPPORT_TAGS && (tag == tmp->tag) #endif ) { if (prev) { REMOVE(prev, NEXT(prev), tmp, NEXT(tmp)); SET_NEXT(prev, NEXT(tmp)); } else { REMOVE(-1, hostdata->disconnected_queue, tmp, NEXT(tmp)); hostdata->disconnected_queue = NEXT(tmp); } SET_NEXT(tmp, NULL); break; } } if (!tmp) { printk(KERN_WARNING "scsi%d: warning: target bitmask %02x lun %d " #ifdef SUPPORT_TAGS "tag %d " #endif "not in disconnected_queue.\n", HOSTNO, target_mask, lun #ifdef SUPPORT_TAGS , tag #endif ); /* * Since we have an established nexus that we can't do anything * with, we must abort it. 
*/ do_abort(instance); return; } #if defined(CONFIG_SUN3) && defined(REAL_DMA) /* engage dma setup for the command we just saw */ { void *d; unsigned long count; if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) { count = tmp->SCp.buffer->length; d = sg_virt(tmp->SCp.buffer); } else { count = tmp->SCp.this_residual; d = tmp->SCp.ptr; } /* setup this command for dma if not already */ if ((count >= DMA_MIN_SIZE) && (sun3_dma_setup_done != tmp)) { sun3scsi_dma_setup(d, count, rq_data_dir(tmp->request)); sun3_dma_setup_done = tmp; } } NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); #endif /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); #if defined(SUPPORT_TAGS) && defined(CONFIG_SUN3) /* If the phase is still MSGIN, the target wants to send some more * messages. In case it supports tagged queuing, this is probably a * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus. */ tag = TAG_NONE; if (phase == PHASE_MSGIN && setup_use_tagged_queuing) { /* Accept previous IDENTIFY message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); len = 2; data = msg + 1; if (!NCR5380_transfer_pio(instance, &phase, &len, &data) && msg[1] == SIMPLE_QUEUE_TAG) tag = msg[2]; dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at reselection\n" HOSTNO, target_mask, lun, tag); } #endif hostdata->connected = tmp; dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %llu, tag = %d\n", HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag); } /* * Function : int NCR5380_abort (struct scsi_cmnd *cmd) * * Purpose : abort a command * * Inputs : cmd - the scsi_cmnd to abort, code - code to set the * host byte of the result field to, if zero DID_ABORTED is * used. * * Returns : SUCCESS - success, FAILED on failure. * * XXX - there is no way to abort the command that is currently * connected, you have to wait for it to complete. 
If this is
 * a problem, we could implement longjmp() / setjmp(), setjmp()
 * called where the loop started in NCR5380_main().
 */

static int NCR5380_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *instance = cmd->device->host;
	SETUP_HOSTDATA(instance);
	struct scsi_cmnd *tmp, **prev;
	unsigned long flags;

	scmd_printk(KERN_NOTICE, cmd, "aborting command\n");

	NCR5380_print_status(instance);

	/* All queue manipulation below happens with interrupts off; every
	 * return path restores them via local_irq_restore(). */
	local_irq_save(flags);

	dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
		    NCR5380_read(BUS_AND_STATUS_REG),
		    NCR5380_read(STATUS_REG));

#if 1
	/*
	 * Case 1 : If the command is the currently executing command,
	 * we'll set the aborted flag and return control so that
	 * information transfer routine can exit cleanly.
	 */

	if (hostdata->connected == cmd) {

		dprintk(NDEBUG_ABORT, "scsi%d: aborting connected command\n", HOSTNO);
		/*
		 * We should perform BSY checking, and make sure we haven't slipped
		 * into BUS FREE.
		 */

		/* NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_ATN); */
		/*
		 * Since we can't change phases until we've completed the current
		 * handshake, we have to source or sink a byte of data if the current
		 * phase is not MSGOUT.
		 */

		/*
		 * Return control to the executing NCR drive so we can clear the
		 * aborted flag and get back into our main loop.
		 */

		if (do_abort(instance) == 0) {
			hostdata->aborted = 1;
			hostdata->connected = NULL;
			cmd->result = DID_ABORT << 16;
			/* Free the tag or the per-target/LUN busy bit, depending
			 * on whether tagged queuing is compiled in. */
#ifdef SUPPORT_TAGS
			cmd_free_tag(cmd);
#else
			hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
#endif
			maybe_release_dma_irq(instance);
			local_irq_restore(flags);
			cmd->scsi_done(cmd);
			return SUCCESS;
		} else {
			local_irq_restore(flags);
			printk("scsi%d: abort of connected command failed!\n", HOSTNO);
			return FAILED;
		}
	}
#endif

	/*
	 * Case 2 : If the command hasn't been issued yet, we simply remove it
	 *	    from the issue queue.
	 */
	for (prev = (struct scsi_cmnd **)&(hostdata->issue_queue),
	     tmp = (struct scsi_cmnd *)hostdata->issue_queue;
	     tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) {
		if (cmd == tmp) {
			REMOVE(5, *prev, tmp, NEXT(tmp));
			(*prev) = NEXT(tmp);
			SET_NEXT(tmp, NULL);
			tmp->result = DID_ABORT << 16;
			maybe_release_dma_irq(instance);
			local_irq_restore(flags);
			dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n",
				    HOSTNO);
			/* Tagged queuing note: no tag to free here, hasn't been assigned
			 * yet... */
			tmp->scsi_done(tmp);
			return SUCCESS;
		}
	}

	/*
	 * Case 3 : If any commands are connected, we're going to fail the abort
	 *	    and let the high level SCSI driver retry at a later time or
	 *	    issue a reset.
	 *
	 *	    Timeouts, and therefore aborted commands, will be highly unlikely
	 *	    and handling them cleanly in this situation would make the common
	 *	    case of noresets less efficient, and would pollute our code.  So,
	 *	    we fail.
	 */

	if (hostdata->connected) {
		local_irq_restore(flags);
		dprintk(NDEBUG_ABORT, "scsi%d: abort failed, command connected.\n", HOSTNO);
		return FAILED;
	}

	/*
	 * Case 4: If the command is currently disconnected from the bus, and
	 *	   there are no connected commands, we reconnect the I_T_L or
	 *	   I_T_L_Q nexus associated with it, go into message out, and send
	 *	   an abort message.
	 *
	 *	   This case is especially ugly. In order to reestablish the nexus, we
	 *	   need to call NCR5380_select().  The easiest way to implement this
	 *	   function was to abort if the bus was busy, and let the interrupt
	 *	   handler triggered on the SEL for reselect take care of lost arbitrations
	 *	   where necessary, meaning interrupts need to be enabled.
	 *
	 *	   When interrupts are enabled, the queues may change - so we
	 *	   can't remove it from the disconnected queue before selecting it
	 *	   because that could cause a failure in hashing the nexus if that
	 *	   device reselected.
	 *
	 *	   Since the queues may change, we can't use the pointers from when we
	 *	   first locate it.
	 *
	 * So, we must first locate the command, and if NCR5380_select()
	 * succeeds, then issue the abort, relocate the command and remove
	 * it from the disconnected queue.
	 */

	for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; tmp;
	     tmp = NEXT(tmp)) {
		if (cmd == tmp) {
			/* NCR5380_select() needs interrupts enabled (see case 4
			 * comment above), so drop them for the select/abort. */
			local_irq_restore(flags);
			dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO);

			if (NCR5380_select(instance, cmd))
				return FAILED;

			dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO);

			do_abort(instance);

			local_irq_save(flags);
			/* Re-scan the queue: it may have changed while interrupts
			 * were enabled above. */
			for (prev = (struct scsi_cmnd **)&(hostdata->disconnected_queue),
			     tmp = (struct scsi_cmnd *)hostdata->disconnected_queue;
			     tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) {
				if (cmd == tmp) {
					REMOVE(5, *prev, tmp, NEXT(tmp));
					*prev = NEXT(tmp);
					SET_NEXT(tmp, NULL);
					tmp->result = DID_ABORT << 16;
					/* We must unlock the tag/LUN immediately here, since the
					 * target goes to BUS FREE and doesn't send us another
					 * message (COMMAND_COMPLETE or the like)
					 */
#ifdef SUPPORT_TAGS
					cmd_free_tag(tmp);
#else
					hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
#endif
					maybe_release_dma_irq(instance);
					local_irq_restore(flags);
					tmp->scsi_done(tmp);
					return SUCCESS;
				}
			}
		}
	}

	/* Maybe it is sufficient just to release the ST-DMA lock... (if
	 * possible at all) At least, we should check if the lock could be
	 * released after the abort, in case it is kept due to some bug.
	 */
	maybe_release_dma_irq(instance);
	local_irq_restore(flags);

	/*
	 * Case 5 : If we reached this point, the command was not found in any of
	 *	    the queues.
	 *
	 * We probably reached this point because of an unlikely race condition
	 * between the command completing successfully and the abortion code,
	 * so we won't panic, but we will notify the user in case something really
	 * broke.
	 */

	printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO);

	return FAILED;
}

/*
 * Function : int NCR5380_reset (struct scsi_cmnd *cmd)
 *
 * Purpose : reset the SCSI bus.
* * Returns : SUCCESS or FAILURE * */ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) { struct Scsi_Host *instance = cmd->device->host; struct NCR5380_hostdata *hostdata = shost_priv(instance); int i; unsigned long flags; NCR5380_print_status(instance); /* get in phase */ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG))); /* assert RST */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST); udelay(40); /* reset NCR registers */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(TARGET_COMMAND_REG, 0); NCR5380_write(SELECT_ENABLE_REG, 0); /* ++roman: reset interrupt condition! otherwise no interrupts don't get * through anymore ... */ (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); /* After the reset, there are no more connected or disconnected commands * and no busy units; so clear the low-level status here to avoid * conflicts when the mid-level code tries to wake up the affected * commands! */ if (hostdata->issue_queue) dprintk(NDEBUG_ABORT, "scsi%d: reset aborted issued command(s)\n", H_NO(cmd)); if (hostdata->connected) dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd)); if (hostdata->disconnected_queue) dprintk(NDEBUG_ABORT, "scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd)); local_irq_save(flags); hostdata->issue_queue = NULL; hostdata->connected = NULL; hostdata->disconnected_queue = NULL; #ifdef SUPPORT_TAGS free_all_tags(hostdata); #endif for (i = 0; i < 8; ++i) hostdata->busy[i] = 0; #ifdef REAL_DMA hostdata->dma_len = 0; #endif maybe_release_dma_irq(instance); local_irq_restore(flags); return SUCCESS; }
atalax/linux
drivers/scsi/atari_NCR5380.c
C
gpl-2.0
91,108
/* * linux/fs/nfs/write.c * * Write file data over NFS. * * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de> */ #include <linux/types.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/file.h> #include <linux/writeback.h> #include <linux/swap.h> #include <linux/migrate.h> #include <linux/sunrpc/clnt.h> #include <linux/nfs_fs.h> #include <linux/nfs_mount.h> #include <linux/nfs_page.h> #include <linux/backing-dev.h> #include <linux/export.h> #include <asm/uaccess.h> #include "delegation.h" #include "internal.h" #include "iostat.h" #include "nfs4_fs.h" #include "fscache.h" #include "pnfs.h" #define NFSDBG_FACILITY NFSDBG_PAGECACHE #define MIN_POOL_WRITE (32) #define MIN_POOL_COMMIT (4) /* * Local function declarations */ static void nfs_redirty_request(struct nfs_page *req); static const struct rpc_call_ops nfs_write_common_ops; static const struct rpc_call_ops nfs_commit_ops; static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops; static const struct nfs_commit_completion_ops nfs_commit_completion_ops; static struct kmem_cache *nfs_wdata_cachep; static mempool_t *nfs_wdata_mempool; static struct kmem_cache *nfs_cdata_cachep; static mempool_t *nfs_commit_mempool; struct nfs_commit_data *nfs_commitdata_alloc(void) { struct nfs_commit_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOIO); if (p) { memset(p, 0, sizeof(*p)); INIT_LIST_HEAD(&p->pages); } return p; } EXPORT_SYMBOL_GPL(nfs_commitdata_alloc); void nfs_commit_free(struct nfs_commit_data *p) { mempool_free(p, nfs_commit_mempool); } EXPORT_SYMBOL_GPL(nfs_commit_free); struct nfs_write_header *nfs_writehdr_alloc(void) { struct nfs_write_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOIO); if (p) { struct nfs_pgio_header *hdr = &p->header; memset(p, 0, sizeof(*p)); INIT_LIST_HEAD(&hdr->pages); INIT_LIST_HEAD(&hdr->rpc_list); spin_lock_init(&hdr->lock); atomic_set(&hdr->refcnt, 0); hdr->verf = &p->verf; } return p; } 
EXPORT_SYMBOL_GPL(nfs_writehdr_alloc); static struct nfs_write_data *nfs_writedata_alloc(struct nfs_pgio_header *hdr, unsigned int pagecount) { struct nfs_write_data *data, *prealloc; prealloc = &container_of(hdr, struct nfs_write_header, header)->rpc_data; if (prealloc->header == NULL) data = prealloc; else data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) goto out; if (nfs_pgarray_set(&data->pages, pagecount)) { data->header = hdr; atomic_inc(&hdr->refcnt); } else { if (data != prealloc) kfree(data); data = NULL; } out: return data; } void nfs_writehdr_free(struct nfs_pgio_header *hdr) { struct nfs_write_header *whdr = container_of(hdr, struct nfs_write_header, header); mempool_free(whdr, nfs_wdata_mempool); } EXPORT_SYMBOL_GPL(nfs_writehdr_free); void nfs_writedata_release(struct nfs_write_data *wdata) { struct nfs_pgio_header *hdr = wdata->header; struct nfs_write_header *write_header = container_of(hdr, struct nfs_write_header, header); put_nfs_open_context(wdata->args.context); if (wdata->pages.pagevec != wdata->pages.page_array) kfree(wdata->pages.pagevec); if (wdata == &write_header->rpc_data) { wdata->header = NULL; wdata = NULL; } if (atomic_dec_and_test(&hdr->refcnt)) hdr->completion_ops->completion(hdr); /* Note: we only free the rpc_task after callbacks are done. 
* See the comment in rpc_free_task() for why */ kfree(wdata); } EXPORT_SYMBOL_GPL(nfs_writedata_release); static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error) { ctx->error = error; smp_wmb(); set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags); } static struct nfs_page * nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page) { struct nfs_page *req = NULL; if (PagePrivate(page)) req = (struct nfs_page *)page_private(page); else if (unlikely(PageSwapCache(page))) { struct nfs_page *freq, *t; /* Linearly search the commit list for the correct req */ list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) { if (freq->wb_page == page) { req = freq; break; } } } if (req) kref_get(&req->wb_kref); return req; } static struct nfs_page *nfs_page_find_request(struct page *page) { struct inode *inode = page_file_mapping(page)->host; struct nfs_page *req = NULL; spin_lock(&inode->i_lock); req = nfs_page_find_request_locked(NFS_I(inode), page); spin_unlock(&inode->i_lock); return req; } /* Adjust the file length if we're writing beyond the end */ static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count) { struct inode *inode = page_file_mapping(page)->host; loff_t end, i_size; pgoff_t end_index; spin_lock(&inode->i_lock); i_size = i_size_read(inode); end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; if (i_size > 0 && page_file_index(page) < end_index) goto out; end = page_file_offset(page) + ((loff_t)offset+count); if (i_size >= end) goto out; i_size_write(inode, end); nfs_inc_stats(inode, NFSIOS_EXTENDWRITE); out: spin_unlock(&inode->i_lock); } /* A writeback failed: mark the page as bad, and invalidate the page cache */ static void nfs_set_pageerror(struct page *page) { nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page)); } /* We can set the PG_uptodate flag if we see that a write request * covers the full page. 
*/ static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count) { if (PageUptodate(page)) return; if (base != 0) return; if (count != nfs_page_length(page)) return; SetPageUptodate(page); } static int wb_priority(struct writeback_control *wbc) { if (wbc->for_reclaim) return FLUSH_HIGHPRI | FLUSH_STABLE; if (wbc->for_kupdate || wbc->for_background) return FLUSH_LOWPRI | FLUSH_COND_STABLE; return FLUSH_COND_STABLE; } /* * NFS congestion control */ int nfs_congestion_kb; #define NFS_CONGESTION_ON_THRESH (nfs_congestion_kb >> (PAGE_SHIFT-10)) #define NFS_CONGESTION_OFF_THRESH \ (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2)) static void nfs_set_page_writeback(struct page *page) { struct nfs_server *nfss = NFS_SERVER(page_file_mapping(page)->host); int ret = test_set_page_writeback(page); WARN_ON_ONCE(ret != 0); if (atomic_long_inc_return(&nfss->writeback) > NFS_CONGESTION_ON_THRESH) { set_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); } } static void nfs_end_page_writeback(struct page *page) { struct inode *inode = page_file_mapping(page)->host; struct nfs_server *nfss = NFS_SERVER(inode); end_page_writeback(page); if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); } static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock) { struct inode *inode = page_file_mapping(page)->host; struct nfs_page *req; int ret; spin_lock(&inode->i_lock); for (;;) { req = nfs_page_find_request_locked(NFS_I(inode), page); if (req == NULL) break; if (nfs_lock_request(req)) break; /* Note: If we hold the page lock, as is the case in nfs_writepage, * then the call to nfs_lock_request() will always * succeed provided that someone hasn't already marked the * request as dirty (in which case we don't care). 
*/ spin_unlock(&inode->i_lock); if (!nonblock) ret = nfs_wait_on_request(req); else ret = -EAGAIN; nfs_release_request(req); if (ret != 0) return ERR_PTR(ret); spin_lock(&inode->i_lock); } spin_unlock(&inode->i_lock); return req; } /* * Find an associated nfs write request, and prepare to flush it out * May return an error if the user signalled nfs_wait_on_request(). */ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, struct page *page, bool nonblock) { struct nfs_page *req; int ret = 0; req = nfs_find_and_lock_request(page, nonblock); if (!req) goto out; ret = PTR_ERR(req); if (IS_ERR(req)) goto out; nfs_set_page_writeback(page); WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags)); ret = 0; if (!nfs_pageio_add_request(pgio, req)) { nfs_redirty_request(req); ret = pgio->pg_error; } out: return ret; } static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio) { struct inode *inode = page_file_mapping(page)->host; int ret; nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1); nfs_pageio_cond_complete(pgio, page_file_index(page)); ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE); if (ret == -EAGAIN) { redirty_page_for_writepage(wbc, page); ret = 0; } return ret; } /* * Write an mmapped page to the server. 
*/ static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc) { struct nfs_pageio_descriptor pgio; int err; NFS_PROTO(page_file_mapping(page)->host)->write_pageio_init(&pgio, page->mapping->host, wb_priority(wbc), &nfs_async_write_completion_ops); err = nfs_do_writepage(page, wbc, &pgio); nfs_pageio_complete(&pgio); if (err < 0) return err; if (pgio.pg_error < 0) return pgio.pg_error; return 0; } int nfs_writepage(struct page *page, struct writeback_control *wbc) { int ret; ret = nfs_writepage_locked(page, wbc); unlock_page(page); return ret; } static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data) { int ret; ret = nfs_do_writepage(page, wbc, data); unlock_page(page); return ret; } int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct inode *inode = mapping->host; unsigned long *bitlock = &NFS_I(inode)->flags; struct nfs_pageio_descriptor pgio; int err; /* Stop dirtying of new pages while we sync */ err = wait_on_bit_lock(bitlock, NFS_INO_FLUSHING, nfs_wait_bit_killable, TASK_KILLABLE); if (err) goto out_err; nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES); NFS_PROTO(inode)->write_pageio_init(&pgio, inode, wb_priority(wbc), &nfs_async_write_completion_ops); err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio); nfs_pageio_complete(&pgio); clear_bit_unlock(NFS_INO_FLUSHING, bitlock); smp_mb__after_clear_bit(); wake_up_bit(bitlock, NFS_INO_FLUSHING); if (err < 0) goto out_err; err = pgio.pg_error; if (err < 0) goto out_err; return 0; out_err: return err; } /* * Insert a write request into an inode */ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req) { struct nfs_inode *nfsi = NFS_I(inode); /* Lock the request! */ nfs_lock_request(req); spin_lock(&inode->i_lock); if (!nfsi->npages && NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE)) inode->i_version++; /* * Swap-space should not get truncated. 
Hence no need to plug the race * with invalidate/truncate. */ if (likely(!PageSwapCache(req->wb_page))) { set_bit(PG_MAPPED, &req->wb_flags); SetPagePrivate(req->wb_page); set_page_private(req->wb_page, (unsigned long)req); } nfsi->npages++; kref_get(&req->wb_kref); spin_unlock(&inode->i_lock); } /* * Remove a write request from an inode */ static void nfs_inode_remove_request(struct nfs_page *req) { struct inode *inode = req->wb_context->dentry->d_inode; struct nfs_inode *nfsi = NFS_I(inode); spin_lock(&inode->i_lock); if (likely(!PageSwapCache(req->wb_page))) { set_page_private(req->wb_page, 0); ClearPagePrivate(req->wb_page); clear_bit(PG_MAPPED, &req->wb_flags); } nfsi->npages--; spin_unlock(&inode->i_lock); nfs_release_request(req); } static void nfs_mark_request_dirty(struct nfs_page *req) { __set_page_dirty_nobuffers(req->wb_page); } #if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4) /** * nfs_request_add_commit_list - add request to a commit list * @req: pointer to a struct nfs_page * @dst: commit list head * @cinfo: holds list lock and accounting info * * This sets the PG_CLEAN bit, updates the cinfo count of * number of outstanding requests requiring a commit as well as * the MM page stats. * * The caller must _not_ hold the cinfo->lock, but must be * holding the nfs_page lock. 
*/ void nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst, struct nfs_commit_info *cinfo) { set_bit(PG_CLEAN, &(req)->wb_flags); spin_lock(cinfo->lock); nfs_list_add_request(req, dst); cinfo->mds->ncommit++; spin_unlock(cinfo->lock); if (!cinfo->dreq) { inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info, BDI_RECLAIMABLE); __mark_inode_dirty(req->wb_context->dentry->d_inode, I_DIRTY_DATASYNC); } } EXPORT_SYMBOL_GPL(nfs_request_add_commit_list); /** * nfs_request_remove_commit_list - Remove request from a commit list * @req: pointer to a nfs_page * @cinfo: holds list lock and accounting info * * This clears the PG_CLEAN bit, and updates the cinfo's count of * number of outstanding requests requiring a commit * It does not update the MM page stats. * * The caller _must_ hold the cinfo->lock and the nfs_page lock. */ void nfs_request_remove_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo) { if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) return; nfs_list_remove_request(req); cinfo->mds->ncommit--; } EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list); static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo, struct inode *inode) { cinfo->lock = &inode->i_lock; cinfo->mds = &NFS_I(inode)->commit_info; cinfo->ds = pnfs_get_ds_info(inode); cinfo->dreq = NULL; cinfo->completion_ops = &nfs_commit_completion_ops; } void nfs_init_cinfo(struct nfs_commit_info *cinfo, struct inode *inode, struct nfs_direct_req *dreq) { if (dreq) nfs_init_cinfo_from_dreq(cinfo, dreq); else nfs_init_cinfo_from_inode(cinfo, inode); } EXPORT_SYMBOL_GPL(nfs_init_cinfo); /* * Add a request to the inode's commit list. 
*/ void nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg, struct nfs_commit_info *cinfo) { if (pnfs_mark_request_commit(req, lseg, cinfo)) return; nfs_request_add_commit_list(req, &cinfo->mds->list, cinfo); } static void nfs_clear_page_commit(struct page *page) { dec_zone_page_state(page, NR_UNSTABLE_NFS); dec_bdi_stat(page_file_mapping(page)->backing_dev_info, BDI_RECLAIMABLE); } static void nfs_clear_request_commit(struct nfs_page *req) { if (test_bit(PG_CLEAN, &req->wb_flags)) { struct inode *inode = req->wb_context->dentry->d_inode; struct nfs_commit_info cinfo; nfs_init_cinfo_from_inode(&cinfo, inode); if (!pnfs_clear_request_commit(req, &cinfo)) { spin_lock(cinfo.lock); nfs_request_remove_commit_list(req, &cinfo); spin_unlock(cinfo.lock); } nfs_clear_page_commit(req->wb_page); } } static inline int nfs_write_need_commit(struct nfs_write_data *data) { if (data->verf.committed == NFS_DATA_SYNC) return data->header->lseg == NULL; return data->verf.committed != NFS_FILE_SYNC; } #else static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo, struct inode *inode) { } void nfs_init_cinfo(struct nfs_commit_info *cinfo, struct inode *inode, struct nfs_direct_req *dreq) { } void nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg, struct nfs_commit_info *cinfo) { } static void nfs_clear_request_commit(struct nfs_page *req) { } static inline int nfs_write_need_commit(struct nfs_write_data *data) { return 0; } #endif static void nfs_write_completion(struct nfs_pgio_header *hdr) { struct nfs_commit_info cinfo; unsigned long bytes = 0; if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) goto out; nfs_init_cinfo_from_inode(&cinfo, hdr->inode); while (!list_empty(&hdr->pages)) { struct nfs_page *req = nfs_list_entry(hdr->pages.next); bytes += req->wb_bytes; nfs_list_remove_request(req); if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes < bytes)) { nfs_set_pageerror(req->wb_page); 
nfs_context_set_write_error(req->wb_context, hdr->error); goto remove_req; } if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) { nfs_mark_request_dirty(req); goto next; } if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) { memcpy(&req->wb_verf, &hdr->verf->verifier, sizeof(req->wb_verf)); nfs_mark_request_commit(req, hdr->lseg, &cinfo); goto next; } remove_req: nfs_inode_remove_request(req); next: nfs_unlock_request(req); nfs_end_page_writeback(req->wb_page); nfs_release_request(req); } out: hdr->release(hdr); } #if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4) static unsigned long nfs_reqs_to_commit(struct nfs_commit_info *cinfo) { return cinfo->mds->ncommit; } /* cinfo->lock held by caller */ int nfs_scan_commit_list(struct list_head *src, struct list_head *dst, struct nfs_commit_info *cinfo, int max) { struct nfs_page *req, *tmp; int ret = 0; list_for_each_entry_safe(req, tmp, src, wb_list) { if (!nfs_lock_request(req)) continue; kref_get(&req->wb_kref); if (cond_resched_lock(cinfo->lock)) list_safe_reset_next(req, tmp, wb_list); nfs_request_remove_commit_list(req, cinfo); nfs_list_add_request(req, dst); ret++; if ((ret == max) && !cinfo->dreq) break; } return ret; } /* * nfs_scan_commit - Scan an inode for commit requests * @inode: NFS inode to scan * @dst: mds destination list * @cinfo: mds and ds lists of reqs ready to commit * * Moves requests from the inode's 'commit' request list. * The requests are *not* checked to ensure that they form a contiguous set. 
*/ int nfs_scan_commit(struct inode *inode, struct list_head *dst, struct nfs_commit_info *cinfo) { int ret = 0; spin_lock(cinfo->lock); if (cinfo->mds->ncommit > 0) { const int max = INT_MAX; ret = nfs_scan_commit_list(&cinfo->mds->list, dst, cinfo, max); ret += pnfs_scan_commit_lists(inode, cinfo, max - ret); } spin_unlock(cinfo->lock); return ret; } #else static unsigned long nfs_reqs_to_commit(struct nfs_commit_info *cinfo) { return 0; } int nfs_scan_commit(struct inode *inode, struct list_head *dst, struct nfs_commit_info *cinfo) { return 0; } #endif /* * Search for an existing write request, and attempt to update * it to reflect a new dirty region on a given page. * * If the attempt fails, then the existing request is flushed out * to disk. */ static struct nfs_page *nfs_try_to_update_request(struct inode *inode, struct page *page, unsigned int offset, unsigned int bytes) { struct nfs_page *req; unsigned int rqend; unsigned int end; int error; if (!PagePrivate(page)) return NULL; end = offset + bytes; spin_lock(&inode->i_lock); for (;;) { req = nfs_page_find_request_locked(NFS_I(inode), page); if (req == NULL) goto out_unlock; rqend = req->wb_offset + req->wb_bytes; /* * Tell the caller to flush out the request if * the offsets are non-contiguous. * Note: nfs_flush_incompatible() will already * have flushed out requests having wrong owners. */ if (offset > rqend || end < req->wb_offset) goto out_flushme; if (nfs_lock_request(req)) break; /* The request is locked, so wait and then retry */ spin_unlock(&inode->i_lock); error = nfs_wait_on_request(req); nfs_release_request(req); if (error != 0) goto out_err; spin_lock(&inode->i_lock); } /* Okay, the request matches. 
Update the region */ if (offset < req->wb_offset) { req->wb_offset = offset; req->wb_pgbase = offset; } if (end > rqend) req->wb_bytes = end - req->wb_offset; else req->wb_bytes = rqend - req->wb_offset; out_unlock: spin_unlock(&inode->i_lock); if (req) nfs_clear_request_commit(req); return req; out_flushme: spin_unlock(&inode->i_lock); nfs_release_request(req); error = nfs_wb_page(inode, page); out_err: return ERR_PTR(error); } /* * Try to update an existing write request, or create one if there is none. * * Note: Should always be called with the Page Lock held to prevent races * if we have to add a new request. Also assumes that the caller has * already called nfs_flush_incompatible() if necessary. */ static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx, struct page *page, unsigned int offset, unsigned int bytes) { struct inode *inode = page_file_mapping(page)->host; struct nfs_page *req; req = nfs_try_to_update_request(inode, page, offset, bytes); if (req != NULL) goto out; req = nfs_create_request(ctx, inode, page, offset, bytes); if (IS_ERR(req)) goto out; nfs_inode_add_request(inode, req); out: return req; } static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page, unsigned int offset, unsigned int count) { struct nfs_page *req; req = nfs_setup_write_request(ctx, page, offset, count); if (IS_ERR(req)) return PTR_ERR(req); /* Update file length */ nfs_grow_file(page, offset, count); nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes); nfs_mark_request_dirty(req); nfs_unlock_and_release_request(req); return 0; } int nfs_flush_incompatible(struct file *file, struct page *page) { struct nfs_open_context *ctx = nfs_file_open_context(file); struct nfs_lock_context *l_ctx; struct nfs_page *req; int do_flush, status; /* * Look for a request corresponding to this page. If there * is one, and it belongs to another file, we flush it out * before we try to copy anything into the page. 
Do this * due to the lack of an ACCESS-type call in NFSv2. * Also do the same if we find a request from an existing * dropped page. */ do { req = nfs_page_find_request(page); if (req == NULL) return 0; l_ctx = req->wb_lock_context; do_flush = req->wb_page != page || req->wb_context != ctx; if (l_ctx) { do_flush |= l_ctx->lockowner.l_owner != current->files || l_ctx->lockowner.l_pid != current->tgid; } nfs_release_request(req); if (!do_flush) return 0; status = nfs_wb_page(page_file_mapping(page)->host, page); } while (status == 0); return status; } /* * If the page cache is marked as unsafe or invalid, then we can't rely on * the PageUptodate() flag. In this case, we will need to turn off * write optimisations that depend on the page contents being correct. */ static bool nfs_write_pageuptodate(struct page *page, struct inode *inode) { if (nfs_have_delegated_attributes(inode)) goto out; if (NFS_I(inode)->cache_validity & (NFS_INO_INVALID_DATA|NFS_INO_REVAL_PAGECACHE)) return false; out: return PageUptodate(page) != 0; } /* * Update and possibly write a cached page of an NFS file. * * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad * things with a page scheduled for an RPC call (e.g. invalidate it). */ int nfs_updatepage(struct file *file, struct page *page, unsigned int offset, unsigned int count) { struct nfs_open_context *ctx = nfs_file_open_context(file); struct inode *inode = page_file_mapping(page)->host; int status = 0; nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE); dprintk("NFS: nfs_updatepage(%s/%s %d@%lld)\n", file->f_path.dentry->d_parent->d_name.name, file->f_path.dentry->d_name.name, count, (long long)(page_file_offset(page) + offset)); /* If we're not using byte range locks, and we know the page * is up to date, it may be more efficient to extend the write * to cover the entire page in order to avoid fragmentation * inefficiencies. 
*/ if (nfs_write_pageuptodate(page, inode) && inode->i_flock == NULL && !(file->f_flags & O_DSYNC)) { count = max(count + offset, nfs_page_length(page)); offset = 0; } status = nfs_writepage_setup(ctx, page, offset, count); if (status < 0) nfs_set_pageerror(page); else __set_page_dirty_nobuffers(page); dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n", status, (long long)i_size_read(inode)); return status; } static int flush_task_priority(int how) { switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) { case FLUSH_HIGHPRI: return RPC_PRIORITY_HIGH; case FLUSH_LOWPRI: return RPC_PRIORITY_LOW; } return RPC_PRIORITY_NORMAL; } int nfs_initiate_write(struct rpc_clnt *clnt, struct nfs_write_data *data, const struct rpc_call_ops *call_ops, int how, int flags) { struct inode *inode = data->header->inode; int priority = flush_task_priority(how); struct rpc_task *task; struct rpc_message msg = { .rpc_argp = &data->args, .rpc_resp = &data->res, .rpc_cred = data->header->cred, }; struct rpc_task_setup task_setup_data = { .rpc_client = clnt, .task = &data->task, .rpc_message = &msg, .callback_ops = call_ops, .callback_data = data, .workqueue = nfsiod_workqueue, .flags = RPC_TASK_ASYNC | flags, .priority = priority, }; int ret = 0; /* Set up the initial task struct. */ NFS_PROTO(inode)->write_setup(data, &msg); dprintk("NFS: %5u initiated write call " "(req %s/%lld, %u bytes @ offset %llu)\n", data->task.tk_pid, inode->i_sb->s_id, (long long)NFS_FILEID(inode), data->args.count, (unsigned long long)data->args.offset); task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) { ret = PTR_ERR(task); goto out; } if (how & FLUSH_SYNC) { ret = rpc_wait_for_completion_task(task); if (ret == 0) ret = task->tk_status; } rpc_put_task(task); out: return ret; } EXPORT_SYMBOL_GPL(nfs_initiate_write); /* * Set up the argument/result storage required for the RPC call. 
*/ static void nfs_write_rpcsetup(struct nfs_write_data *data, unsigned int count, unsigned int offset, int how, struct nfs_commit_info *cinfo) { struct nfs_page *req = data->header->req; /* Set up the RPC argument and reply structs * NB: take care not to mess about with data->commit et al. */ data->args.fh = NFS_FH(data->header->inode); data->args.offset = req_offset(req) + offset; /* pnfs_set_layoutcommit needs this */ data->mds_offset = data->args.offset; data->args.pgbase = req->wb_pgbase + offset; data->args.pages = data->pages.pagevec; data->args.count = count; data->args.context = get_nfs_open_context(req->wb_context); data->args.lock_context = req->wb_lock_context; data->args.stable = NFS_UNSTABLE; switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) { case 0: break; case FLUSH_COND_STABLE: if (nfs_reqs_to_commit(cinfo)) break; default: data->args.stable = NFS_FILE_SYNC; } data->res.fattr = &data->fattr; data->res.count = count; data->res.verf = &data->verf; nfs_fattr_init(&data->fattr); } static int nfs_do_write(struct nfs_write_data *data, const struct rpc_call_ops *call_ops, int how) { struct inode *inode = data->header->inode; return nfs_initiate_write(NFS_CLIENT(inode), data, call_ops, how, 0); } static int nfs_do_multiple_writes(struct list_head *head, const struct rpc_call_ops *call_ops, int how) { struct nfs_write_data *data; int ret = 0; while (!list_empty(head)) { int ret2; data = list_first_entry(head, struct nfs_write_data, list); list_del_init(&data->list); ret2 = nfs_do_write(data, call_ops, how); if (ret == 0) ret = ret2; } return ret; } /* If a nfs_flush_* function fails, it should remove reqs from @head and * call this on each, which will prepare them to be retried on next * writeback using standard nfs. 
*/ static void nfs_redirty_request(struct nfs_page *req) { nfs_mark_request_dirty(req); nfs_unlock_request(req); nfs_end_page_writeback(req->wb_page); nfs_release_request(req); } static void nfs_async_write_error(struct list_head *head) { struct nfs_page *req; while (!list_empty(head)) { req = nfs_list_entry(head->next); nfs_list_remove_request(req); nfs_redirty_request(req); } } static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = { .error_cleanup = nfs_async_write_error, .completion = nfs_write_completion, }; static void nfs_flush_error(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr) { set_bit(NFS_IOHDR_REDO, &hdr->flags); while (!list_empty(&hdr->rpc_list)) { struct nfs_write_data *data = list_first_entry(&hdr->rpc_list, struct nfs_write_data, list); list_del(&data->list); nfs_writedata_release(data); } desc->pg_completion_ops->error_cleanup(&desc->pg_list); } /* * Generate multiple small requests to write out a single * contiguous dirty area on one page. 
*/ static int nfs_flush_multi(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr) { struct nfs_page *req = hdr->req; struct page *page = req->wb_page; struct nfs_write_data *data; size_t wsize = desc->pg_bsize, nbytes; unsigned int offset; int requests = 0; struct nfs_commit_info cinfo; nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq); if ((desc->pg_ioflags & FLUSH_COND_STABLE) && (desc->pg_moreio || nfs_reqs_to_commit(&cinfo) || desc->pg_count > wsize)) desc->pg_ioflags &= ~FLUSH_COND_STABLE; offset = 0; nbytes = desc->pg_count; do { size_t len = min(nbytes, wsize); data = nfs_writedata_alloc(hdr, 1); if (!data) { nfs_flush_error(desc, hdr); return -ENOMEM; } data->pages.pagevec[0] = page; nfs_write_rpcsetup(data, len, offset, desc->pg_ioflags, &cinfo); list_add(&data->list, &hdr->rpc_list); requests++; nbytes -= len; offset += len; } while (nbytes != 0); nfs_list_remove_request(req); nfs_list_add_request(req, &hdr->pages); desc->pg_rpc_callops = &nfs_write_common_ops; return 0; } /* * Create an RPC task for the given write request and kick it. * The page must have been locked by the caller. * * It may happen that the page we're passed is not marked dirty. * This is the case if nfs_updatepage detects a conflicting request * that has been written but not committed. 
*/ static int nfs_flush_one(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr) { struct nfs_page *req; struct page **pages; struct nfs_write_data *data; struct list_head *head = &desc->pg_list; struct nfs_commit_info cinfo; data = nfs_writedata_alloc(hdr, nfs_page_array_len(desc->pg_base, desc->pg_count)); if (!data) { nfs_flush_error(desc, hdr); return -ENOMEM; } nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq); pages = data->pages.pagevec; while (!list_empty(head)) { req = nfs_list_entry(head->next); nfs_list_remove_request(req); nfs_list_add_request(req, &hdr->pages); *pages++ = req->wb_page; } if ((desc->pg_ioflags & FLUSH_COND_STABLE) && (desc->pg_moreio || nfs_reqs_to_commit(&cinfo))) desc->pg_ioflags &= ~FLUSH_COND_STABLE; /* Set up the argument struct */ nfs_write_rpcsetup(data, desc->pg_count, 0, desc->pg_ioflags, &cinfo); list_add(&data->list, &hdr->rpc_list); desc->pg_rpc_callops = &nfs_write_common_ops; return 0; } int nfs_generic_flush(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr) { if (desc->pg_bsize < PAGE_CACHE_SIZE) return nfs_flush_multi(desc, hdr); return nfs_flush_one(desc, hdr); } EXPORT_SYMBOL_GPL(nfs_generic_flush); static int nfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc) { struct nfs_write_header *whdr; struct nfs_pgio_header *hdr; int ret; whdr = nfs_writehdr_alloc(); if (!whdr) { desc->pg_completion_ops->error_cleanup(&desc->pg_list); return -ENOMEM; } hdr = &whdr->header; nfs_pgheader_init(desc, hdr, nfs_writehdr_free); atomic_inc(&hdr->refcnt); ret = nfs_generic_flush(desc, hdr); if (ret == 0) ret = nfs_do_multiple_writes(&hdr->rpc_list, desc->pg_rpc_callops, desc->pg_ioflags); if (atomic_dec_and_test(&hdr->refcnt)) hdr->completion_ops->completion(hdr); return ret; } static const struct nfs_pageio_ops nfs_pageio_write_ops = { .pg_test = nfs_generic_pg_test, .pg_doio = nfs_generic_pg_writepages, }; void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int 
ioflags, const struct nfs_pgio_completion_ops *compl_ops) { nfs_pageio_init(pgio, inode, &nfs_pageio_write_ops, compl_ops, NFS_SERVER(inode)->wsize, ioflags); } EXPORT_SYMBOL_GPL(nfs_pageio_init_write); void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio) { pgio->pg_ops = &nfs_pageio_write_ops; pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize; } EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds); void nfs_write_prepare(struct rpc_task *task, void *calldata) { struct nfs_write_data *data = calldata; NFS_PROTO(data->header->inode)->write_rpc_prepare(task, data); if (unlikely(test_bit(NFS_CONTEXT_BAD, &data->args.context->flags))) rpc_exit(task, -EIO); } void nfs_commit_prepare(struct rpc_task *task, void *calldata) { struct nfs_commit_data *data = calldata; NFS_PROTO(data->inode)->commit_rpc_prepare(task, data); } /* * Handle a write reply that flushes a whole page. * * FIXME: There is an inherent race with invalidate_inode_pages and * writebacks since the page->count is kept > 1 for as long * as the page has a write request pending. 
*/ static void nfs_writeback_done_common(struct rpc_task *task, void *calldata) { struct nfs_write_data *data = calldata; nfs_writeback_done(task, data); } static void nfs_writeback_release_common(void *calldata) { struct nfs_write_data *data = calldata; struct nfs_pgio_header *hdr = data->header; int status = data->task.tk_status; if ((status >= 0) && nfs_write_need_commit(data)) { spin_lock(&hdr->lock); if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) ; /* Do nothing */ else if (!test_and_set_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) memcpy(hdr->verf, &data->verf, sizeof(*hdr->verf)); else if (memcmp(hdr->verf, &data->verf, sizeof(*hdr->verf))) set_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags); spin_unlock(&hdr->lock); } nfs_writedata_release(data); } static const struct rpc_call_ops nfs_write_common_ops = { .rpc_call_prepare = nfs_write_prepare, .rpc_call_done = nfs_writeback_done_common, .rpc_release = nfs_writeback_release_common, }; /* * This function is called when the WRITE call is complete. */ void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data) { struct nfs_writeargs *argp = &data->args; struct nfs_writeres *resp = &data->res; struct inode *inode = data->header->inode; int status; dprintk("NFS: %5u nfs_writeback_done (status %d)\n", task->tk_pid, task->tk_status); /* * ->write_done will attempt to use post-op attributes to detect * conflicting writes by other clients. A strict interpretation * of close-to-open would allow us to continue caching even if * another writer had changed the file, but some applications * depend on tighter cache coherency when writing. */ status = NFS_PROTO(inode)->write_done(task, data); if (status != 0) return; nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, resp->count); #if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4) if (resp->verf->committed < argp->stable && task->tk_status >= 0) { /* We tried a write call, but the server did not * commit data to stable storage even though we * requested it. 
* Note: There is a known bug in Tru64 < 5.0 in which * the server reports NFS_DATA_SYNC, but performs * NFS_FILE_SYNC. We therefore implement this checking * as a dprintk() in order to avoid filling syslog. */ static unsigned long complain; /* Note this will print the MDS for a DS write */ if (time_before(complain, jiffies)) { dprintk("NFS: faulty NFS server %s:" " (committed = %d) != (stable = %d)\n", NFS_SERVER(inode)->nfs_client->cl_hostname, resp->verf->committed, argp->stable); complain = jiffies + 300 * HZ; } } #endif if (task->tk_status < 0) nfs_set_pgio_error(data->header, task->tk_status, argp->offset); else if (resp->count < argp->count) { static unsigned long complain; /* This a short write! */ nfs_inc_stats(inode, NFSIOS_SHORTWRITE); /* Has the server at least made some progress? */ if (resp->count == 0) { if (time_before(complain, jiffies)) { printk(KERN_WARNING "NFS: Server wrote zero bytes, expected %u.\n", argp->count); complain = jiffies + 300 * HZ; } nfs_set_pgio_error(data->header, -EIO, argp->offset); task->tk_status = -EIO; return; } /* Was this an NFSv2 write or an NFSv3 stable write? */ if (resp->verf->committed != NFS_UNSTABLE) { /* Resend from where the server left off */ data->mds_offset += resp->count; argp->offset += resp->count; argp->pgbase += resp->count; argp->count -= resp->count; } else { /* Resend as a stable write in order to avoid * headaches in the case of a server crash. */ argp->stable = NFS_FILE_SYNC; } rpc_restart_call_prepare(task); } } #if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4) static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait) { int ret; if (!test_and_set_bit(NFS_INO_COMMIT, &nfsi->flags)) return 1; if (!may_wait) return 0; ret = out_of_line_wait_on_bit_lock(&nfsi->flags, NFS_INO_COMMIT, nfs_wait_bit_killable, TASK_KILLABLE); return (ret < 0) ? 
ret : 1; } static void nfs_commit_clear_lock(struct nfs_inode *nfsi) { clear_bit(NFS_INO_COMMIT, &nfsi->flags); smp_mb__after_clear_bit(); wake_up_bit(&nfsi->flags, NFS_INO_COMMIT); } void nfs_commitdata_release(struct nfs_commit_data *data) { put_nfs_open_context(data->context); nfs_commit_free(data); } EXPORT_SYMBOL_GPL(nfs_commitdata_release); int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data, const struct rpc_call_ops *call_ops, int how, int flags) { struct rpc_task *task; int priority = flush_task_priority(how); struct rpc_message msg = { .rpc_argp = &data->args, .rpc_resp = &data->res, .rpc_cred = data->cred, }; struct rpc_task_setup task_setup_data = { .task = &data->task, .rpc_client = clnt, .rpc_message = &msg, .callback_ops = call_ops, .callback_data = data, .workqueue = nfsiod_workqueue, .flags = RPC_TASK_ASYNC | flags, .priority = priority, }; /* Set up the initial task struct. */ NFS_PROTO(data->inode)->commit_setup(data, &msg); dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid); task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); if (how & FLUSH_SYNC) rpc_wait_for_completion_task(task); rpc_put_task(task); return 0; } EXPORT_SYMBOL_GPL(nfs_initiate_commit); /* * Set up the argument/result storage required for the RPC call. */ void nfs_init_commit(struct nfs_commit_data *data, struct list_head *head, struct pnfs_layout_segment *lseg, struct nfs_commit_info *cinfo) { struct nfs_page *first = nfs_list_entry(head->next); struct inode *inode = first->wb_context->dentry->d_inode; /* Set up the RPC argument and reply structs * NB: take care not to mess about with data->commit et al. 
*/ list_splice_init(head, &data->pages); data->inode = inode; data->cred = first->wb_context->cred; data->lseg = lseg; /* reference transferred */ data->mds_ops = &nfs_commit_ops; data->completion_ops = cinfo->completion_ops; data->dreq = cinfo->dreq; data->args.fh = NFS_FH(data->inode); /* Note: we always request a commit of the entire inode */ data->args.offset = 0; data->args.count = 0; data->context = get_nfs_open_context(first->wb_context); data->res.fattr = &data->fattr; data->res.verf = &data->verf; nfs_fattr_init(&data->fattr); } EXPORT_SYMBOL_GPL(nfs_init_commit); void nfs_retry_commit(struct list_head *page_list, struct pnfs_layout_segment *lseg, struct nfs_commit_info *cinfo) { struct nfs_page *req; while (!list_empty(page_list)) { req = nfs_list_entry(page_list->next); nfs_list_remove_request(req); nfs_mark_request_commit(req, lseg, cinfo); if (!cinfo->dreq) { dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); dec_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info, BDI_RECLAIMABLE); } nfs_unlock_and_release_request(req); } } EXPORT_SYMBOL_GPL(nfs_retry_commit); /* * Commit dirty pages */ static int nfs_commit_list(struct inode *inode, struct list_head *head, int how, struct nfs_commit_info *cinfo) { struct nfs_commit_data *data; data = nfs_commitdata_alloc(); if (!data) goto out_bad; /* Set up the argument struct */ nfs_init_commit(data, head, NULL, cinfo); atomic_inc(&cinfo->mds->rpcs_out); return nfs_initiate_commit(NFS_CLIENT(inode), data, data->mds_ops, how, 0); out_bad: nfs_retry_commit(head, NULL, cinfo); cinfo->completion_ops->error_cleanup(NFS_I(inode)); return -ENOMEM; } /* * COMMIT call returned */ static void nfs_commit_done(struct rpc_task *task, void *calldata) { struct nfs_commit_data *data = calldata; dprintk("NFS: %5u nfs_commit_done (status %d)\n", task->tk_pid, task->tk_status); /* Call the NFS version-specific code */ NFS_PROTO(data->inode)->commit_done(task, data); } static void nfs_commit_release_pages(struct 
nfs_commit_data *data) { struct nfs_page *req; int status = data->task.tk_status; struct nfs_commit_info cinfo; while (!list_empty(&data->pages)) { req = nfs_list_entry(data->pages.next); nfs_list_remove_request(req); nfs_clear_page_commit(req->wb_page); dprintk("NFS: commit (%s/%lld %d@%lld)", req->wb_context->dentry->d_sb->s_id, (long long)NFS_FILEID(req->wb_context->dentry->d_inode), req->wb_bytes, (long long)req_offset(req)); if (status < 0) { nfs_context_set_write_error(req->wb_context, status); nfs_inode_remove_request(req); dprintk(", error = %d\n", status); goto next; } /* Okay, COMMIT succeeded, apparently. Check the verifier * returned by the server against all stored verfs. */ if (!memcmp(&req->wb_verf, &data->verf.verifier, sizeof(req->wb_verf))) { /* We have a match */ nfs_inode_remove_request(req); dprintk(" OK\n"); goto next; } /* We have a mismatch. Write the page again */ dprintk(" mismatch\n"); nfs_mark_request_dirty(req); set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags); next: nfs_unlock_and_release_request(req); } nfs_init_cinfo(&cinfo, data->inode, data->dreq); if (atomic_dec_and_test(&cinfo.mds->rpcs_out)) nfs_commit_clear_lock(NFS_I(data->inode)); } static void nfs_commit_release(void *calldata) { struct nfs_commit_data *data = calldata; data->completion_ops->completion(data); nfs_commitdata_release(calldata); } static const struct rpc_call_ops nfs_commit_ops = { .rpc_call_prepare = nfs_commit_prepare, .rpc_call_done = nfs_commit_done, .rpc_release = nfs_commit_release, }; static const struct nfs_commit_completion_ops nfs_commit_completion_ops = { .completion = nfs_commit_release_pages, .error_cleanup = nfs_commit_clear_lock, }; int nfs_generic_commit_list(struct inode *inode, struct list_head *head, int how, struct nfs_commit_info *cinfo) { int status; status = pnfs_commit_list(inode, head, how, cinfo); if (status == PNFS_NOT_ATTEMPTED) status = nfs_commit_list(inode, head, how, cinfo); return status; } int nfs_commit_inode(struct 
inode *inode, int how) { LIST_HEAD(head); struct nfs_commit_info cinfo; int may_wait = how & FLUSH_SYNC; int res; res = nfs_commit_set_lock(NFS_I(inode), may_wait); if (res <= 0) goto out_mark_dirty; nfs_init_cinfo_from_inode(&cinfo, inode); res = nfs_scan_commit(inode, &head, &cinfo); if (res) { int error; error = nfs_generic_commit_list(inode, &head, how, &cinfo); if (error < 0) return error; if (!may_wait) goto out_mark_dirty; error = wait_on_bit(&NFS_I(inode)->flags, NFS_INO_COMMIT, nfs_wait_bit_killable, TASK_KILLABLE); if (error < 0) return error; } else nfs_commit_clear_lock(NFS_I(inode)); return res; /* Note: If we exit without ensuring that the commit is complete, * we must mark the inode as dirty. Otherwise, future calls to * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure * that the data is on the disk. */ out_mark_dirty: __mark_inode_dirty(inode, I_DIRTY_DATASYNC); return res; } static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc) { struct nfs_inode *nfsi = NFS_I(inode); int flags = FLUSH_SYNC; int ret = 0; /* no commits means nothing needs to be done */ if (!nfsi->commit_info.ncommit) return ret; if (wbc->sync_mode == WB_SYNC_NONE) { /* Don't commit yet if this is a non-blocking flush and there * are a lot of outstanding writes for this mapping. 
*/ if (nfsi->commit_info.ncommit <= (nfsi->npages >> 1)) goto out_mark_dirty; /* don't wait for the COMMIT response */ flags = 0; } ret = nfs_commit_inode(inode, flags); if (ret >= 0) { if (wbc->sync_mode == WB_SYNC_NONE) { if (ret < wbc->nr_to_write) wbc->nr_to_write -= ret; else wbc->nr_to_write = 0; } return 0; } out_mark_dirty: __mark_inode_dirty(inode, I_DIRTY_DATASYNC); return ret; } #else static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc) { return 0; } #endif int nfs_write_inode(struct inode *inode, struct writeback_control *wbc) { return nfs_commit_unstable_pages(inode, wbc); } EXPORT_SYMBOL_GPL(nfs_write_inode); /* * flush the inode to disk. */ int nfs_wb_all(struct inode *inode) { struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = LONG_MAX, .range_start = 0, .range_end = LLONG_MAX, }; return sync_inode(inode, &wbc); } EXPORT_SYMBOL_GPL(nfs_wb_all); int nfs_wb_page_cancel(struct inode *inode, struct page *page) { struct nfs_page *req; int ret = 0; for (;;) { wait_on_page_writeback(page); req = nfs_page_find_request(page); if (req == NULL) break; if (nfs_lock_request(req)) { nfs_clear_request_commit(req); nfs_inode_remove_request(req); /* * In case nfs_inode_remove_request has marked the * page as being dirty */ cancel_dirty_page(page, PAGE_CACHE_SIZE); nfs_unlock_and_release_request(req); break; } ret = nfs_wait_on_request(req); nfs_release_request(req); if (ret < 0) break; } return ret; } /* * Write back all requests on one page - we do this before reading it. 
*/ int nfs_wb_page(struct inode *inode, struct page *page) { loff_t range_start = page_file_offset(page); loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = 0, .range_start = range_start, .range_end = range_end, }; int ret; for (;;) { wait_on_page_writeback(page); if (clear_page_dirty_for_io(page)) { ret = nfs_writepage_locked(page, &wbc); if (ret < 0) goto out_error; continue; } if (!PagePrivate(page)) break; ret = nfs_commit_inode(inode, FLUSH_SYNC); if (ret < 0) goto out_error; } return 0; out_error: return ret; } #ifdef CONFIG_MIGRATION int nfs_migrate_page(struct address_space *mapping, struct page *newpage, struct page *page, enum migrate_mode mode) { /* * If PagePrivate is set, then the page is currently associated with * an in-progress read or write request. Don't try to migrate it. * * FIXME: we could do this in principle, but we'll need a way to ensure * that we can safely release the inode reference while holding * the page lock. */ if (PagePrivate(page)) return -EBUSY; if (!nfs_fscache_release_page(page, GFP_KERNEL)) return -EBUSY; return migrate_page(mapping, newpage, page, mode); } #endif int __init nfs_init_writepagecache(void) { nfs_wdata_cachep = kmem_cache_create("nfs_write_data", sizeof(struct nfs_write_header), 0, SLAB_HWCACHE_ALIGN, NULL); if (nfs_wdata_cachep == NULL) return -ENOMEM; nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE, nfs_wdata_cachep); if (nfs_wdata_mempool == NULL) goto out_destroy_write_cache; nfs_cdata_cachep = kmem_cache_create("nfs_commit_data", sizeof(struct nfs_commit_data), 0, SLAB_HWCACHE_ALIGN, NULL); if (nfs_cdata_cachep == NULL) goto out_destroy_write_mempool; nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT, nfs_cdata_cachep); if (nfs_commit_mempool == NULL) goto out_destroy_commit_cache; /* * NFS congestion size, scale with available memory. 
* * 64MB: 8192k * 128MB: 11585k * 256MB: 16384k * 512MB: 23170k * 1GB: 32768k * 2GB: 46340k * 4GB: 65536k * 8GB: 92681k * 16GB: 131072k * * This allows larger machines to have larger/more transfers. * Limit the default to 256M */ nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10); if (nfs_congestion_kb > 256*1024) nfs_congestion_kb = 256*1024; return 0; out_destroy_commit_cache: kmem_cache_destroy(nfs_cdata_cachep); out_destroy_write_mempool: mempool_destroy(nfs_wdata_mempool); out_destroy_write_cache: kmem_cache_destroy(nfs_wdata_cachep); return -ENOMEM; } void nfs_destroy_writepagecache(void) { mempool_destroy(nfs_commit_mempool); kmem_cache_destroy(nfs_cdata_cachep); mempool_destroy(nfs_wdata_mempool); kmem_cache_destroy(nfs_wdata_cachep); }
gchild320/shamu-old
fs/nfs/write.c
C
gpl-2.0
49,247
/* user_defined.c: user defined key type * * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/seq_file.h> #include <linux/err.h> #include <keys/user-type.h> #include <asm/uaccess.h> #include "internal.h" static int logon_vet_description(const char *desc); /* * user defined keys take an arbitrary string as the description and an * arbitrary blob of data as the payload */ struct key_type key_type_user = { .name = "user", .instantiate = user_instantiate, .update = user_update, .match = user_match, .revoke = user_revoke, .destroy = user_destroy, .describe = user_describe, .read = user_read, }; EXPORT_SYMBOL_GPL(key_type_user); /* * This key type is essentially the same as key_type_user, but it does * not define a .read op. This is suitable for storing username and * password pairs in the keyring that you do not want to be readable * from userspace. 
*/ struct key_type key_type_logon = { .name = "logon", .instantiate = user_instantiate, .update = user_update, .match = user_match, .revoke = user_revoke, .destroy = user_destroy, .describe = user_describe, .vet_description = logon_vet_description, }; EXPORT_SYMBOL_GPL(key_type_logon); /* * instantiate a user defined key */ int user_instantiate(struct key *key, const void *data, size_t datalen) { struct user_key_payload *upayload; int ret; ret = -EINVAL; if (datalen <= 0 || datalen > 32767 || !data) goto error; ret = key_payload_reserve(key, datalen); if (ret < 0) goto error; ret = -ENOMEM; upayload = kmalloc(sizeof(*upayload) + datalen, GFP_KERNEL); if (!upayload) goto error; /* attach the data */ upayload->datalen = datalen; memcpy(upayload->data, data, datalen); rcu_assign_keypointer(key, upayload); ret = 0; error: return ret; } EXPORT_SYMBOL_GPL(user_instantiate); /* * update a user defined key * - the key's semaphore is write-locked */ int user_update(struct key *key, const void *data, size_t datalen) { struct user_key_payload *upayload, *zap; int ret; ret = -EINVAL; if (datalen <= 0 || datalen > 32767 || !data) goto error; /* construct a replacement payload */ ret = -ENOMEM; upayload = kmalloc(sizeof(*upayload) + datalen, GFP_KERNEL); if (!upayload) goto error; upayload->datalen = datalen; memcpy(upayload->data, data, datalen); /* check the quota and attach the new data */ zap = upayload; ret = key_payload_reserve(key, datalen); if (ret == 0) { /* attach the new data, displacing the old */ zap = key->payload.data; rcu_assign_keypointer(key, upayload); key->expiry = 0; } if (zap) kfree_rcu(zap, rcu); error: return ret; } EXPORT_SYMBOL_GPL(user_update); /* * match users on their name */ int user_match(const struct key *key, const void *description) { return strcmp(key->description, description) == 0; } EXPORT_SYMBOL_GPL(user_match); /* * dispose of the links from a revoked keyring * - called with the key sem write-locked */ void user_revoke(struct key *key) { 
struct user_key_payload *upayload = key->payload.data; /* clear the quota */ key_payload_reserve(key, 0); if (upayload) { rcu_assign_keypointer(key, NULL); kfree_rcu(upayload, rcu); } } EXPORT_SYMBOL(user_revoke); /* * dispose of the data dangling from the corpse of a user key */ void user_destroy(struct key *key) { struct user_key_payload *upayload = key->payload.data; kfree(upayload); } EXPORT_SYMBOL_GPL(user_destroy); /* * describe the user key */ void user_describe(const struct key *key, struct seq_file *m) { seq_puts(m, key->description); if (key_is_instantiated(key)) seq_printf(m, ": %u", key->datalen); } EXPORT_SYMBOL_GPL(user_describe); /* * read the key data * - the key's semaphore is read-locked */ long user_read(const struct key *key, char __user *buffer, size_t buflen) { struct user_key_payload *upayload; long ret; upayload = rcu_dereference_key(key); ret = upayload->datalen; /* we can return the data as is */ if (buffer && buflen > 0) { if (buflen > upayload->datalen) buflen = upayload->datalen; if (copy_to_user(buffer, upayload->data, buflen) != 0) ret = -EFAULT; } return ret; } EXPORT_SYMBOL_GPL(user_read); /* Vet the description for a "logon" key */ static int logon_vet_description(const char *desc) { char *p; /* require a "qualified" description string */ p = strchr(desc, ':'); if (!p) return -EINVAL; /* also reject description with ':' as first char */ if (p == desc) return -EINVAL; return 0; }
talnoah/m8
security/keys/user_defined.c
C
gpl-2.0
4,894
/* * linux/drivers/video/omap2/omapfb-sysfs.c * * Copyright (C) 2008 Nokia Corporation * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> * * Some code and ideas taken from drivers/video/omap/ driver * by Imre Deak. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/fb.h> #include <linux/sysfs.h> #include <linux/device.h> #include <linux/uaccess.h> #include <linux/platform_device.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/omapfb.h> #include <video/omapdss.h> #include <video/omapvrfb.h> #include "omapfb.h" static ssize_t show_rotate_type(struct device *dev, struct device_attribute *attr, char *buf) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); return snprintf(buf, PAGE_SIZE, "%d\n", ofbi->rotation_type); } static ssize_t store_rotate_type(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); struct omapfb2_mem_region *rg; int rot_type; int r; r = kstrtoint(buf, 0, &rot_type); if (r) return r; if (rot_type != OMAP_DSS_ROT_DMA && rot_type != OMAP_DSS_ROT_VRFB) return -EINVAL; if (!lock_fb_info(fbi)) return -ENODEV; r = 0; if (rot_type == ofbi->rotation_type) goto out; rg = omapfb_get_mem_region(ofbi->region); if (rg->size) { r = -EBUSY; goto put_region; } ofbi->rotation_type = rot_type; /* * Since the VRAM for this FB is not allocated at the moment we don't * need 
to do any further parameter checking at this point. */ put_region: omapfb_put_mem_region(rg); out: unlock_fb_info(fbi); return r ? r : count; } static ssize_t show_mirror(struct device *dev, struct device_attribute *attr, char *buf) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); return snprintf(buf, PAGE_SIZE, "%d\n", ofbi->mirror); } static ssize_t store_mirror(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); bool mirror; int r; struct fb_var_screeninfo new_var; r = strtobool(buf, &mirror); if (r) return r; if (!lock_fb_info(fbi)) return -ENODEV; ofbi->mirror = mirror; omapfb_get_mem_region(ofbi->region); memcpy(&new_var, &fbi->var, sizeof(new_var)); r = check_fb_var(fbi, &new_var); if (r) goto out; memcpy(&fbi->var, &new_var, sizeof(fbi->var)); set_fb_fix(fbi); r = omapfb_apply_changes(fbi, 0); if (r) goto out; r = count; out: omapfb_put_mem_region(ofbi->region); unlock_fb_info(fbi); return r; } static ssize_t show_overlays(struct device *dev, struct device_attribute *attr, char *buf) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); struct omapfb2_device *fbdev = ofbi->fbdev; ssize_t l = 0; int t; if (!lock_fb_info(fbi)) return -ENODEV; omapfb_lock(fbdev); for (t = 0; t < ofbi->num_overlays; t++) { struct omap_overlay *ovl = ofbi->overlays[t]; int ovlnum; for (ovlnum = 0; ovlnum < fbdev->num_overlays; ++ovlnum) if (ovl == fbdev->overlays[ovlnum]) break; l += snprintf(buf + l, PAGE_SIZE - l, "%s%d", t == 0 ? 
"" : ",", ovlnum); } l += snprintf(buf + l, PAGE_SIZE - l, "\n"); omapfb_unlock(fbdev); unlock_fb_info(fbi); return l; } static struct omapfb_info *get_overlay_fb(struct omapfb2_device *fbdev, struct omap_overlay *ovl) { int i, t; for (i = 0; i < fbdev->num_fbs; i++) { struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]); for (t = 0; t < ofbi->num_overlays; t++) { if (ofbi->overlays[t] == ovl) return ofbi; } } return NULL; } static ssize_t store_overlays(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); struct omapfb2_device *fbdev = ofbi->fbdev; struct omap_overlay *ovls[OMAPFB_MAX_OVL_PER_FB]; struct omap_overlay *ovl; int num_ovls, r, i; int len; bool added = false; num_ovls = 0; len = strlen(buf); if (buf[len - 1] == '\n') len = len - 1; if (!lock_fb_info(fbi)) return -ENODEV; omapfb_lock(fbdev); if (len > 0) { char *p = (char *)buf; int ovlnum; while (p < buf + len) { int found; if (num_ovls == OMAPFB_MAX_OVL_PER_FB) { r = -EINVAL; goto out; } ovlnum = simple_strtoul(p, &p, 0); if (ovlnum > fbdev->num_overlays) { r = -EINVAL; goto out; } found = 0; for (i = 0; i < num_ovls; ++i) { if (ovls[i] == fbdev->overlays[ovlnum]) { found = 1; break; } } if (!found) ovls[num_ovls++] = fbdev->overlays[ovlnum]; p++; } } for (i = 0; i < num_ovls; ++i) { struct omapfb_info *ofbi2 = get_overlay_fb(fbdev, ovls[i]); if (ofbi2 && ofbi2 != ofbi) { dev_err(fbdev->dev, "overlay already in use\n"); r = -EINVAL; goto out; } } /* detach unused overlays */ for (i = 0; i < ofbi->num_overlays; ++i) { int t, found; ovl = ofbi->overlays[i]; found = 0; for (t = 0; t < num_ovls; ++t) { if (ovl == ovls[t]) { found = 1; break; } } if (found) continue; DBG("detaching %d\n", ofbi->overlays[i]->id); omapfb_get_mem_region(ofbi->region); omapfb_overlay_enable(ovl, 0); if (ovl->manager) ovl->manager->apply(ovl->manager); omapfb_put_mem_region(ofbi->region); for (t = i + 1; t < 
ofbi->num_overlays; t++) { ofbi->rotation[t-1] = ofbi->rotation[t]; ofbi->overlays[t-1] = ofbi->overlays[t]; } ofbi->num_overlays--; i--; } for (i = 0; i < num_ovls; ++i) { int t, found; ovl = ovls[i]; found = 0; for (t = 0; t < ofbi->num_overlays; ++t) { if (ovl == ofbi->overlays[t]) { found = 1; break; } } if (found) continue; ofbi->rotation[ofbi->num_overlays] = 0; ofbi->overlays[ofbi->num_overlays++] = ovl; added = true; } if (added) { omapfb_get_mem_region(ofbi->region); r = omapfb_apply_changes(fbi, 0); omapfb_put_mem_region(ofbi->region); if (r) goto out; } r = count; out: omapfb_unlock(fbdev); unlock_fb_info(fbi); return r; } static ssize_t show_overlays_rotate(struct device *dev, struct device_attribute *attr, char *buf) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); ssize_t l = 0; int t; if (!lock_fb_info(fbi)) return -ENODEV; for (t = 0; t < ofbi->num_overlays; t++) { l += snprintf(buf + l, PAGE_SIZE - l, "%s%d", t == 0 ? "" : ",", ofbi->rotation[t]); } l += snprintf(buf + l, PAGE_SIZE - l, "\n"); unlock_fb_info(fbi); return l; } static ssize_t store_overlays_rotate(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); int num_ovls = 0, r, i; int len; bool changed = false; u8 rotation[OMAPFB_MAX_OVL_PER_FB]; len = strlen(buf); if (buf[len - 1] == '\n') len = len - 1; if (!lock_fb_info(fbi)) return -ENODEV; if (len > 0) { char *p = (char *)buf; while (p < buf + len) { int rot; if (num_ovls == ofbi->num_overlays) { r = -EINVAL; goto out; } rot = simple_strtoul(p, &p, 0); if (rot < 0 || rot > 3) { r = -EINVAL; goto out; } if (ofbi->rotation[num_ovls] != rot) changed = true; rotation[num_ovls++] = rot; p++; } } if (num_ovls != ofbi->num_overlays) { r = -EINVAL; goto out; } if (changed) { for (i = 0; i < num_ovls; ++i) ofbi->rotation[i] = rotation[i]; omapfb_get_mem_region(ofbi->region); r = 
omapfb_apply_changes(fbi, 0); omapfb_put_mem_region(ofbi->region); if (r) goto out; /* FIXME error handling? */ } r = count; out: unlock_fb_info(fbi); return r; } static ssize_t show_size(struct device *dev, struct device_attribute *attr, char *buf) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); return snprintf(buf, PAGE_SIZE, "%lu\n", ofbi->region->size); } static ssize_t store_size(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); struct omapfb2_device *fbdev = ofbi->fbdev; struct omap_dss_device *display = fb2display(fbi); struct omapfb2_mem_region *rg; unsigned long size; int r; int i; r = kstrtoul(buf, 0, &size); if (r) return r; size = PAGE_ALIGN(size); if (!lock_fb_info(fbi)) return -ENODEV; if (display && display->driver->sync) display->driver->sync(display); rg = ofbi->region; down_write_nested(&rg->lock, rg->id); atomic_inc(&rg->lock_count); if (atomic_read(&rg->map_count)) { r = -EBUSY; goto out; } for (i = 0; i < fbdev->num_fbs; i++) { struct omapfb_info *ofbi2 = FB2OFB(fbdev->fbs[i]); int j; if (ofbi2->region != rg) continue; for (j = 0; j < ofbi2->num_overlays; j++) { struct omap_overlay *ovl; ovl = ofbi2->overlays[j]; if (ovl->is_enabled(ovl)) { r = -EBUSY; goto out; } } } if (size != ofbi->region->size) { r = omapfb_realloc_fbmem(fbi, size, ofbi->region->type); if (r) { dev_err(dev, "realloc fbmem failed\n"); goto out; } } r = count; out: atomic_dec(&rg->lock_count); up_write(&rg->lock); unlock_fb_info(fbi); return r; } static ssize_t show_phys(struct device *dev, struct device_attribute *attr, char *buf) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); return snprintf(buf, PAGE_SIZE, "%0x\n", ofbi->region->paddr); } static ssize_t show_virt(struct device *dev, struct device_attribute *attr, char *buf) { struct fb_info *fbi = dev_get_drvdata(dev); struct 
omapfb_info *ofbi = FB2OFB(fbi); return snprintf(buf, PAGE_SIZE, "%p\n", ofbi->region->vaddr); } static ssize_t show_upd_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct fb_info *fbi = dev_get_drvdata(dev); enum omapfb_update_mode mode; int r; r = omapfb_get_update_mode(fbi, &mode); if (r) return r; return snprintf(buf, PAGE_SIZE, "%u\n", (unsigned)mode); } static ssize_t store_upd_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fb_info *fbi = dev_get_drvdata(dev); unsigned mode; int r; r = kstrtouint(buf, 0, &mode); if (r) return r; r = omapfb_set_update_mode(fbi, mode); if (r) return r; return count; } static struct device_attribute omapfb_attrs[] = { __ATTR(rotate_type, S_IRUGO | S_IWUSR, show_rotate_type, store_rotate_type), __ATTR(mirror, S_IRUGO | S_IWUSR, show_mirror, store_mirror), __ATTR(size, S_IRUGO | S_IWUSR, show_size, store_size), __ATTR(overlays, S_IRUGO | S_IWUSR, show_overlays, store_overlays), __ATTR(overlays_rotate, S_IRUGO | S_IWUSR, show_overlays_rotate, store_overlays_rotate), __ATTR(phys_addr, S_IRUGO, show_phys, NULL), __ATTR(virt_addr, S_IRUGO, show_virt, NULL), __ATTR(update_mode, S_IRUGO | S_IWUSR, show_upd_mode, store_upd_mode), }; int omapfb_create_sysfs(struct omapfb2_device *fbdev) { int i; int r; DBG("create sysfs for fbs\n"); for (i = 0; i < fbdev->num_fbs; i++) { int t; for (t = 0; t < ARRAY_SIZE(omapfb_attrs); t++) { r = device_create_file(fbdev->fbs[i]->dev, &omapfb_attrs[t]); if (r) { dev_err(fbdev->dev, "failed to create sysfs " "file\n"); return r; } } } return 0; } void omapfb_remove_sysfs(struct omapfb2_device *fbdev) { int i, t; DBG("remove sysfs for fbs\n"); for (i = 0; i < fbdev->num_fbs; i++) { for (t = 0; t < ARRAY_SIZE(omapfb_attrs); t++) device_remove_file(fbdev->fbs[i]->dev, &omapfb_attrs[t]); } }
munoz0raul/linux-toradex_ACM_iMX6
drivers/video/omap2/omapfb/omapfb-sysfs.c
C
gpl-2.0
12,283
/* * Sane locale-independent, ASCII ctype. * * No surprises, and works with signed and unsigned chars. */ #include "cache.h" enum { S = GIT_SPACE, A = GIT_ALPHA, D = GIT_DIGIT, G = GIT_GLOB_SPECIAL, /* *, ?, [, \\ */ R = GIT_REGEX_SPECIAL, /* $, (, ), +, ., ^, {, | * */ P = GIT_PRINT_EXTRA, /* printable - alpha - digit - glob - regex */ PS = GIT_SPACE | GIT_PRINT_EXTRA, }; unsigned char sane_ctype[256] = { /* 0 1 2 3 4 5 6 7 8 9 A B C D E F */ 0, 0, 0, 0, 0, 0, 0, 0, 0, S, S, 0, 0, S, 0, 0, /* 0.. 15 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 16.. 31 */ PS,P, P, P, R, P, P, P, R, R, G, R, P, P, R, P, /* 32.. 47 */ D, D, D, D, D, D, D, D, D, D, P, P, P, P, P, G, /* 48.. 63 */ P, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, /* 64.. 79 */ A, A, A, A, A, A, A, A, A, A, A, G, G, P, R, P, /* 80.. 95 */ P, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, /* 96..111 */ A, A, A, A, A, A, A, A, A, A, A, R, R, P, P, 0, /* 112..127 */ /* Nothing in the 128.. range */ }; const char *graph_line = "_____________________________________________________________________" "_____________________________________________________________________"; const char *graph_dotted_line = "---------------------------------------------------------------------" "---------------------------------------------------------------------" "---------------------------------------------------------------------";
nitroglycerine33/kernel_samsung_tuna
tools/perf/util/ctype.c
C
gpl-2.0
1,466
/* * drivers/s390/cio/blacklist.c * S/390 common I/O routines -- blacklisting of specific devices * * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, * IBM Corporation * Author(s): Ingo Adlung (adlung@de.ibm.com) * Cornelia Huck (cornelia.huck@de.ibm.com) * Arnd Bergmann (arndb@de.ibm.com) */ #define KMSG_COMPONENT "cio" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/init.h> #include <linux/vmalloc.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/ctype.h> #include <linux/device.h> #include <asm/cio.h> #include <asm/uaccess.h> #include "blacklist.h" #include "cio.h" #include "cio_debug.h" #include "css.h" #include "device.h" /* * "Blacklisting" of certain devices: * Device numbers given in the commandline as cio_ignore=... won't be known * to Linux. * * These can be single devices or ranges of devices */ /* 65536 bits for each set to indicate if a devno is blacklisted or not */ #define __BL_DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \ (8*sizeof(long))) static unsigned long bl_dev[__MAX_SSID + 1][__BL_DEV_WORDS]; typedef enum {add, free} range_action; /* * Function: blacklist_range * (Un-)blacklist the devices from-to */ static int blacklist_range(range_action action, unsigned int from_ssid, unsigned int to_ssid, unsigned int from, unsigned int to, int msgtrigger) { if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) { if (msgtrigger) pr_warning("0.%x.%04x to 0.%x.%04x is not a valid " "range for cio_ignore\n", from_ssid, from, to_ssid, to); return 1; } while ((from_ssid < to_ssid) || ((from_ssid == to_ssid) && (from <= to))) { if (action == add) set_bit(from, bl_dev[from_ssid]); else clear_bit(from, bl_dev[from_ssid]); from++; if (from > __MAX_SUBCHANNEL) { from_ssid++; from = 0; } } return 0; } static int pure_hex(char **cp, unsigned int *val, int min_digit, int max_digit, int max_val) { int diff; diff = 0; *val = 0; while (diff <= max_digit) { int value = hex_to_bin(**cp); if (value < 0) 
break; *val = *val * 16 + value; (*cp)++; diff++; } if ((diff < min_digit) || (diff > max_digit) || (*val > max_val)) return 1; return 0; } static int parse_busid(char *str, unsigned int *cssid, unsigned int *ssid, unsigned int *devno, int msgtrigger) { char *str_work; int val, rc, ret; rc = 1; if (*str == '\0') goto out; /* old style */ str_work = str; val = simple_strtoul(str, &str_work, 16); if (*str_work == '\0') { if (val <= __MAX_SUBCHANNEL) { *devno = val; *ssid = 0; *cssid = 0; rc = 0; } goto out; } /* new style */ str_work = str; ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID); if (ret || (str_work[0] != '.')) goto out; str_work++; ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID); if (ret || (str_work[0] != '.')) goto out; str_work++; ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL); if (ret || (str_work[0] != '\0')) goto out; rc = 0; out: if (rc && msgtrigger) pr_warning("%s is not a valid device for the cio_ignore " "kernel parameter\n", str); return rc; } static int blacklist_parse_parameters(char *str, range_action action, int msgtrigger) { unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to; int rc, totalrc; char *parm; range_action ra; totalrc = 0; while ((parm = strsep(&str, ","))) { rc = 0; ra = action; if (*parm == '!') { if (ra == add) ra = free; else ra = add; parm++; } if (strcmp(parm, "all") == 0) { from_cssid = 0; from_ssid = 0; from = 0; to_cssid = __MAX_CSSID; to_ssid = __MAX_SSID; to = __MAX_SUBCHANNEL; } else { rc = parse_busid(strsep(&parm, "-"), &from_cssid, &from_ssid, &from, msgtrigger); if (!rc) { if (parm != NULL) rc = parse_busid(parm, &to_cssid, &to_ssid, &to, msgtrigger); else { to_cssid = from_cssid; to_ssid = from_ssid; to = from; } } } if (!rc) { rc = blacklist_range(ra, from_ssid, to_ssid, from, to, msgtrigger); if (rc) totalrc = -EINVAL; } else totalrc = -EINVAL; } return totalrc; } static int __init blacklist_setup (char *str) { CIO_MSG_EVENT(6, "Reading blacklist parameters\n"); if 
(blacklist_parse_parameters(str, add, 1)) return 0; return 1; } __setup ("cio_ignore=", blacklist_setup); /* Checking if devices are blacklisted */ /* * Function: is_blacklisted * Returns 1 if the given devicenumber can be found in the blacklist, * otherwise 0. * Used by validate_subchannel() */ int is_blacklisted (int ssid, int devno) { return test_bit (devno, bl_dev[ssid]); } #ifdef CONFIG_PROC_FS /* * Function: blacklist_parse_proc_parameters * parse the stuff which is piped to /proc/cio_ignore */ static int blacklist_parse_proc_parameters(char *buf) { int rc; char *parm; parm = strsep(&buf, " "); if (strcmp("free", parm) == 0) rc = blacklist_parse_parameters(buf, free, 0); else if (strcmp("add", parm) == 0) rc = blacklist_parse_parameters(buf, add, 0); else if (strcmp("purge", parm) == 0) return ccw_purge_blacklisted(); else return -EINVAL; css_schedule_reprobe(); return rc; } /* Iterator struct for all devices. */ struct ccwdev_iter { int devno; int ssid; int in_range; }; static void * cio_ignore_proc_seq_start(struct seq_file *s, loff_t *offset) { struct ccwdev_iter *iter = s->private; if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1)) return NULL; memset(iter, 0, sizeof(*iter)); iter->ssid = *offset / (__MAX_SUBCHANNEL + 1); iter->devno = *offset % (__MAX_SUBCHANNEL + 1); return iter; } static void cio_ignore_proc_seq_stop(struct seq_file *s, void *it) { } static void * cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset) { struct ccwdev_iter *iter; if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1)) return NULL; iter = it; if (iter->devno == __MAX_SUBCHANNEL) { iter->devno = 0; iter->ssid++; if (iter->ssid > __MAX_SSID) return NULL; } else iter->devno++; (*offset)++; return iter; } static int cio_ignore_proc_seq_show(struct seq_file *s, void *it) { struct ccwdev_iter *iter; iter = it; if (!is_blacklisted(iter->ssid, iter->devno)) /* Not blacklisted, nothing to output. 
*/ return 0; if (!iter->in_range) { /* First device in range. */ if ((iter->devno == __MAX_SUBCHANNEL) || !is_blacklisted(iter->ssid, iter->devno + 1)) /* Singular device. */ return seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno); iter->in_range = 1; return seq_printf(s, "0.%x.%04x-", iter->ssid, iter->devno); } if ((iter->devno == __MAX_SUBCHANNEL) || !is_blacklisted(iter->ssid, iter->devno + 1)) { /* Last device in range. */ iter->in_range = 0; return seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno); } return 0; } static ssize_t cio_ignore_write(struct file *file, const char __user *user_buf, size_t user_len, loff_t *offset) { char *buf; ssize_t rc, ret, i; if (*offset) return -EINVAL; if (user_len > 65536) user_len = 65536; buf = vzalloc(user_len + 1); /* maybe better use the stack? */ if (buf == NULL) return -ENOMEM; if (strncpy_from_user (buf, user_buf, user_len) < 0) { rc = -EFAULT; goto out_free; } i = user_len - 1; while ((i >= 0) && (isspace(buf[i]) || (buf[i] == 0))) { buf[i] = '\0'; i--; } ret = blacklist_parse_proc_parameters(buf); if (ret) rc = ret; else rc = user_len; out_free: vfree (buf); return rc; } static const struct seq_operations cio_ignore_proc_seq_ops = { .start = cio_ignore_proc_seq_start, .stop = cio_ignore_proc_seq_stop, .next = cio_ignore_proc_seq_next, .show = cio_ignore_proc_seq_show, }; static int cio_ignore_proc_open(struct inode *inode, struct file *file) { return seq_open_private(file, &cio_ignore_proc_seq_ops, sizeof(struct ccwdev_iter)); } static const struct file_operations cio_ignore_proc_fops = { .open = cio_ignore_proc_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, .write = cio_ignore_write, }; static int cio_ignore_proc_init (void) { struct proc_dir_entry *entry; entry = proc_create("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR, NULL, &cio_ignore_proc_fops); if (!entry) return -ENOENT; return 0; } __initcall (cio_ignore_proc_init); #endif /* CONFIG_PROC_FS */
NoelMacwan/Kernel-C6806-KOT49H.S2.2052
drivers/s390/cio/blacklist.c
C
gpl-2.0
8,499
/* * drivers/media/video/samsung/mfc5/s5p_mfc_intr.c * * C file for Samsung MFC (Multi Function Codec - FIMV) driver * This file contains functions used to wait for command completion. * * Kamil Debski, Copyright (C) 2011 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/delay.h> #include <linux/errno.h> #include <linux/io.h> #include <linux/sched.h> #include <linux/wait.h> #include "regs-mfc.h" #include "s5p_mfc_common.h" #include "s5p_mfc_debug.h" #include "s5p_mfc_intr.h" int s5p_mfc_wait_for_done_dev(struct s5p_mfc_dev *dev, int command) { int ret; ret = wait_event_interruptible_timeout(dev->queue, (dev->int_cond && (dev->int_type == command || dev->int_type == S5P_FIMV_R2H_CMD_ERR_RET)), msecs_to_jiffies(MFC_INT_TIMEOUT)); if (ret == 0) { mfc_err("Interrupt (dev->int_type:%d, command:%d) timed out\n", dev->int_type, command); return 1; } else if (ret == -ERESTARTSYS) { mfc_err("Interrupted by a signal\n"); return 1; } mfc_debug(1, "Finished waiting (dev->int_type:%d, command: %d)\n", dev->int_type, command); if (dev->int_type == S5P_FIMV_R2H_CMD_ERR_RET) return 1; return 0; } void s5p_mfc_clean_dev_int_flags(struct s5p_mfc_dev *dev) { dev->int_cond = 0; dev->int_type = 0; dev->int_err = 0; } int s5p_mfc_wait_for_done_ctx(struct s5p_mfc_ctx *ctx, int command, int interrupt) { int ret; if (interrupt) { ret = wait_event_interruptible_timeout(ctx->queue, (ctx->int_cond && (ctx->int_type == command || ctx->int_type == S5P_FIMV_R2H_CMD_ERR_RET)), msecs_to_jiffies(MFC_INT_TIMEOUT)); } else { ret = wait_event_timeout(ctx->queue, (ctx->int_cond && (ctx->int_type == command || ctx->int_type == S5P_FIMV_R2H_CMD_ERR_RET)), msecs_to_jiffies(MFC_INT_TIMEOUT)); } if (ret == 0) { mfc_err("Interrupt (ctx->int_type:%d, command:%d) timed out\n", ctx->int_type, command); 
return 1; } else if (ret == -ERESTARTSYS) { mfc_err("Interrupted by a signal\n"); return 1; } mfc_debug(1, "Finished waiting (ctx->int_type:%d, command: %d)\n", ctx->int_type, command); if (ctx->int_type == S5P_FIMV_R2H_CMD_ERR_RET) return 1; return 0; } void s5p_mfc_clean_ctx_int_flags(struct s5p_mfc_ctx *ctx) { ctx->int_cond = 0; ctx->int_type = 0; ctx->int_err = 0; }
flar2/m7-Sense
drivers/media/video/s5p-mfc/s5p_mfc_intr.c
C
gpl-2.0
2,505
/* * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. * * Copyright (C) 2002-2010 Aleph One Ltd. * for Toby Churchill Ltd and Brightstar Engineering * * Created by Charles Manning <charles@aleph1.co.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include "yaffs_yaffs1.h" #include "yportenv.h" #include "yaffs_trace.h" #include "yaffs_bitmap.h" #include "yaffs_getblockinfo.h" #include "yaffs_nand.h" #include "yaffs_attribs.h" int yaffs1_scan(struct yaffs_dev *dev) { struct yaffs_ext_tags tags; int blk; int result; int chunk; int c; int deleted; enum yaffs_block_state state; struct yaffs_obj *hard_list = NULL; struct yaffs_block_info *bi; u32 seq_number; struct yaffs_obj_hdr *oh; struct yaffs_obj *in; struct yaffs_obj *parent; int alloc_failed = 0; struct yaffs_shadow_fixer *shadow_fixers = NULL; u8 *chunk_data; yaffs_trace(YAFFS_TRACE_SCAN, "yaffs1_scan starts intstartblk %d intendblk %d...", dev->internal_start_block, dev->internal_end_block); chunk_data = yaffs_get_temp_buffer(dev, __LINE__); dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER; /* Scan all the blocks to determine their state */ bi = dev->block_info; for (blk = dev->internal_start_block; blk <= dev->internal_end_block; blk++) { yaffs_clear_chunk_bits(dev, blk); bi->pages_in_use = 0; bi->soft_del_pages = 0; yaffs_query_init_block_state(dev, blk, &state, &seq_number); bi->block_state = state; bi->seq_number = seq_number; if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK) bi->block_state = state = YAFFS_BLOCK_STATE_DEAD; yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block scanning block %d state %d seq %d", blk, state, seq_number); if (state == YAFFS_BLOCK_STATE_DEAD) { yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "block %d is bad", blk); } else if (state == YAFFS_BLOCK_STATE_EMPTY) { yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty "); dev->n_erased_blocks++; 
dev->n_free_chunks += dev->param.chunks_per_block; } bi++; } /* For each block.... */ for (blk = dev->internal_start_block; !alloc_failed && blk <= dev->internal_end_block; blk++) { cond_resched(); bi = yaffs_get_block_info(dev, blk); state = bi->block_state; deleted = 0; /* For each chunk in each block that needs scanning.... */ for (c = 0; !alloc_failed && c < dev->param.chunks_per_block && state == YAFFS_BLOCK_STATE_NEEDS_SCANNING; c++) { /* Read the tags and decide what to do */ chunk = blk * dev->param.chunks_per_block + c; result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL, &tags); /* Let's have a good look at this chunk... */ if (tags.ecc_result == YAFFS_ECC_RESULT_UNFIXED || tags.is_deleted) { /* YAFFS1 only... * A deleted chunk */ deleted++; dev->n_free_chunks++; /*T((" %d %d deleted\n",blk,c)); */ } else if (!tags.chunk_used) { /* An unassigned chunk in the block * This means that either the block is empty or * this is the one being allocated from */ if (c == 0) { /* We're looking at the first chunk in the block so the block is unused */ state = YAFFS_BLOCK_STATE_EMPTY; dev->n_erased_blocks++; } else { /* this is the block being allocated from */ yaffs_trace(YAFFS_TRACE_SCAN, " Allocating from %d %d", blk, c); state = YAFFS_BLOCK_STATE_ALLOCATING; dev->alloc_block = blk; dev->alloc_page = c; dev->alloc_block_finder = blk; /* Set block finder here to encourage the allocator to go forth from here. */ } dev->n_free_chunks += (dev->param.chunks_per_block - c); } else if (tags.chunk_id > 0) { /* chunk_id > 0 so it is a data chunk... */ unsigned int endpos; yaffs_set_chunk_bit(dev, blk, c); bi->pages_in_use++; in = yaffs_find_or_create_by_number(dev, tags.obj_id, YAFFS_OBJECT_TYPE_FILE); /* PutChunkIntoFile checks for a clash (two data chunks with * the same chunk_id). 
*/ if (!in) alloc_failed = 1; if (in) { if (!yaffs_put_chunk_in_file (in, tags.chunk_id, chunk, 1)) alloc_failed = 1; } endpos = (tags.chunk_id - 1) * dev->data_bytes_per_chunk + tags.n_bytes; if (in && in->variant_type == YAFFS_OBJECT_TYPE_FILE && in->variant.file_variant.scanned_size < endpos) { in->variant.file_variant.scanned_size = endpos; if (!dev->param.use_header_file_size) { in->variant. file_variant.file_size = in->variant. file_variant.scanned_size; } } /* T((" %d %d data %d %d\n",blk,c,tags.obj_id,tags.chunk_id)); */ } else { /* chunk_id == 0, so it is an ObjectHeader. * Thus, we read in the object header and make the object */ yaffs_set_chunk_bit(dev, blk, c); bi->pages_in_use++; result = yaffs_rd_chunk_tags_nand(dev, chunk, chunk_data, NULL); oh = (struct yaffs_obj_hdr *)chunk_data; in = yaffs_find_by_number(dev, tags.obj_id); if (in && in->variant_type != oh->type) { /* This should not happen, but somehow * Wev'e ended up with an obj_id that has been reused but not yet * deleted, and worse still it has changed type. Delete the old object. */ yaffs_del_obj(in); in = 0; } in = yaffs_find_or_create_by_number(dev, tags.obj_id, oh->type); if (!in) alloc_failed = 1; if (in && oh->shadows_obj > 0) { struct yaffs_shadow_fixer *fixer; fixer = kmalloc(sizeof (struct yaffs_shadow_fixer), GFP_NOFS); if (fixer) { fixer->next = shadow_fixers; shadow_fixers = fixer; fixer->obj_id = tags.obj_id; fixer->shadowed_id = oh->shadows_obj; yaffs_trace(YAFFS_TRACE_SCAN, " Shadow fixer: %d shadows %d", fixer->obj_id, fixer->shadowed_id); } } if (in && in->valid) { /* We have already filled this one. We have a duplicate and need to resolve it. */ unsigned existing_serial = in->serial; unsigned new_serial = tags.serial_number; if (((existing_serial + 1) & 3) == new_serial) { /* Use new one - destroy the exisiting one */ yaffs_chunk_del(dev, in->hdr_chunk, 1, __LINE__); in->valid = 0; } else { /* Use existing - destroy this one. 
*/ yaffs_chunk_del(dev, chunk, 1, __LINE__); } } if (in && !in->valid && (tags.obj_id == YAFFS_OBJECTID_ROOT || tags.obj_id == YAFFS_OBJECTID_LOSTNFOUND)) { /* We only load some info, don't fiddle with directory structure */ in->valid = 1; in->variant_type = oh->type; in->yst_mode = oh->yst_mode; yaffs_load_attribs(in, oh); in->hdr_chunk = chunk; in->serial = tags.serial_number; } else if (in && !in->valid) { /* we need to load this info */ in->valid = 1; in->variant_type = oh->type; in->yst_mode = oh->yst_mode; yaffs_load_attribs(in, oh); in->hdr_chunk = chunk; in->serial = tags.serial_number; yaffs_set_obj_name_from_oh(in, oh); in->dirty = 0; /* directory stuff... * hook up to parent */ parent = yaffs_find_or_create_by_number (dev, oh->parent_obj_id, YAFFS_OBJECT_TYPE_DIRECTORY); if (!parent) alloc_failed = 1; if (parent && parent->variant_type == YAFFS_OBJECT_TYPE_UNKNOWN) { /* Set up as a directory */ parent->variant_type = YAFFS_OBJECT_TYPE_DIRECTORY; INIT_LIST_HEAD(&parent-> variant.dir_variant.children); } else if (!parent || parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) { /* Hoosterman, another problem.... * We're trying to use a non-directory as a directory */ yaffs_trace(YAFFS_TRACE_ERROR, "yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found." ); parent = dev->lost_n_found; } yaffs_add_obj_to_dir(parent, in); if (0 && (parent == dev->del_dir || parent == dev->unlinked_dir)) { in->deleted = 1; /* If it is unlinked at start up then it wants deleting */ dev->n_deleted_files++; } /* Note re hardlinks. * Since we might scan a hardlink before its equivalent object is scanned * we put them all in a list. * After scanning is complete, we should have all the objects, so we run through this * list and fix up all the chains. */ switch (in->variant_type) { case YAFFS_OBJECT_TYPE_UNKNOWN: /* Todo got a problem */ break; case YAFFS_OBJECT_TYPE_FILE: if (dev->param. use_header_file_size) in->variant. 
file_variant.file_size = oh->file_size; break; case YAFFS_OBJECT_TYPE_HARDLINK: in->variant. hardlink_variant.equiv_id = oh->equiv_id; in->hard_links.next = (struct list_head *) hard_list; hard_list = in; break; case YAFFS_OBJECT_TYPE_DIRECTORY: /* Do nothing */ break; case YAFFS_OBJECT_TYPE_SPECIAL: /* Do nothing */ break; case YAFFS_OBJECT_TYPE_SYMLINK: in->variant.symlink_variant. alias = yaffs_clone_str(oh->alias); if (!in->variant. symlink_variant.alias) alloc_failed = 1; break; } } } } if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) { /* If we got this far while scanning, then the block is fully allocated. */ state = YAFFS_BLOCK_STATE_FULL; } if (state == YAFFS_BLOCK_STATE_ALLOCATING) { /* If the block was partially allocated then treat it as fully allocated. */ state = YAFFS_BLOCK_STATE_FULL; dev->alloc_block = -1; } bi->block_state = state; /* Now let's see if it was dirty */ if (bi->pages_in_use == 0 && !bi->has_shrink_hdr && bi->block_state == YAFFS_BLOCK_STATE_FULL) { yaffs_block_became_dirty(dev, blk); } } /* Ok, we've done all the scanning. * Fix up the hard link chains. * We should now have scanned all the objects, now it's time to add these * hardlinks. */ yaffs_link_fixup(dev, hard_list); /* Fix up any shadowed objects */ { struct yaffs_shadow_fixer *fixer; struct yaffs_obj *obj; while (shadow_fixers) { fixer = shadow_fixers; shadow_fixers = fixer->next; /* Complete the rename transaction by deleting the shadowed object * then setting the object header to unshadowed. */ obj = yaffs_find_by_number(dev, fixer->shadowed_id); if (obj) yaffs_del_obj(obj); obj = yaffs_find_by_number(dev, fixer->obj_id); if (obj) yaffs_update_oh(obj, NULL, 1, 0, 0, NULL); kfree(fixer); } } yaffs_release_temp_buffer(dev, chunk_data, __LINE__); if (alloc_failed) return YAFFS_FAIL; yaffs_trace(YAFFS_TRACE_SCAN, "yaffs1_scan ends"); return YAFFS_OK; }
Flemmard/akh8960_cm
fs/yaffs2/yaffs_yaffs1.c
C
gpl-2.0
11,299
/* * Example of using hugepage memory in a user application using the mmap * system call with MAP_HUGETLB flag. Before running this program make * sure the administrator has allocated enough default sized huge pages * to cover the 256 MB allocation. * * For ia64 architecture, Linux kernel reserves Region number 4 for hugepages. * That means the addresses starting with 0x800000... will need to be * specified. Specifying a fixed address is not required on ppc64, i386 * or x86_64. */ #include <stdlib.h> #include <stdio.h> #include <unistd.h> #include <sys/mman.h> #include <fcntl.h> #define LENGTH (256UL*1024*1024) #define PROTECTION (PROT_READ | PROT_WRITE) #ifndef MAP_HUGETLB #define MAP_HUGETLB 0x40000 /* arch specific */ #endif /* Only ia64 requires this */ #ifdef __ia64__ #define ADDR (void *)(0x8000000000000000UL) #define FLAGS (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_FIXED) #else #define ADDR (void *)(0x0UL) #define FLAGS (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB) #endif static void check_bytes(char *addr) { printf("First hex is %x\n", *((unsigned int *)addr)); } static void write_bytes(char *addr) { unsigned long i; for (i = 0; i < LENGTH; i++) *(addr + i) = (char)i; } static int read_bytes(char *addr) { unsigned long i; check_bytes(addr); for (i = 0; i < LENGTH; i++) if (*(addr + i) != (char)i) { printf("Mismatch at %lu\n", i); return 1; } return 0; } int main(void) { void *addr; int ret; addr = mmap(ADDR, LENGTH, PROTECTION, FLAGS, 0, 0); if (addr == MAP_FAILED) { perror("mmap"); exit(1); } printf("Returned address is %p\n", addr); check_bytes(addr); write_bytes(addr); ret = read_bytes(addr); munmap(addr, LENGTH); return ret; }
TeamExodus/kernel_oneplus_msm8994
tools/testing/selftests/vm/map_hugetlb.c
C
gpl-2.0
1,730
/* * Squashfs - a compressed read only filesystem for Linux * * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 * Phillip Lougher <phillip@squashfs.org.uk> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2, * or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * symlink.c */ /* * This file implements code to handle symbolic links. * * The data contents of symbolic links are stored inside the symbolic * link inode within the inode table. This allows the normally small symbolic * link to be compressed as part of the inode table, achieving much greater * compression than if the symbolic link was compressed individually. 
*/ #include <linux/fs.h> #include <linux/vfs.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/pagemap.h> #include <linux/xattr.h> #include "squashfs_fs.h" #include "squashfs_fs_sb.h" #include "squashfs_fs_i.h" #include "squashfs.h" #include "xattr.h" static int squashfs_symlink_readpage(struct file *file, struct page *page) { struct inode *inode = page->mapping->host; struct super_block *sb = inode->i_sb; struct squashfs_sb_info *msblk = sb->s_fs_info; int index = page->index << PAGE_CACHE_SHIFT; u64 block = squashfs_i(inode)->start; int offset = squashfs_i(inode)->offset; int length = min_t(int, i_size_read(inode) - index, PAGE_CACHE_SIZE); int bytes, copied; void *pageaddr; struct squashfs_cache_entry *entry; TRACE("Entered squashfs_symlink_readpage, page index %ld, start block " "%llx, offset %x\n", page->index, block, offset); /* * Skip index bytes into symlink metadata. */ if (index) { bytes = squashfs_read_metadata(sb, NULL, &block, &offset, index); if (bytes < 0) { ERROR("Unable to read symlink [%llx:%x]\n", squashfs_i(inode)->start, squashfs_i(inode)->offset); goto error_out; } } /* * Read length bytes from symlink metadata. Squashfs_read_metadata * is not used here because it can sleep and we want to use * kmap_atomic to map the page. Instead call the underlying * squashfs_cache_get routine. As length bytes may overlap metadata * blocks, we may need to call squashfs_cache_get multiple times. 
*/ for (bytes = 0; bytes < length; offset = 0, bytes += copied) { entry = squashfs_cache_get(sb, msblk->block_cache, block, 0); if (entry->error) { ERROR("Unable to read symlink [%llx:%x]\n", squashfs_i(inode)->start, squashfs_i(inode)->offset); squashfs_cache_put(entry); goto error_out; } pageaddr = kmap_atomic(page); copied = squashfs_copy_data(pageaddr + bytes, entry, offset, length - bytes); if (copied == length - bytes) memset(pageaddr + length, 0, PAGE_CACHE_SIZE - length); else block = entry->next_index; kunmap_atomic(pageaddr); squashfs_cache_put(entry); } flush_dcache_page(page); SetPageUptodate(page); unlock_page(page); return 0; error_out: SetPageError(page); unlock_page(page); return 0; } const struct address_space_operations squashfs_symlink_aops = { .readpage = squashfs_symlink_readpage }; const struct inode_operations squashfs_symlink_inode_ops = { .readlink = generic_readlink, .follow_link = page_follow_link_light, .put_link = page_put_link, .getxattr = generic_getxattr, .listxattr = squashfs_listxattr };
steev/luna-kernel
fs/squashfs/symlink.c
C
gpl-2.0
3,870
/* * Copyright (C) International Business Machines Corp., 2000-2005 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * jfs_xtree.c: extent allocation descriptor B+-tree manager */ #include <linux/fs.h> #include <linux/module.h> #include <linux/quotaops.h> #include <linux/seq_file.h> #include "jfs_incore.h" #include "jfs_filsys.h" #include "jfs_metapage.h" #include "jfs_dmap.h" #include "jfs_dinode.h" #include "jfs_superblock.h" #include "jfs_debug.h" /* * xtree local flag */ #define XT_INSERT 0x00000001 /* * xtree key/entry comparison: extent offset * * return: * -1: k < start of extent * 0: start_of_extent <= k <= end_of_extent * 1: k > end_of_extent */ #define XT_CMP(CMP, K, X, OFFSET64)\ {\ OFFSET64 = offsetXAD(X);\ (CMP) = ((K) >= OFFSET64 + lengthXAD(X)) ? 1 :\ ((K) < OFFSET64) ? 
-1 : 0;\ } /* write a xad entry */ #define XT_PUTENTRY(XAD, FLAG, OFF, LEN, ADDR)\ {\ (XAD)->flag = (FLAG);\ XADoffset((XAD), (OFF));\ XADlength((XAD), (LEN));\ XADaddress((XAD), (ADDR));\ } #define XT_PAGE(IP, MP) BT_PAGE(IP, MP, xtpage_t, i_xtroot) /* get page buffer for specified block address */ /* ToDo: Replace this ugly macro with a function */ #define XT_GETPAGE(IP, BN, MP, SIZE, P, RC)\ {\ BT_GETPAGE(IP, BN, MP, xtpage_t, SIZE, P, RC, i_xtroot)\ if (!(RC))\ {\ if ((le16_to_cpu((P)->header.nextindex) < XTENTRYSTART) ||\ (le16_to_cpu((P)->header.nextindex) > le16_to_cpu((P)->header.maxentry)) ||\ (le16_to_cpu((P)->header.maxentry) > (((BN)==0)?XTROOTMAXSLOT:PSIZE>>L2XTSLOTSIZE)))\ {\ jfs_error((IP)->i_sb, "XT_GETPAGE: xtree page corrupt");\ BT_PUTPAGE(MP);\ MP = NULL;\ RC = -EIO;\ }\ }\ } /* for consistency */ #define XT_PUTPAGE(MP) BT_PUTPAGE(MP) #define XT_GETSEARCH(IP, LEAF, BN, MP, P, INDEX) \ BT_GETSEARCH(IP, LEAF, BN, MP, xtpage_t, P, INDEX, i_xtroot) /* xtree entry parameter descriptor */ struct xtsplit { struct metapage *mp; s16 index; u8 flag; s64 off; s64 addr; int len; struct pxdlist *pxdlist; }; /* * statistics */ #ifdef CONFIG_JFS_STATISTICS static struct { uint search; uint fastSearch; uint split; } xtStat; #endif /* * forward references */ static int xtSearch(struct inode *ip, s64 xoff, s64 *next, int *cmpp, struct btstack * btstack, int flag); static int xtSplitUp(tid_t tid, struct inode *ip, struct xtsplit * split, struct btstack * btstack); static int xtSplitPage(tid_t tid, struct inode *ip, struct xtsplit * split, struct metapage ** rmpp, s64 * rbnp); static int xtSplitRoot(tid_t tid, struct inode *ip, struct xtsplit * split, struct metapage ** rmpp); #ifdef _STILL_TO_PORT static int xtDeleteUp(tid_t tid, struct inode *ip, struct metapage * fmp, xtpage_t * fp, struct btstack * btstack); static int xtSearchNode(struct inode *ip, xad_t * xad, int *cmpp, struct btstack * btstack, int flag); static int xtRelink(tid_t tid, struct inode *ip, 
xtpage_t * fp); #endif /* _STILL_TO_PORT */ /* * xtLookup() * * function: map a single page into a physical extent; */ int xtLookup(struct inode *ip, s64 lstart, s64 llen, int *pflag, s64 * paddr, s32 * plen, int no_check) { int rc = 0; struct btstack btstack; int cmp; s64 bn; struct metapage *mp; xtpage_t *p; int index; xad_t *xad; s64 next, size, xoff, xend; int xlen; s64 xaddr; *paddr = 0; *plen = llen; if (!no_check) { /* is lookup offset beyond eof ? */ size = ((u64) ip->i_size + (JFS_SBI(ip->i_sb)->bsize - 1)) >> JFS_SBI(ip->i_sb)->l2bsize; if (lstart >= size) return 0; } /* * search for the xad entry covering the logical extent */ //search: if ((rc = xtSearch(ip, lstart, &next, &cmp, &btstack, 0))) { jfs_err("xtLookup: xtSearch returned %d", rc); return rc; } /* * compute the physical extent covering logical extent * * N.B. search may have failed (e.g., hole in sparse file), * and returned the index of the next entry. */ /* retrieve search result */ XT_GETSEARCH(ip, btstack.top, bn, mp, p, index); /* is xad found covering start of logical extent ? * lstart is a page start address, * i.e., lstart cannot start in a hole; */ if (cmp) { if (next) *plen = min(next - lstart, llen); goto out; } /* * lxd covered by xad */ xad = &p->xad[index]; xoff = offsetXAD(xad); xlen = lengthXAD(xad); xend = xoff + xlen; xaddr = addressXAD(xad); /* initialize new pxd */ *pflag = xad->flag; *paddr = xaddr + (lstart - xoff); /* a page must be fully covered by an xad */ *plen = min(xend - lstart, llen); out: XT_PUTPAGE(mp); return rc; } /* * xtSearch() * * function: search for the xad entry covering specified offset. * * parameters: * ip - file object; * xoff - extent offset; * nextp - address of next extent (if any) for search miss * cmpp - comparison result: * btstack - traverse stack; * flag - search process flag (XT_INSERT); * * returns: * btstack contains (bn, index) of search path traversed to the entry. * *cmpp is set to result of comparison with the entry returned. 
* the page containing the entry is pinned at exit. */ static int xtSearch(struct inode *ip, s64 xoff, s64 *nextp, int *cmpp, struct btstack * btstack, int flag) { struct jfs_inode_info *jfs_ip = JFS_IP(ip); int rc = 0; int cmp = 1; /* init for empty page */ s64 bn; /* block number */ struct metapage *mp; /* page buffer */ xtpage_t *p; /* page */ xad_t *xad; int base, index, lim, btindex; struct btframe *btsp; int nsplit = 0; /* number of pages to split */ s64 t64; s64 next = 0; INCREMENT(xtStat.search); BT_CLR(btstack); btstack->nsplit = 0; /* * search down tree from root: * * between two consecutive entries of <Ki, Pi> and <Kj, Pj> of * internal page, child page Pi contains entry with k, Ki <= K < Kj. * * if entry with search key K is not found * internal page search find the entry with largest key Ki * less than K which point to the child page to search; * leaf page search find the entry with smallest key Kj * greater than K so that the returned index is the position of * the entry to be shifted right for insertion of new entry. * for empty tree, search key is greater than any key of the tree. * * by convention, root bn = 0. 
*/ for (bn = 0;;) { /* get/pin the page to search */ XT_GETPAGE(ip, bn, mp, PSIZE, p, rc); if (rc) return rc; /* try sequential access heuristics with the previous * access entry in target leaf page: * once search narrowed down into the target leaf, * key must either match an entry in the leaf or * key entry does not exist in the tree; */ //fastSearch: if ((jfs_ip->btorder & BT_SEQUENTIAL) && (p->header.flag & BT_LEAF) && (index = jfs_ip->btindex) < le16_to_cpu(p->header.nextindex)) { xad = &p->xad[index]; t64 = offsetXAD(xad); if (xoff < t64 + lengthXAD(xad)) { if (xoff >= t64) { *cmpp = 0; goto out; } /* stop sequential access heuristics */ goto binarySearch; } else { /* (t64 + lengthXAD(xad)) <= xoff */ /* try next sequential entry */ index++; if (index < le16_to_cpu(p->header.nextindex)) { xad++; t64 = offsetXAD(xad); if (xoff < t64 + lengthXAD(xad)) { if (xoff >= t64) { *cmpp = 0; goto out; } /* miss: key falls between * previous and this entry */ *cmpp = 1; next = t64; goto out; } /* (xoff >= t64 + lengthXAD(xad)); * matching entry may be further out: * stop heuristic search */ /* stop sequential access heuristics */ goto binarySearch; } /* (index == p->header.nextindex); * miss: key entry does not exist in * the target leaf/tree */ *cmpp = 1; goto out; } /* * if hit, return index of the entry found, and * if miss, where new entry with search key is * to be inserted; */ out: /* compute number of pages to split */ if (flag & XT_INSERT) { if (p->header.nextindex == /* little-endian */ p->header.maxentry) nsplit++; else nsplit = 0; btstack->nsplit = nsplit; } /* save search result */ btsp = btstack->top; btsp->bn = bn; btsp->index = index; btsp->mp = mp; /* update sequential access heuristics */ jfs_ip->btindex = index; if (nextp) *nextp = next; INCREMENT(xtStat.fastSearch); return 0; } /* well, ... 
full search now */ binarySearch: lim = le16_to_cpu(p->header.nextindex) - XTENTRYSTART; /* * binary search with search key K on the current page */ for (base = XTENTRYSTART; lim; lim >>= 1) { index = base + (lim >> 1); XT_CMP(cmp, xoff, &p->xad[index], t64); if (cmp == 0) { /* * search hit */ /* search hit - leaf page: * return the entry found */ if (p->header.flag & BT_LEAF) { *cmpp = cmp; /* compute number of pages to split */ if (flag & XT_INSERT) { if (p->header.nextindex == p->header.maxentry) nsplit++; else nsplit = 0; btstack->nsplit = nsplit; } /* save search result */ btsp = btstack->top; btsp->bn = bn; btsp->index = index; btsp->mp = mp; /* init sequential access heuristics */ btindex = jfs_ip->btindex; if (index == btindex || index == btindex + 1) jfs_ip->btorder = BT_SEQUENTIAL; else jfs_ip->btorder = BT_RANDOM; jfs_ip->btindex = index; return 0; } /* search hit - internal page: * descend/search its child page */ if (index < le16_to_cpu(p->header.nextindex)-1) next = offsetXAD(&p->xad[index + 1]); goto next; } if (cmp > 0) { base = index + 1; --lim; } } /* * search miss * * base is the smallest index with key (Kj) greater than * search key (K) and may be zero or maxentry index. */ if (base < le16_to_cpu(p->header.nextindex)) next = offsetXAD(&p->xad[base]); /* * search miss - leaf page: * * return location of entry (base) where new entry with * search key K is to be inserted. 
*/ if (p->header.flag & BT_LEAF) { *cmpp = cmp; /* compute number of pages to split */ if (flag & XT_INSERT) { if (p->header.nextindex == p->header.maxentry) nsplit++; else nsplit = 0; btstack->nsplit = nsplit; } /* save search result */ btsp = btstack->top; btsp->bn = bn; btsp->index = base; btsp->mp = mp; /* init sequential access heuristics */ btindex = jfs_ip->btindex; if (base == btindex || base == btindex + 1) jfs_ip->btorder = BT_SEQUENTIAL; else jfs_ip->btorder = BT_RANDOM; jfs_ip->btindex = base; if (nextp) *nextp = next; return 0; } /* * search miss - non-leaf page: * * if base is non-zero, decrement base by one to get the parent * entry of the child page to search. */ index = base ? base - 1 : base; /* * go down to child page */ next: /* update number of pages to split */ if (p->header.nextindex == p->header.maxentry) nsplit++; else nsplit = 0; /* push (bn, index) of the parent page/entry */ if (BT_STACK_FULL(btstack)) { jfs_error(ip->i_sb, "stack overrun in xtSearch!"); XT_PUTPAGE(mp); return -EIO; } BT_PUSH(btstack, bn, index); /* get the child page block number */ bn = addressXAD(&p->xad[index]); /* unpin the parent page */ XT_PUTPAGE(mp); } } /* * xtInsert() * * function: * * parameter: * tid - transaction id; * ip - file object; * xflag - extent flag (XAD_NOTRECORDED): * xoff - extent offset; * xlen - extent length; * xaddrp - extent address pointer (in/out): * if (*xaddrp) * caller allocated data extent at *xaddrp; * else * allocate data extent and return its xaddr; * flag - * * return: */ int xtInsert(tid_t tid, /* transaction id */ struct inode *ip, int xflag, s64 xoff, s32 xlen, s64 * xaddrp, int flag) { int rc = 0; s64 xaddr, hint; struct metapage *mp; /* meta-page buffer */ xtpage_t *p; /* base B+-tree index page */ s64 bn; int index, nextindex; struct btstack btstack; /* traverse stack */ struct xtsplit split; /* split information */ xad_t *xad; int cmp; s64 next; struct tlock *tlck; struct xtlock *xtlck; jfs_info("xtInsert: nxoff:0x%lx 
nxlen:0x%x", (ulong) xoff, xlen); /* * search for the entry location at which to insert: * * xtFastSearch() and xtSearch() both returns (leaf page * pinned, index at which to insert). * n.b. xtSearch() may return index of maxentry of * the full page. */ if ((rc = xtSearch(ip, xoff, &next, &cmp, &btstack, XT_INSERT))) return rc; /* retrieve search result */ XT_GETSEARCH(ip, btstack.top, bn, mp, p, index); /* This test must follow XT_GETSEARCH since mp must be valid if * we branch to out: */ if ((cmp == 0) || (next && (xlen > next - xoff))) { rc = -EEXIST; goto out; } /* * allocate data extent requested * * allocation hint: last xad */ if ((xaddr = *xaddrp) == 0) { if (index > XTENTRYSTART) { xad = &p->xad[index - 1]; hint = addressXAD(xad) + lengthXAD(xad) - 1; } else hint = 0; if ((rc = dquot_alloc_block(ip, xlen))) goto out; if ((rc = dbAlloc(ip, hint, (s64) xlen, &xaddr))) { dquot_free_block(ip, xlen); goto out; } } /* * insert entry for new extent */ xflag |= XAD_NEW; /* * if the leaf page is full, split the page and * propagate up the router entry for the new page from split * * The xtSplitUp() will insert the entry and unpin the leaf page. */ nextindex = le16_to_cpu(p->header.nextindex); if (nextindex == le16_to_cpu(p->header.maxentry)) { split.mp = mp; split.index = index; split.flag = xflag; split.off = xoff; split.len = xlen; split.addr = xaddr; split.pxdlist = NULL; if ((rc = xtSplitUp(tid, ip, &split, &btstack))) { /* undo data extent allocation */ if (*xaddrp == 0) { dbFree(ip, xaddr, (s64) xlen); dquot_free_block(ip, xlen); } return rc; } *xaddrp = xaddr; return 0; } /* * insert the new entry into the leaf page */ /* * acquire a transaction lock on the leaf page; * * action: xad insertion/extension; */ BT_MARK_DIRTY(mp, ip); /* if insert into middle, shift right remaining entries. 
*/ if (index < nextindex) memmove(&p->xad[index + 1], &p->xad[index], (nextindex - index) * sizeof(xad_t)); /* insert the new entry: mark the entry NEW */ xad = &p->xad[index]; XT_PUTENTRY(xad, xflag, xoff, xlen, xaddr); /* advance next available entry index */ le16_add_cpu(&p->header.nextindex, 1); /* Don't log it if there are no links to the file */ if (!test_cflag(COMMIT_Nolink, ip)) { tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW); xtlck = (struct xtlock *) & tlck->lock; xtlck->lwm.offset = (xtlck->lwm.offset) ? min(index, (int)xtlck->lwm.offset) : index; xtlck->lwm.length = le16_to_cpu(p->header.nextindex) - xtlck->lwm.offset; } *xaddrp = xaddr; out: /* unpin the leaf page */ XT_PUTPAGE(mp); return rc; } /* * xtSplitUp() * * function: * split full pages as propagating insertion up the tree * * parameter: * tid - transaction id; * ip - file object; * split - entry parameter descriptor; * btstack - traverse stack from xtSearch() * * return: */ static int xtSplitUp(tid_t tid, struct inode *ip, struct xtsplit * split, struct btstack * btstack) { int rc = 0; struct metapage *smp; xtpage_t *sp; /* split page */ struct metapage *rmp; s64 rbn; /* new right page block number */ struct metapage *rcmp; xtpage_t *rcp; /* right child page */ s64 rcbn; /* right child page block number */ int skip; /* index of entry of insertion */ int nextindex; /* next available entry index of p */ struct btframe *parent; /* parent page entry on traverse stack */ xad_t *xad; s64 xaddr; int xlen; int nsplit; /* number of pages split */ struct pxdlist pxdlist; pxd_t *pxd; struct tlock *tlck; struct xtlock *xtlck; smp = split->mp; sp = XT_PAGE(ip, smp); /* is inode xtree root extension/inline EA area free ? 
*/ if ((sp->header.flag & BT_ROOT) && (!S_ISDIR(ip->i_mode)) && (le16_to_cpu(sp->header.maxentry) < XTROOTMAXSLOT) && (JFS_IP(ip)->mode2 & INLINEEA)) { sp->header.maxentry = cpu_to_le16(XTROOTMAXSLOT); JFS_IP(ip)->mode2 &= ~INLINEEA; BT_MARK_DIRTY(smp, ip); /* * acquire a transaction lock on the leaf page; * * action: xad insertion/extension; */ /* if insert into middle, shift right remaining entries. */ skip = split->index; nextindex = le16_to_cpu(sp->header.nextindex); if (skip < nextindex) memmove(&sp->xad[skip + 1], &sp->xad[skip], (nextindex - skip) * sizeof(xad_t)); /* insert the new entry: mark the entry NEW */ xad = &sp->xad[skip]; XT_PUTENTRY(xad, split->flag, split->off, split->len, split->addr); /* advance next available entry index */ le16_add_cpu(&sp->header.nextindex, 1); /* Don't log it if there are no links to the file */ if (!test_cflag(COMMIT_Nolink, ip)) { tlck = txLock(tid, ip, smp, tlckXTREE | tlckGROW); xtlck = (struct xtlock *) & tlck->lock; xtlck->lwm.offset = (xtlck->lwm.offset) ? min(skip, (int)xtlck->lwm.offset) : skip; xtlck->lwm.length = le16_to_cpu(sp->header.nextindex) - xtlck->lwm.offset; } return 0; } /* * allocate new index blocks to cover index page split(s) * * allocation hint: ? */ if (split->pxdlist == NULL) { nsplit = btstack->nsplit; split->pxdlist = &pxdlist; pxdlist.maxnpxd = pxdlist.npxd = 0; pxd = &pxdlist.pxd[0]; xlen = JFS_SBI(ip->i_sb)->nbperpage; for (; nsplit > 0; nsplit--, pxd++) { if ((rc = dbAlloc(ip, (s64) 0, (s64) xlen, &xaddr)) == 0) { PXDaddress(pxd, xaddr); PXDlength(pxd, xlen); pxdlist.maxnpxd++; continue; } /* undo allocation */ XT_PUTPAGE(smp); return rc; } } /* * Split leaf page <sp> into <sp> and a new right page <rp>. * * The split routines insert the new entry into the leaf page, * and acquire txLock as appropriate. * return <rp> pinned and its block number <rpbn>. */ rc = (sp->header.flag & BT_ROOT) ? 
xtSplitRoot(tid, ip, split, &rmp) : xtSplitPage(tid, ip, split, &rmp, &rbn); XT_PUTPAGE(smp); if (rc) return -EIO; /* * propagate up the router entry for the leaf page just split * * insert a router entry for the new page into the parent page, * propagate the insert/split up the tree by walking back the stack * of (bn of parent page, index of child page entry in parent page) * that were traversed during the search for the page that split. * * the propagation of insert/split up the tree stops if the root * splits or the page inserted into doesn't have to split to hold * the new entry. * * the parent entry for the split page remains the same, and * a new entry is inserted at its right with the first key and * block number of the new right page. * * There are a maximum of 3 pages pinned at any time: * right child, left parent and right parent (when the parent splits) * to keep the child page pinned while working on the parent. * make sure that all pins are released at exit. */ while ((parent = BT_POP(btstack)) != NULL) { /* parent page specified by stack frame <parent> */ /* keep current child pages <rcp> pinned */ rcmp = rmp; rcbn = rbn; rcp = XT_PAGE(ip, rcmp); /* * insert router entry in parent for new right child page <rp> */ /* get/pin the parent page <sp> */ XT_GETPAGE(ip, parent->bn, smp, PSIZE, sp, rc); if (rc) { XT_PUTPAGE(rcmp); return rc; } /* * The new key entry goes ONE AFTER the index of parent entry, * because the split was to the right. 
*/ skip = parent->index + 1; /* * split or shift right remaining entries of the parent page */ nextindex = le16_to_cpu(sp->header.nextindex); /* * parent page is full - split the parent page */ if (nextindex == le16_to_cpu(sp->header.maxentry)) { /* init for parent page split */ split->mp = smp; split->index = skip; /* index at insert */ split->flag = XAD_NEW; split->off = offsetXAD(&rcp->xad[XTENTRYSTART]); split->len = JFS_SBI(ip->i_sb)->nbperpage; split->addr = rcbn; /* unpin previous right child page */ XT_PUTPAGE(rcmp); /* The split routines insert the new entry, * and acquire txLock as appropriate. * return <rp> pinned and its block number <rpbn>. */ rc = (sp->header.flag & BT_ROOT) ? xtSplitRoot(tid, ip, split, &rmp) : xtSplitPage(tid, ip, split, &rmp, &rbn); if (rc) { XT_PUTPAGE(smp); return rc; } XT_PUTPAGE(smp); /* keep new child page <rp> pinned */ } /* * parent page is not full - insert in parent page */ else { /* * insert router entry in parent for the right child * page from the first entry of the right child page: */ /* * acquire a transaction lock on the parent page; * * action: router xad insertion; */ BT_MARK_DIRTY(smp, ip); /* * if insert into middle, shift right remaining entries */ if (skip < nextindex) memmove(&sp->xad[skip + 1], &sp->xad[skip], (nextindex - skip) << L2XTSLOTSIZE); /* insert the router entry */ xad = &sp->xad[skip]; XT_PUTENTRY(xad, XAD_NEW, offsetXAD(&rcp->xad[XTENTRYSTART]), JFS_SBI(ip->i_sb)->nbperpage, rcbn); /* advance next available entry index. */ le16_add_cpu(&sp->header.nextindex, 1); /* Don't log it if there are no links to the file */ if (!test_cflag(COMMIT_Nolink, ip)) { tlck = txLock(tid, ip, smp, tlckXTREE | tlckGROW); xtlck = (struct xtlock *) & tlck->lock; xtlck->lwm.offset = (xtlck->lwm.offset) ? 
min(skip, (int)xtlck->lwm.offset) : skip; xtlck->lwm.length = le16_to_cpu(sp->header.nextindex) - xtlck->lwm.offset; } /* unpin parent page */ XT_PUTPAGE(smp); /* exit propagate up */ break; } } /* unpin current right page */ XT_PUTPAGE(rmp); return 0; } /* * xtSplitPage() * * function: * split a full non-root page into * original/split/left page and new right page * i.e., the original/split page remains as left page. * * parameter: * int tid, * struct inode *ip, * struct xtsplit *split, * struct metapage **rmpp, * u64 *rbnp, * * return: * Pointer to page in which to insert or NULL on error. */ static int xtSplitPage(tid_t tid, struct inode *ip, struct xtsplit * split, struct metapage ** rmpp, s64 * rbnp) { int rc = 0; struct metapage *smp; xtpage_t *sp; struct metapage *rmp; xtpage_t *rp; /* new right page allocated */ s64 rbn; /* new right page block number */ struct metapage *mp; xtpage_t *p; s64 nextbn; int skip, maxentry, middle, righthalf, n; xad_t *xad; struct pxdlist *pxdlist; pxd_t *pxd; struct tlock *tlck; struct xtlock *sxtlck = NULL, *rxtlck = NULL; int quota_allocation = 0; smp = split->mp; sp = XT_PAGE(ip, smp); INCREMENT(xtStat.split); pxdlist = split->pxdlist; pxd = &pxdlist->pxd[pxdlist->npxd]; pxdlist->npxd++; rbn = addressPXD(pxd); /* Allocate blocks to quota. 
*/ rc = dquot_alloc_block(ip, lengthPXD(pxd)); if (rc) goto clean_up; quota_allocation += lengthPXD(pxd); /* * allocate the new right page for the split */ rmp = get_metapage(ip, rbn, PSIZE, 1); if (rmp == NULL) { rc = -EIO; goto clean_up; } jfs_info("xtSplitPage: ip:0x%p smp:0x%p rmp:0x%p", ip, smp, rmp); BT_MARK_DIRTY(rmp, ip); /* * action: new page; */ rp = (xtpage_t *) rmp->data; rp->header.self = *pxd; rp->header.flag = sp->header.flag & BT_TYPE; rp->header.maxentry = sp->header.maxentry; /* little-endian */ rp->header.nextindex = cpu_to_le16(XTENTRYSTART); BT_MARK_DIRTY(smp, ip); /* Don't log it if there are no links to the file */ if (!test_cflag(COMMIT_Nolink, ip)) { /* * acquire a transaction lock on the new right page; */ tlck = txLock(tid, ip, rmp, tlckXTREE | tlckNEW); rxtlck = (struct xtlock *) & tlck->lock; rxtlck->lwm.offset = XTENTRYSTART; /* * acquire a transaction lock on the split page */ tlck = txLock(tid, ip, smp, tlckXTREE | tlckGROW); sxtlck = (struct xtlock *) & tlck->lock; } /* * initialize/update sibling pointers of <sp> and <rp> */ nextbn = le64_to_cpu(sp->header.next); rp->header.next = cpu_to_le64(nextbn); rp->header.prev = cpu_to_le64(addressPXD(&sp->header.self)); sp->header.next = cpu_to_le64(rbn); skip = split->index; /* * sequential append at tail (after last entry of last page) * * if splitting the last page on a level because of appending * a entry to it (skip is maxentry), it's likely that the access is * sequential. adding an empty page on the side of the level is less * work and can push the fill factor much higher than normal. * if we're wrong it's no big deal - we will do the split the right * way next time. * (it may look like it's equally easy to do a similar hack for * reverse sorted data, that is, split the tree left, but it's not. * Be my guest.) 
*/ if (nextbn == 0 && skip == le16_to_cpu(sp->header.maxentry)) { /* * acquire a transaction lock on the new/right page; * * action: xad insertion; */ /* insert entry at the first entry of the new right page */ xad = &rp->xad[XTENTRYSTART]; XT_PUTENTRY(xad, split->flag, split->off, split->len, split->addr); rp->header.nextindex = cpu_to_le16(XTENTRYSTART + 1); if (!test_cflag(COMMIT_Nolink, ip)) { /* rxtlck->lwm.offset = XTENTRYSTART; */ rxtlck->lwm.length = 1; } *rmpp = rmp; *rbnp = rbn; jfs_info("xtSplitPage: sp:0x%p rp:0x%p", sp, rp); return 0; } /* * non-sequential insert (at possibly middle page) */ /* * update previous pointer of old next/right page of <sp> */ if (nextbn != 0) { XT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc); if (rc) { XT_PUTPAGE(rmp); goto clean_up; } BT_MARK_DIRTY(mp, ip); /* * acquire a transaction lock on the next page; * * action:sibling pointer update; */ if (!test_cflag(COMMIT_Nolink, ip)) tlck = txLock(tid, ip, mp, tlckXTREE | tlckRELINK); p->header.prev = cpu_to_le64(rbn); /* sibling page may have been updated previously, or * it may be updated later; */ XT_PUTPAGE(mp); } /* * split the data between the split and new/right pages */ maxentry = le16_to_cpu(sp->header.maxentry); middle = maxentry >> 1; righthalf = maxentry - middle; /* * skip index in old split/left page - insert into left page: */ if (skip <= middle) { /* move right half of split page to the new right page */ memmove(&rp->xad[XTENTRYSTART], &sp->xad[middle], righthalf << L2XTSLOTSIZE); /* shift right tail of left half to make room for new entry */ if (skip < middle) memmove(&sp->xad[skip + 1], &sp->xad[skip], (middle - skip) << L2XTSLOTSIZE); /* insert new entry */ xad = &sp->xad[skip]; XT_PUTENTRY(xad, split->flag, split->off, split->len, split->addr); /* update page header */ sp->header.nextindex = cpu_to_le16(middle + 1); if (!test_cflag(COMMIT_Nolink, ip)) { sxtlck->lwm.offset = (sxtlck->lwm.offset) ? 
min(skip, (int)sxtlck->lwm.offset) : skip; } rp->header.nextindex = cpu_to_le16(XTENTRYSTART + righthalf); } /* * skip index in new right page - insert into right page: */ else { /* move left head of right half to right page */ n = skip - middle; memmove(&rp->xad[XTENTRYSTART], &sp->xad[middle], n << L2XTSLOTSIZE); /* insert new entry */ n += XTENTRYSTART; xad = &rp->xad[n]; XT_PUTENTRY(xad, split->flag, split->off, split->len, split->addr); /* move right tail of right half to right page */ if (skip < maxentry) memmove(&rp->xad[n + 1], &sp->xad[skip], (maxentry - skip) << L2XTSLOTSIZE); /* update page header */ sp->header.nextindex = cpu_to_le16(middle); if (!test_cflag(COMMIT_Nolink, ip)) { sxtlck->lwm.offset = (sxtlck->lwm.offset) ? min(middle, (int)sxtlck->lwm.offset) : middle; } rp->header.nextindex = cpu_to_le16(XTENTRYSTART + righthalf + 1); } if (!test_cflag(COMMIT_Nolink, ip)) { sxtlck->lwm.length = le16_to_cpu(sp->header.nextindex) - sxtlck->lwm.offset; /* rxtlck->lwm.offset = XTENTRYSTART; */ rxtlck->lwm.length = le16_to_cpu(rp->header.nextindex) - XTENTRYSTART; } *rmpp = rmp; *rbnp = rbn; jfs_info("xtSplitPage: sp:0x%p rp:0x%p", sp, rp); return rc; clean_up: /* Rollback quota allocation. */ if (quota_allocation) dquot_free_block(ip, quota_allocation); return (rc); } /* * xtSplitRoot() * * function: * split the full root page into original/root/split page and new * right page * i.e., root remains fixed in tree anchor (inode) and the root is * copied to a single new right child page since root page << * non-root page, and the split root page contains a single entry * for the new right child page. * * parameter: * int tid, * struct inode *ip, * struct xtsplit *split, * struct metapage **rmpp) * * return: * Pointer to page in which to insert or NULL on error. 
 */
static int
xtSplitRoot(tid_t tid,
	    struct inode *ip, struct xtsplit * split, struct metapage ** rmpp)
{
	xtpage_t *sp;
	struct metapage *rmp;
	xtpage_t *rp;
	s64 rbn;
	int skip, nextindex;
	xad_t *xad;
	pxd_t *pxd;
	struct pxdlist *pxdlist;
	struct tlock *tlck;
	struct xtlock *xtlck;
	int rc;

	/* the root lives in-line in the inode (i_xtroot) */
	sp = &JFS_IP(ip)->i_xtroot;

	INCREMENT(xtStat.split);

	/*
	 * allocate a single (right) child page
	 *
	 * the pxd was pre-allocated by the caller and handed in via
	 * split->pxdlist; consume the next unused one.
	 */
	pxdlist = split->pxdlist;
	pxd = &pxdlist->pxd[pxdlist->npxd];
	pxdlist->npxd++;
	rbn = addressPXD(pxd);
	rmp = get_metapage(ip, rbn, PSIZE, 1);
	if (rmp == NULL)
		return -EIO;

	/* Allocate blocks to quota. */
	rc = dquot_alloc_block(ip, lengthPXD(pxd));
	if (rc) {
		release_metapage(rmp);
		return rc;
	}

	jfs_info("xtSplitRoot: ip:0x%p rmp:0x%p", ip, rmp);

	/*
	 * acquire a transaction lock on the new right page;
	 *
	 * action: new page;
	 */
	BT_MARK_DIRTY(rmp, ip);

	rp = (xtpage_t *) rmp->data;
	/* the child inherits the root's leaf/internal type */
	rp->header.flag =
	    (sp->header.flag & BT_LEAF) ? BT_LEAF : BT_INTERNAL;
	rp->header.self = *pxd;
	rp->header.nextindex = cpu_to_le16(XTENTRYSTART);
	rp->header.maxentry = cpu_to_le16(PSIZE >> L2XTSLOTSIZE);

	/* initialize sibling pointers */
	rp->header.next = 0;
	rp->header.prev = 0;

	/*
	 * copy the in-line root page into new right page extent
	 */
	nextindex = le16_to_cpu(sp->header.maxentry);
	memmove(&rp->xad[XTENTRYSTART], &sp->xad[XTENTRYSTART],
		(nextindex - XTENTRYSTART) << L2XTSLOTSIZE);

	/*
	 * insert the new entry into the new right/child page
	 * (skip index in the new right page will not change)
	 */
	skip = split->index;
	/* if insert into middle, shift right remaining entries */
	if (skip != nextindex)
		memmove(&rp->xad[skip + 1], &rp->xad[skip],
			(nextindex - skip) * sizeof(xad_t));

	xad = &rp->xad[skip];
	XT_PUTENTRY(xad, split->flag, split->off, split->len, split->addr);

	/* update page header */
	rp->header.nextindex = cpu_to_le16(nextindex + 1);

	if (!test_cflag(COMMIT_Nolink, ip)) {
		tlck = txLock(tid, ip, rmp, tlckXTREE | tlckNEW);
		xtlck = (struct xtlock *) & tlck->lock;
		xtlck->lwm.offset = XTENTRYSTART;
		xtlck->lwm.length =
		    le16_to_cpu(rp->header.nextindex) - XTENTRYSTART;
	}

	/*
	 * reset the root
	 *
	 * init root with the single entry for the new right page
	 * set the 1st entry offset to 0, which force the left-most key
	 * at any level of the tree to be less than any search key.
	 */
	/*
	 * acquire a transaction lock on the root page (in-memory inode);
	 *
	 * action: root split;
	 */
	BT_MARK_DIRTY(split->mp, ip);

	xad = &sp->xad[XTENTRYSTART];
	XT_PUTENTRY(xad, XAD_NEW, 0, JFS_SBI(ip->i_sb)->nbperpage, rbn);

	/* update page header of root */
	sp->header.flag &= ~BT_LEAF;
	sp->header.flag |= BT_INTERNAL;

	sp->header.nextindex = cpu_to_le16(XTENTRYSTART + 1);

	if (!test_cflag(COMMIT_Nolink, ip)) {
		tlck = txLock(tid, ip, split->mp, tlckXTREE | tlckGROW);
		xtlck = (struct xtlock *) & tlck->lock;
		xtlck->lwm.offset = XTENTRYSTART;
		xtlck->lwm.length = 1;
	}

	*rmpp = rmp;

	jfs_info("xtSplitRoot: sp:0x%p rp:0x%p", sp, rp);
	return 0;
}


/*
 *	xtExtend()
 *
 * function: extend in-place;
 *
 * note: existing extent may or may not have been committed.
 * caller is responsible for pager buffer cache update, and
 * working block allocation map update;
 * update pmap: alloc whole extended extent;
 */
int xtExtend(tid_t tid,		/* transaction id */
	     struct inode *ip, s64 xoff,	/* delta extent offset */
	     s32 xlen,		/* delta extent length */
	     int flag)
{
	int rc = 0;
	int cmp;
	struct metapage *mp;	/* meta-page buffer */
	xtpage_t *p;		/* base B+-tree index page */
	s64 bn;
	int index, nextindex, len;
	struct btstack btstack;	/* traverse stack */
	struct xtsplit split;	/* split information */
	xad_t *xad;
	s64 xaddr;
	struct tlock *tlck;
	struct xtlock *xtlck = NULL;

	jfs_info("xtExtend: nxoff:0x%lx nxlen:0x%x", (ulong) xoff, xlen);

	/* there must exist extent to be extended */
	if ((rc = xtSearch(ip, xoff - 1, NULL, &cmp, &btstack, XT_INSERT)))
		return rc;

	/* retrieve search result */
	XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);

	if (cmp != 0) {
		XT_PUTPAGE(mp);
		jfs_error(ip->i_sb, "xtExtend: xtSearch did not find extent");
		return -EIO;
	}

	/* extension must be contiguous */
	xad = &p->xad[index];
	if ((offsetXAD(xad) + lengthXAD(xad)) != xoff) {
		XT_PUTPAGE(mp);
		jfs_error(ip->i_sb, "xtExtend: extension is not contiguous");
		return -EIO;
	}

	/*
	 * acquire a transaction lock on the leaf page;
	 *
	 * action: xad insertion/extension;
	 */
	BT_MARK_DIRTY(mp, ip);
	if (!test_cflag(COMMIT_Nolink, ip)) {
		tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
		xtlck = (struct xtlock *) & tlck->lock;
	}

	/* extend will overflow extent ? */
	xlen = lengthXAD(xad) + xlen;
	if ((len = xlen - MAXXLEN) <= 0)
		goto extendOld;

	/*
	 *	extent overflow: insert entry for new extent
	 *
	 * the existing XAD is grown to MAXXLEN and the remainder (len)
	 * becomes a new XAD starting MAXXLEN blocks into the extent.
	 */
//insertNew:
	xoff = offsetXAD(xad) + MAXXLEN;
	xaddr = addressXAD(xad) + MAXXLEN;
	nextindex = le16_to_cpu(p->header.nextindex);

	/*
	 *	if the leaf page is full, insert the new entry and
	 *	propagate up the router entry for the new page from split
	 *
	 * The xtSplitUp() will insert the entry and unpin the leaf page.
	 */
	if (nextindex == le16_to_cpu(p->header.maxentry)) {
		/* xtSpliUp() unpins leaf pages */
		split.mp = mp;
		split.index = index + 1;
		split.flag = XAD_NEW;
		split.off = xoff;	/* split offset */
		split.len = len;
		split.addr = xaddr;
		split.pxdlist = NULL;
		if ((rc = xtSplitUp(tid, ip, &split, &btstack)))
			return rc;

		/* get back old page */
		XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
		if (rc)
			return rc;
		/*
		 * if leaf root has been split, original root has been
		 * copied to new child page, i.e., original entry now
		 * resides on the new child page;
		 */
		if (p->header.flag & BT_INTERNAL) {
			ASSERT(p->header.nextindex ==
			       cpu_to_le16(XTENTRYSTART + 1));
			xad = &p->xad[XTENTRYSTART];
			bn = addressXAD(xad);
			XT_PUTPAGE(mp);

			/* get new child page */
			XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
			if (rc)
				return rc;

			BT_MARK_DIRTY(mp, ip);
			if (!test_cflag(COMMIT_Nolink, ip)) {
				tlck = txLock(tid, ip, mp, tlckXTREE|tlckGROW);
				xtlck = (struct xtlock *) & tlck->lock;
			}
		}
	}
	/*
	 *	insert the new entry into the leaf page
	 */
	else {
		/* insert the new entry: mark the entry NEW */
		xad = &p->xad[index + 1];
		XT_PUTENTRY(xad, XAD_NEW, xoff, len, xaddr);

		/* advance next available entry index */
		le16_add_cpu(&p->header.nextindex, 1);
	}

	/* get back old entry */
	xad = &p->xad[index];
	xlen = MAXXLEN;

	/*
	 * extend old extent
	 */
      extendOld:
	XADlength(xad, xlen);
	if (!(xad->flag & XAD_NEW))
		xad->flag |= XAD_EXTENDED;

	if (!test_cflag(COMMIT_Nolink, ip)) {
		xtlck->lwm.offset =
		    (xtlck->lwm.offset) ? min(index,
					      (int)xtlck->lwm.offset) : index;
		xtlck->lwm.length =
		    le16_to_cpu(p->header.nextindex) - xtlck->lwm.offset;
	}

	/* unpin the leaf page */
	XT_PUTPAGE(mp);

	return rc;
}

#ifdef _NOTYET
/*
 *	xtTailgate()
 *
 * function: split existing 'tail' extent
 *	(split offset >= start offset of tail extent), and
 *	relocate and extend the split tail half;
 *
 * note: existing extent may or may not have been committed.
 * caller is responsible for pager buffer cache update, and
 * working block allocation map update;
 * update pmap: free old split tail extent, alloc new extent;
 */
int xtTailgate(tid_t tid,		/* transaction id */
	       struct inode *ip, s64 xoff,	/* split/new extent offset */
	       s32 xlen,	/* new extent length */
	       s64 xaddr,	/* new extent address */
	       int flag)
{
	int rc = 0;
	int cmp;
	struct metapage *mp;	/* meta-page buffer */
	xtpage_t *p;		/* base B+-tree index page */
	s64 bn;
	int index, nextindex, llen, rlen;
	struct btstack btstack;	/* traverse stack */
	struct xtsplit split;	/* split information */
	xad_t *xad;
	struct tlock *tlck;
	struct xtlock *xtlck = 0;	/* NOTE(review): NULL would be the
					 * idiomatic null-pointer constant */
	struct tlock *mtlck;
	struct maplock *pxdlock;

/*
printf("xtTailgate: nxoff:0x%lx nxlen:0x%x nxaddr:0x%lx\n",
	(ulong)xoff, xlen, (ulong)xaddr);
*/

	/* there must exist extent to be tailgated */
	if ((rc = xtSearch(ip, xoff, NULL, &cmp, &btstack, XT_INSERT)))
		return rc;

	/* retrieve search result */
	XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);

	if (cmp != 0) {
		XT_PUTPAGE(mp);
		jfs_error(ip->i_sb, "xtTailgate: couldn't find extent");
		return -EIO;
	}

	/* entry found must be last entry */
	nextindex = le16_to_cpu(p->header.nextindex);
	if (index != nextindex - 1) {
		XT_PUTPAGE(mp);
		jfs_error(ip->i_sb,
			  "xtTailgate: the entry found is not the last entry");
		return -EIO;
	}

	BT_MARK_DIRTY(mp, ip);
	/*
	 * acquire tlock of the leaf page containing original entry
	 */
	if (!test_cflag(COMMIT_Nolink, ip)) {
		tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
		xtlck = (struct xtlock *) & tlck->lock;
	}

	/* completely replace extent ?
	 */
	xad = &p->xad[index];
/*
printf("xtTailgate: xoff:0x%lx xlen:0x%x xaddr:0x%lx\n",
	(ulong)offsetXAD(xad), lengthXAD(xad), (ulong)addressXAD(xad));
*/
	if ((llen = xoff - offsetXAD(xad)) == 0)
		goto updateOld;

	/*
	 *	partially replace extent: insert entry for new extent
	 */
//insertNew:
	/*
	 *	if the leaf page is full, insert the new entry and
	 *	propagate up the router entry for the new page from split
	 *
	 * The xtSplitUp() will insert the entry and unpin the leaf page.
	 */
	if (nextindex == le16_to_cpu(p->header.maxentry)) {
		/* xtSpliUp() unpins leaf pages */
		split.mp = mp;
		split.index = index + 1;
		split.flag = XAD_NEW;
		split.off = xoff;	/* split offset */
		split.len = xlen;
		split.addr = xaddr;
		split.pxdlist = NULL;
		if ((rc = xtSplitUp(tid, ip, &split, &btstack)))
			return rc;

		/* get back old page */
		XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
		if (rc)
			return rc;
		/*
		 * if leaf root has been split, original root has been
		 * copied to new child page, i.e., original entry now
		 * resides on the new child page;
		 */
		if (p->header.flag & BT_INTERNAL) {
			ASSERT(p->header.nextindex ==
			       cpu_to_le16(XTENTRYSTART + 1));
			xad = &p->xad[XTENTRYSTART];
			bn = addressXAD(xad);
			XT_PUTPAGE(mp);

			/* get new child page */
			XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
			if (rc)
				return rc;

			BT_MARK_DIRTY(mp, ip);
			if (!test_cflag(COMMIT_Nolink, ip)) {
				tlck = txLock(tid, ip, mp, tlckXTREE|tlckGROW);
				xtlck = (struct xtlock *) & tlck->lock;
			}
		}
	}
	/*
	 *	insert the new entry into the leaf page
	 */
	else {
		/* insert the new entry: mark the entry NEW */
		xad = &p->xad[index + 1];
		XT_PUTENTRY(xad, XAD_NEW, xoff, xlen, xaddr);

		/* advance next available entry index */
		le16_add_cpu(&p->header.nextindex, 1);
	}

	/* get back old XAD */
	xad = &p->xad[index];

	/*
	 * truncate/relocate old extent at split offset
	 */
      updateOld:
	/* update dmap for old/committed/truncated extent */
	rlen = lengthXAD(xad) - llen;
	if (!(xad->flag & XAD_NEW)) {
		/* free from PWMAP at commit */
		if (!test_cflag(COMMIT_Nolink, ip)) {
			mtlck = txMaplock(tid, ip, tlckMAP);
			pxdlock = (struct maplock *) & mtlck->lock;
			pxdlock->flag = mlckFREEPXD;
			PXDaddress(&pxdlock->pxd, addressXAD(xad) + llen);
			PXDlength(&pxdlock->pxd, rlen);
			pxdlock->index = 1;
		}
	} else
		/* free from WMAP */
		dbFree(ip, addressXAD(xad) + llen, (s64) rlen);

	if (llen)
		/* truncate */
		XADlength(xad, llen);
	else
		/* replace */
		XT_PUTENTRY(xad, XAD_NEW, xoff, xlen, xaddr);

	if (!test_cflag(COMMIT_Nolink, ip)) {
		xtlck->lwm.offset = (xtlck->lwm.offset) ?
		    min(index, (int)xtlck->lwm.offset) : index;
		xtlck->lwm.length = le16_to_cpu(p->header.nextindex) -
		    xtlck->lwm.offset;
	}

	/* unpin the leaf page */
	XT_PUTPAGE(mp);

	return rc;
}
#endif /* _NOTYET */

/*
 *	xtUpdate()
 *
 * function: update XAD;
 *
 *	update extent for allocated_but_not_recorded or
 *	compressed extent;
 *
 * parameter:
 *	nxad	- new XAD;
 *		logical extent of the specified XAD must be completely
 *		contained by an existing XAD;
 */
int xtUpdate(tid_t tid, struct inode *ip, xad_t * nxad)
{				/* new XAD */
	int rc = 0;
	int cmp;
	struct metapage *mp;	/* meta-page buffer */
	xtpage_t *p;		/* base B+-tree index page */
	s64 bn;
	int index0, index, newindex, nextindex;
	struct btstack btstack;	/* traverse stack */
	struct xtsplit split;	/* split information */
	xad_t *xad, *lxad, *rxad;
	int xflag;
	s64 nxoff, xoff;
	int nxlen, xlen, lxlen, rxlen;
	s64 nxaddr, xaddr;
	struct tlock *tlck;
	struct xtlock *xtlck = NULL;
	int newpage = 0;

	/* there must exist extent to be tailgated */
	nxoff = offsetXAD(nxad);
	nxlen = lengthXAD(nxad);
	nxaddr = addressXAD(nxad);

	if ((rc = xtSearch(ip, nxoff, NULL, &cmp, &btstack, XT_INSERT)))
		return rc;

	/* retrieve search result */
	XT_GETSEARCH(ip, btstack.top, bn, mp, p, index0);

	if (cmp != 0) {
		XT_PUTPAGE(mp);
		jfs_error(ip->i_sb, "xtUpdate: Could not find extent");
		return -EIO;
	}

	BT_MARK_DIRTY(mp, ip);
	/*
	 * acquire tlock of the leaf page containing original entry
	 */
	if (!test_cflag(COMMIT_Nolink, ip)) {
		tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
		xtlck = (struct xtlock *) & tlck->lock;
	}

	xad = &p->xad[index0];
	xflag = xad->flag;
	xoff = offsetXAD(xad);
	xlen = lengthXAD(xad);
	xaddr = addressXAD(xad);

	/* nXAD must be completely contained within XAD */
	/* NOTE(review): the error string below has a typo ("in not");
	 * it is a runtime string, left untouched here. */
	if ((xoff > nxoff) ||
	    (nxoff + nxlen > xoff + xlen)) {
		XT_PUTPAGE(mp);
		jfs_error(ip->i_sb,
			  "xtUpdate: nXAD in not completely contained within XAD");
		return -EIO;
	}

	index = index0;
	newindex = index + 1;
	nextindex = le16_to_cpu(p->header.nextindex);

#ifdef _JFS_WIP_NOCOALESCE
	if (xoff < nxoff)
		goto updateRight;

	/*
	 * replace XAD with nXAD
	 */
      replace:			/* (nxoff == xoff) */
	if (nxlen == xlen) {
		/* replace XAD with nXAD:recorded */
		*xad = *nxad;
		xad->flag = xflag & ~XAD_NOTRECORDED;

		goto out;
	} else			/* (nxlen < xlen) */
		goto updateLeft;
#endif				/* _JFS_WIP_NOCOALESCE */

/* #ifdef _JFS_WIP_COALESCE */
	if (xoff < nxoff)
		goto coalesceRight;

	/*
	 * coalesce with left XAD
	 */
//coalesceLeft: /* (xoff == nxoff) */
	/* is XAD first entry of page ? */
	if (index == XTENTRYSTART)
		goto replace;

	/* is nXAD logically and physically contiguous with lXAD ? */
	lxad = &p->xad[index - 1];
	lxlen = lengthXAD(lxad);
	if (!(lxad->flag & XAD_NOTRECORDED) &&
	    (nxoff == offsetXAD(lxad) + lxlen) &&
	    (nxaddr == addressXAD(lxad) + lxlen) &&
	    (lxlen + nxlen < MAXXLEN)) {
		/* extend right lXAD */
		index0 = index - 1;
		XADlength(lxad, lxlen + nxlen);

		/* If we just merged two extents together, need to make sure
		 * the right extent gets logged.  If the left one is marked
		 * XAD_NEW, then we know it will be logged.  Otherwise, mark as
		 * XAD_EXTENDED
		 */
		if (!(lxad->flag & XAD_NEW))
			lxad->flag |= XAD_EXTENDED;

		if (xlen > nxlen) {
			/* truncate XAD */
			XADoffset(xad, xoff + nxlen);
			XADlength(xad, xlen - nxlen);
			XADaddress(xad, xaddr + nxlen);
			goto out;
		} else {	/* (xlen == nxlen) */

			/* remove XAD */
			if (index < nextindex - 1)
				memmove(&p->xad[index], &p->xad[index + 1],
					(nextindex - index -
					 1) << L2XTSLOTSIZE);

			p->header.nextindex =
			    cpu_to_le16(le16_to_cpu(p->header.nextindex) -
					1);

			index = index0;
			newindex = index + 1;
			nextindex = le16_to_cpu(p->header.nextindex);
			xoff = nxoff = offsetXAD(lxad);
			xlen = nxlen = lxlen + nxlen;
			xaddr = nxaddr = addressXAD(lxad);
			goto coalesceRight;
		}
	}

	/*
	 * replace XAD with nXAD
	 */
      replace:			/* (nxoff == xoff) */
	if (nxlen == xlen) {
		/* replace XAD with nXAD:recorded */
		*xad = *nxad;
		xad->flag = xflag & ~XAD_NOTRECORDED;

		goto coalesceRight;
	} else			/* (nxlen < xlen) */
		goto updateLeft;

	/*
	 * coalesce with right XAD
	 */
      coalesceRight:		/* (xoff <= nxoff) */
	/* is XAD last entry of page ? */
	if (newindex == nextindex) {
		if (xoff == nxoff)
			goto out;
		goto updateRight;
	}

	/* is nXAD logically and physically contiguous with rXAD ? */
	rxad = &p->xad[index + 1];
	rxlen = lengthXAD(rxad);
	if (!(rxad->flag & XAD_NOTRECORDED) &&
	    (nxoff + nxlen == offsetXAD(rxad)) &&
	    (nxaddr + nxlen == addressXAD(rxad)) &&
	    (rxlen + nxlen < MAXXLEN)) {
		/* extend left rXAD */
		XADoffset(rxad, nxoff);
		XADlength(rxad, rxlen + nxlen);
		XADaddress(rxad, nxaddr);

		/* If we just merged two extents together, need to make sure
		 * the left extent gets logged.  If the right one is marked
		 * XAD_NEW, then we know it will be logged.  Otherwise, mark as
		 * XAD_EXTENDED
		 */
		if (!(rxad->flag & XAD_NEW))
			rxad->flag |= XAD_EXTENDED;

		if (xlen > nxlen)
			/* truncate XAD */
			XADlength(xad, xlen - nxlen);
		else {		/* (xlen == nxlen) */

			/* remove XAD */
			memmove(&p->xad[index], &p->xad[index + 1],
				(nextindex - index - 1) << L2XTSLOTSIZE);

			p->header.nextindex =
			    cpu_to_le16(le16_to_cpu(p->header.nextindex) -
					1);
		}

		goto out;
	} else if (xoff == nxoff)
		goto out;

	if (xoff >= nxoff) {
		XT_PUTPAGE(mp);
		jfs_error(ip->i_sb, "xtUpdate: xoff >= nxoff");
		return -EIO;
	}
/* #endif _JFS_WIP_COALESCE */

	/*
	 * split XAD into (lXAD, nXAD):
	 *
	 *          |---nXAD--->
	 * --|----------XAD----------|--
	 *   |-lXAD-|
	 */
      updateRight:		/* (xoff < nxoff) */
	/* truncate old XAD as lXAD:not_recorded */
	xad = &p->xad[index];
	XADlength(xad, nxoff - xoff);

	/* insert nXAD:recorded */
	if (nextindex == le16_to_cpu(p->header.maxentry)) {

		/* xtSpliUp() unpins leaf pages */
		split.mp = mp;
		split.index = newindex;
		split.flag = xflag & ~XAD_NOTRECORDED;
		split.off = nxoff;
		split.len = nxlen;
		split.addr = nxaddr;
		split.pxdlist = NULL;
		if ((rc = xtSplitUp(tid, ip, &split, &btstack)))
			return rc;

		/* get back old page */
		XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
		if (rc)
			return rc;
		/*
		 * if leaf root has been split, original root has been
		 * copied to new child page, i.e., original entry now
		 * resides on the new child page;
		 */
		if (p->header.flag & BT_INTERNAL) {
			ASSERT(p->header.nextindex ==
			       cpu_to_le16(XTENTRYSTART + 1));
			xad = &p->xad[XTENTRYSTART];
			bn = addressXAD(xad);
			XT_PUTPAGE(mp);

			/* get new child page */
			XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
			if (rc)
				return rc;

			BT_MARK_DIRTY(mp, ip);
			if (!test_cflag(COMMIT_Nolink, ip)) {
				tlck = txLock(tid, ip, mp, tlckXTREE|tlckGROW);
				xtlck = (struct xtlock *) & tlck->lock;
			}
		} else {
			/* is nXAD on new page ? */
			if (newindex >
			    (le16_to_cpu(p->header.maxentry) >> 1)) {
				newindex =
				    newindex -
				    le16_to_cpu(p->header.nextindex) +
				    XTENTRYSTART;
				newpage = 1;
			}
		}
	} else {
		/* if insert into middle, shift right remaining entries */
		if (newindex < nextindex)
			memmove(&p->xad[newindex + 1], &p->xad[newindex],
				(nextindex - newindex) << L2XTSLOTSIZE);

		/* insert the entry */
		xad = &p->xad[newindex];
		*xad = *nxad;
		xad->flag = xflag & ~XAD_NOTRECORDED;

		/* advance next available entry index. */
		p->header.nextindex =
		    cpu_to_le16(le16_to_cpu(p->header.nextindex) + 1);
	}

	/*
	 * does nXAD force 3-way split ?
	 *
	 *          |---nXAD--->|
	 * --|----------XAD-------------|--
	 *   |-lXAD-|           |-rXAD -|
	 */
	if (nxoff + nxlen == xoff + xlen)
		goto out;

	/* reorient nXAD as XAD for further split XAD into (nXAD, rXAD) */
	if (newpage) {
		/* close out old page */
		if (!test_cflag(COMMIT_Nolink, ip)) {
			xtlck->lwm.offset = (xtlck->lwm.offset) ?
			    min(index0, (int)xtlck->lwm.offset) : index0;
			xtlck->lwm.length =
			    le16_to_cpu(p->header.nextindex) -
			    xtlck->lwm.offset;
		}

		bn = le64_to_cpu(p->header.next);
		XT_PUTPAGE(mp);

		/* get new right page */
		XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
		if (rc)
			return rc;

		BT_MARK_DIRTY(mp, ip);
		if (!test_cflag(COMMIT_Nolink, ip)) {
			tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
			xtlck = (struct xtlock *) & tlck->lock;
		}

		index0 = index = newindex;
	} else
		index++;

	newindex = index + 1;
	nextindex = le16_to_cpu(p->header.nextindex);
	xlen = xlen - (nxoff - xoff);
	xoff = nxoff;
	xaddr = nxaddr;

	/* recompute split pages */
	if (nextindex == le16_to_cpu(p->header.maxentry)) {
		XT_PUTPAGE(mp);

		if ((rc = xtSearch(ip, nxoff, NULL, &cmp, &btstack, XT_INSERT)))
			return rc;

		/* retrieve search result */
		XT_GETSEARCH(ip, btstack.top, bn, mp, p, index0);

		if (cmp != 0) {
			XT_PUTPAGE(mp);
			jfs_error(ip->i_sb, "xtUpdate: xtSearch failed");
			return -EIO;
		}

		if (index0 != index) {
			XT_PUTPAGE(mp);
			jfs_error(ip->i_sb,
				  "xtUpdate: unexpected value of index");
			return -EIO;
		}
	}

	/*
	 * split XAD into (nXAD, rXAD)
	 *
	 *          ---nXAD---|
	 * --|----------XAD----------|--
	 *                    |-rXAD-|
	 */
      updateLeft:		/* (nxoff == xoff) && (nxlen < xlen) */
	/* update old XAD with nXAD:recorded */
	xad = &p->xad[index];
	*xad = *nxad;
	xad->flag = xflag & ~XAD_NOTRECORDED;

	/* insert rXAD:not_recorded */
	xoff = xoff + nxlen;
	xlen = xlen - nxlen;
	xaddr = xaddr + nxlen;
	if (nextindex == le16_to_cpu(p->header.maxentry)) {
/*
printf("xtUpdate.updateLeft.split p:0x%p\n", p);
*/
		/* xtSpliUp() unpins leaf pages */
		split.mp = mp;
		split.index = newindex;
		split.flag = xflag;
		split.off = xoff;
		split.len = xlen;
		split.addr = xaddr;
		split.pxdlist = NULL;
		if ((rc = xtSplitUp(tid, ip, &split, &btstack)))
			return rc;

		/* get back old page */
		XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
		if (rc)
			return rc;

		/*
		 * if leaf root has been split, original root has been
		 * copied to new child page, i.e., original entry now
		 * resides on the new child page;
		 */
		if (p->header.flag & BT_INTERNAL) {
			ASSERT(p->header.nextindex ==
			       cpu_to_le16(XTENTRYSTART + 1));
			xad = &p->xad[XTENTRYSTART];
			bn = addressXAD(xad);
			XT_PUTPAGE(mp);

			/* get new child page */
			XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
			if (rc)
				return rc;

			BT_MARK_DIRTY(mp, ip);
			if (!test_cflag(COMMIT_Nolink, ip)) {
				tlck = txLock(tid, ip, mp, tlckXTREE|tlckGROW);
				xtlck = (struct xtlock *) & tlck->lock;
			}
		}
	} else {
		/* if insert into middle, shift right remaining entries */
		if (newindex < nextindex)
			memmove(&p->xad[newindex + 1], &p->xad[newindex],
				(nextindex - newindex) << L2XTSLOTSIZE);

		/* insert the entry */
		xad = &p->xad[newindex];
		XT_PUTENTRY(xad, xflag, xoff, xlen, xaddr);

		/* advance next available entry index. */
		p->header.nextindex =
		    cpu_to_le16(le16_to_cpu(p->header.nextindex) + 1);
	}

      out:
	if (!test_cflag(COMMIT_Nolink, ip)) {
		xtlck->lwm.offset = (xtlck->lwm.offset) ?
		    min(index0, (int)xtlck->lwm.offset) : index0;
		xtlck->lwm.length = le16_to_cpu(p->header.nextindex) -
		    xtlck->lwm.offset;
	}

	/* unpin the leaf page */
	XT_PUTPAGE(mp);

	return rc;
}

/*
 *	xtAppend()
 *
 * function: grow in append mode from contiguous region specified ;
 *
 * parameter:
 *	tid		- transaction id;
 *	ip		- file object;
 *	xflag		- extent flag:
 *	xoff		- extent offset;
 *	maxblocks	- max extent length;
 *	xlen		- extent length (in/out);
 *	xaddrp		- extent address pointer (in/out):
 *	flag		-
 *
 * return:
 */
int xtAppend(tid_t tid,		/* transaction id */
	     struct inode *ip, int xflag, s64 xoff, s32 maxblocks,
	     s32 * xlenp,	/* (in/out) */
	     s64 * xaddrp,	/* (in/out) */
	     int flag)
{
	int rc = 0;
	struct metapage *mp;	/* meta-page buffer */
	xtpage_t *p;		/* base B+-tree index page */
	s64 bn, xaddr;
	int index, nextindex;
	struct btstack btstack;	/* traverse stack */
	struct xtsplit split;	/* split information */
	xad_t *xad;
	int cmp;
	struct tlock *tlck;
	struct xtlock *xtlck;
	int nsplit, nblocks, xlen;
	struct pxdlist pxdlist;
	pxd_t *pxd;
	s64 next;

	xaddr = *xaddrp;
	xlen = *xlenp;
	jfs_info("xtAppend: xoff:0x%lx maxblocks:%d xlen:%d xaddr:0x%lx",
		 (ulong) xoff, maxblocks, xlen, (ulong) xaddr);

	/*
	 *	search for the entry location at which to insert:
	 *
	 * xtFastSearch() and xtSearch() both returns (leaf page
	 * pinned, index at which to insert).
	 * n.b. xtSearch() may return index of maxentry of
	 * the full page.
	 */
	if ((rc = xtSearch(ip, xoff, &next, &cmp, &btstack, XT_INSERT)))
		return rc;

	/* retrieve search result */
	XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);

	if (cmp == 0) {
		rc = -EEXIST;
		goto out;
	}

	/* cap the request so it does not run into the next extent */
	if (next)
		xlen = min(xlen, (int)(next - xoff));
//insert:
	/*
	 *	insert entry for new extent
	 */
	xflag |= XAD_NEW;

	/*
	 *	if the leaf page is full, split the page and
	 *	propagate up the router entry for the new page from split
	 *
	 * The xtSplitUp() will insert the entry and unpin the leaf page.
	 */
	nextindex = le16_to_cpu(p->header.nextindex);
	if (nextindex < le16_to_cpu(p->header.maxentry))
		goto insertLeaf;

	/*
	 * allocate new index blocks to cover index page split(s)
	 */
	nsplit = btstack.nsplit;
	split.pxdlist = &pxdlist;
	pxdlist.maxnpxd = pxdlist.npxd = 0;
	pxd = &pxdlist.pxd[0];
	nblocks = JFS_SBI(ip->i_sb)->nbperpage;
	for (; nsplit > 0; nsplit--, pxd++, xaddr += nblocks, maxblocks -= nblocks) {
		if ((rc = dbAllocBottomUp(ip, xaddr, (s64) nblocks)) == 0) {
			PXDaddress(pxd, xaddr);
			PXDlength(pxd, nblocks);

			pxdlist.maxnpxd++;

			continue;
		}

		/* undo allocation */

		goto out;
	}

	xlen = min(xlen, maxblocks);

	/*
	 * allocate data extent requested
	 */
	if ((rc = dbAllocBottomUp(ip, xaddr, (s64) xlen)))
		goto out;

	split.mp = mp;
	split.index = index;
	split.flag = xflag;
	split.off = xoff;
	split.len = xlen;
	split.addr = xaddr;
	if ((rc = xtSplitUp(tid, ip, &split, &btstack))) {
		/* undo data extent allocation */
		dbFree(ip, *xaddrp, (s64) * xlenp);

		return rc;
	}

	*xaddrp = xaddr;
	*xlenp = xlen;
	return 0;

	/*
	 *	insert the new entry into the leaf page
	 */
      insertLeaf:
	/*
	 * allocate data extent requested
	 */
	if ((rc = dbAllocBottomUp(ip, xaddr, (s64) xlen)))
		goto out;

	BT_MARK_DIRTY(mp, ip);
	/*
	 * acquire a transaction lock on the leaf page;
	 *
	 * action: xad insertion/extension;
	 */
	tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
	xtlck = (struct xtlock *) & tlck->lock;

	/* insert the new entry: mark the entry NEW */
	xad = &p->xad[index];
	XT_PUTENTRY(xad, xflag, xoff, xlen, xaddr);

	/* advance next available entry index */
	le16_add_cpu(&p->header.nextindex, 1);

	xtlck->lwm.offset =
	    (xtlck->lwm.offset) ? min(index,(int) xtlck->lwm.offset) : index;
	xtlck->lwm.length = le16_to_cpu(p->header.nextindex) -
	    xtlck->lwm.offset;

	*xaddrp = xaddr;
	*xlenp = xlen;

      out:
	/* unpin the leaf page */
	XT_PUTPAGE(mp);

	return rc;
}

#ifdef _STILL_TO_PORT

/* - TBD for defragmentation/reorganization -
 *
 *	xtDelete()
 *
 * function:
 *	delete the entry with the specified key.
 *
 * N.B.: whole extent of the entry is assumed to be deleted.
 *
 * parameter:
 *
 * return:
 *	ENOENT: if the entry is not found.
 *
 * exception:
 */
int xtDelete(tid_t tid, struct inode *ip, s64 xoff, s32 xlen, int flag)
{
	int rc = 0;
	struct btstack btstack;
	int cmp;
	s64 bn;
	struct metapage *mp;
	xtpage_t *p;
	int index, nextindex;
	struct tlock *tlck;
	struct xtlock *xtlck;

	/*
	 * find the matching entry; xtSearch() pins the page
	 */
	if ((rc = xtSearch(ip, xoff, NULL, &cmp, &btstack, 0)))
		return rc;

	XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
	if (cmp) {
		/* unpin the leaf page */
		XT_PUTPAGE(mp);
		return -ENOENT;
	}

	/*
	 * delete the entry from the leaf page
	 */
	nextindex = le16_to_cpu(p->header.nextindex);
	le16_add_cpu(&p->header.nextindex, -1);

	/*
	 * if the leaf page become empty, free the page
	 */
	if (p->header.nextindex == cpu_to_le16(XTENTRYSTART))
		return (xtDeleteUp(tid, ip, mp, p, &btstack));

	BT_MARK_DIRTY(mp, ip);
	/*
	 * acquire a transaction lock on the leaf page;
	 *
	 * action:xad deletion;
	 */
	tlck = txLock(tid, ip, mp, tlckXTREE);
	xtlck = (struct xtlock *) & tlck->lock;
	/* NOTE(review): unlike the live code paths in this file, the min()
	 * below lacks an (int) cast on xtlck->lwm.offset; this would trip
	 * the kernel's type-checking min() if _STILL_TO_PORT were enabled. */
	xtlck->lwm.offset =
	    (xtlck->lwm.offset) ? min(index, xtlck->lwm.offset) : index;

	/* if delete from middle, shift left/compact the remaining entries */
	if (index < nextindex - 1)
		memmove(&p->xad[index], &p->xad[index + 1],
			(nextindex - index - 1) * sizeof(xad_t));

	XT_PUTPAGE(mp);

	return 0;
}

/* - TBD for defragmentation/reorganization -
 *
 *	xtDeleteUp()
 *
 * function:
 *	free empty pages as propagating deletion up the tree
 *
 * parameter:
 *
 * return:
 */
static int
xtDeleteUp(tid_t tid, struct inode *ip,
	   struct metapage * fmp, xtpage_t * fp, struct btstack * btstack)
{
	int rc = 0;
	struct metapage *mp;
	xtpage_t *p;
	int index, nextindex;
	s64 xaddr;
	int xlen;
	struct btframe *parent;
	struct tlock *tlck;
	struct xtlock *xtlck;

	/*
	 * keep root leaf page which has become empty
	 */
	if (fp->header.flag & BT_ROOT) {
		/* keep the root page */
		fp->header.flag &= ~BT_INTERNAL;
		fp->header.flag |= BT_LEAF;
		fp->header.nextindex = cpu_to_le16(XTENTRYSTART);

		/* XT_PUTPAGE(fmp); */

		return 0;
	}

	/*
	 * free non-root leaf page
	 */
	if ((rc = xtRelink(tid, ip, fp))) {
		XT_PUTPAGE(fmp);
		return rc;
	}

	xaddr = addressPXD(&fp->header.self);
	xlen = lengthPXD(&fp->header.self);
	/* free the page extent */
	dbFree(ip, xaddr, (s64) xlen);

	/* free the buffer page */
	discard_metapage(fmp);

	/*
	 * propagate page deletion up the index tree
	 *
	 * If the delete from the parent page makes it empty,
	 * continue all the way up the tree.
	 * stop if the root page is reached (which is never deleted) or
	 * if the entry deletion does not empty the page.
	 */
	while ((parent = BT_POP(btstack)) != NULL) {
		/* get/pin the parent page <sp> */
		XT_GETPAGE(ip, parent->bn, mp, PSIZE, p, rc);
		if (rc)
			return rc;

		index = parent->index;

		/* delete the entry for the freed child page from parent.
		 */
		nextindex = le16_to_cpu(p->header.nextindex);

		/*
		 * the parent has the single entry being deleted:
		 * free the parent page which has become empty.
		 */
		if (nextindex == 1) {
			if (p->header.flag & BT_ROOT) {
				/* keep the root page */
				p->header.flag &= ~BT_INTERNAL;
				p->header.flag |= BT_LEAF;
				p->header.nextindex =
				    cpu_to_le16(XTENTRYSTART);

				/* XT_PUTPAGE(mp); */

				break;
			} else {
				/* free the parent page */
				if ((rc = xtRelink(tid, ip, p)))
					return rc;

				xaddr = addressPXD(&p->header.self);
				/* free the page extent */
				dbFree(ip, xaddr,
				       (s64) JFS_SBI(ip->i_sb)->nbperpage);

				/* unpin/free the buffer page */
				discard_metapage(mp);

				/* propagate up */
				continue;
			}
		}
		/*
		 * the parent has other entries remaining:
		 * delete the router entry from the parent page.
		 */
		else {
			BT_MARK_DIRTY(mp, ip);
			/*
			 * acquire a transaction lock on the leaf page;
			 *
			 * action:xad deletion;
			 */
			tlck = txLock(tid, ip, mp, tlckXTREE);
			xtlck = (struct xtlock *) & tlck->lock;
			/* NOTE(review): same missing (int) cast as in
			 * xtDelete() above. */
			xtlck->lwm.offset =
			    (xtlck->lwm.offset) ? min(index,
						      xtlck->lwm.
						      offset) : index;

			/* if delete from middle,
			 * shift left/compact the remaining entries in the page
			 */
			if (index < nextindex - 1)
				memmove(&p->xad[index], &p->xad[index + 1],
					(nextindex - index -
					 1) << L2XTSLOTSIZE);

			le16_add_cpu(&p->header.nextindex, -1);
			jfs_info("xtDeleteUp(entry): 0x%lx[%d]",
				 (ulong) parent->bn, index);
		}

		/* unpin the parent page */
		XT_PUTPAGE(mp);

		/* exit propagation up */
		break;
	}

	return 0;
}

/*
 * NAME:	xtRelocate()
 *
 * FUNCTION:	relocate xtpage or data extent of regular file;
 *		This function is mainly used by defragfs utility.
 *
 * NOTE:	This routine does not have the logic to handle
 *		uncommitted allocated extent. The caller should call
 *		txCommit() to commit all the allocation before call
 *		this routine.
 */
int
xtRelocate(tid_t tid, struct inode * ip, xad_t * oxad,	/* old XAD */
	   s64 nxaddr,		/* new xaddr */
	   int xtype)
{				/* extent type: XTPAGE or DATAEXT */
	int rc = 0;
	struct tblock *tblk;	/* NOTE(review): declared but never used in
				 * this function */
	struct tlock *tlck;
	struct xtlock *xtlck;
	struct metapage *mp, *pmp, *lmp, *rmp;	/* meta-page buffer */
	xtpage_t *p, *pp, *rp, *lp;	/* base B+-tree index page */
	xad_t *xad;
	pxd_t *pxd;
	s64 xoff, xsize;
	int xlen;
	s64 oxaddr, sxaddr, dxaddr, nextbn, prevbn;
	cbuf_t *cp;
	s64 offset, nbytes, nbrd, pno;
	int nb, npages, nblks;
	s64 bn;
	int cmp;
	int index;
	struct pxd_lock *pxdlock;
	struct btstack btstack;	/* traverse stack */

	xtype = xtype & EXTENT_TYPE;

	xoff = offsetXAD(oxad);
	oxaddr = addressXAD(oxad);
	xlen = lengthXAD(oxad);

	/* validate extent offset */
	offset = xoff << JFS_SBI(ip->i_sb)->l2bsize;
	if (offset >= ip->i_size)
		return -ESTALE;	/* stale extent */

	jfs_info("xtRelocate: xtype:%d xoff:0x%lx xlen:0x%x xaddr:0x%lx:0x%lx",
		 xtype, (ulong) xoff, xlen, (ulong) oxaddr, (ulong) nxaddr);

	/*
	 *	1. get and validate the parent xtpage/xad entry
	 *	covering the source extent to be relocated;
	 */
	if (xtype == DATAEXT) {
		/* search in leaf entry */
		rc = xtSearch(ip, xoff, NULL, &cmp, &btstack, 0);
		if (rc)
			return rc;

		/* retrieve search result */
		XT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index);

		if (cmp) {
			XT_PUTPAGE(pmp);
			return -ESTALE;
		}

		/* validate for exact match with a single entry */
		xad = &pp->xad[index];
		if (addressXAD(xad) != oxaddr || lengthXAD(xad) != xlen) {
			XT_PUTPAGE(pmp);
			return -ESTALE;
		}
	} else {		/* (xtype == XTPAGE) */

		/* search in internal entry */
		rc = xtSearchNode(ip, oxad, &cmp, &btstack, 0);
		if (rc)
			return rc;

		/* retrieve search result */
		XT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index);

		if (cmp) {
			XT_PUTPAGE(pmp);
			return -ESTALE;
		}

		/* xtSearchNode() validated for exact match with a single
		 * entry
		 */
		xad = &pp->xad[index];
	}
	jfs_info("xtRelocate: parent xad entry validated.");

	/*
	 *	2. relocate the extent
	 */
	if (xtype == DATAEXT) {
		/* if the extent is allocated-but-not-recorded
		 * there is no real data to be moved in this extent,
		 */
		if (xad->flag & XAD_NOTRECORDED)
			goto out;
		else
			/* release xtpage for cmRead()/xtLookup() */
			XT_PUTPAGE(pmp);

		/*
		 *	cmRelocate()
		 *
		 * copy target data pages to be relocated;
		 *
		 * data extent must start at page boundary and
		 * multiple of page size (except the last data extent);
		 * read in each page of the source data extent into cbuf,
		 * update the cbuf extent descriptor of the page to be
		 * homeward bound to new dst data extent
		 * copy the data from the old extent to new extent.
		 * copy is essential for compressed files to avoid problems
		 * that can arise if there was a change in compression
		 * algorithms.
		 * it is a good strategy because it may disrupt cache
		 * policy to keep the pages in memory afterwards.
		 */
		offset = xoff << JFS_SBI(ip->i_sb)->l2bsize;
		assert((offset & CM_OFFSET) == 0);
		nbytes = xlen << JFS_SBI(ip->i_sb)->l2bsize;
		pno = offset >> CM_L2BSIZE;
		npages = (nbytes + (CM_BSIZE - 1)) >> CM_L2BSIZE;
/*
		npages = ((offset + nbytes - 1) >> CM_L2BSIZE) -
			  (offset >> CM_L2BSIZE) + 1;
*/
		sxaddr = oxaddr;
		dxaddr = nxaddr;

		/* process the request one cache buffer at a time */
		for (nbrd = 0; nbrd < nbytes; nbrd += nb,
		     offset += nb, pno++, npages--) {
			/* compute page size */
			nb = min(nbytes - nbrd, CM_BSIZE);

			/* get the cache buffer of the page */
			/* NOTE(review): assignment-in-condition without the
			 * customary extra parentheses; would warn under
			 * -Wparentheses if this code were compiled. */
			if (rc = cmRead(ip, offset, npages, &cp))
				break;

			assert(addressPXD(&cp->cm_pxd) == sxaddr);
			assert(!cp->cm_modified);

			/* bind buffer with the new extent address */
			/* NOTE(review): JFS_IP() normally takes an inode,
			 * not a super_block; this looks like it should be
			 * JFS_SBI(ip->i_sb)->l2bsize — confirm before
			 * porting. */
			nblks = nb >> JFS_IP(ip->i_sb)->l2bsize;
			cmSetXD(ip, cp, pno, dxaddr, nblks);

			/* release the cbuf, mark it as modified */
			cmPut(cp, true);

			dxaddr += nblks;
			sxaddr += nblks;
		}

		/* get back parent page */
		if ((rc = xtSearch(ip, xoff, NULL, &cmp, &btstack, 0)))
			return rc;

		XT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index);
		jfs_info("xtRelocate: target data extent relocated.");
	} else {		/* (xtype == XTPAGE) */

		/*
		 * read in the target xtpage from the source extent;
		 */
		XT_GETPAGE(ip, oxaddr, mp, PSIZE, p, rc);
		if (rc) {
			XT_PUTPAGE(pmp);
			return rc;
		}

		/*
		 * read in sibling pages if any to update sibling pointers;
		 */
		rmp = NULL;
		if (p->header.next) {
			nextbn = le64_to_cpu(p->header.next);
			XT_GETPAGE(ip, nextbn, rmp, PSIZE, rp, rc);
			if (rc) {
				XT_PUTPAGE(pmp);
				XT_PUTPAGE(mp);
				return (rc);
			}
		}

		lmp = NULL;
		if (p->header.prev) {
			prevbn = le64_to_cpu(p->header.prev);
			XT_GETPAGE(ip, prevbn, lmp, PSIZE, lp, rc);
			if (rc) {
				XT_PUTPAGE(pmp);
				XT_PUTPAGE(mp);
				if (rmp)
					XT_PUTPAGE(rmp);
				return (rc);
			}
		}

		/* at this point, all xtpages to be updated are in memory */

		/*
		 * update sibling pointers of sibling xtpages if any;
		 */
		if (lmp) {
			BT_MARK_DIRTY(lmp, ip);
			tlck = txLock(tid, ip, lmp, tlckXTREE | tlckRELINK);
			lp->header.next = cpu_to_le64(nxaddr);
			XT_PUTPAGE(lmp);
		}

		if (rmp) {
			BT_MARK_DIRTY(rmp, ip);
			tlck = txLock(tid, ip, rmp, tlckXTREE | tlckRELINK);
			rp->header.prev = cpu_to_le64(nxaddr);
			XT_PUTPAGE(rmp);
		}

		/*
		 * update the target xtpage to be relocated
		 *
		 * update the self address of the target page
		 * and write to destination extent;
		 * redo image covers the whole xtpage since it is new page
		 * to the destination extent;
		 * update of bmap for the free of source extent
		 * of the target xtpage itself:
		 * update of bmap for the allocation of destination extent
		 * of the target xtpage itself:
		 * update of bmap for the extents covered by xad entries in
		 * the target xtpage is not necessary since they are not
		 * updated;
		 * if not committed before this relocation,
		 * target page may contain XAD_NEW entries which must
		 * be scanned for bmap update (logredo() always
		 * scan xtpage REDOPAGE image for bmap update);
		 * if committed before this relocation (tlckRELOCATE),
		 * scan may be skipped by commit() and logredo();
		 */
		BT_MARK_DIRTY(mp, ip);
		/* tlckNEW init xtlck->lwm.offset = XTENTRYSTART; */
		tlck = txLock(tid, ip, mp, tlckXTREE | tlckNEW);
		xtlck = (struct xtlock *) & tlck->lock;

		/* update the self address in the xtpage header */
		pxd = &p->header.self;
		PXDaddress(pxd, nxaddr);

		/* linelock for the after image of the whole page */
		xtlck->lwm.length =
		    le16_to_cpu(p->header.nextindex) - xtlck->lwm.offset;

		/* update the buffer extent descriptor of target xtpage */
		xsize = xlen << JFS_SBI(ip->i_sb)->l2bsize;
		bmSetXD(mp, nxaddr, xsize);

		/* unpin the target page to new homeward bound */
		XT_PUTPAGE(mp);
		jfs_info("xtRelocate: target xtpage relocated.");
	}

	/*
	 *	3. acquire maplock for the source extent to be freed;
	 *
	 * acquire a maplock saving the src relocated extent address;
	 * to free of the extent at commit time;
	 */
      out:
	/* if DATAEXT relocation, write a LOG_UPDATEMAP record for
	 * free PXD of the source data extent (logredo() will update
	 * bmap for free of source data extent), and update bmap for
	 * free of the source data extent;
	 */
	if (xtype == DATAEXT)
		tlck = txMaplock(tid, ip, tlckMAP);
	/* if XTPAGE relocation, write a LOG_NOREDOPAGE record
	 * for the source xtpage (logredo() will init NoRedoPage
	 * filter and will also update bmap for free of the source
	 * xtpage), and update bmap for free of the source xtpage;
	 * N.B. We use tlckMAP instead of tlkcXTREE because there
	 * is no buffer associated with this lock since the buffer
	 * has been redirected to the target location.
	 */
	else			/* (xtype == XTPAGE) */
		tlck = txMaplock(tid, ip, tlckMAP | tlckRELOCATE);

	pxdlock = (struct pxd_lock *) & tlck->lock;
	pxdlock->flag = mlckFREEPXD;
	PXDaddress(&pxdlock->pxd, oxaddr);
	PXDlength(&pxdlock->pxd, xlen);
	pxdlock->index = 1;

	/*
	 *	4. update the parent xad entry for relocation;
	 *
	 * acquire tlck for the parent entry with XAD_NEW as entry
	 * update which will write LOG_REDOPAGE and update bmap for
	 * allocation of XAD_NEW destination extent;
	 */
	jfs_info("xtRelocate: update parent xad entry.");
	BT_MARK_DIRTY(pmp, ip);
	tlck = txLock(tid, ip, pmp, tlckXTREE | tlckGROW);
	xtlck = (struct xtlock *) & tlck->lock;

	/* update the XAD with the new destination extent; */
	xad = &pp->xad[index];
	xad->flag |= XAD_NEW;
	XADaddress(xad, nxaddr);

	/* NOTE(review): min() without (int) cast on lwm.offset here, unlike
	 * the live code paths in this file. */
	xtlck->lwm.offset = min(index, xtlck->lwm.offset);
	xtlck->lwm.length = le16_to_cpu(pp->header.nextindex) -
	    xtlck->lwm.offset;

	/* unpin the parent xtpage */
	XT_PUTPAGE(pmp);

	return rc;
}

/*
 *	xtSearchNode()
 *
 * function:	search for the internal xad entry covering specified extent.
 *	This function is mainly used by defragfs utility.
 *
 * parameters:
 *	ip	- file object;
 *	xad	- extent to find;
 *	cmpp	- comparison result:
 *	btstack	- traverse stack;
 *	flag	- search process flag;
 *
 * returns:
 *	btstack contains (bn, index) of search path traversed to the entry.
 *	*cmpp is set to result of comparison with the entry returned.
 *	the page containing the entry is pinned at exit.
 */
static int xtSearchNode(struct inode *ip, xad_t * xad,	/* required XAD entry */
			int *cmpp, struct btstack * btstack, int flag)
{
	int rc = 0;
	s64 xoff, xaddr;
	int xlen;
	int cmp = 1;		/* init for empty page */
	s64 bn;			/* block number */
	struct metapage *mp;	/* meta-page buffer */
	xtpage_t *p;		/* page */
	int base, index, lim;
	struct btframe *btsp;
	s64 t64;

	BT_CLR(btstack);

	xoff = offsetXAD(xad);
	xlen = lengthXAD(xad);
	xaddr = addressXAD(xad);

	/*
	 *	search down tree from root:
	 *
	 * between two consecutive entries of <Ki, Pi> and <Kj, Pj> of
	 * internal page, child page Pi contains entry with k, Ki <= K < Kj.
* * if entry with search key K is not found * internal page search find the entry with largest key Ki * less than K which point to the child page to search; * leaf page search find the entry with smallest key Kj * greater than K so that the returned index is the position of * the entry to be shifted right for insertion of new entry. * for empty tree, search key is greater than any key of the tree. * * by convention, root bn = 0. */ for (bn = 0;;) { /* get/pin the page to search */ XT_GETPAGE(ip, bn, mp, PSIZE, p, rc); if (rc) return rc; if (p->header.flag & BT_LEAF) { XT_PUTPAGE(mp); return -ESTALE; } lim = le16_to_cpu(p->header.nextindex) - XTENTRYSTART; /* * binary search with search key K on the current page */ for (base = XTENTRYSTART; lim; lim >>= 1) { index = base + (lim >> 1); XT_CMP(cmp, xoff, &p->xad[index], t64); if (cmp == 0) { /* * search hit * * verify for exact match; */ if (xaddr == addressXAD(&p->xad[index]) && xoff == offsetXAD(&p->xad[index])) { *cmpp = cmp; /* save search result */ btsp = btstack->top; btsp->bn = bn; btsp->index = index; btsp->mp = mp; return 0; } /* descend/search its child page */ goto next; } if (cmp > 0) { base = index + 1; --lim; } } /* * search miss - non-leaf page: * * base is the smallest index with key (Kj) greater than * search key (K) and may be zero or maxentry index. * if base is non-zero, decrement base by one to get the parent * entry of the child page to search. */ index = base ? base - 1 : base; /* * go down to child page */ next: /* get the child page block number */ bn = addressXAD(&p->xad[index]); /* unpin the parent page */ XT_PUTPAGE(mp); } } /* * xtRelink() * * function: * link around a freed page. 
* * Parameter: * int tid, * struct inode *ip, * xtpage_t *p) * * returns: */ static int xtRelink(tid_t tid, struct inode *ip, xtpage_t * p) { int rc = 0; struct metapage *mp; s64 nextbn, prevbn; struct tlock *tlck; nextbn = le64_to_cpu(p->header.next); prevbn = le64_to_cpu(p->header.prev); /* update prev pointer of the next page */ if (nextbn != 0) { XT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc); if (rc) return rc; /* * acquire a transaction lock on the page; * * action: update prev pointer; */ BT_MARK_DIRTY(mp, ip); tlck = txLock(tid, ip, mp, tlckXTREE | tlckRELINK); /* the page may already have been tlock'd */ p->header.prev = cpu_to_le64(prevbn); XT_PUTPAGE(mp); } /* update next pointer of the previous page */ if (prevbn != 0) { XT_GETPAGE(ip, prevbn, mp, PSIZE, p, rc); if (rc) return rc; /* * acquire a transaction lock on the page; * * action: update next pointer; */ BT_MARK_DIRTY(mp, ip); tlck = txLock(tid, ip, mp, tlckXTREE | tlckRELINK); /* the page may already have been tlock'd */ p->header.next = le64_to_cpu(nextbn); XT_PUTPAGE(mp); } return 0; } #endif /* _STILL_TO_PORT */ /* * xtInitRoot() * * initialize file root (inline in inode) */ void xtInitRoot(tid_t tid, struct inode *ip) { xtpage_t *p; /* * acquire a transaction lock on the root * * action: */ txLock(tid, ip, (struct metapage *) &JFS_IP(ip)->bxflag, tlckXTREE | tlckNEW); p = &JFS_IP(ip)->i_xtroot; p->header.flag = DXD_INDEX | BT_ROOT | BT_LEAF; p->header.nextindex = cpu_to_le16(XTENTRYSTART); if (S_ISDIR(ip->i_mode)) p->header.maxentry = cpu_to_le16(XTROOTINITSLOT_DIR); else { p->header.maxentry = cpu_to_le16(XTROOTINITSLOT); ip->i_size = 0; } return; } /* * We can run into a deadlock truncating a file with a large number of * xtree pages (large fragmented file). A robust fix would entail a * reservation system where we would reserve a number of metadata pages * and tlocks which we would be guaranteed without a deadlock. 
Without * this, a partial fix is to limit number of metadata pages we will lock * in a single transaction. Currently we will truncate the file so that * no more than 50 leaf pages will be locked. The caller of xtTruncate * will be responsible for ensuring that the current transaction gets * committed, and that subsequent transactions are created to truncate * the file further if needed. */ #define MAX_TRUNCATE_LEAVES 50 /* * xtTruncate() * * function: * traverse for truncation logging backward bottom up; * terminate at the last extent entry at the current subtree * root page covering new down size. * truncation may occur within the last extent entry. * * parameter: * int tid, * struct inode *ip, * s64 newsize, * int type) {PWMAP, PMAP, WMAP; DELETE, TRUNCATE} * * return: * * note: * PWMAP: * 1. truncate (non-COMMIT_NOLINK file) * by jfs_truncate() or jfs_open(O_TRUNC): * xtree is updated; * 2. truncate index table of directory when last entry removed * map update via tlock at commit time; * PMAP: * Call xtTruncate_pmap instead * WMAP: * 1. remove (free zero link count) on last reference release * (pmap has been freed at commit zero link count); * 2. 
truncate (COMMIT_NOLINK file, i.e., tmp file): * xtree is updated; * map update directly at truncation time; * * if (DELETE) * no LOG_NOREDOPAGE is required (NOREDOFILE is sufficient); * else if (TRUNCATE) * must write LOG_NOREDOPAGE for deleted index page; * * pages may already have been tlocked by anonymous transactions * during file growth (i.e., write) before truncation; * * except last truncated entry, deleted entries remains as is * in the page (nextindex is updated) for other use * (e.g., log/update allocation map): this avoid copying the page * info but delay free of pages; * */ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag) { int rc = 0; s64 teof; struct metapage *mp; xtpage_t *p; s64 bn; int index, nextindex; xad_t *xad; s64 xoff, xaddr; int xlen, len, freexlen; struct btstack btstack; struct btframe *parent; struct tblock *tblk = NULL; struct tlock *tlck = NULL; struct xtlock *xtlck = NULL; struct xdlistlock xadlock; /* maplock for COMMIT_WMAP */ struct pxd_lock *pxdlock; /* maplock for COMMIT_WMAP */ s64 nfreed; int freed, log; int locked_leaves = 0; /* save object truncation type */ if (tid) { tblk = tid_to_tblock(tid); tblk->xflag |= flag; } nfreed = 0; flag &= COMMIT_MAP; assert(flag != COMMIT_PMAP); if (flag == COMMIT_PWMAP) log = 1; else { log = 0; xadlock.flag = mlckFREEXADLIST; xadlock.index = 1; } /* * if the newsize is not an integral number of pages, * the file between newsize and next page boundary will * be cleared. * if truncating into a file hole, it will cause * a full block to be allocated for the logical block. */ /* * release page blocks of truncated region <teof, eof> * * free the data blocks from the leaf index blocks. * delete the parent index entries corresponding to * the freed child data/index blocks. * free the index blocks themselves which aren't needed * in new sized file. * * index blocks are updated only if the blocks are to be * retained in the new sized file. 
* if type is PMAP, the data and index pages are NOT * freed, and the data and index blocks are NOT freed * from working map. * (this will allow continued access of data/index of * temporary file (zerolink count file truncated to zero-length)). */ teof = (newsize + (JFS_SBI(ip->i_sb)->bsize - 1)) >> JFS_SBI(ip->i_sb)->l2bsize; /* clear stack */ BT_CLR(&btstack); /* * start with root * * root resides in the inode */ bn = 0; /* * first access of each page: */ getPage: XT_GETPAGE(ip, bn, mp, PSIZE, p, rc); if (rc) return rc; /* process entries backward from last index */ index = le16_to_cpu(p->header.nextindex) - 1; /* Since this is the rightmost page at this level, and we may have * already freed a page that was formerly to the right, let's make * sure that the next pointer is zero. */ if (p->header.next) { if (log) /* * Make sure this change to the header is logged. * If we really truncate this leaf, the flag * will be changed to tlckTRUNCATE */ tlck = txLock(tid, ip, mp, tlckXTREE|tlckGROW); BT_MARK_DIRTY(mp, ip); p->header.next = 0; } if (p->header.flag & BT_INTERNAL) goto getChild; /* * leaf page */ freed = 0; /* does region covered by leaf page precede Teof ? 
*/ xad = &p->xad[index]; xoff = offsetXAD(xad); xlen = lengthXAD(xad); if (teof >= xoff + xlen) { XT_PUTPAGE(mp); goto getParent; } /* (re)acquire tlock of the leaf page */ if (log) { if (++locked_leaves > MAX_TRUNCATE_LEAVES) { /* * We need to limit the size of the transaction * to avoid exhausting pagecache & tlocks */ XT_PUTPAGE(mp); newsize = (xoff + xlen) << JFS_SBI(ip->i_sb)->l2bsize; goto getParent; } tlck = txLock(tid, ip, mp, tlckXTREE); tlck->type = tlckXTREE | tlckTRUNCATE; xtlck = (struct xtlock *) & tlck->lock; xtlck->hwm.offset = le16_to_cpu(p->header.nextindex) - 1; } BT_MARK_DIRTY(mp, ip); /* * scan backward leaf page entries */ for (; index >= XTENTRYSTART; index--) { xad = &p->xad[index]; xoff = offsetXAD(xad); xlen = lengthXAD(xad); xaddr = addressXAD(xad); /* * The "data" for a directory is indexed by the block * device's address space. This metadata must be invalidated * here */ if (S_ISDIR(ip->i_mode) && (teof == 0)) invalidate_xad_metapages(ip, *xad); /* * entry beyond eof: continue scan of current page * xad * ---|---=======-------> * eof */ if (teof < xoff) { nfreed += xlen; continue; } /* * (xoff <= teof): last entry to be deleted from page; * If other entries remain in page: keep and update the page. */ /* * eof == entry_start: delete the entry * xad * -------|=======-------> * eof * */ if (teof == xoff) { nfreed += xlen; if (index == XTENTRYSTART) break; nextindex = index; } /* * eof within the entry: truncate the entry. * xad * -------===|===-------> * eof */ else if (teof < xoff + xlen) { /* update truncated entry */ len = teof - xoff; freexlen = xlen - len; XADlength(xad, len); /* save pxd of truncated extent in tlck */ xaddr += len; if (log) { /* COMMIT_PWMAP */ xtlck->lwm.offset = (xtlck->lwm.offset) ? 
min(index, (int)xtlck->lwm.offset) : index; xtlck->lwm.length = index + 1 - xtlck->lwm.offset; xtlck->twm.offset = index; pxdlock = (struct pxd_lock *) & xtlck->pxdlock; pxdlock->flag = mlckFREEPXD; PXDaddress(&pxdlock->pxd, xaddr); PXDlength(&pxdlock->pxd, freexlen); } /* free truncated extent */ else { /* COMMIT_WMAP */ pxdlock = (struct pxd_lock *) & xadlock; pxdlock->flag = mlckFREEPXD; PXDaddress(&pxdlock->pxd, xaddr); PXDlength(&pxdlock->pxd, freexlen); txFreeMap(ip, pxdlock, NULL, COMMIT_WMAP); /* reset map lock */ xadlock.flag = mlckFREEXADLIST; } /* current entry is new last entry; */ nextindex = index + 1; nfreed += freexlen; } /* * eof beyond the entry: * xad * -------=======---|---> * eof */ else { /* (xoff + xlen < teof) */ nextindex = index + 1; } if (nextindex < le16_to_cpu(p->header.nextindex)) { if (!log) { /* COMMIT_WAMP */ xadlock.xdlist = &p->xad[nextindex]; xadlock.count = le16_to_cpu(p->header.nextindex) - nextindex; txFreeMap(ip, (struct maplock *) & xadlock, NULL, COMMIT_WMAP); } p->header.nextindex = cpu_to_le16(nextindex); } XT_PUTPAGE(mp); /* assert(freed == 0); */ goto getParent; } /* end scan of leaf page entries */ freed = 1; /* * leaf page become empty: free the page if type != PMAP */ if (log) { /* COMMIT_PWMAP */ /* txCommit() with tlckFREE: * free data extents covered by leaf [XTENTRYSTART:hwm); * invalidate leaf if COMMIT_PWMAP; * if (TRUNCATE), will write LOG_NOREDOPAGE; */ tlck->type = tlckXTREE | tlckFREE; } else { /* COMMIT_WAMP */ /* free data extents covered by leaf */ xadlock.xdlist = &p->xad[XTENTRYSTART]; xadlock.count = le16_to_cpu(p->header.nextindex) - XTENTRYSTART; txFreeMap(ip, (struct maplock *) & xadlock, NULL, COMMIT_WMAP); } if (p->header.flag & BT_ROOT) { p->header.flag &= ~BT_INTERNAL; p->header.flag |= BT_LEAF; p->header.nextindex = cpu_to_le16(XTENTRYSTART); XT_PUTPAGE(mp); /* debug */ goto out; } else { if (log) { /* COMMIT_PWMAP */ /* page will be invalidated at tx completion */ XT_PUTPAGE(mp); } else { /* 
COMMIT_WMAP */ if (mp->lid) lid_to_tlock(mp->lid)->flag |= tlckFREELOCK; /* invalidate empty leaf page */ discard_metapage(mp); } } /* * the leaf page become empty: delete the parent entry * for the leaf page if the parent page is to be kept * in the new sized file. */ /* * go back up to the parent page */ getParent: /* pop/restore parent entry for the current child page */ if ((parent = BT_POP(&btstack)) == NULL) /* current page must have been root */ goto out; /* get back the parent page */ bn = parent->bn; XT_GETPAGE(ip, bn, mp, PSIZE, p, rc); if (rc) return rc; index = parent->index; /* * child page was not empty: */ if (freed == 0) { /* has any entry deleted from parent ? */ if (index < le16_to_cpu(p->header.nextindex) - 1) { /* (re)acquire tlock on the parent page */ if (log) { /* COMMIT_PWMAP */ /* txCommit() with tlckTRUNCATE: * free child extents covered by parent [); */ tlck = txLock(tid, ip, mp, tlckXTREE); xtlck = (struct xtlock *) & tlck->lock; if (!(tlck->type & tlckTRUNCATE)) { xtlck->hwm.offset = le16_to_cpu(p->header. nextindex) - 1; tlck->type = tlckXTREE | tlckTRUNCATE; } } else { /* COMMIT_WMAP */ /* free child extents covered by parent */ xadlock.xdlist = &p->xad[index + 1]; xadlock.count = le16_to_cpu(p->header.nextindex) - index - 1; txFreeMap(ip, (struct maplock *) & xadlock, NULL, COMMIT_WMAP); } BT_MARK_DIRTY(mp, ip); p->header.nextindex = cpu_to_le16(index + 1); } XT_PUTPAGE(mp); goto getParent; } /* * child page was empty: */ nfreed += lengthXAD(&p->xad[index]); /* * During working map update, child page's tlock must be handled * before parent's. This is because the parent's tlock will cause * the child's disk space to be marked available in the wmap, so * it's important that the child page be released by that time. * * ToDo: tlocks should be on doubly-linked list, so we can * quickly remove it and add it to the end. 
*/ /* * Move parent page's tlock to the end of the tid's tlock list */ if (log && mp->lid && (tblk->last != mp->lid) && lid_to_tlock(mp->lid)->tid) { lid_t lid = mp->lid; struct tlock *prev; tlck = lid_to_tlock(lid); if (tblk->next == lid) tblk->next = tlck->next; else { for (prev = lid_to_tlock(tblk->next); prev->next != lid; prev = lid_to_tlock(prev->next)) { assert(prev->next); } prev->next = tlck->next; } lid_to_tlock(tblk->last)->next = lid; tlck->next = 0; tblk->last = lid; } /* * parent page become empty: free the page */ if (index == XTENTRYSTART) { if (log) { /* COMMIT_PWMAP */ /* txCommit() with tlckFREE: * free child extents covered by parent; * invalidate parent if COMMIT_PWMAP; */ tlck = txLock(tid, ip, mp, tlckXTREE); xtlck = (struct xtlock *) & tlck->lock; xtlck->hwm.offset = le16_to_cpu(p->header.nextindex) - 1; tlck->type = tlckXTREE | tlckFREE; } else { /* COMMIT_WMAP */ /* free child extents covered by parent */ xadlock.xdlist = &p->xad[XTENTRYSTART]; xadlock.count = le16_to_cpu(p->header.nextindex) - XTENTRYSTART; txFreeMap(ip, (struct maplock *) & xadlock, NULL, COMMIT_WMAP); } BT_MARK_DIRTY(mp, ip); if (p->header.flag & BT_ROOT) { p->header.flag &= ~BT_INTERNAL; p->header.flag |= BT_LEAF; p->header.nextindex = cpu_to_le16(XTENTRYSTART); if (le16_to_cpu(p->header.maxentry) == XTROOTMAXSLOT) { /* * Shrink root down to allow inline * EA (otherwise fsck complains) */ p->header.maxentry = cpu_to_le16(XTROOTINITSLOT); JFS_IP(ip)->mode2 |= INLINEEA; } XT_PUTPAGE(mp); /* debug */ goto out; } else { if (log) { /* COMMIT_PWMAP */ /* page will be invalidated at tx completion */ XT_PUTPAGE(mp); } else { /* COMMIT_WMAP */ if (mp->lid) lid_to_tlock(mp->lid)->flag |= tlckFREELOCK; /* invalidate parent page */ discard_metapage(mp); } /* parent has become empty and freed: * go back up to its parent page */ /* freed = 1; */ goto getParent; } } /* * parent page still has entries for front region; */ else { /* try truncate region covered by preceding entry * 
(process backward) */ index--; /* go back down to the child page corresponding * to the entry */ goto getChild; } /* * internal page: go down to child page of current entry */ getChild: /* save current parent entry for the child page */ if (BT_STACK_FULL(&btstack)) { jfs_error(ip->i_sb, "stack overrun in xtTruncate!"); XT_PUTPAGE(mp); return -EIO; } BT_PUSH(&btstack, bn, index); /* get child page */ xad = &p->xad[index]; bn = addressXAD(xad); /* * first access of each internal entry: */ /* release parent page */ XT_PUTPAGE(mp); /* process the child page */ goto getPage; out: /* * update file resource stat */ /* set size */ if (S_ISDIR(ip->i_mode) && !newsize) ip->i_size = 1; /* fsck hates zero-length directories */ else ip->i_size = newsize; /* update quota allocation to reflect freed blocks */ dquot_free_block(ip, nfreed); /* * free tlock of invalidated pages */ if (flag == COMMIT_WMAP) txFreelock(ip); return newsize; } /* * xtTruncate_pmap() * * function: * Perform truncate to zero length for deleted file, leaving the * the xtree and working map untouched. This allows the file to * be accessed via open file handles, while the delete of the file * is committed to disk. * * parameter: * tid_t tid, * struct inode *ip, * s64 committed_size) * * return: new committed size * * note: * * To avoid deadlock by holding too many transaction locks, the * truncation may be broken up into multiple transactions. * The committed_size keeps track of part of the file has been * freed from the pmaps. 
*/ s64 xtTruncate_pmap(tid_t tid, struct inode *ip, s64 committed_size) { s64 bn; struct btstack btstack; int cmp; int index; int locked_leaves = 0; struct metapage *mp; xtpage_t *p; struct btframe *parent; int rc; struct tblock *tblk; struct tlock *tlck = NULL; xad_t *xad; int xlen; s64 xoff; struct xtlock *xtlck = NULL; /* save object truncation type */ tblk = tid_to_tblock(tid); tblk->xflag |= COMMIT_PMAP; /* clear stack */ BT_CLR(&btstack); if (committed_size) { xoff = (committed_size >> JFS_SBI(ip->i_sb)->l2bsize) - 1; rc = xtSearch(ip, xoff, NULL, &cmp, &btstack, 0); if (rc) return rc; XT_GETSEARCH(ip, btstack.top, bn, mp, p, index); if (cmp != 0) { XT_PUTPAGE(mp); jfs_error(ip->i_sb, "xtTruncate_pmap: did not find extent"); return -EIO; } } else { /* * start with root * * root resides in the inode */ bn = 0; /* * first access of each page: */ getPage: XT_GETPAGE(ip, bn, mp, PSIZE, p, rc); if (rc) return rc; /* process entries backward from last index */ index = le16_to_cpu(p->header.nextindex) - 1; if (p->header.flag & BT_INTERNAL) goto getChild; } /* * leaf page */ if (++locked_leaves > MAX_TRUNCATE_LEAVES) { /* * We need to limit the size of the transaction * to avoid exhausting pagecache & tlocks */ xad = &p->xad[index]; xoff = offsetXAD(xad); xlen = lengthXAD(xad); XT_PUTPAGE(mp); return (xoff + xlen) << JFS_SBI(ip->i_sb)->l2bsize; } tlck = txLock(tid, ip, mp, tlckXTREE); tlck->type = tlckXTREE | tlckFREE; xtlck = (struct xtlock *) & tlck->lock; xtlck->hwm.offset = index; XT_PUTPAGE(mp); /* * go back up to the parent page */ getParent: /* pop/restore parent entry for the current child page */ if ((parent = BT_POP(&btstack)) == NULL) /* current page must have been root */ goto out; /* get back the parent page */ bn = parent->bn; XT_GETPAGE(ip, bn, mp, PSIZE, p, rc); if (rc) return rc; index = parent->index; /* * parent page become empty: free the page */ if (index == XTENTRYSTART) { /* txCommit() with tlckFREE: * free child extents covered by parent; * 
invalidate parent if COMMIT_PWMAP; */ tlck = txLock(tid, ip, mp, tlckXTREE); xtlck = (struct xtlock *) & tlck->lock; xtlck->hwm.offset = le16_to_cpu(p->header.nextindex) - 1; tlck->type = tlckXTREE | tlckFREE; XT_PUTPAGE(mp); if (p->header.flag & BT_ROOT) { goto out; } else { goto getParent; } } /* * parent page still has entries for front region; */ else index--; /* * internal page: go down to child page of current entry */ getChild: /* save current parent entry for the child page */ if (BT_STACK_FULL(&btstack)) { jfs_error(ip->i_sb, "stack overrun in xtTruncate_pmap!"); XT_PUTPAGE(mp); return -EIO; } BT_PUSH(&btstack, bn, index); /* get child page */ xad = &p->xad[index]; bn = addressXAD(xad); /* * first access of each internal entry: */ /* release parent page */ XT_PUTPAGE(mp); /* process the child page */ goto getPage; out: return 0; } #ifdef CONFIG_JFS_STATISTICS static int jfs_xtstat_proc_show(struct seq_file *m, void *v) { seq_printf(m, "JFS Xtree statistics\n" "====================\n" "searches = %d\n" "fast searches = %d\n" "splits = %d\n", xtStat.search, xtStat.fastSearch, xtStat.split); return 0; } static int jfs_xtstat_proc_open(struct inode *inode, struct file *file) { return single_open(file, jfs_xtstat_proc_show, NULL); } const struct file_operations jfs_xtstat_proc_fops = { .owner = THIS_MODULE, .open = jfs_xtstat_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; #endif
ElectryDev/android_kernel_kingdom_row
fs/jfs/jfs_xtree.c
C
gpl-2.0
94,348
/* -*- Mode: C; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street - Suite 500, Boston, MA 02110-1335, USA. */ #include "nemo-separator-action.h" G_DEFINE_TYPE (NemoSeparatorAction, nemo_separator_action, GTK_TYPE_ACTION); static void nemo_separator_action_init (NemoSeparatorAction *action); static void nemo_separator_action_class_init (NemoSeparatorActionClass *klass); static GtkWidget *create_menu_item (GtkAction *action); static GtkWidget *create_tool_item (GtkAction *action); static gpointer parent_class; static void nemo_separator_action_init (NemoSeparatorAction *action) { } static void nemo_separator_action_class_init (NemoSeparatorActionClass *klass) { GtkActionClass *action_class = GTK_ACTION_CLASS (klass); parent_class = g_type_class_peek_parent (klass); action_class->create_menu_item = create_menu_item; action_class->create_tool_item = create_tool_item; action_class->menu_item_type = GTK_TYPE_SEPARATOR_MENU_ITEM; action_class->toolbar_item_type = GTK_TYPE_SEPARATOR_TOOL_ITEM; } GtkAction * nemo_separator_action_new (const gchar *name) { return g_object_new (NEMO_TYPE_SEPARATOR_ACTION, "name", name, NULL); } static GtkWidget * create_menu_item (GtkAction *action) { GType menu_item_type; GtkWidget *ret; menu_item_type = GTK_ACTION_GET_CLASS (action)->menu_item_type; ret = g_object_new (menu_item_type, NULL); 
gtk_activatable_set_use_action_appearance (GTK_ACTIVATABLE (ret), FALSE); return ret; } static GtkWidget * create_tool_item (GtkAction *action) { g_warning ("NemoSeparatorAction: Toolbar items unsupported at this time."); return NULL; }
Kulmerov/nemo
libnemo-private/nemo-separator-action.c
C
gpl-2.0
2,449
<?php /** * File containing a test class * * @copyright Copyright (C) eZ Systems AS. All rights reserved. * @license For full copyright and license information view LICENSE file distributed with this source code. * @version 2014.11.1 */ namespace eZ\Publish\Core\REST\Server\Tests\Output\ValueObjectVisitor; use eZ\Publish\Core\REST\Common\Tests\Output\ValueObjectVisitorBaseTest; use eZ\Publish\Core\REST\Server\Output\ValueObjectVisitor; use eZ\Publish\Core\Repository\Values\User; use eZ\Publish\Core\REST\Common; class RoleTest extends ValueObjectVisitorBaseTest { /** * Test the Role visitor * * @return string */ public function testVisit() { $visitor = $this->getVisitor(); $generator = $this->getGenerator(); $generator->startDocument( null ); $role = new User\Role( array( 'id' => 42, 'identifier' => 'some-role', /* @todo uncomment when support for multilingual names and descriptions is added 'mainLanguageCode' => 'eng-GB', 'names' => array( 'eng-GB' => 'Role name EN', 'eng-US' => 'Role name EN US', ), 'descriptions' => array( 'eng-GB' => 'Role description EN', 'eng-US' => 'Role description EN US', ) */ ) ); $this->addRouteExpectation( 'ezpublish_rest_loadRole', array( 'roleId' => $role->id ), "/user/roles/{$role->id}" ); $this->addRouteExpectation( 'ezpublish_rest_loadPolicies', array( 'roleId' => $role->id ), "/user/roles/{$role->id}/policies" ); $visitor->visit( $this->getVisitorMock(), $generator, $role ); $result = $generator->endDocument( null ); $this->assertNotNull( $result ); return $result; } /** * Test if result contains Role element * * @param string $result * * @depends testVisit */ public function testResultContainsRoleElement( $result ) { $this->assertTag( array( 'tag' => 'Role', 'children' => array( 'count' => 2 ) ), $result, 'Invalid <Role> element.', false ); } /** * Test if result contains Role element attributes * * @param string $result * * @depends testVisit */ public function testResultContainsRoleAttributes( $result ) { $this->assertTag( 
array( 'tag' => 'Role', 'attributes' => array( 'media-type' => 'application/vnd.ez.api.Role+xml', 'href' => '/user/roles/42', ) ), $result, 'Invalid <Role> attributes.', false ); } /** * Test if result contains identifier value element * * @param string $result * * @depends testVisit */ public function testResultContainsIdentifierValueElement( $result ) { $this->assertTag( array( 'tag' => 'identifier', 'content' => 'some-role' ), $result, 'Invalid or non-existing <Role> identifier value element.', false ); } /** * Test if result contains mainLanguageCode value element * * @param string $result * * @depends testVisit */ public function testResultContainsMainLanguageCodeValueElement( $result ) { $this->markTestSkipped( '@todo uncomment when support for multilingual names and descriptions is added' ); $this->assertTag( array( 'tag' => 'mainLanguageCode', 'content' => 'eng-GB' ), $result, 'Invalid or non-existing <Role> mainLanguageCode value element.', false ); } /** * Test if result contains names element * * @param string $result * * @depends testVisit */ public function testResultContainsNamesElement( $result ) { $this->markTestSkipped( '@todo uncomment when support for multilingual names and descriptions is added' ); $this->assertTag( array( 'tag' => 'names', 'children' => array( 'count' => 2 ) ), $result, 'Invalid <names> element.', false ); } /** * Test if result contains descriptions element * * @param string $result * * @depends testVisit */ public function testResultContainsDescriptionsElement( $result ) { $this->markTestSkipped( '@todo uncomment when support for multilingual names and descriptions is added' ); $this->assertTag( array( 'tag' => 'descriptions', 'children' => array( 'count' => 2 ) ), $result, 'Invalid <descriptions> element.', false ); } /** * Test if result contains Policies element * * @param string $result * * @depends testVisit */ public function testResultContainsPoliciesElement( $result ) { $this->assertTag( array( 'tag' => 'Policies' ), 
$result, 'Invalid <Policies> element.', false ); } /** * Test if result contains Policies element attributes * * @param string $result * * @depends testVisit */ public function testResultContainsPoliciesAttributes( $result ) { $this->assertTag( array( 'tag' => 'Policies', 'attributes' => array( 'media-type' => 'application/vnd.ez.api.PolicyList+xml', 'href' => '/user/roles/42/policies', ) ), $result, 'Invalid <Policies> attributes.', false ); } /** * Get the Role visitor * * @return \eZ\Publish\Core\REST\Server\Output\ValueObjectVisitor\Role */ protected function internalGetVisitor() { return new ValueObjectVisitor\Role; } }
CG77/ezpublish-kernel
eZ/Publish/Core/REST/Server/Tests/Output/ValueObjectVisitor/RoleTest.php
PHP
gpl-2.0
6,636
/* * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/init.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/of.h> #include <linux/regulator/consumer.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/of_gpio.h> #include <linux/clk/msm-clk.h> #include <soc/qcom/subsystem_restart.h> #include <soc/qcom/ramdump.h> #include <soc/qcom/smem.h> #include <soc/qcom/smsm.h> #include "peripheral-loader.h" #include "pil-q6v5.h" #include "pil-msa.h" #define MAX_VDD_MSS_UV 1150000 #define PROXY_TIMEOUT_MS 10000 #define MAX_SSR_REASON_LEN 81U #define STOP_ACK_TIMEOUT_MS 1000 #define subsys_to_drv(d) container_of(d, struct modem_data, subsys_desc) static void log_modem_sfr(void) { u32 size; char *smem_reason, reason[MAX_SSR_REASON_LEN]; smem_reason = smem_get_entry_no_rlock(SMEM_SSR_REASON_MSS0, &size, 0, SMEM_ANY_HOST_FLAG); if (!smem_reason || !size) { pr_err("modem subsystem failure reason: (unknown, smem_get_entry_no_rlock failed).\n"); return; } if (!smem_reason[0]) { pr_err("modem subsystem failure reason: (unknown, empty string found).\n"); return; } strlcpy(reason, smem_reason, min(size, MAX_SSR_REASON_LEN)); pr_err("modem subsystem failure reason: %s.\n", reason); smem_reason[0] = '\0'; wmb(); } static void restart_modem(struct modem_data *drv) { log_modem_sfr(); 
drv->ignore_errors = true; subsystem_restart_dev(drv->subsys); } static irqreturn_t modem_err_fatal_intr_handler(int irq, void *dev_id) { struct modem_data *drv = subsys_to_drv(dev_id); /* Ignore if we're the one that set the force stop GPIO */ if (drv->crash_shutdown) return IRQ_HANDLED; pr_err("Fatal error on the modem.\n"); subsys_set_crash_status(drv->subsys, true); restart_modem(drv); return IRQ_HANDLED; } static irqreturn_t modem_stop_ack_intr_handler(int irq, void *dev_id) { struct modem_data *drv = subsys_to_drv(dev_id); pr_info("Received stop ack interrupt from modem\n"); complete(&drv->stop_ack); return IRQ_HANDLED; } static int modem_shutdown(const struct subsys_desc *subsys, bool force_stop) { struct modem_data *drv = subsys_to_drv(subsys); unsigned long ret; if (subsys->is_not_loadable) return 0; if (!subsys_get_crash_status(drv->subsys) && force_stop && subsys->force_stop_gpio) { gpio_set_value(subsys->force_stop_gpio, 1); ret = wait_for_completion_timeout(&drv->stop_ack, msecs_to_jiffies(STOP_ACK_TIMEOUT_MS)); if (!ret) pr_warn("Timed out on stop ack from modem.\n"); gpio_set_value(subsys->force_stop_gpio, 0); } if (drv->subsys_desc.ramdump_disable_gpio) { drv->subsys_desc.ramdump_disable = gpio_get_value( drv->subsys_desc.ramdump_disable_gpio); pr_warn("Ramdump disable gpio value is %d\n", drv->subsys_desc.ramdump_disable); } pil_shutdown(&drv->q6->desc); return 0; } static int modem_powerup(const struct subsys_desc *subsys) { struct modem_data *drv = subsys_to_drv(subsys); if (subsys->is_not_loadable) return 0; /* * At this time, the modem is shutdown. Therefore this function cannot * run concurrently with the watchdog bite error handler, making it safe * to unset the flag below. 
*/ INIT_COMPLETION(drv->stop_ack); drv->subsys_desc.ramdump_disable = 0; drv->ignore_errors = false; return pil_boot(&drv->q6->desc); } static void modem_crash_shutdown(const struct subsys_desc *subsys) { struct modem_data *drv = subsys_to_drv(subsys); drv->crash_shutdown = true; if (!subsys_get_crash_status(drv->subsys) && subsys->force_stop_gpio) { gpio_set_value(subsys->force_stop_gpio, 1); mdelay(STOP_ACK_TIMEOUT_MS); } } static void modem_free_memory(const struct subsys_desc *subsys) { struct modem_data *drv = subsys_to_drv(subsys); pil_free_memory(&drv->q6->desc); } static int modem_ramdump(int enable, const struct subsys_desc *subsys) { struct modem_data *drv = subsys_to_drv(subsys); int ret; if (!enable) return 0; ret = pil_mss_make_proxy_votes(&drv->q6->desc); if (ret) return ret; ret = pil_mss_reset_load_mba(&drv->q6->desc); if (ret) return ret; ret = pil_do_ramdump(&drv->q6->desc, drv->ramdump_dev); if (ret < 0) pr_err("Unable to dump modem fw memory (rc = %d).\n", ret); ret = pil_mss_deinit_image(&drv->q6->desc); if (ret < 0) pr_err("Unable to free up resources (rc = %d).\n", ret); pil_mss_remove_proxy_votes(&drv->q6->desc); return ret; } static irqreturn_t modem_wdog_bite_intr_handler(int irq, void *dev_id) { struct modem_data *drv = subsys_to_drv(dev_id); if (drv->ignore_errors) return IRQ_HANDLED; pr_err("Watchdog bite received from modem software!\n"); if (drv->subsys_desc.system_debug && !gpio_get_value(drv->subsys_desc.err_fatal_gpio)) panic("%s: System ramdump requested. 
Triggering device restart!\n", __func__); subsys_set_crash_status(drv->subsys, true); restart_modem(drv); return IRQ_HANDLED; } static int pil_subsys_init(struct modem_data *drv, struct platform_device *pdev) { int ret; drv->subsys_desc.name = "modem"; drv->subsys_desc.dev = &pdev->dev; drv->subsys_desc.owner = THIS_MODULE; drv->subsys_desc.shutdown = modem_shutdown; drv->subsys_desc.powerup = modem_powerup; drv->subsys_desc.ramdump = modem_ramdump; drv->subsys_desc.free_memory = modem_free_memory; drv->subsys_desc.crash_shutdown = modem_crash_shutdown; drv->subsys_desc.err_fatal_handler = modem_err_fatal_intr_handler; drv->subsys_desc.stop_ack_handler = modem_stop_ack_intr_handler; drv->subsys_desc.wdog_bite_handler = modem_wdog_bite_intr_handler; drv->subsys = subsys_register(&drv->subsys_desc); if (IS_ERR(drv->subsys)) { ret = PTR_ERR(drv->subsys); goto err_subsys; } drv->ramdump_dev = create_ramdump_device("modem", &pdev->dev); if (!drv->ramdump_dev) { pr_err("%s: Unable to create a modem ramdump device.\n", __func__); ret = -ENOMEM; goto err_ramdump; } return 0; err_ramdump: subsys_unregister(drv->subsys); err_subsys: return ret; } static int pil_mss_loadable_init(struct modem_data *drv, struct platform_device *pdev) { struct q6v5_data *q6; struct pil_desc *q6_desc; struct resource *res; struct property *prop; int ret; q6 = pil_q6v5_init(pdev); if (IS_ERR(q6)) return PTR_ERR(q6); drv->q6 = q6; drv->xo = q6->xo; q6_desc = &q6->desc; q6_desc->owner = THIS_MODULE; q6_desc->proxy_timeout = PROXY_TIMEOUT_MS; q6_desc->ops = &pil_msa_mss_ops; q6->self_auth = of_property_read_bool(pdev->dev.of_node, "qcom,pil-self-auth"); if (q6->self_auth) { res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb_base"); q6->rmb_base = devm_request_and_ioremap(&pdev->dev, res); if (!q6->rmb_base) return -ENOMEM; drv->rmb_base = q6->rmb_base; q6_desc->ops = &pil_msa_mss_ops_selfauth; } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "restart_reg"); if (!res) { res = 
platform_get_resource_byname(pdev, IORESOURCE_MEM, "restart_reg_sec"); q6->restart_reg_sec = true; } q6->restart_reg = devm_request_and_ioremap(&pdev->dev, res); if (!q6->restart_reg) return -ENOMEM; q6->vreg = NULL; prop = of_find_property(pdev->dev.of_node, "vdd_mss-supply", NULL); if (prop) { q6->vreg = devm_regulator_get(&pdev->dev, "vdd_mss"); if (IS_ERR(q6->vreg)) return PTR_ERR(q6->vreg); ret = regulator_set_voltage(q6->vreg, VDD_MSS_UV, MAX_VDD_MSS_UV); if (ret) dev_err(&pdev->dev, "Failed to set vreg voltage.\n"); ret = regulator_set_optimum_mode(q6->vreg, 100000); if (ret < 0) { dev_err(&pdev->dev, "Failed to set vreg mode.\n"); return ret; } } q6->vreg_mx = devm_regulator_get(&pdev->dev, "vdd_mx"); if (IS_ERR(q6->vreg_mx)) return PTR_ERR(q6->vreg_mx); prop = of_find_property(pdev->dev.of_node, "vdd_mx-uV", NULL); if (!prop) { dev_err(&pdev->dev, "Missing vdd_mx-uV property\n"); return -EINVAL; } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cxrail_bhs_reg"); if (res) q6->cxrail_bhs = devm_ioremap(&pdev->dev, res->start, resource_size(res)); q6->ahb_clk = devm_clk_get(&pdev->dev, "iface_clk"); if (IS_ERR(q6->ahb_clk)) return PTR_ERR(q6->ahb_clk); q6->axi_clk = devm_clk_get(&pdev->dev, "bus_clk"); if (IS_ERR(q6->axi_clk)) return PTR_ERR(q6->axi_clk); q6->rom_clk = devm_clk_get(&pdev->dev, "mem_clk"); if (IS_ERR(q6->rom_clk)) return PTR_ERR(q6->rom_clk); /* Optional. 
*/ if (of_property_match_string(pdev->dev.of_node, "qcom,active-clock-names", "gpll0_mss_clk") >= 0) q6->gpll0_mss_clk = devm_clk_get(&pdev->dev, "gpll0_mss_clk"); ret = pil_desc_init(q6_desc); return ret; } static int pil_mss_driver_probe(struct platform_device *pdev) { struct modem_data *drv; int ret, is_not_loadable; drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL); if (!drv) return -ENOMEM; platform_set_drvdata(pdev, drv); is_not_loadable = of_property_read_bool(pdev->dev.of_node, "qcom,is-not-loadable"); if (is_not_loadable) { drv->subsys_desc.is_not_loadable = 1; } else { ret = pil_mss_loadable_init(drv, pdev); if (ret) return ret; } init_completion(&drv->stop_ack); return pil_subsys_init(drv, pdev); } static int pil_mss_driver_exit(struct platform_device *pdev) { struct modem_data *drv = platform_get_drvdata(pdev); subsys_unregister(drv->subsys); destroy_ramdump_device(drv->ramdump_dev); pil_desc_release(&drv->q6->desc); return 0; } static struct of_device_id mss_match_table[] = { { .compatible = "qcom,pil-q6v5-mss" }, { .compatible = "qcom,pil-q6v55-mss" }, { .compatible = "qcom,pil-q6v56-mss" }, {} }; static struct platform_driver pil_mss_driver = { .probe = pil_mss_driver_probe, .remove = pil_mss_driver_exit, .driver = { .name = "pil-q6v5-mss", .of_match_table = mss_match_table, .owner = THIS_MODULE, }, }; static int __init pil_mss_init(void) { return platform_driver_register(&pil_mss_driver); } module_init(pil_mss_init); static void __exit pil_mss_exit(void) { platform_driver_unregister(&pil_mss_driver); } module_exit(pil_mss_exit); MODULE_DESCRIPTION("Support for booting modem subsystems with QDSP6v5 Hexagon processors"); MODULE_LICENSE("GPL v2");
Riccorbypro/android_kernel_sony_wukong
drivers/soc/qcom/pil-q6v5-mss.c
C
gpl-2.0
10,816
+{ locale_version => 1.12, suppress => [0x0410, 0x0430, 0x04D8, 0x04D9, 0x0413, 0x0433, 0x0415, 0x0435, 0x0416, 0x0436, 0x0417, 0x0437, 0x0406, 0x0456, 0x041E, 0x043E, 0x04E8, 0x04E9, 0x041A, 0x043A, 0x0423, 0x0443, 0x0427, 0x0447, 0x042B, 0x044B, 0x042D, 0x044D, 0x0474, 0x0475], entry => <<'ENTRY', # for DUCET v7.0.0 04D1 ; [.1CE0.0020.0002][.0000.0026.0002] # CYRILLIC SMALL LETTER A WITH BREVE 04D0 ; [.1CE0.0020.0008][.0000.0026.0002] # CYRILLIC CAPITAL LETTER A WITH BREVE 04D3 ; [.1CE0.0020.0002][.0000.002B.0002] # CYRILLIC SMALL LETTER A WITH DIAERESIS 04D2 ; [.1CE0.0020.0008][.0000.002B.0002] # CYRILLIC CAPITAL LETTER A WITH DIAERESIS 04DB ; [.1CEC.0020.0002][.0000.002B.0002] # CYRILLIC SMALL LETTER SCHWA WITH DIAERESIS 04DA ; [.1CEC.0020.0008][.0000.002B.0002] # CYRILLIC CAPITAL LETTER SCHWA WITH DIAERESIS 0453 ; [.1D00.0020.0002][.0000.0024.0002] # CYRILLIC SMALL LETTER GJE 0403 ; [.1D00.0020.0008][.0000.0024.0002] # CYRILLIC CAPITAL LETTER GJE 04D7 ; [.1D28.0020.0002][.0000.0026.0002] # CYRILLIC SMALL LETTER IE WITH BREVE 04D6 ; [.1D28.0020.0008][.0000.0026.0002] # CYRILLIC CAPITAL LETTER IE WITH BREVE 04DD ; [.1D34.0020.0002][.0000.002B.0002] # CYRILLIC SMALL LETTER ZHE WITH DIAERESIS 04DC ; [.1D34.0020.0008][.0000.002B.0002] # CYRILLIC CAPITAL LETTER ZHE WITH DIAERESIS 04DF ; [.1D42.0020.0002][.0000.002B.0002] # CYRILLIC SMALL LETTER ZE WITH DIAERESIS 04DE ; [.1D42.0020.0008][.0000.002B.0002] # CYRILLIC CAPITAL LETTER ZE WITH DIAERESIS 0457 ; [.1D66.0020.0002][.0000.002B.0002] # CYRILLIC SMALL LETTER YI 0407 ; [.1D66.0020.0008][.0000.002B.0002] # CYRILLIC CAPITAL LETTER YI 04E7 ; [.1DC9.0020.0002][.0000.002B.0002] # CYRILLIC SMALL LETTER O WITH DIAERESIS 04E6 ; [.1DC9.0020.0008][.0000.002B.0002] # CYRILLIC CAPITAL LETTER O WITH DIAERESIS 04EB ; [.1DD1.0020.0002][.0000.002B.0002] # CYRILLIC SMALL LETTER BARRED O WITH DIAERESIS 04EA ; [.1DD1.0020.0008][.0000.002B.0002] # CYRILLIC CAPITAL LETTER BARRED O WITH DIAERESIS 045C ; 
[.1D78.0020.0002][.0000.0024.0002] # CYRILLIC SMALL LETTER KJE 040C ; [.1D78.0020.0008][.0000.0024.0002] # CYRILLIC CAPITAL LETTER KJE 045E ; [.1E0B.0020.0002][.0000.0026.0002] # CYRILLIC SMALL LETTER SHORT U 040E ; [.1E0B.0020.0008][.0000.0026.0002] # CYRILLIC CAPITAL LETTER SHORT U 04F1 ; [.1E0B.0020.0002][.0000.002B.0002] # CYRILLIC SMALL LETTER U WITH DIAERESIS 04F0 ; [.1E0B.0020.0008][.0000.002B.0002] # CYRILLIC CAPITAL LETTER U WITH DIAERESIS 04F3 ; [.1E0B.0020.0002][.0000.002C.0002] # CYRILLIC SMALL LETTER U WITH DOUBLE ACUTE 04F2 ; [.1E0B.0020.0008][.0000.002C.0002] # CYRILLIC CAPITAL LETTER U WITH DOUBLE ACUTE 04F5 ; [.1E5E.0020.0002][.0000.002B.0002] # CYRILLIC SMALL LETTER CHE WITH DIAERESIS 04F4 ; [.1E5E.0020.0008][.0000.002B.0002] # CYRILLIC CAPITAL LETTER CHE WITH DIAERESIS 04F9 ; [.1E92.0020.0002][.0000.002B.0002] # CYRILLIC SMALL LETTER YERU WITH DIAERESIS 04F8 ; [.1E92.0020.0008][.0000.002B.0002] # CYRILLIC CAPITAL LETTER YERU WITH DIAERESIS 04ED ; [.1EA7.0020.0002][.0000.002B.0002] # CYRILLIC SMALL LETTER E WITH DIAERESIS 04EC ; [.1EA7.0020.0008][.0000.002B.0002] # CYRILLIC CAPITAL LETTER E WITH DIAERESIS 0477 ; [.1EDD.0020.0002][.0000.003C.0002] # CYRILLIC SMALL LETTER IZHITSA WITH DOUBLE GRAVE ACCENT 0476 ; [.1EDD.0020.0008][.0000.003C.0002] # CYRILLIC CAPITAL LETTER IZHITSA WITH DOUBLE GRAVE ACCENT ENTRY };
ljgabc/lfs
usr/lib/perl5/5.22.0/Unicode/Collate/Locale/ru.pl
Perl
gpl-2.0
3,512
<?php /** * @version $Id: AbstarctJoomlaBasedProvider.php 19583 2014-03-10 22:54:45Z btowles $ * @author RocketTheme http://www.rockettheme.com * @copyright Copyright (C) 2007 - 2015 RocketTheme, LLC * @license http://www.gnu.org/licenses/gpl-2.0.html GNU/GPLv2 only */ abstract class RokSprocket_Provider_AbstarctJoomlaBasedProvider extends RokSprocket_Provider { /** * @return RokSprocket_ItemCollection */ public function getItems() { if (empty($this->filters)) return new RokSprocket_ItemCollection(); // setup active menu item if not there $app = JFactory::getApplication(); $menus = $app->getMenu('site'); $menu_id = $menus->getDefault()->id; $active = $menus->getActive(); $input = $app->input; if ($active == null && $passed_item_id = $input->get('ItemId', $menu_id, 'int')) { $menus->setActive($passed_item_id); } /** @var $filer_processor RokSprocket_Provider_AbstractJoomlaPlatformFilter */ $filer_processor = $this->getFilterProcessor(); $filer_processor->setModuleId($this->module_id); $filer_processor->setDisplayedIds($this->displayed_ids); $provider = $this->params->get('provider', 'joomla'); $manualsort = ($this->params->get($provider . '_sort', 'automatic') == 'manual') ? true : false; if ($manualsort) { $filer_processor->setManualSort($manualsort); $filer_processor->setManualAppend($this->params->get($provider . '_sort_manual_append', 'after')); } $filer_processor->process($this->filters, $this->sort_filters, $this->showUnpublished); /** @var $query JDatabaseQuery */ $query = $filer_processor->getQuery(); $display_limit = (int)$this->params->get('display_limit', 0); if ($app->isSite() && is_int($display_limit) && $display_limit > 0) { $query = (string)$query . ' LIMIT ' . 
$display_limit; } $db = JFactory::getDbo(); $string_query = (string)$query; $db->setQuery($string_query); $raw_results = $db->loadObjectList('id'); if ($error = $db->getErrorMsg()) { throw new RokSprocket_Exception($error); } $raw_results = $this->populateTags($raw_results); $converted = $this->convertRawToItems($raw_results); $this->mapPerItemData($converted); return $converted; } protected function populateTags(array $raw_results) { return $raw_results; } /** * @param array $data * * @return RokSprocket_ItemCollection */ protected function convertRawToItems(array $data) { $collection = new RokSprocket_ItemCollection(); $dborder = 0; foreach ($data as $raw_item) { $item = $this->convertRawToItem($raw_item, $dborder); $collection[$item->getArticleId()] = $item; $dborder++; } return $collection; } /** * @abstract * * @param $raw_item * @param int $dborder */ abstract protected function convertRawToItem($raw_item, $dborder = 0); /** * @param RokSprocket_ItemCollection $items * * @throws RokSprocket_Exception */ protected function getModuleItemSettings(RokSprocket_ItemCollection &$items) { //TODO move this to be a platform independent fucntion $item_ids = array_keys($items); $db = JFactory::getDbo(); $query = $db->getQuery(true); $query->select('rsi.provider_id as id, rsi.order as order, rsi.params as params')->from('#__roksprocket_items as rsi'); $query->where(sprintf('rsi.module_id = %d', $this->module_id)); $query->where(sprintf('rsi.provider = %s', $db->quote($this->provider_name))); $query->where(sprintf('rsi.provider_id in (%s)', implode(',', $item_ids))); $query->order('rsi.order'); $db->setQuery($query); $item_results = $db->loadObjectList('id'); if ($error = $db->getErrorMsg()) { throw new RokSprocket_Exception($error); } foreach ($item_results as $item_id => $item) { if (isset($items[$item_id])) { } } } /** * @param RokSprocket_ItemCollection $items * * @throws RokSprocket_Exception */ protected function mapPerItemData(RokSprocket_ItemCollection &$items) { 
$db = JFactory::getDbo(); $query = $db->getQuery(true); $query->select('i.provider_id as id, i.order, i.params')->from('#__roksprocket_items as i'); $query->where('i.module_id = ' . $db->quote($this->module_id)); $query->where('i.provider = ' . $db->quote($this->provider_name)); $db->setQuery($query); $sprocket_items = $db->loadObjectList('id'); if ($error = $db->getErrorMsg()) { throw new RokSprocket_Exception($error); } /** @var $items RokSprocket_Item[] */ foreach ($items as $item_id => &$item) { list($provider, $id) = explode('-', $item_id); if (array_key_exists($id, $sprocket_items)) { $items[$item_id]->setOrder((int)$sprocket_items[$id]->order); if (null != $sprocket_items[$id]->params) { $decoded = null; try { $decoded = RokCommon_Utils_ArrayHelper::fromObject(RokCommon_JSON::decode($sprocket_items[$id]->params)); } catch (RokCommon_JSON_Exception $jse) { //TODO log that unable to get per item settings } $items[$item_id]->setParams($decoded); } else { $items[$item_id]->setParams(array()); } } } } /** * @param $id * * @return \RokSprocket_Item */ public function getArticlePreview($id) { $ret = $this->getArticleInfo($id); $ret->setText($this->_cleanPreview($ret->getText())); return $ret; } /** * @param $id * * @param bool $raw return the raw object not the RokSprocket_Item * * @return stdClass|RokSprocket_Item * @throws RokSprocket_Exception */ public function getArticleInfo($id, $raw = false) { /** @var $filer_processor RokCommon_Filter_IProcessor */ $filer_processor = $this->getFilterProcessor(); $filer_processor->process(array('id' => array($id)), array(), true); $query = $filer_processor->getQuery(); $db = JFactory::getDbo(); $db->setQuery($query); $ret = $db->loadObject(); if ($error = $db->getErrorMsg()) { throw new RokSprocket_Exception($error); } if ($raw) { $ret->preview = $this->_cleanPreview($ret->introtext); $ret->editUrl = $this->getArticleEditUrl($id); return $ret; } else { $item = $this->convertRawToItem($ret); $item->editUrl = 
$this->getArticleEditUrl($id); $item->preview = $this->_cleanPreview($item->getText()); return $item; } } /** * @abstract * * @param $id */ abstract protected function getArticleEditUrl($id); /** * @param $content * * @return mixed */ protected function _cleanPreview($content) { $container = RokCommon_Service::getContainer(); /** @var $helper RokSprocket_PlatformHelper */ $helper = $container->roksprocket_platformhelper; $content = $helper->cleanup($content); // //Replace src links // $base = JURI::root(); // // $regex = '#href="index.php\?([^"]*)#m'; // $content = preg_replace_callback($regex, array('self', '_route'), $content); // // $protocols = '[a-zA-Z0-9]+:'; //To check for all unknown protocals (a protocol must contain at least one alpahnumeric fillowed by : // $regex = '#(src|href)="(?!/|' . $protocols . '|\#|\')([^"]*)"#m'; // $content = preg_replace($regex, "$1=\"$base\$2\" target=\"_blank\"", $content); // // $regex = '#(onclick="window.open\(\')(?!/|' . $protocols . '|\#)([^/]+[^\']*?\')#m'; // $content = preg_replace($regex, '$1' . $base . '$2', $content); // // // ONMOUSEOVER / ONMOUSEOUT // $regex = '#(onmouseover|onmouseout)="this.src=([\']+)(?!/|' . $protocols . '|\#|\')([^"]+)"#m'; // $content = preg_replace($regex, '$1="this.src=$2' . $base . '$3$4"', $content); // // // Background image // $regex = '#style\s*=\s*[\'\"](.*):\s*url\s*\([\'\"]?(?!/|' . $protocols . '|\#)([^\)\'\"]+)[\'\"]?\)#m'; // $content = preg_replace($regex, 'style="$1: url(\'' . $base . '$2$3\')', $content); return $content; } /** * @param $matches * * @return string */ protected function _route(&$matches) { $original = $matches[0]; $url = $matches[1]; $url = str_replace('&amp;', '&', $url); $route = JURI::root() . 'index.php?' . $url; return 'target="_blank" href="' . 
$route; } /** * @param array $texts * @return array */ protected function processPlugins($texts = array()) { if (!isset($this->params) || $this->params->get('run_content_plugins', 'onmodule') == 'oneach' || $this->params->get('run_content_plugins', 'onmodule') == 1) { if(JFactory::getApplication()->isSite()){ foreach ($texts as $k => $v) { $texts[$k] = JHtml::_('content.prepare', $v); } } } return $texts; } }
studiochakra/writopialab.com
components/com_roksprocket/lib/RokSprocket/Provider/AbstarctJoomlaBasedProvider.php
PHP
gpl-2.0
8,583
<?php /** * Template Name: Most Popular * * Displays most popular posts in all time. * * @package Path * @subpackage Template * @since 0.1.0 */ get_header(); // Loads the header.php template. ?> <?php do_atomic( 'before_content' ); // path_before_content ?> <div id="content"> <?php do_atomic( 'open_content' ); // path_open_content ?> <div class="hfeed"> <?php get_template_part( 'loop-meta' ); // Loads the loop-meta.php template. ?> <?php /* Loop for most viewed articles. entry-views extension is used. */ $args = array ( 'ignore_sticky_posts' => true, 'meta_key' => 'Views', 'orderby' => 'meta_value_num', 'posts_per_page' => get_option( 'posts_per_page' ), 'paged' => ( get_query_var( 'paged' ) ? get_query_var( 'paged' ) : 1 ) ); $wp_query = new WP_Query( $args ); ?> <?php if ( $wp_query->have_posts() ) : ?> <?php while ( $wp_query->have_posts() ) : $wp_query->the_post(); ?> <?php do_atomic( 'before_entry' ); // path_before_entry ?> <article id="post-<?php the_ID(); ?>" class="<?php hybrid_entry_class(); ?>"> <?php do_atomic( 'open_entry' ); // path_open_entry ?> <?php if ( current_theme_supports( 'get-the-image' ) ) get_the_image( array( 'meta_key' => 'Thumbnail', 'size' => 'thumbnail' ) ); ?> <header class="entry-header"> <h2 class="entry-title"><a href="<?php the_permalink(); ?>" title="<?php the_title_attribute(); ?>"><?php the_title(); ?></a></h2> <?php echo apply_atomic_shortcode( 'byline', '<div class="byline">' . __( 'Published by [entry-author] on [entry-published] [entry-comments-link before=" | "] [entry-edit-link before=" | "]', 'path' ) . '</div>' ); ?> </header><!-- .entry-header --> <div class="entry-summary"> <?php the_excerpt(); ?> <?php wp_link_pages( array( 'before' => '<p class="page-links">' . __( 'Pages:', 'path' ), 'after' => '</p>' ) ); ?> </div><!-- .entry-summary --> <footer class="entry-footer"> <?php echo apply_atomic_shortcode( 'entry_meta', '<div class="entry-meta">' . 
__( 'Views [entry-views] [entry-terms taxonomy="category" before="Posted in "] [entry-terms before="Tagged "]', 'path' ) . '</div>' ); ?> </footer><!-- .entry-footer --> <?php do_atomic( 'close_entry' ); // path_close_entry ?> </article><!-- .hentry --> <?php do_atomic( 'after_entry' ); // path_after_entry ?> <?php endwhile; ?> <?php else : ?> <?php get_template_part( 'loop-error' ); // Loads the loop-error.php template. ?> <?php endif; ?> </div><!-- .hfeed --> <?php do_atomic( 'close_content' ); // path_close_content ?> <?php get_template_part( 'loop-nav' ); // Loads the loop-nav.php template. ?> <?php wp_reset_postdata(); // Reset Query ?> </div><!-- #content --> <?php do_atomic( 'after_content' ); // path_after_content ?> <?php get_footer(); // Loads the footer.php template. ?>
evolutivo/asf
WP1/wp-content/themes/path/page-templates/most-popular.php
PHP
gpl-2.0
2,997
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"> <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> <meta http-equiv="Content-Style-Type" content="text/css"> <title></title> <meta name="Generator" content="Cocoa HTML Writer"> <meta name="CocoaVersion" content="824.42"> <style type="text/css"> p.p1 {margin: 0.0px 0.0px 0.0px 0.0px; font: 18.0px Helvetica} p.p2 {margin: 0.0px 0.0px 0.0px 0.0px; font: 12.0px Helvetica; min-height: 14.0px} p.p3 {margin: 0.0px 0.0px 0.0px 0.0px; font: 12.0px Helvetica} p.p4 {margin: 0.0px 0.0px 0.0px 0.0px; font: 12.0px Monaco; min-height: 16.0px} p.p5 {margin: 0.0px 0.0px 0.0px 0.0px; font: 9.0px Monaco} span.s1 {color: #0019b7} span.Apple-tab-span {white-space:pre} </style> </head> <body> <p class="p1"><b>fold2<span class="Apple-tab-span"> </span><span class="Apple-tab-span"> </span><span class="Apple-tab-span"> </span>bilateral folding</b></p> <p class="p2"><br></p> <p class="p3"><b>BinaryOperator</b></p> <p class="p2"><br></p> <p class="p3"><b>fold2(a, b)</b></p> <p class="p3"><b>a fold2: b</b></p> <p class="p3"><b>a.fold2(b)</b></p> <p class="p2"><br></p> <p class="p3">folds input wave a to +/- b</p> <p class="p4"><br></p> <p class="p5">{ <span class="s1">FSinOsc</span>.ar(1000).fold2(<span class="s1">Line</span>.kr(0,1,8)) }.scope;</p> <p class="p4"><br></p> </body> </html>
scztt/sc-debug
build/Help/BinaryOps/fold2.html
HTML
gpl-2.0
1,403
<?php /** * @file * Contains \Drupal\Core\Entity\ContentEntityForm. */ namespace Drupal\Core\Entity; use Drupal\Core\Entity\Display\EntityFormDisplayInterface; use Drupal\Core\Entity\Entity\EntityFormDisplay; use Drupal\Core\Form\FormStateInterface; use Symfony\Component\DependencyInjection\ContainerInterface; /** * Entity form variant for content entity types. * * @see \Drupal\Core\ContentEntityBase */ class ContentEntityForm extends EntityForm implements ContentEntityFormInterface { /** * The entity manager. * * @var \Drupal\Core\Entity\EntityManagerInterface */ protected $entityManager; /** * Constructs a ContentEntityForm object. * * @param \Drupal\Core\Entity\EntityManagerInterface $entity_manager * The entity manager. */ public function __construct(EntityManagerInterface $entity_manager) { $this->entityManager = $entity_manager; } /** * {@inheritdoc} */ public static function create(ContainerInterface $container) { return new static( $container->get('entity.manager') ); } /** * {@inheritdoc} */ public function form(array $form, FormStateInterface $form_state) { $form = parent::form($form, $form_state); $this->getFormDisplay($form_state)->buildForm($this->entity, $form, $form_state); return $form; } /** * {@inheritdoc} */ public function validate(array $form, FormStateInterface $form_state) { $this->updateFormLangcode($form_state); $entity = $this->buildEntity($form, $form_state); $this->getFormDisplay($form_state)->validateFormValues($entity, $form, $form_state); // @todo Remove this. // Execute legacy global validation handlers. $form_state->setValidateHandlers([]); form_execute_handlers('validate', $form, $form_state); } /** * Initialize the form state and the entity before the first form build. */ protected function init(FormStateInterface $form_state) { // Ensure we act on the translation object corresponding to the current form // language. 
$langcode = $this->getFormLangcode($form_state); $this->entity = $this->entity->getTranslation($langcode); $form_display = EntityFormDisplay::collectRenderDisplay($this->entity, $this->getOperation()); $this->setFormDisplay($form_display, $form_state); parent::init($form_state); } /** * {@inheritdoc} */ public function getFormLangcode(FormStateInterface $form_state) { if (!$form_state->has('langcode')) { // Imply a 'view' operation to ensure users edit entities in the same // language they are displayed. This allows to keep contextual editing // working also for multilingual entities. $form_state->set('langcode', $this->entityManager->getTranslationFromContext($this->entity)->language()->id); } return $form_state->get('langcode'); } /** * {@inheritdoc} */ public function isDefaultFormLangcode(FormStateInterface $form_state) { return $this->getFormLangcode($form_state) == $this->entity->getUntranslated()->language()->id; } /** * {@inheritdoc} */ protected function copyFormValuesToEntity(EntityInterface $entity, array $form, FormStateInterface $form_state) { // First, extract values from widgets. $extracted = $this->getFormDisplay($form_state)->extractFormValues($entity, $form, $form_state); // Then extract the values of fields that are not rendered through widgets, // by simply copying from top-level form values. This leaves the fields // that are not being edited within this form untouched. foreach ($form_state->getValues() as $name => $values) { if ($entity->hasField($name) && !isset($extracted[$name])) { $entity->set($name, $values); } } } /** * {@inheritdoc} */ public function getFormDisplay(FormStateInterface $form_state) { return $form_state->get('form_display'); } /** * {@inheritdoc} */ public function setFormDisplay(EntityFormDisplayInterface $form_display, FormStateInterface $form_state) { $form_state->set('form_display', $form_display); return $this; } }
saitanay/doitwithd8
core/lib/Drupal/Core/Entity/ContentEntityForm.php
PHP
gpl-2.0
4,140
<?php /** * HeadSpace * * @package HeadSpace * @author John Godley **/ /* ============================================================================================================ This software is provided "as is" and any express or implied warranties, including, but not limited to, the implied warranties of merchantibility and fitness for a particular purpose are disclaimed. In no event shall the copyright owner or contributors be liable for any direct, indirect, incidental, special, exemplary, or consequential damages (including, but not limited to, procurement of substitute goods or services; loss of use, data, or profits; or business interruption) however caused and on any theory of liability, whether in contract, strict liability, or tort (including negligence or otherwise) arising in any way out of the use of this software, even if advised of the possibility of such damage. For full license details see license.txt ============================================================================================================ */ class HSS_Mint extends HS_SiteModule { var $path = ''; var $role = 'everyone'; var $trackable = null; function name () { return __ ('Mint', 'headspace'); } function description () { return __ ('Adds Mint tracking code to all pages', 'headspace'); } function run () { add_action ('wp_footer', array (&$this, 'wp_footer')); } function is_trackable () { if ($this->is_trackable !== null) return $this->is_trackable; if (is_user_logged_in () && $this->role != 'everyone') { $user = wp_get_current_user (); global $wp_roles; $caps = $wp_roles->get_role ($this->role); if ($caps) { // Calculate the highest level of the user and the role $role_level = $user_level = 0; for ($x = 10; $x >= 0; $x--) { if (isset ($caps->capabilities['level_'.$x])) break; } $role_level = $x; for ($x = 10; $x >= 0; $x--) { if (isset ($user->allcaps['level_'.$x])) break; } $user_level = $x; // Quit if the user is greater level than the role if ($user_level > $role_level) { 
$this->is_trackable = false; return false; } } } $this->is_trackable = true; return $this->is_trackable; } function wp_footer () { if ($this->path && $this->is_trackable ()) { ?> <script src="<?php echo rtrim ($this->path, '/') ?>/?js" type="text/javascript"></script> <?php } } function load ($data) { if (isset ($data['path'])) $this->path = $data['path']; if (isset ($data['role'])) $this->role = $data['role']; } function has_config () { return true; } function save_options ($data) { return array ('path' => $data['path'], 'role' => $data['role']); } function edit () { ?> <tr> <th width="150"><?php _e ('Mint path', 'headspace'); ?>:</th> <td> <input type="text" name="path" value="<?php echo esc_attr ($this->path); ?>"/> <span class="sub"><?php _e ('Enter the URL of your Mint installation (i.e. <code>/mint/</code>).', 'headspace'); ?></span> </td> </tr> <tr> <th><?php _e ('Who to track', 'headspace'); ?>:</th> <td> <select name="role"> <option value="everyone"><?php _e ('Everyone', 'headspace'); ?></option> <?php global $wp_roles; foreach ($wp_roles->role_names as $key => $rolename) : ?> <option value="<?php echo $key ?>"<?php if ($this->role == $key) echo ' selected="selected"'; ?>><?php echo esc_html( $rolename ) ?></option> <?php endforeach; ?> </select> </select> <span class="sub"><?php _e ('Users of the specified role or less will be tracked', 'headspace'); ?></span> </td> </tr> <?php } function file () { return basename (__FILE__); } }
tejas101/IndiaSpendHindi
wp-content/plugins/headspace2/modules/site/mint.php
PHP
gpl-2.0
3,762
/* * This file is part of a Java port of the program ltl2dstar * (http://www.ltl2dstar.de/) for PRISM (http://www.prismmodelchecker.org/) * Copyright (C) 2005-2007 Joachim Klein <j.klein@ltl2dstar.de> * Copyright (c) 2007 Carlos Bederian * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ package jltl2dstar; import java.util.Vector; import java.io.PrintStream; import jltl2ba.MyBitSet; /** * A Safra tree, an ordered tree of SafraTreeNodes. */ public class SafraTree implements NBA2DAState { /** The maximum number of nodes */ private int MAX_NODES; /** An array to store the nodes */ private Vector<SafraTreeNode> _nodes; /** * Constructor. * @param N the maximum number of nodes. */ public SafraTree(int N) { MAX_NODES = (N == 0 ? 1 : N); _nodes = new Vector<SafraTreeNode>(MAX_NODES); _nodes.setSize(MAX_NODES); // create root-node newNode(0); } /** Copy constructor. */ public SafraTree(SafraTree other) { MAX_NODES = other.MAX_NODES; _nodes = new Vector<SafraTreeNode>(MAX_NODES); _nodes.setSize(MAX_NODES); for (int i = 0; i < MAX_NODES; i++) { if (other._nodes.get(i) != null) { newNode(i); _nodes.get(i).setLabeling((MyBitSet) other._nodes.get(i).getLabeling().clone()); _nodes.get(i).setFinalFlag(other._nodes.get(i).hasFinalFlag()); } } copySubTree(_nodes.get(0), other._nodes.get(0)); } /** Get the root node of the tree. 
*/ public SafraTreeNode getRootNode() {return _nodes.get(0);} /** Create a new node. The name is the next free node name. */ public SafraTreeNode newNode() { if (_nodes.indexOf(null) != -1) return newNode(_nodes.indexOf(null)); // FIXME: hmm, inconsistent with newNode(int) else return null; } /** Create a new node with name <i>id</i>. */ public SafraTreeNode newNode(int id) { assert(id < MAX_NODES); assert(_nodes.get(id) == null); _nodes.set(id,new SafraTreeNode(id)); return _nodes.get(id); } /** * Remove a SafraTreeNode from the tree, * the node can have no children. */ public void remove(SafraTreeNode node) { assert(_nodes.get(node.getID()) == node); remove(node.getID()); } /** * Remove the SafraTreeNode <i>id</i> from the tree, * the node can have no children. */ public void remove(int id) { assert(id >= 0 && id < MAX_NODES); _nodes.get(id).removeFromTree(); _nodes.set(id, null); } /** * Remove all children of the SafraTreeNode <i>id</i>. */ public void removeAllChildren(int id) { assert(id < MAX_NODES); SafraTreeNode n = _nodes.get(id); SafraTreeNode child; while ((child = n.getOldestChild()) != null) { removeAllChildren(child.getID()); remove(child.getID()); } } /** * Walk the tree post-order, calling the function * void visit(SafraTree& tree, SafraTreeNode *node) * in the SafraTreeVisitor on each node. */ public <V extends SafrasAlgorithm.SafraTreeVisitor> void walkTreePostOrder(V visitor) { SafraTreeWalker<V> stw = new SafraTreeWalker<V>(visitor); stw.walkTreePostOrder(this); } /** * Walk the subtree rooted under node *top post-order, * calling the function void visit(SafraTree& tree, SafraTreeNode *node) * in the SafraTreeVisitor on each node. 
*/ public <V extends SafrasAlgorithm.SafraTreeVisitor> void walkSubTreePostOrder(V visitor, SafraTreeNode top) { SafraTreeWalker<V> stw = new SafraTreeWalker<V>(visitor); stw.walkSubTreePostOrder(this, top, true); } /** * Walk the subtree rooted under node *top (only the children, not *top itself) * post-order, calling the function void visit(SafraTree& tree, SafraTreeNode *node) * in the SafraTreeVisitor on each node. */ public <V extends SafrasAlgorithm.SafraTreeVisitor> void walkChildrenPostOrder(V visitor, SafraTreeNode top) { SafraTreeWalker<V> stw = new SafraTreeWalker<V>(visitor); stw.walkSubTreePostOrder(this, top, false); // = don't visit top } /** * Calculate the height of the tree. */ public int treeHeight() { if (getRootNode() != null) { return getRootNode().treeHeight(); } return 0; } /** * Calculate the width of the tree. */ public int treeWidth() { if (getRootNode() != null) { return getRootNode().treeWidth(); } return 0; } /** * Equality operator. */ public boolean equals(SafraTree other) { if (other.MAX_NODES != MAX_NODES) {return false;} return _nodes.equals(other._nodes); } public boolean equals(Object other) { if (other instanceof SafraTree) return this.equals((SafraTree) other); else return false; } /** * Checks equality when ignoring the node names. */ public boolean structural_equal_to(SafraTree other) { if (other.MAX_NODES!=MAX_NODES) {return false;} SafraTreeNode this_root = this.getRootNode(); SafraTreeNode other_root = other.getRootNode(); if (this_root == null || other_root == null) { // return true if both are 0 return (this_root==other_root); } return this_root.structuralEquals(other_root); } /** * Less-than operator when ignoring the node names. 
*/ public boolean structural_less_than(SafraTree other) { if (other.MAX_NODES<MAX_NODES) {return true;} SafraTreeNode this_root = this.getRootNode(); SafraTreeNode other_root = other.getRootNode(); if (this_root == null) { if (other_root!= null) { return true; } else { return false; } } else { // this_root !=0 if (other_root == null) {return false;} return this_root.structuralLessThan(other_root); } } /** * Less-than operator */ public boolean lessThan(SafraTree other) { if (MAX_NODES < other.MAX_NODES) {return true;} for (int i = 0; i < MAX_NODES; i++) { if (_nodes.get(i) == null && other._nodes.get(i) == null) { ; } else if (_nodes.get(i) == null) { return true; } else if (other._nodes.get(i) == null) { return false; } else { if (_nodes.get(i).lessThan(other._nodes.get(i))) { return true; } else if (_nodes.get(i).equals(other._nodes.get(i))) { ; } else { return false; } } } return false; } /** Get the maximum number of nodes. */ public int getNodeMax() {return MAX_NODES;} /** Get SafraTreeNode with index <i>i</i>*/ public SafraTreeNode get(int i) { return _nodes.get(i); } public void set(int i, SafraTreeNode node) { _nodes.set(i, node); } /** Print the SafraTree on an output stream. 
*/ public void print(PrintStream out) { if (getRootNode() == null) { out.println("<empty>"); } else { printSubTree(out, 0, getRootNode()); } } /** Returns a string representation of the SafraTree */ // public String toString() { // std::ostringstream buf; // buf << *this; // return buf.str(); // } /** Returns a string representation in HTML of the SafraTree */ public String toHTML() { if (getRootNode() == null) { return "<TABLE><TR><TD>[empty]</TD></TR></TABLE>"; } else { return getRootNode().toHTMLString(); } } /** * Calculate a hash value using HashFunction * @param hashfunction the HashFunction * @param only_structure ignore the nameing of the nodes */ // template <class HashFunction> // public void hashCode(HashFunction& hashfunction, // bool only_structure=false) { // SafraTreeNode* root=getRootNode(); // // if (root!=0) { // root->hashCode(hashfunction, only_structure); // } // } public int hashCode() { if (getRootNode() != null) return getRootNode().hashCode(); else return 0; } /** * Generate the appropriate acceptance signature for Rabin Acceptance for this tree */ public void generateAcceptance(AcceptanceForState acceptance) { for (int i = 0; i < getNodeMax(); i++) { SafraTreeNode stn = this.get(i); if (stn == null) { acceptance.addTo_U(i); } else { if (stn.hasFinalFlag()) { acceptance.addTo_L(i); } } } } public void generateAcceptance(RabinSignature acceptance) { acceptance.setSize(getNodeMax()); for (int i = 0; i < getNodeMax(); i++) { SafraTreeNode stn = this.get(i); if (stn == null) { acceptance.setColor(i, RabinAcceptance.RabinColor.RABIN_RED); } else { if (stn.hasFinalFlag()) { acceptance.setColor(i, RabinAcceptance.RabinColor.RABIN_GREEN); } else { acceptance.setColor(i, RabinAcceptance.RabinColor.RABIN_WHITE); } } } } public RabinSignature generateAcceptance() { RabinSignature s = new RabinSignature(getNodeMax()); generateAcceptance(s); return s; } /** * Copy the subtree (the children) of *other * to *top, becoming the children of *top */ private 
void copySubTree(SafraTreeNode top, SafraTreeNode other) { if (other == null) {return;} for (SafraTreeNode child : other) { SafraTreeNode n = _nodes.get(child.getID()); top.addAsYoungestChild(n); copySubTree(n, child); } } /** * Print the subtree rooted at node *top to the output stream * @param out the output stream * @param prefix the number of spaces ' ' in front of each node * @param top the current tree sub root */ private void printSubTree(PrintStream out, int prefix, SafraTreeNode top) { for (int i = 0; i < prefix; i++) { out.print(" "); } top.print(out); out.println(); for (SafraTreeNode child : top) { printSubTree(out, prefix+1, child); } } }
nicodelpiano/prism
src/jltl2dstar/SafraTree.java
Java
gpl-2.0
9,871
/* eslint-disable import/no-extraneous-dependencies */ import { isWidget } from 'ckeditor5/src/widget'; /** * Checks if the provided model element is `drupalMedia`. * * @param {module:engine/model/element~Element} modelElement * The model element to be checked. * @return {boolean} * A boolean indicating whether element is drupalMedia element. * * @internal */ export function isDrupalMedia(modelElement) { return !!modelElement && modelElement.is('element', 'drupalMedia'); } /** * Checks if view element is <drupal-media> element. * * @param {module:engine/view/element~Element} viewElement * The view element. * @return {boolean} * A boolean indicating whether element is <drupal-media> element. * * @internal */ export function isDrupalMediaWidget(viewElement) { return ( isWidget(viewElement) && !!viewElement.getCustomProperty('drupalMedia') ); } /** * Gets selected Drupal Media widget if only Drupal Media is currently selected. * * @param {module:engine/model/selection~Selection} selection * The current selection. * @return {module:engine/view/element~Element|null} * The currently selected Drupal Media widget or null. * * @internal */ export function getSelectedDrupalMediaWidget(selection) { const viewElement = selection.getSelectedElement(); if (viewElement && isDrupalMediaWidget(viewElement)) { return viewElement; } return null; }
mamont77/easydrupal
core/modules/ckeditor5/js/ckeditor5_plugins/drupalMedia/src/utils.js
JavaScript
gpl-2.0
1,416
<html lang="en"> <head> <title>What FFTW Really Computes - FFTW 3.3.2</title> <meta http-equiv="Content-Type" content="text/html"> <meta name="description" content="FFTW 3.3.2"> <meta name="generator" content="makeinfo 4.13"> <link title="Top" rel="start" href="index.html#Top"> <link rel="up" href="FFTW-Reference.html#FFTW-Reference" title="FFTW Reference"> <link rel="prev" href="Wisdom.html#Wisdom" title="Wisdom"> <link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage"> <!-- This manual is for FFTW (version 3.3.2, 28 April 2012). Copyright (C) 2003 Matteo Frigo. Copyright (C) 2003 Massachusetts Institute of Technology. Permission is granted to make and distribute verbatim copies of this manual provided the copyright notice and this permission notice are preserved on all copies. Permission is granted to copy and distribute modified versions of this manual under the conditions for verbatim copying, provided that the entire resulting derived work is distributed under the terms of a permission notice identical to this one. Permission is granted to copy and distribute translations of this manual into another language, under the above conditions for modified versions, except that this permission notice may be stated in a translation approved by the Free Software Foundation. 
--> <meta http-equiv="Content-Style-Type" content="text/css"> <style type="text/css"><!-- pre.display { font-family:inherit } pre.format { font-family:inherit } pre.smalldisplay { font-family:inherit; font-size:smaller } pre.smallformat { font-family:inherit; font-size:smaller } pre.smallexample { font-size:smaller } pre.smalllisp { font-size:smaller } span.sc { font-variant:small-caps } span.roman { font-family:serif; font-weight:normal; } span.sansserif { font-family:sans-serif; font-weight:normal; } --></style> </head> <body> <div class="node"> <a name="What-FFTW-Really-Computes"></a> <p> Previous:&nbsp;<a rel="previous" accesskey="p" href="Wisdom.html#Wisdom">Wisdom</a>, Up:&nbsp;<a rel="up" accesskey="u" href="FFTW-Reference.html#FFTW-Reference">FFTW Reference</a> <hr> </div> <h3 class="section">4.8 What FFTW Really Computes</h3> <p>In this section, we provide precise mathematical definitions for the transforms that FFTW computes. These transform definitions are fairly standard, but some authors follow slightly different conventions for the normalization of the transform (the constant factor in front) and the sign of the complex exponent. We begin by presenting the one-dimensional (1d) transform definitions, and then give the straightforward extension to multi-dimensional transforms. 
<ul class="menu"> <li><a accesskey="1" href="The-1d-Discrete-Fourier-Transform-_0028DFT_0029.html#The-1d-Discrete-Fourier-Transform-_0028DFT_0029">The 1d Discrete Fourier Transform (DFT)</a> <li><a accesskey="2" href="The-1d-Real_002ddata-DFT.html#The-1d-Real_002ddata-DFT">The 1d Real-data DFT</a> <li><a accesskey="3" href="1d-Real_002deven-DFTs-_0028DCTs_0029.html#g_t1d-Real_002deven-DFTs-_0028DCTs_0029">1d Real-even DFTs (DCTs)</a> <li><a accesskey="4" href="1d-Real_002dodd-DFTs-_0028DSTs_0029.html#g_t1d-Real_002dodd-DFTs-_0028DSTs_0029">1d Real-odd DFTs (DSTs)</a> <li><a accesskey="5" href="1d-Discrete-Hartley-Transforms-_0028DHTs_0029.html#g_t1d-Discrete-Hartley-Transforms-_0028DHTs_0029">1d Discrete Hartley Transforms (DHTs)</a> <li><a accesskey="6" href="Multi_002ddimensional-Transforms.html#Multi_002ddimensional-Transforms">Multi-dimensional Transforms</a> </ul> <!-- =========> --> </body></html>
mesjetiu/grandorgue-es
src/fftw/src/doc/html/What-FFTW-Really-Computes.html
HTML
gpl-2.0
3,658
<?php namespace Drupal\migrate\Plugin; use Drupal\Component\Plugin\PluginInspectionInterface; use Drupal\migrate\Row; /** * Defines an interface for migrate sources. * * @see \Drupal\migrate\Plugin\MigratePluginManager * @see \Drupal\migrate\Annotation\MigrateSource * @see \Drupal\migrate\Plugin\migrate\source\SourcePluginBase * @see plugin_api * * @ingroup migration */ interface MigrateSourceInterface extends \Countable, \Iterator, PluginInspectionInterface { /** * Indicates that the source is not countable. */ const NOT_COUNTABLE = -1; /** * Returns available fields on the source. * * @return array * Available fields in the source, keys are the field machine names as used * in field mappings, values are descriptions. */ public function fields(); /** * Adds additional data to the row. * * @param \Drupal\migrate\Row $row * The row object. * * @return bool * FALSE if this row needs to be skipped. */ public function prepareRow(Row $row); /** * Allows class to decide how it will react when it is treated like a string. */ public function __toString(); /** * Defines the source fields uniquely identifying a source row. * * None of these fields should contain a NULL value. If necessary, use * prepareRow() or hook_migrate_prepare_row() to rewrite NULL values to * appropriate empty values (such as '' or 0). * * @return array[] * An associative array of field definitions keyed by field ID. Values are * associative arrays with a structure that contains the field type ('type' * key). The other keys are the field storage settings as they are returned * by FieldStorageDefinitionInterface::getSettings(). 
* * Examples: * * A composite source primary key that is defined by an integer and a string * might look like this: * @code * return [ * 'id' => [ * 'type' => 'integer', * 'unsigned' => FALSE, * 'size' => 'big', * ], * 'version' => [ * 'type' => 'string', * 'max_length' => 64, * 'is_ascii' => TRUE, * ], * ]; * @endcode * * If 'type' points to a field plugin with multiple columns and needs to * refer to a column different than 'value', the key of that column will be * appended as a suffix to the plugin name, separated by dot ('.'). Example: * @code * return [ * 'format' => [ * 'type' => 'text.format', * ], * ]; * @endcode * * Additional custom keys/values that are not part of field storage * definition can be added as shown below. The most common setting * passed along to the ID definition is table 'alias', used by the SqlBase * source plugin in order to distinguish between ambiguous column names - * for example, when a SQL source query joins two tables with the same * column names. * @code * return [ * 'nid' => [ * 'type' => 'integer', * 'alias' => 'n', * ], * ]; * @endcode * * @see \Drupal\Core\Field\FieldStorageDefinitionInterface::getSettings() * @see \Drupal\Core\Field\Plugin\Field\FieldType\IntegerItem * @see \Drupal\Core\Field\Plugin\Field\FieldType\StringItem * @see \Drupal\text\Plugin\Field\FieldType\TextItem * @see \Drupal\migrate\Plugin\migrate\source\SqlBase */ public function getIds(); /** * Gets the source module providing the source data. * * @return string|null * The source module or NULL if not found. */ public function getSourceModule(); }
pulibrary/recap
core/modules/migrate/src/Plugin/MigrateSourceInterface.php
PHP
gpl-2.0
3,699
#define DEBUG // yjcho #include <linux/kernel.h> #include <linux/string.h> #include <soc/qcom/lge/board_lge.h> #include <linux/of.h> #include <linux/of_fdt.h> #include <linux/platform_device.h> #ifdef CONFIG_LGE_PM_USB_ID #include <linux/err.h> #include <linux/qpnp/qpnp-adc.h> #include <linux/power_supply.h> #endif #ifdef CONFIG_LGE_USB_G_ANDROID #include <linux/platform_data/lge_android_usb.h> #endif #ifdef CONFIG_LGE_EARJACK_DEBUGGER #include <soc/qcom/lge/board_lge.h> #endif #ifdef CONFIG_LGE_PM_USB_ID struct chg_cable_info_table { int threshhold; enum acc_cable_type type; unsigned ta_ma; unsigned usb_ma; }; #define ADC_NO_INIT_CABLE 0 #define C_NO_INIT_TA_MA 0 #define C_NO_INIT_USB_MA 0 #define ADC_CABLE_NONE 1900000 #define C_NONE_TA_MA 700 #define C_NONE_USB_MA 500 #define MAX_CABLE_NUM 15 static bool cable_type_defined; static struct chg_cable_info_table lge_acc_cable_type_data[MAX_CABLE_NUM]; #endif #if defined(CONFIG_LGE_MIPI_P1_INCELL_QHD_CMD_PANEL) static char dsv_vendor[3]; int display_panel_type; int lk_panel_init_fail = 0; int rsp_nvm_rw; #if defined(CONFIG_MACH_MSM8992_P1_CN) || defined(CONFIG_MACH_MSM8992_P1_GLOBAL_COM) int lge_sim_type; #endif #endif static enum hw_rev_type lge_bd_rev = HW_REV_MAX; /* CAUTION: These strings are come from LK. 
*/ char *rev_str[] = {"evb1", "evb2", "evb3", "rev_0", "rev_a", "rev_b", "rev_c", "rev_d", "rev_e", "rev_f", "rev_g", "rev_10", "rev_11", "rev_12", "reserved"}; extern unsigned int system_rev; static int __init board_revno_setup(char *rev_info) { int i; for (i = 0; i < HW_REV_MAX; i++) { if (!strncmp(rev_info, rev_str[i], 6)) { lge_bd_rev = i; system_rev = lge_bd_rev; break; } } pr_info("BOARD : LGE %s\n", rev_str[lge_bd_rev]); return 1; } __setup("lge.rev=", board_revno_setup); enum hw_rev_type lge_get_board_revno(void) { return lge_bd_rev; } #ifdef CONFIG_LGE_PM_USB_ID void get_cable_data_from_dt(void *of_node) { int i; u32 cable_value[3]; struct device_node *node_temp = (struct device_node *)of_node; const char *propname[MAX_CABLE_NUM] = { "lge,no-init-cable", "lge,cable-mhl-1k", "lge,cable-u-28p7k", "lge,cable-28p7k", "lge,cable-56k", "lge,cable-100k", "lge,cable-130k", "lge,cable-180k", "lge,cable-200k", "lge,cable-220k", "lge,cable-270k", "lge,cable-330k", "lge,cable-620k", "lge,cable-910k", "lge,cable-none" }; if (cable_type_defined) { pr_info("Cable type is already defined\n"); return; } for (i = 0; i < MAX_CABLE_NUM; i++) { of_property_read_u32_array(node_temp, propname[i], cable_value, 3); lge_acc_cable_type_data[i].threshhold = cable_value[0]; lge_acc_cable_type_data[i].type = i; lge_acc_cable_type_data[i].ta_ma = cable_value[1]; lge_acc_cable_type_data[i].usb_ma = cable_value[2]; } cable_type_defined = 1; } int lge_pm_get_cable_info(struct qpnp_vadc_chip *vadc, struct chg_cable_info *cable_info) { char *type_str[] = { "NOT INIT", "MHL 1K", "U_28P7K", "28P7K", "56K", "100K", "130K", "180K", "200K", "220K", "270K", "330K", "620K", "910K", "OPEN" }; struct qpnp_vadc_result result; struct chg_cable_info *info = cable_info; struct chg_cable_info_table *table; int table_size = ARRAY_SIZE(lge_acc_cable_type_data); int acc_read_value = 0; int i, rc; int count = 1; if (!info) { pr_err("%s : invalid info parameters\n", __func__); return -EINVAL; } if (!vadc) { 
pr_err("%s : invalid vadc parameters\n", __func__); return -EINVAL; } if (!cable_type_defined) { pr_err("%s : cable type is not defined yet.\n", __func__); return -EINVAL; } for (i = 0; i < count; i++) { rc = qpnp_vadc_read(vadc, LR_MUX10_USB_ID_LV, &result); if (rc < 0) { if (rc == -ETIMEDOUT) { /* reason: adc read timeout, * assume it is open cable */ info->cable_type = CABLE_NONE; info->ta_ma = C_NONE_TA_MA; info->usb_ma = C_NONE_USB_MA; } pr_err("%s : adc read error - %d\n", __func__, rc); return rc; } acc_read_value = (int)result.physical; pr_info("%s : adc_read-%d\n", __func__, (int)result.physical); /* mdelay(10); */ } info->cable_type = NO_INIT_CABLE; info->ta_ma = C_NO_INIT_TA_MA; info->usb_ma = C_NO_INIT_USB_MA; /* assume: adc value must be existed in ascending order */ for (i = 0; i < table_size; i++) { table = &lge_acc_cable_type_data[i]; if (acc_read_value <= table->threshhold) { info->cable_type = table->type; info->ta_ma = table->ta_ma; info->usb_ma = table->usb_ma; break; } } pr_err("\n\n[PM]Cable detected: %d(%s)(%d, %d)\n\n", acc_read_value, type_str[info->cable_type], info->ta_ma, info->usb_ma); return 0; } /* Belows are for using in interrupt context */ static struct chg_cable_info lge_cable_info; enum acc_cable_type lge_pm_get_cable_type(void) { return lge_cable_info.cable_type; } unsigned lge_pm_get_ta_current(void) { return lge_cable_info.ta_ma; } unsigned lge_pm_get_usb_current(void) { return lge_cable_info.usb_ma; } /* This must be invoked in process context */ void lge_pm_read_cable_info(struct qpnp_vadc_chip *vadc) { lge_cable_info.cable_type = NO_INIT_CABLE; lge_cable_info.ta_ma = C_NO_INIT_TA_MA; lge_cable_info.usb_ma = C_NO_INIT_USB_MA; lge_pm_get_cable_info(vadc, &lge_cable_info); } void lge_pm_read_cable_info_and_type(struct device *dev, struct qpnp_vadc_chip *vadc) { struct device_node *node = NULL; const char *cable_type = "lge,cable-type"; if (!cable_type_defined) { if (dev && dev->of_node) { node = of_parse_phandle(dev->of_node, 
cable_type, 0); if (node) get_cable_data_from_dt(node); } } lge_pm_read_cable_info(vadc); } #endif #ifdef CONFIG_LGE_EARJACK_DEBUGGER /* s_uart_console_status bits format * ------higher than bit4 are not used * bit5...: not used * ------bit4 indicates whenter uart console was ready(probed) * bit4: [UART_CONSOLE_READY] * ------current uart console status ----------------- * bit3: [UART_CONSOLE_ENABLED] * ------configuration bit field ----------------- * bit2: [UART_CONSOLE_ENABLE_ON_DEFAULT] * bit1; [UART_CONSOLE_ENABLE_ON_EARJACK_DEBUGGER] * bit0: [UART_CONSOLE_ENABLE_ON_EARJACK] */ static unsigned int s_uart_console_status = 0; /* disabling uart console */ unsigned int lge_uart_console_get_config(void) { return (s_uart_console_status & UART_CONSOLE_MASK_CONFIG); } void lge_uart_console_set_config(unsigned int config) { config &= UART_CONSOLE_MASK_CONFIG; s_uart_console_status |= config; } unsigned int lge_uart_console_get_enabled(void) { return s_uart_console_status & UART_CONSOLE_MASK_ENABLED; } void lge_uart_console_set_enabled(int enabled) { s_uart_console_status &= ~UART_CONSOLE_MASK_ENABLED; /* for caller conding convenience, regard no-zero as enabled also */ s_uart_console_status |= (enabled ? UART_CONSOLE_ENABLED : 0); } unsigned int lge_uart_console_get_ready(void) { return s_uart_console_status & UART_CONSOLE_MASK_READY; } void lge_uart_console_set_ready(unsigned int ready) { s_uart_console_status &= ~UART_CONSOLE_MASK_READY; /* for caller side coding convenience, regard no-zero as ready also */ s_uart_console_status |= (ready ? 
UART_CONSOLE_READY : 0); } #endif /* CONFIG_LGE_EARJACK_DEBUGGER */ #if defined(CONFIG_LGE_MIPI_P1_INCELL_QHD_CMD_PANEL) static int __init display_dsv_setup(char *dsv_cmd) { sscanf(dsv_cmd, "%s", dsv_vendor); pr_info("dsv vendor id is %s\n", dsv_vendor); return 1; } __setup("lge.dsv_id=", display_dsv_setup); char* lge_get_dsv_vendor(void) { return dsv_vendor; } void lge_set_panel(int panel_type) { pr_info("panel_type is %d\n",panel_type); display_panel_type = panel_type; } int lge_get_panel(void) { return display_panel_type; } static int __init lge_rsp_nvm_setup(char *rsp_nvm) { if (strncmp(rsp_nvm, "0", 1) == 0) { rsp_nvm_rw = 0; } else if (strncmp(rsp_nvm, "1", 1) == 0) { rsp_nvm_rw = 1; } else { pr_err("%s : fail to read rsp_nvm \n", __func__); } pr_debug("rsp_nvm %d,\n", rsp_nvm_rw); return 1; } __setup("lge.rsp_nvm=", lge_rsp_nvm_setup); int lge_get_rsp_nvm(void) { return rsp_nvm_rw; } #if defined(CONFIG_MACH_MSM8992_P1_CN) || defined(CONFIG_MACH_MSM8992_P1_GLOBAL_COM) static int __init lge_sim_setup(char *sim_num) { if (strncmp(sim_num, "1", 1) == 0) { lge_sim_type = 1; } else if (strncmp(sim_num, "2", 1) == 0) { lge_sim_type = 2; } else { lge_sim_type = 0; pr_err("%s : fail to read sim type\n", __func__); } pr_debug("lge_sim_type is %d, sim_num set %s= \n", lge_sim_type,sim_num); return 1; } __setup("lge.sim_num=", lge_sim_setup); int lge_get_sim_type(void) { return lge_sim_type; } #endif static int __init lk_panel_init_status(char *panel_init_cmd) { if (strncmp(panel_init_cmd, "1", 1) == 0) { lk_panel_init_fail = 1; pr_info("lk panel init fail[%d]\n", lk_panel_init_fail); } else { lk_panel_init_fail = 0; } return 1; } __setup("lge.pinit_fail=", lk_panel_init_status); int lge_get_lk_panel_status(void) { return lk_panel_init_fail; } #endif /* for download complete using LAF image return value : 1 --> right after laf complete & reset */ int android_dlcomplete = 0; int __init lge_android_dlcomplete(char *s) { if (strncmp(s, "1", 1) == 0) android_dlcomplete = 1; 
else android_dlcomplete = 0; pr_info("androidboot.dlcomplete = %d\n", android_dlcomplete); return 1; } __setup("androidboot.dlcomplete=", lge_android_dlcomplete); int lge_get_android_dlcomplete(void) { return android_dlcomplete; } #ifdef CONFIG_LGE_PM_FACTORY_PSEUDO_BATTERY struct pseudo_batt_info_type pseudo_batt_info = { .mode = 0, .id = 1, .therm = 100, .temp = 400, .volt = 4100, .capacity = 80, .charging = 1, }; int safety_timer = 1; void pseudo_batt_set(struct pseudo_batt_info_type *info) { struct power_supply *batt_psy, *usb_psy; union power_supply_propval ret = {0,}; batt_psy = power_supply_get_by_name("battery"); if (!batt_psy) { pr_err("called before init\n"); return; } usb_psy = power_supply_get_by_name("usb"); if (!usb_psy) { pr_err("called before init\n"); return; } pseudo_batt_info.mode = info->mode; pseudo_batt_info.id = info->id; pseudo_batt_info.therm = info->therm; pseudo_batt_info.temp = info->temp; pseudo_batt_info.volt = info->volt; pseudo_batt_info.capacity = info->capacity; pseudo_batt_info.charging = info->charging; pr_err("pseudo batt set success\n"); ret.intval = !pseudo_batt_info.mode; batt_psy->set_property(batt_psy, POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE, &ret); power_supply_changed(batt_psy); power_supply_changed(usb_psy); } #endif /* get boot mode information from cmdline. * If any boot mode is not specified, * boot mode is normal type. 
*/ static enum lge_boot_mode_type lge_boot_mode = LGE_BOOT_MODE_NORMAL; int __init lge_boot_mode_init(char *s) { if (!strcmp(s, "charger")) lge_boot_mode = LGE_BOOT_MODE_CHARGER; else if (!strcmp(s, "chargerlogo")) lge_boot_mode = LGE_BOOT_MODE_CHARGERLOGO; else if (!strcmp(s, "qem_56k")) lge_boot_mode = LGE_BOOT_MODE_QEM_56K; else if (!strcmp(s, "qem_130k")) lge_boot_mode = LGE_BOOT_MODE_QEM_130K; else if (!strcmp(s, "qem_910k")) lge_boot_mode = LGE_BOOT_MODE_QEM_910K; else if (!strcmp(s, "pif_56k")) lge_boot_mode = LGE_BOOT_MODE_PIF_56K; else if (!strcmp(s, "pif_130k")) lge_boot_mode = LGE_BOOT_MODE_PIF_130K; else if (!strcmp(s, "pif_910k")) lge_boot_mode = LGE_BOOT_MODE_PIF_910K; /* LGE_UPDATE_S for MINIOS2.0 */ else if (!strcmp(s, "miniOS")) lge_boot_mode = LGE_BOOT_MODE_MINIOS; pr_info("ANDROID BOOT MODE : %d %s\n", lge_boot_mode, s); /* LGE_UPDATE_E for MINIOS2.0 */ return 1; } __setup("androidboot.mode=", lge_boot_mode_init); enum lge_boot_mode_type lge_get_boot_mode(void) { return lge_boot_mode; } int lge_get_factory_boot(void) { int res; /* if boot mode is factory, * cable must be factory cable. */ switch (lge_boot_mode) { case LGE_BOOT_MODE_QEM_56K: case LGE_BOOT_MODE_QEM_130K: case LGE_BOOT_MODE_QEM_910K: case LGE_BOOT_MODE_PIF_56K: case LGE_BOOT_MODE_PIF_130K: case LGE_BOOT_MODE_PIF_910K: case LGE_BOOT_MODE_MINIOS: res = 1; break; default: res = 0; break; } return res; } static enum lge_laf_mode_type lge_laf_mode = LGE_LAF_MODE_NORMAL; int __init lge_laf_mode_init(char *s) { if (strcmp(s, "") && strcmp(s, "MID")) lge_laf_mode = LGE_LAF_MODE_LAF; return 1; } __setup("androidboot.laf=", lge_laf_mode_init); enum lge_laf_mode_type lge_get_laf_mode(void) { return lge_laf_mode; } #ifdef CONFIG_LGE_USB_G_ANDROID int get_factory_cable(void) { int res = 0; /* if boot mode is factory, cable must be factory cable. 
*/ switch (lge_boot_mode) { case LGE_BOOT_MODE_QEM_56K: case LGE_BOOT_MODE_PIF_56K: res = LGEUSB_FACTORY_56K; break; case LGE_BOOT_MODE_QEM_130K: case LGE_BOOT_MODE_PIF_130K: res = LGEUSB_FACTORY_130K; break; case LGE_BOOT_MODE_QEM_910K: case LGE_BOOT_MODE_PIF_910K: res = LGEUSB_FACTORY_910K; break; default: res = 0; break; } return res; } struct lge_android_usb_platform_data lge_android_usb_pdata = { .vendor_id = 0x1004, .factory_pid = 0x6000, .iSerialNumber = 0, .product_name = "LGE Android Phone", .manufacturer_name = "LG Electronics Inc.", .factory_composition = "acm,diag", .get_factory_cable = get_factory_cable, }; static struct platform_device lge_android_usb_device = { .name = "lge_android_usb", .id = -1, .dev = { .platform_data = &lge_android_usb_pdata, }, }; static int __init lge_android_usb_devices_init(void) { return platform_device_register(&lge_android_usb_device); } arch_initcall(lge_android_usb_devices_init); #endif #ifdef CONFIG_LGE_USB_DIAG_LOCK static struct platform_device lg_diag_cmd_device = { .name = "lg_diag_cmd", .id = -1, .dev = { .platform_data = 0, /* &lg_diag_cmd_pdata */ }, }; static int __init lge_diag_devices_init(void) { return platform_device_register(&lg_diag_cmd_device); } arch_initcall(lge_diag_devices_init); #endif #ifdef CONFIG_LGE_QFPROM_INTERFACE static struct platform_device qfprom_device = { .name = "lge-qfprom", .id = -1, }; static int __init lge_add_qfprom_devices(void) { return platform_device_register(&qfprom_device); } arch_initcall(lge_add_qfprom_devices); #endif static int lge_boot_reason = -1; /* undefined for error checking */ static int __init lge_check_bootreason(char *reason) { int ret = 0; /* handle corner case of kstrtoint */ if (!strcmp(reason, "0xffffffff")) { lge_boot_reason = 0xffffffff; return 1; } ret = kstrtoint(reason, 16, &lge_boot_reason); if (!ret) printk(KERN_INFO "LGE REBOOT REASON: %x\n", lge_boot_reason); else printk(KERN_INFO "LGE REBOOT REASON: Couldn't get bootreason - %d\n", ret); return 1; 
} __setup("lge.bootreasoncode=", lge_check_bootreason); int lge_get_bootreason(void) { return lge_boot_reason; } int on_hidden_reset; static int __init lge_check_hidden_reset(char *reset_mode) { if (!strncmp(reset_mode, "on", 2)) on_hidden_reset = 1; return 1; } __setup("lge.hreset=", lge_check_hidden_reset); #ifdef CONFIG_LGE_LCD_OFF_DIMMING int lge_get_bootreason_with_lcd_dimming(void) { int ret = 0; if (lge_get_bootreason() == 0x77665560) ret = 1; else if (lge_get_bootreason() == 0x77665561) ret = 2; else if (lge_get_bootreason() == 0x77665562) ret = 3; return ret; } #endif
AndroidGX/SimpleGX-MM-6.0_H815_20i
drivers/soc/qcom/lge/devices_lge.c
C
gpl-2.0
15,320
<?php /** * Handles all manipulation of the session. * * The static methods are used to manipulate the currently active controller's session. * The instance methods are used to manipulate a particular session. There can be more than one of these created. * * In order to support things like testing, the session is associated with a particular Controller. In normal usage, * this is loaded from and saved to the regular PHP session, but for things like static-page-generation and * unit-testing, you can create multiple Controllers, each with their own session. * * The instance object is basically just a way of manipulating a set of nested maps, and isn't specific to session * data. * * <b>Saving Data</b> * * You can write a value to a users session from your PHP code using the static function {@link Session::set()}. You * can add this line in any function or file you wish to save the value. * * <code> * Session::set('MyValue', 6); * </code> * * Saves the value of "6" to the MyValue session data. You can also save arrays or serialized objects in session (but * note there may be size restrictions as to how much you can save) * * <code> * // save a variable * $var = 1; * Session::set('MyVar', $var); * * // saves an array * Session::set('MyArrayOfValues', array('1','2','3')); * * // saves an object (you'll have to unserialize it back) * $object = new Object(); * * Session::set('MyObject', serialize($object)); * </code> * * <b>Accessing Data</b> * * Once you have saved a value to the Session you can access it by using the {@link Session::get()} function. * Like the {@link Session::set()} function you can use this anywhere in your PHP files. * * The values in the comments are the values stored from the previous example. 
* * <code> * public function bar() { * $value = Session::get('MyValue'); // $value = 6 * $var = Session::get('MyVar'); // $var = 1 * $array = Session::get('MyArrayOfValues'); // $array = array(1,2,3) * $object = Session::get('MyObject', unserialize($object)); // $object = Object() * } * </code> * * You can also get all the values in the session at once. This is useful for debugging. * * <code> * Session::get_all(); // returns an array of all the session values. * </code> * * <b>Clearing Data</b> * * Once you have accessed a value from the Session it doesn't automatically wipe the value from the Session, you have * to specifically remove it. To clear a value you can either delete 1 session value by the name that you saved it * * <code> * Session::clear('MyValue'); // MyValue is no longer 6. * </code> * * Or you can clear every single value in the session at once. Note SilverStripe stores some of its own session data * including form and page comment information. None of this is vital but clear_all will clear everything. * * <code> * Session::clear_all(); * </code> * * @see Cookie * @todo This class is currently really basic and could do with a more well-thought-out implementation. * * @package framework * @subpackage control */ class Session { /** * @var $timeout Set session timeout in seconds. * @config */ private static $timeout = 0; /** * @config * @var array */ private static $session_ips = array(); /** * @config * @var string */ private static $cookie_domain; /** * @config * @var string */ private static $cookie_path; /** * @config * @var string */ private static $session_store_path; /** * @config * @var boolean */ private static $cookie_secure = false; /** * Session data */ protected $data = array(); protected $changedData = array(); protected function userAgent() { if (isset($_SERVER['HTTP_USER_AGENT'])) { return $_SERVER['HTTP_USER_AGENT']; } else { return ''; } } /** * Start PHP session, then create a new Session object with the given start data. 
* * @param $data array|Session Can be an array of data (such as $_SESSION) or another Session object to clone. */ public function __construct($data) { if($data instanceof Session) $data = $data->inst_getAll(); $this->data = $data; if (isset($this->data['HTTP_USER_AGENT'])) { if ($this->data['HTTP_USER_AGENT'] != $this->userAgent()) { // Funny business detected! $this->inst_clearAll(); $this->inst_destroy(); $this->inst_start(); } } } /** * Cookie domain, for example 'www.php.net'. * * To make cookies visible on all subdomains then the domain * must be prefixed with a dot like '.php.net'. * * @deprecated 3.2 Use the "Session.cookie_domain" config setting instead * * @param string $domain The domain to set */ public static function set_cookie_domain($domain) { Deprecation::notice('3.2', 'Use the "Session.cookie_domain" config setting instead'); Config::inst()->update('Session', 'cookie_domain', $domain); } /** * Get the cookie domain. * * @deprecated 3.2 Use the "Session.cookie_domain" config setting instead * * @return string */ public static function get_cookie_domain() { Deprecation::notice('3.2', 'Use the "Session.cookie_domain" config setting instead'); return Config::inst()->get('Session', 'cookie_domain'); } /** * Path to set on the domain where the session cookie will work. * Use a single slash ('/') for all paths on the domain. * * @deprecated 3.2 Use the "Session.cookie_path" config setting instead * * @param string $path The path to set */ public static function set_cookie_path($path) { Deprecation::notice('3.2', 'Use the "Session.cookie_path" config setting instead'); Config::inst()->update('Session', 'cookie_path', $path); } /** * Get the path on the domain where the session cookie will work. 
* * @deprecated 3.2 Use the "Session.cookie_path" config setting instead * * @return string */ public static function get_cookie_path() { Deprecation::notice('3.2', 'Use the "Session.cookie_path" config setting instead'); if(Config::inst()->get('Session', 'cookie_path')) { return Config::inst()->get('Session', 'cookie_path'); } else { return Director::baseURL(); } } /** * Secure cookie, tells the browser to only send it over SSL. * * @deprecated 3.2 Use the "Session.cookie_secure" config setting instead * * @param boolean $secure */ public static function set_cookie_secure($secure) { Deprecation::notice('3.2', 'Use the "Session.cookie_secure" config setting instead'); Config::inst()->update('Session', 'cookie_secure', (bool)$secure); } /** * Get if the cookie is secure * * @deprecated 3.2 Use the "Session.cookie_secure" config setting instead * * @return boolean */ public static function get_cookie_secure() { Deprecation::notice('3.2', 'Use the "Session.cookie_secure" config setting instead'); return Config::inst()->get('Session', 'cookie_secure'); } /** * Set the session store path * * @deprecated 3.2 Use the "Session.session_store_path" config setting instead * * @param string $path Filesystem path to the session store */ public static function set_session_store_path($path) { Deprecation::notice('3.2', 'Use the "Session.session_store_path" config setting instead'); Config::inst()->update('Session', 'session_store_path', $path); } /** * Get the session store path * @return string */ public static function get_session_store_path() { Deprecation::notice('3.2', 'Use the "Session.session_store_path" config setting instead'); return Config::inst()->get('Session', 'session_store_path'); } /** * Provide an <code>array</code> of rules specifing timeouts for IPv4 address ranges or * individual IPv4 addresses. The key is an IP address or range and the value is the time * until the session expires in seconds. 
For example: * * Session::set_timeout_ips(array( * '127.0.0.1' => 36000 * )); * * Any user connecting from 127.0.0.1 (localhost) will have their session expired after 10 hours. * * Session::set_timeout is used to set the timeout value for any users whose address is not in the given IP range. * * @deprecated 3.2 Use the "Session.timeout_ips" config setting instead * * @param array $session_ips Array of IPv4 rules. */ public static function set_timeout_ips($ips) { Deprecation::notice('3.2', 'Use the "Session.timeout_ips" config setting instead'); Config::inst()->update('Session', 'timeout_ips', $ips); } /** * Add a value to a specific key in the session array */ public static function add_to_array($name, $val) { return self::current_session()->inst_addToArray($name, $val); } /** * Set a key/value pair in the session * * @param string $name Key * @param string $val Value */ public static function set($name, $val) { return self::current_session()->inst_set($name, $val); } /** * Return a specific value by session key * * @param string $name Key to lookup */ public static function get($name) { return self::current_session()->inst_get($name); } /** * Return all the values in session * * @return Array */ public static function get_all() { return self::current_session()->inst_getAll(); } /** * Clear a given session key, value pair. 
* * @param string $name Key to lookup */ public static function clear($name) { return self::current_session()->inst_clear($name); } /** * Clear all the values * * @return void */ public static function clear_all() { self::current_session()->inst_clearAll(); self::$default_session = null; } /** * Save all the values in our session to $_SESSION */ public static function save() { return self::current_session()->inst_save(); } protected static $default_session = null; protected static function current_session() { if(Controller::has_curr()) { return Controller::curr()->getSession(); } else { if(!self::$default_session) { self::$default_session = Injector::inst()->create('Session', isset($_SESSION) ? $_SESSION : array()); } return self::$default_session; } } public function inst_start($sid = null) { $path = Config::inst()->get('Session', 'cookie_path'); if(!$path) $path = Director::baseURL(); $domain = Config::inst()->get('Session', 'cookie_domain'); $secure = Director::is_https() && Config::inst()->get('Session', 'cookie_secure'); $session_path = Config::inst()->get('Session', 'session_store_path'); $timeout = Config::inst()->get('Session', 'timeout'); if(!session_id() && !headers_sent()) { if($domain) { session_set_cookie_params($timeout, $path, $domain, $secure, true); } else { session_set_cookie_params($timeout, $path, null, $secure, true); } // Allow storing the session in a non standard location if($session_path) session_save_path($session_path); // If we want a secure cookie for HTTPS, use a seperate session name. This lets us have a // seperate (less secure) session for non-HTTPS requests if($secure) session_name('SECSESSID'); if($sid) session_id($sid); session_start(); $this->data = isset($_SESSION) ? $_SESSION : array(); } // Modify the timeout behaviour so it's the *inactive* time before the session expires. // By default it's the total session lifetime if($timeout && !headers_sent()) { Cookie::set(session_name(), session_id(), $timeout/86400, $path, $domain ? 
$domain : null, $secure, true); } } public function inst_destroy($removeCookie = true) { if(session_id()) { if($removeCookie) { $path = Config::inst()->get('Session', 'cookie_path'); if(!$path) $path = Director::baseURL(); $domain = Config::inst()->get('Session', 'cookie_domain'); $secure = Config::inst()->get('Session', 'cookie_secure'); if($domain) { Cookie::set(session_name(), '', null, $path, $domain, $secure, true); } else { Cookie::set(session_name(), '', null, $path, null, $secure, true); } unset($_COOKIE[session_name()]); } session_destroy(); // Clean up the superglobal - session_destroy does not do it. // http://nz1.php.net/manual/en/function.session-destroy.php unset($_SESSION); $this->data = array(); } } public function inst_set($name, $val) { // Quicker execution path for "."-free names if(strpos($name,'.') === false) { $this->data[$name] = $val; $this->changedData[$name] = $val; } else { $names = explode('.', $name); // We still want to do this even if we have strict path checking for legacy code $var = &$this->data; $diffVar = &$this->changedData; // Iterate twice over the names - once to see if the value needs to be changed, // and secondly to get the changed data value. This is done to solve a problem // where iterating over the diff var would create empty arrays, and the value // would then not be set, inadvertently clearing session values. 
foreach($names as $n) { $var = &$var[$n]; } if($var !== $val) { foreach($names as $n) { $diffVar = &$diffVar[$n]; } $var = $val; $diffVar = $val; } } } public function inst_addToArray($name, $val) { $names = explode('.', $name); // We still want to do this even if we have strict path checking for legacy code $var = &$this->data; $diffVar = &$this->changedData; foreach($names as $n) { $var = &$var[$n]; $diffVar = &$diffVar[$n]; } $var[] = $val; $diffVar[sizeof($var)-1] = $val; } public function inst_get($name) { // Quicker execution path for "."-free names if(strpos($name,'.') === false) { if(isset($this->data[$name])) return $this->data[$name]; } else { $names = explode('.', $name); if(!isset($this->data)) { return null; } $var = $this->data; foreach($names as $n) { if(!isset($var[$n])) { return null; } $var = $var[$n]; } return $var; } } public function inst_clear($name) { $names = explode('.', $name); // We still want to do this even if we have strict path checking for legacy code $var = &$this->data; $diffVar = &$this->changedData; foreach($names as $n) { // don't clear a record that doesn't exist if(!isset($var[$n])) return; $var = &$var[$n]; } // only loop to find data within diffVar if var is proven to exist in the above loop foreach($names as $n) { $diffVar = &$diffVar[$n]; } if($var !== null) { $var = null; $diffVar = null; } } public function inst_clearAll() { if($this->data && is_array($this->data)) { foreach(array_keys($this->data) as $key) { $this->inst_clear($key); } } } public function inst_getAll() { return $this->data; } public function inst_finalize() { $this->inst_set('HTTP_USER_AGENT', $this->userAgent()); } /** * Save data to session * Only save the changes, so that anyone manipulating $_SESSION directly doesn't get burned. 
*/ public function inst_save() { if($this->changedData) { $this->inst_finalize(); if(!isset($_SESSION)) { $this->inst_start(); } $this->recursivelyApply($this->changedData, $_SESSION); } } /** * Recursively apply the changes represented in $data to $dest. * Used to update $_SESSION */ protected function recursivelyApply($data, &$dest) { foreach($data as $k => $v) { if(is_array($v)) { if(!isset($dest[$k]) || !is_array($dest[$k])) $dest[$k] = array(); $this->recursivelyApply($v, $dest[$k]); } else { $dest[$k] = $v; } } } /** * Return the changed data, for debugging purposes. * @return array */ public function inst_changedData() { return $this->changedData; } /** * Sets the appropriate form message in session, with type. This will be shown once, * for the form specified. * * @param formname the form name you wish to use ( usually $form->FormName() ) * @param messsage the message you wish to add to it * @param type the type of message */ public static function setFormMessage($formname,$message,$type){ Session::set("FormInfo.$formname.formError.message", $message); Session::set("FormInfo.$formname.formError.type", $type); } /** * Is there a session ID in the request? * @return bool */ public static function request_contains_session_id() { $secure = Director::is_https() && Config::inst()->get('Session', 'cookie_secure'); $name = $secure ? 'SECSESSID' : session_name(); return isset($_COOKIE[$name]) || isset($_REQUEST[$name]); } /** * Initialize session. * * @param string $sid Start the session with a specific ID */ public static function start($sid = null) { self::current_session()->inst_start($sid); } /** * Destroy the active session. 
* * @param bool $removeCookie If set to TRUE, removes the user's cookie, FALSE does not remove */ public static function destroy($removeCookie = true) { self::current_session()->inst_destroy($removeCookie); } /** * Set the timeout of a Session value * * @deprecated 3.2 Use the "Session.timeout" config setting instead * * @param int $timeout Time until a session expires in seconds. Defaults to expire when browser is closed. */ public static function set_timeout($timeout) { Deprecation::notice('3.2', 'Use the "Session.timeout" config setting instead'); Config::inst()->update('Session', 'timeout', (int)$timeout); } /** * @deprecated 3.2 Use the "Session.timeout" config setting instead */ public static function get_timeout() { Deprecation::notice('3.2', 'Use the "Session.timeout" config setting instead'); return Config::inst()->get('Session', 'timeout'); } }
dangquochoi2007/SSiPhone
iShop/framework/control/Session.php
PHP
gpl-2.0
17,630
<?php /** * @package Redcore * @subpackage Upgrade * * @copyright Copyright (C) 2012 - 2016 redCOMPONENT.com. All rights reserved. * @license GNU General Public License version 2 or later, see LICENSE. */ defined('_JEXEC') or die; /** * Upgrade script for redCORE. * * @package Redcore * @subpackage Upgrade * @since 1.5 */ class Com_RedcoreUpdateScript_1_8_6 { /** * Performs the upgrade after initial Joomla update for this version * * @param JInstallerAdapter $parent Class calling this method * * @return bool */ public function executeAfterUpdate($parent) { $db = JFactory::getDbo(); $query = $db->getQuery(true) ->select($db->qn('params')) ->from('#__extensions') ->where($db->qn('type') . ' = ' . $db->q('plugin')) ->where($db->qn('element') . ' = ' . $db->q('redcore')) ->where($db->qn('folder') . ' = ' . $db->q('system')); $params = $db->setQuery($query)->loadResult(); if ($params) { // We will update com_redcore component parameters with the plugin parameters try { // We have changed default behavior of stateful webservices so we will change it together with the update if (is_string($params)) { $params = json_decode($params, true); } // We set all old installations to default ON condition if that parameter was not set $params['webservice_stateful'] = isset($params['webservice_stateful']) ? $params['webservice_stateful'] : 1; $params = json_encode($params); $query = $db->getQuery(true) ->update('#__extensions') ->set($db->qn('params') . ' = ' . $db->q($params)) ->where($db->qn('type') . ' = ' . $db->q('component')) ->where($db->qn('element') . ' = ' . $db->q('com_redcore')); $db->setQuery($query); $db->execute(); } catch (RuntimeException $e) { JLog::add($e->getMessage(), JLog::ERROR, 'jerror'); } } return true; } }
jatitoam/redCORE
extensions/components/com_redcore/admin/updates/1.8.6.php
PHP
gpl-2.0
1,926
/*************************************************************************** testqgsquickmapsettings.cpp -------------------------------------- Date : Nov 2017 Copyright : (C) 2017 by Peter Petrik Email : zilolv at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ #include <QObject> #include <QApplication> #include <QDesktopWidget> #include "qgsapplication.h" #include "qgstest.h" #include "qgis.h" #include "qgsunittypes.h" #include "qgsquickmapsettings.h" class TestQgsQuickMapSettings: public QObject { Q_OBJECT private slots: void init() {} // will be called before each testfunction is executed. void cleanup() {} // will be called after every testfunction. void test_project_existency(); }; void TestQgsQuickMapSettings::test_project_existency() { QgsQuickMapSettings *settings = new QgsQuickMapSettings(); QVERIFY( !settings->project() ); delete settings; } QGSTEST_MAIN( TestQgsQuickMapSettings ) #include "testqgsquickmapsettings.moc"
kalxas/QGIS
tests/src/quickgui/testqgsquickmapsettings.cpp
C++
gpl-2.0
1,615
/* * $Id: dns.c,v 1.91 2006/05/31 10:57:49 serassio Exp $ * * DEBUG: section 34 Dnsserver interface * AUTHOR: Harvest Derived * * SQUID Web Proxy Cache http://www.squid-cache.org/ * ---------------------------------------------------------- * * Squid is the result of efforts by numerous individuals from * the Internet community; see the CONTRIBUTORS file for full * details. Many organizations have provided support for Squid's * development; see the SPONSORS file for full details. Squid is * Copyrighted (C) 2001 by the Regents of the University of * California; see the COPYRIGHT file for full details. Squid * incorporates software developed and/or copyrighted by other * sources; see the CREDITS file for full details. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA. * */ #include "squid.h" /* MS VisualStudio Projects are monolithic, so we need the following * #if to exclude the external DNS code from compile process when * using Internal DNS. 
*/ #if USE_DNSSERVERS static helper *dnsservers = NULL; static void dnsStats(StoreEntry * sentry) { storeAppendPrintf(sentry, "Dnsserver Statistics:\n"); helperStats(sentry, dnsservers); } void dnsInit(void) { static int init = 0; wordlist *w; if (!Config.Program.dnsserver) return; if (dnsservers == NULL) dnsservers = helperCreate("dnsserver"); dnsservers->n_to_start = Config.dnsChildren; dnsservers->ipc_type = IPC_STREAM; assert(dnsservers->cmdline == NULL); wordlistAdd(&dnsservers->cmdline, Config.Program.dnsserver); if (Config.onoff.res_defnames) wordlistAdd(&dnsservers->cmdline, "-D"); for (w = Config.dns_nameservers; w != NULL; w = w->next) { wordlistAdd(&dnsservers->cmdline, "-s"); wordlistAdd(&dnsservers->cmdline, w->key); } helperOpenServers(dnsservers); if (!init) { cachemgrRegister("dns", "Dnsserver Statistics", dnsStats, 0, 1); init = 1; } } void dnsShutdown(void) { if (!dnsservers) return; helperShutdown(dnsservers); wordlistDestroy(&dnsservers->cmdline); if (!shutting_down) return; helperFree(dnsservers); dnsservers = NULL; } void dnsSubmit(const char *lookup, HLPCB * callback, void *data) { char buf[256]; static time_t first_warn = 0; snprintf(buf, 256, "%s\n", lookup); if (dnsservers->stats.queue_size >= dnsservers->n_running * 2) { if (first_warn == 0) first_warn = squid_curtime; if (squid_curtime - first_warn > 3 * 60) fatal("DNS servers not responding for 3 minutes"); debug(34, 1) ("dnsSubmit: queue overload, rejecting %s\n", lookup); callback(data, (char *) "$fail Temporary network problem, please retry later"); return; } first_warn = 0; helperSubmit(dnsservers, buf, callback, data); } #ifdef SQUID_SNMP /* * The function to return the DNS via SNMP */ variable_list * snmp_netDnsFn(variable_list * Var, snint * ErrP) { variable_list *Answer = NULL; debug(49, 5) ("snmp_netDnsFn: Processing request: %d\n", Var->name[LEN_SQ_NET + 1]); snmpDebugOid(5, Var->name, Var->name_length); *ErrP = SNMP_ERR_NOERROR; switch (Var->name[LEN_SQ_NET + 1]) { case 
DNS_REQ: Answer = snmp_var_new_integer(Var->name, Var->name_length, dnsservers->stats.requests, SMI_COUNTER32); break; case DNS_REP: Answer = snmp_var_new_integer(Var->name, Var->name_length, dnsservers->stats.replies, SMI_COUNTER32); break; case DNS_SERVERS: Answer = snmp_var_new_integer(Var->name, Var->name_length, dnsservers->n_running, SMI_COUNTER32); break; default: *ErrP = SNMP_ERR_NOSUCHNAME; break; } return Answer; } #endif /*SQUID_SNMP */ #endif /* USE_DNSSERVERS */
jlduran/BAPS2
squid-2.7.STABLE9/src/dns.c
C
gpl-2.0
4,393
// // Nibo-NanoVM, a tiny java VM for the Nibo robot // // Copyright (C) 2007 by Nils Springob <nils@nicai-systems.de> // Based on work by Benjamin Benz(c't-Bot) and Till Harbaum(NanoVM) // // This program is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA // // native_wheelencoder.c // #include "types.h" #include "debug.h" #include "config.h" #include "error.h" #include "delay.h" #include "stack.h" #include "vm.h" #include "nibo/native.h" #include "nibo/native_wheelencoder.h" #include "nibo/iodefs.h" #include "motco.h" #define NATIVE_METHOD_getLeftInc 1 #define NATIVE_METHOD_getRightInc 2 #define NATIVE_METHOD_setEnabled 3 #define NATIVE_METHOD_getEnabled 4 #define NATIVE_METHOD_getLeftSpeed 5 #define NATIVE_METHOD_getRightSpeed 6 #define NATIVE_METHOD_update 7 int16_t last_ticks_l; int16_t last_ticks_r; void native_nibo_wheelencoder_init(void) { } void native_nibo_wheelencoder_invoke(u08_t mref) { // JAVA: void update() if(mref == NATIVE_METHOD_update) { motco_update(); } // JAVA: int getLeftInc() else if(mref == NATIVE_METHOD_getLeftInc) { nvm_int_t val = (int16_t)(motco_ticks_l-last_ticks_l); last_ticks_l = motco_ticks_l; stack_push(val); } // JAVA: int getRightInc() else if(mref == NATIVE_METHOD_getRightInc) { nvm_int_t val = (int16_t)(motco_ticks_r-last_ticks_r); last_ticks_r = motco_ticks_r; stack_push(val); } // JAVA: void 
setEnabled(boolean enabled) else if(mref == NATIVE_METHOD_setEnabled) { stack_pop_int(); } // JAVA: boolean getEnabled() else if(mref == NATIVE_METHOD_getEnabled) { stack_push(1); } // JAVA: int getLeftSpeed() else if(mref == NATIVE_METHOD_getLeftSpeed) { nvm_int_t val = motco_speed_l; stack_push(val); } // JAVA: int getRightSpeed() else if(mref == NATIVE_METHOD_getRightSpeed) { nvm_int_t val = motco_speed_r; stack_push(val); } else error(ERROR_NATIVE_UNKNOWN_METHOD); }
wycc/NanoVMArduino
vm/src/nibo/native_wheelencoder.c
C
gpl-2.0
2,611
<?php /** * File containing the FieldType\RichTextTypeTest class * * @copyright Copyright (C) eZ Systems AS. All rights reserved. * @license For full copyright and license information view LICENSE file distributed with this source code. * @version 2014.11.1 */ namespace eZ\Publish\Core\FieldType\Tests; use eZ\Publish\Core\FieldType\RichText\Type as RichTextType; use eZ\Publish\Core\FieldType\RichText\Value; use eZ\Publish\Core\FieldType\RichText\ConverterDispatcher; use eZ\Publish\Core\FieldType\RichText\ValidatorDispatcher; use eZ\Publish\Core\FieldType\RichText\Validator; use eZ\Publish\Core\Base\Exceptions\InvalidArgumentException; use eZ\Publish\Core\Base\Exceptions\NotFoundException; use eZ\Publish\API\Repository\Values\Content\Relation; use Exception; use PHPUnit_Framework_TestCase; /** * @group fieldType * @group ezrichtext */ class RichTextTest extends PHPUnit_Framework_TestCase { /** * @return \eZ\Publish\Core\FieldType\RichText\Type */ protected function getFieldType() { $fieldType = new RichTextType( new ConverterDispatcher( array( "http://docbook.org/ns/docbook" => null ) ), new ValidatorDispatcher( array( "http://docbook.org/ns/docbook" => new Validator( array( $this->getAbsolutePath( "eZ/Publish/Core/FieldType/RichText/Resources/schemas/docbook/ezpublish.rng" ), $this->getAbsolutePath( "eZ/Publish/Core/FieldType/RichText/Resources/schemas/docbook/docbook.iso.sch.xsl" ) ) ) ) ) ); $fieldType->setTransformationProcessor( $this->getTransformationProcessorMock() ); return $fieldType; } /** * @return \PHPUnit_Framework_MockObject_MockObject */ protected function getTransformationProcessorMock() { return $this->getMockForAbstractClass( "eZ\\Publish\\Core\\Persistence\\TransformationProcessor", array(), '', false, true, true ); } /** * @covers \eZ\Publish\Core\FieldType\FieldType::getValidatorConfigurationSchema */ public function testValidatorConfigurationSchema() { $fieldType = $this->getFieldType(); self::assertEmpty( 
$fieldType->getValidatorConfigurationSchema(), "The validator configuration schema does not match what is expected." ); } /** * @covers \eZ\Publish\Core\FieldType\FieldType::getSettingsSchema */ public function testSettingsSchema() { $fieldType = $this->getFieldType(); self::assertSame( array( "numRows" => array( "type" => "int", "default" => 10 ), "tagPreset" => array( "type" => "choice", "default" => RichTextType::TAG_PRESET_DEFAULT ), ), $fieldType->getSettingsSchema(), "The settings schema does not match what is expected." ); } /** * @covers \eZ\Publish\Core\FieldType\RichText\Type::acceptValue * @expectedException \eZ\Publish\API\Repository\Exceptions\InvalidArgumentException */ public function testAcceptValueInvalidType() { $this->getFieldType()->acceptValue( $this->getMockBuilder( 'eZ\\Publish\\Core\\FieldType\\Value' )->disableOriginalConstructor()->getMock() ); } public static function providerForTestAcceptValueValidFormat() { return array( array( $xml = '<?xml version="1.0" encoding="UTF-8"?> <section xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0-variant ezpublish-1.0"> <title>This is a heading.</title> <para>This is a paragraph.</para> </section> ' ), ); } /** * @covers \eZ\Publish\Core\FieldType\Author\Type::acceptValue * @dataProvider providerForTestAcceptValueValidFormat */ public function testAcceptValueValidFormat( $input ) { $fieldType = $this->getFieldType(); $fieldType->acceptValue( $input ); } public static function providerForTestAcceptValueInvalidFormat() { return array( array( '<?xml version="1.0" encoding="UTF-8"?> <section xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0-variant ezpublish-1.0"> <h1>This is a heading.</h1> </section>', new InvalidArgumentException( "\$inputValue", "Validation of XML content failed: Error in 3:0: Element section has extra content: h1" ) ), array( '<?xml version="1.0" encoding="UTF-8"?> <section 
xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink"> <title>This is a heading.</title> </section>', new InvalidArgumentException( "\$inputValue", "Validation of XML content failed: /*[local-name()='section' and namespace-uri()='http://docbook.org/ns/docbook']: The root element must have a version attribute." ) ), array( 'This is not XML at all!', new InvalidArgumentException( "\$inputValue", "Could not create XML document: Start tag expected, '<' not found" ) ), array( '<?xml version="1.0" encoding="UTF-8"?><unknown xmlns="http://www.w3.org/2013/foobar"><format /></unknown>', new NotFoundException( "Validator", "http://www.w3.org/2013/foobar" ) ), ); } /** * @covers \eZ\Publish\Core\FieldType\Author\Type::acceptValue * @dataProvider providerForTestAcceptValueInvalidFormat */ public function testAcceptValueInvalidFormat( $input, Exception $expectedException ) { try { $fieldType = $this->getFieldType(); $fieldType->acceptValue( $input ); $this->fail( "An InvalidArgumentException was expected! None thrown." ); } catch ( InvalidArgumentException $e ) { $this->assertEquals( $expectedException->getMessage(), $e->getMessage() ); } catch ( NotFoundException $e ) { $this->assertEquals( $expectedException->getMessage(), $e->getMessage() ); } catch ( Exception $e ) { $this->fail( "Unexpected exception thrown! " . get_class( $e ) . " thrown with message: " . 
$e->getMessage() ); } } /** * @covers \eZ\Publish\Core\FieldType\RichText\Type::toPersistenceValue */ public function testToPersistenceValue() { $xmlString = '<?xml version="1.0" encoding="UTF-8"?> <section xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0"> <title>This is a heading.</title> <para>This is a paragraph.</para> </section> '; $fieldType = $this->getFieldType(); $fieldValue = $fieldType->toPersistenceValue( $fieldType->acceptValue( $xmlString ) ); self::assertInternalType( 'string', $fieldValue->data ); self::assertSame( $xmlString, $fieldValue->data ); } /** * @covers \eZ\Publish\Core\FieldType\RichText\Type::getName * @dataProvider providerForTestGetName */ public function testGetName( $xmlString, $expectedName ) { $value = new Value( $xmlString ); $fieldType = $this->getFieldType(); $this->assertEquals( $expectedName, $fieldType->getName( $value ) ); } /** * @todo format does not really matter for the method tested, but the fixtures here should be replaced * by valid docbook anyway */ public static function providerForTestGetName() { return array( array( '<?xml version="1.0" encoding="utf-8"?> <section xmlns:image="http://ez.no/namespaces/ezpublish3/image/" xmlns:xhtml="http://ez.no/namespaces/ezpublish3/xhtml/" xmlns:custom="http://ez.no/namespaces/ezpublish3/custom/"><header level="1">This is a piece of text</header></section>', "This is a piece of text" ), array( '<?xml version="1.0" encoding="utf-8"?> <section xmlns:image="http://ez.no/namespaces/ezpublish3/image/" xmlns:xhtml="http://ez.no/namespaces/ezpublish3/xhtml/" xmlns:custom="http://ez.no/namespaces/ezpublish3/custom/"><header level="1">This is a piece of <emphasize>text</emphasize></header></section>', /** @todo FIXME: should probably be "This is a piece of text" */ "This is a piece of" ), array( '<?xml version="1.0" encoding="utf-8"?> <section xmlns:image="http://ez.no/namespaces/ezpublish3/image/" 
xmlns:xhtml="http://ez.no/namespaces/ezpublish3/xhtml/" xmlns:custom="http://ez.no/namespaces/ezpublish3/custom/"><header level="1"><strong>This is a piece</strong> of text</header></section>', /** @todo FIXME: should probably be "This is a piece of text" */ "This is a piece" ), array( '<?xml version="1.0" encoding="utf-8"?> <section xmlns:image="http://ez.no/namespaces/ezpublish3/image/" xmlns:xhtml="http://ez.no/namespaces/ezpublish3/xhtml/" xmlns:custom="http://ez.no/namespaces/ezpublish3/custom/"><header level="1"><strong><emphasize>This is</emphasize> a piece</strong> of text</header></section>', /** @todo FIXME: should probably be "This is a piece of text" */ "This is" ), array( '<?xml version="1.0" encoding="utf-8"?> <section xmlns:image="http://ez.no/namespaces/ezpublish3/image/" xmlns:xhtml="http://ez.no/namespaces/ezpublish3/xhtml/" xmlns:custom="http://ez.no/namespaces/ezpublish3/custom/"><paragraph><table class="default" border="0" width="100%" custom:summary="wai" custom:caption=""><tr><td><paragraph>First cell</paragraph></td><td><paragraph>Second cell</paragraph></td></tr><tr><td><paragraph>Third cell</paragraph></td><td><paragraph>Fourth cell</paragraph></td></tr></table></paragraph><paragraph>Text after table</paragraph></section>', /** @todo FIXME: should probably be "First cell" */ "First cellSecond cell" ), array( '<?xml version="1.0" encoding="utf-8"?> <section xmlns:image="http://ez.no/namespaces/ezpublish3/image/" xmlns:xhtml="http://ez.no/namespaces/ezpublish3/xhtml/" xmlns:custom="http://ez.no/namespaces/ezpublish3/custom/"><paragraph xmlns:tmp="http://ez.no/namespaces/ezpublish3/temporary/"><ul><li><paragraph xmlns:tmp="http://ez.no/namespaces/ezpublish3/temporary/">List item</paragraph></li></ul></paragraph></section>', "List item" ), array( '<?xml version="1.0" encoding="utf-8"?> <section xmlns:image="http://ez.no/namespaces/ezpublish3/image/" xmlns:xhtml="http://ez.no/namespaces/ezpublish3/xhtml/" 
xmlns:custom="http://ez.no/namespaces/ezpublish3/custom/"><paragraph xmlns:tmp="http://ez.no/namespaces/ezpublish3/temporary/"><ul><li><paragraph xmlns:tmp="http://ez.no/namespaces/ezpublish3/temporary/">List <emphasize>item</emphasize></paragraph></li></ul></paragraph></section>', "List item" ), array( '<?xml version="1.0" encoding="utf-8"?> <section xmlns:image="http://ez.no/namespaces/ezpublish3/image/" xmlns:xhtml="http://ez.no/namespaces/ezpublish3/xhtml/" xmlns:custom="http://ez.no/namespaces/ezpublish3/custom/" />', "" ), array( '<?xml version="1.0" encoding="utf-8"?> <section xmlns:image="http://ez.no/namespaces/ezpublish3/image/" xmlns:xhtml="http://ez.no/namespaces/ezpublish3/xhtml/" xmlns:custom="http://ez.no/namespaces/ezpublish3/custom/"><paragraph><strong><emphasize>A simple</emphasize></strong> paragraph!</paragraph></section>', "A simple" ), array( '<section><paragraph>test</paragraph></section>', "test" ), array( '<section><paragraph><link node_id="1">test</link><link object_id="1">test</link></paragraph></section>', "test" ), ); } /** * @todo handle embeds when implemented * @covers \eZ\Publish\Core\FieldType\RichText\Type::getRelations */ public function testGetRelations() { $xml = <<<EOT <?xml version="1.0" encoding="UTF-8"?> <section xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0-variant ezpublish-1.0"> <title>Some text</title> <para><link xlink:href="ezlocation://72">link1</link></para> <para><link xlink:href="ezlocation://61">link2</link></para> <para><link xlink:href="ezlocation://61">link3</link></para> <para><link xlink:href="ezcontent://70">link4</link></para> <para><link xlink:href="ezcontent://75">link5</link></para> <para><link xlink:href="ezcontent://75">link6</link></para> </section> EOT; $fieldType = $this->getFieldType(); $this->assertEquals( array( Relation::LINK => array( "locationIds" => array( 72, 61 ), "contentIds" => array( 70, 75 ), ), Relation::EMBED => array( "locationIds" => 
array(), "contentIds" => array(), ), ), $fieldType->getRelations( $fieldType->acceptValue( $xml ) ) ); } /** * @param string $relativePath * * @return string */ protected function getAbsolutePath( $relativePath ) { return self::getInstallationDir() . "/" . $relativePath; } /** * @return string */ static protected function getInstallationDir() { static $installDir = null; if ( $installDir === null ) { $config = require 'config.php'; $installDir = $config['install_dir']; } return $installDir; } protected function provideFieldTypeIdentifier() { return 'ezrichtext'; } public function provideDataForGetName() { return array(); } }
imadkaf/PFE
vendor/ezsystems/ezpublish-kernel/eZ/Publish/Core/FieldType/Tests/RichTextTest.php
PHP
gpl-2.0
14,895
<?php /* +--------------------------------------------------------------------+ | CiviCRM version 4.6 | +--------------------------------------------------------------------+ | Copyright CiviCRM LLC (c) 2004-2015 | +--------------------------------------------------------------------+ | This file is a part of CiviCRM. | | | | CiviCRM is free software; you can copy, modify, and distribute it | | under the terms of the GNU Affero General Public License | | Version 3, 19 November 2007 and the CiviCRM Licensing Exception. | | | | CiviCRM is distributed in the hope that it will be useful, but | | WITHOUT ANY WARRANTY; without even the implied warranty of | | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | | See the GNU Affero General Public License for more details. | | | | You should have received a copy of the GNU Affero General Public | | License and the CiviCRM Licensing Exception along | | with this program; if not, contact CiviCRM LLC | | at info[AT]civicrm[DOT]org. If you have questions about the | | GNU Affero General Public License or the licensing of CiviCRM, | | see the CiviCRM license FAQ at http://civicrm.org/licensing | +--------------------------------------------------------------------+ */ /** * * @package CRM * @copyright CiviCRM LLC (c) 2004-2015 * $Id$ * */ /** * form to process actions on the field aspect of Custom */ class CRM_Custom_Form_Option extends CRM_Core_Form { /** * The custom field id saved to the session for an update * * @var int */ protected $_fid; /** * The custom group id saved to the session for an update * * @var int */ protected $_gid; /** * The option group ID */ protected $_optionGroupID = NULL; /** * The Option id, used when editing the Option * * @var int */ protected $_id; /** * Set variables up before form is built. 
* * @return void */ public function preProcess() { $this->_fid = CRM_Utils_Request::retrieve('fid', 'Positive', $this); $this->_gid = CRM_Utils_Request::retrieve('gid', 'Positive', $this); if (!isset($this->_gid) && $this->_fid) { $this->_gid = CRM_Core_DAO::getFieldValue( 'CRM_Core_DAO_CustomField', $this->_fid, 'custom_group_id' ); } if ($this->_fid) { $this->_optionGroupID = CRM_Core_DAO::getFieldValue( 'CRM_Core_DAO_CustomField', $this->_fid, 'option_group_id' ); } if ($isReserved = CRM_Core_DAO::getFieldValue('CRM_Core_DAO_CustomGroup', $this->_gid, 'is_reserved', 'id')) { CRM_Core_Error::fatal("You cannot add or edit muliple choice options in a reserved custom field-set."); } $this->_id = CRM_Utils_Request::retrieve('id', 'Positive', $this); } /** * Set default values for the form. Note that in edit/view mode * the default values are retrieved from the database * * @return array * array of default values */ public function setDefaultValues() { $defaults = $fieldDefaults = array(); if (isset($this->_id)) { $params = array('id' => $this->_id); CRM_Core_BAO_CustomOption::retrieve($params, $defaults); $paramsField = array('id' => $this->_fid); CRM_Core_BAO_CustomField::retrieve($paramsField, $fieldDefaults); if ($fieldDefaults['html_type'] == 'CheckBox' || $fieldDefaults['html_type'] == 'Multi-Select' || $fieldDefaults['html_type'] == 'AdvMulti-Select' ) { if (!empty($fieldDefaults['default_value'])) { $defaultCheckValues = explode(CRM_Core_DAO::VALUE_SEPARATOR, substr($fieldDefaults['default_value'], 1, -1) ); if (in_array($defaults['value'], $defaultCheckValues)) { $defaults['default_value'] = 1; } } } else { if (CRM_Utils_Array::value('default_value', $fieldDefaults) == CRM_Utils_Array::value('value', $defaults)) { $defaults['default_value'] = 1; } } } else { $defaults['is_active'] = 1; } if ($this->_action & CRM_Core_Action::ADD) { $fieldValues = array('option_group_id' => $this->_optionGroupID); $defaults['weight'] = 
CRM_Utils_Weight::getDefaultWeight('CRM_Core_DAO_OptionValue', $fieldValues); } return $defaults; } /** * Build the form object. * * @return void */ public function buildQuickForm() { if ($this->_action == CRM_Core_Action::DELETE) { $option = civicrm_api3('option_value', 'getsingle', array('id' => $this->_id)); $this->assign('label', $option['label']); $this->addButtons(array( array( 'type' => 'next', 'name' => ts('Delete'), 'isDefault' => TRUE, ), array( 'type' => 'cancel', 'name' => ts('Cancel'), ), ) ); } else { // lets trim all the whitespace $this->applyFilter('__ALL__', 'trim'); // hidden Option Id for validation use $this->add('hidden', 'optionId', $this->_id); //hidden field ID for validation use $this->add('hidden', 'fieldId', $this->_fid); // label $this->add('text', 'label', ts('Option Label'), CRM_Core_DAO::getAttribute('CRM_Core_DAO_OptionValue', 'label'), TRUE); $this->add('text', 'value', ts('Option Value'), CRM_Core_DAO::getAttribute('CRM_Core_DAO_OptionValue', 'value'), TRUE); // weight $this->add('text', 'weight', ts('Order'), CRM_Core_DAO::getAttribute('CRM_Core_DAO_OptionValue', 'weight'), TRUE); $this->addRule('weight', ts('is a numeric field'), 'numeric'); // is active ? $this->add('checkbox', 'is_active', ts('Active?')); // Set the default value for Custom Field $this->add('checkbox', 'default_value', ts('Default')); // add a custom form rule $this->addFormRule(array('CRM_Custom_Form_Option', 'formRule'), $this); // add buttons $this->addButtons(array( array( 'type' => 'next', 'name' => ts('Save'), 'isDefault' => TRUE, ), array( 'type' => 'next', 'name' => ts('Save and New'), 'subName' => 'new', ), array( 'type' => 'cancel', 'name' => ts('Cancel'), ), ) ); // if view mode pls freeze it with the done button. if ($this->_action & CRM_Core_Action::VIEW) { $this->freeze(); $url = CRM_Utils_System::url('civicrm/admin/custom/group/field/option', 'reset=1&action=browse&fid=' . $this->_fid . '&gid=' . 
$this->_gid, TRUE, NULL, FALSE ); $this->addElement('button', 'done', ts('Done'), array('onclick' => "location.href='$url'", 'class' => 'crm-form-submit') ); } } $this->assign('id', $this->_id); } /** * Global validation rules for the form. * * @param array $fields * Posted values of the form. * * @param $files * @param CRM_Core_Form $form * * @return array * list of errors to be posted back to the form */ public static function formRule($fields, $files, $form) { $optionLabel = $fields['label']; $optionValue = $fields['value']; $fieldId = $form->_fid; $optionGroupId = $form->_optionGroupID; $temp = array(); if (empty($form->_id)) { $query = " SELECT count(*) FROM civicrm_option_value WHERE option_group_id = %1 AND label = %2"; $params = array( 1 => array($optionGroupId, 'Integer'), 2 => array($optionLabel, 'String'), ); if (CRM_Core_DAO::singleValueQuery($query, $params) > 0) { $errors['label'] = ts('There is an entry with the same label.'); } $query = " SELECT count(*) FROM civicrm_option_value WHERE option_group_id = %1 AND value = %2"; $params = array( 1 => array($optionGroupId, 'Integer'), 2 => array($optionValue, 'String'), ); if (CRM_Core_DAO::singleValueQuery($query, $params) > 0) { $errors['value'] = ts('There is an entry with the same value.'); } } else { //capture duplicate entries while updating Custom Options $optionId = CRM_Utils_Type::escape($fields['optionId'], 'Integer'); //check label duplicates within a custom field $query = " SELECT count(*) FROM civicrm_option_value WHERE option_group_id = %1 AND id != %2 AND label = %3"; $params = array( 1 => array($optionGroupId, 'Integer'), 2 => array($optionId, 'Integer'), 3 => array($optionLabel, 'String'), ); if (CRM_Core_DAO::singleValueQuery($query, $params) > 0) { $errors['label'] = ts('There is an entry with the same label.'); } //check value duplicates within a custom field $query = " SELECT count(*) FROM civicrm_option_value WHERE option_group_id = %1 AND id != %2 AND value = %3"; $params = array( 1 
=> array($optionGroupId, 'Integer'), 2 => array($optionId, 'Integer'), 3 => array($optionValue, 'String'), ); if (CRM_Core_DAO::singleValueQuery($query, $params) > 0) { $errors['value'] = ts('There is an entry with the same value.'); } } $query = " SELECT data_type FROM civicrm_custom_field WHERE id = %1"; $params = array(1 => array($fieldId, 'Integer')); $dao = CRM_Core_DAO::executeQuery($query, $params); if ($dao->fetch()) { switch ($dao->data_type) { case 'Int': if (!CRM_Utils_Rule::integer($fields["value"])) { $errors['value'] = ts('Please enter a valid integer value.'); } break; case 'Float': // case 'Money': if (!CRM_Utils_Rule::numeric($fields["value"])) { $errors['value'] = ts('Please enter a valid number.'); } break; case 'Money': if (!CRM_Utils_Rule::money($fields["value"])) { $errors['value'] = ts('Please enter a valid value.'); } break; case 'Date': if (!CRM_Utils_Rule::date($fields["value"])) { $errors['value'] = ts('Please enter a valid date using YYYY-MM-DD format. Example: 2004-12-31.'); } break; case 'Boolean': if (!CRM_Utils_Rule::integer($fields["value"]) && ($fields["value"] != '1' || $fields["value"] != '0') ) { $errors['value'] = ts('Please enter 1 or 0 as value.'); } break; case 'Country': if (!empty($fields["value"])) { $params = array(1 => array($fields['value'], 'String')); $query = "SELECT count(*) FROM civicrm_country WHERE name = %1 OR iso_code = %1"; if (CRM_Core_DAO::singleValueQuery($query, $params) <= 0) { $errors['value'] = ts('Invalid default value for country.'); } } break; case 'StateProvince': if (!empty($fields["value"])) { $params = array(1 => array($fields['value'], 'String')); $query = " SELECT count(*) FROM civicrm_state_province WHERE name = %1 OR abbreviation = %1"; if (CRM_Core_DAO::singleValueQuery($query, $params) <= 0) { $errors['value'] = ts('The invalid value for State/Province data type'); } } break; } } return empty($errors) ? TRUE : $errors; } /** * Process the form. 
* * @return void */ public function postProcess() { // store the submitted values in an array $params = $this->controller->exportValues('Option'); if ($this->_action == CRM_Core_Action::DELETE) { $option = civicrm_api3('option_value', 'getsingle', array('id' => $this->_id)); $fieldValues = array('option_group_id' => $this->_optionGroupID); CRM_Utils_Weight::delWeight('CRM_Core_DAO_OptionValue', $this->_id, $fieldValues); CRM_Core_BAO_CustomOption::del($this->_id); CRM_Core_Session::setStatus(ts('Option "%1" has been deleted.', array(1 => $option['label'])), ts('Deleted'), 'success'); return; } // set values for custom field properties and save $customOption = new CRM_Core_DAO_OptionValue(); $customOption->label = $params['label']; $customOption->name = CRM_Utils_String::titleToVar($params['label']); $customOption->weight = $params['weight']; $customOption->value = $params['value']; $customOption->is_active = CRM_Utils_Array::value('is_active', $params, FALSE); $oldWeight = NULL; if ($this->_id) { $customOption->id = $this->_id; CRM_Core_BAO_CustomOption::updateCustomValues($params); $oldWeight = CRM_Core_DAO::getFieldValue('CRM_Core_DAO_OptionValue', $this->_id, 'weight', 'id'); } $fieldValues = array('option_group_id' => $this->_optionGroupID); $customOption->weight = CRM_Utils_Weight::updateOtherWeights( 'CRM_Core_DAO_OptionValue', $oldWeight, $params['weight'], $fieldValues); $customOption->option_group_id = $this->_optionGroupID; $customField = new CRM_Core_DAO_CustomField(); $customField->id = $this->_fid; if ( $customField->find(TRUE) && ( $customField->html_type == 'CheckBox' || $customField->html_type == 'AdvMulti-Select' || $customField->html_type == 'Multi-Select' ) ) { $defVal = explode( CRM_Core_DAO::VALUE_SEPARATOR, substr($customField->default_value, 1, -1) ); if (!empty($params['default_value'])) { if (!in_array($customOption->value, $defVal)) { if (empty($defVal[0])) { $defVal = array($customOption->value); } else { $defVal[] = $customOption->value; 
} $customField->default_value = CRM_Core_DAO::VALUE_SEPARATOR . implode(CRM_Core_DAO::VALUE_SEPARATOR, $defVal) . CRM_Core_DAO::VALUE_SEPARATOR; $customField->save(); } } elseif (in_array($customOption->value, $defVal)) { $tempVal = array(); foreach ($defVal as $v) { if ($v != $customOption->value) { $tempVal[] = $v; } } $customField->default_value = CRM_Core_DAO::VALUE_SEPARATOR . implode(CRM_Core_DAO::VALUE_SEPARATOR, $tempVal) . CRM_Core_DAO::VALUE_SEPARATOR; $customField->save(); } } else { switch ($customField->data_type) { case 'Money': $customOption->value = CRM_Utils_Rule::cleanMoney($customOption->value); break; case 'Int': $customOption->value = intval($customOption->value); break; case 'Float': $customOption->value = floatval($customOption->value); break; } if (!empty($params['default_value'])) { $customField->default_value = $customOption->value; $customField->save(); } elseif ($customField->find(TRUE) && $customField->default_value == $customOption->value) { // this is the case where this option is the current default value and we have been reset $customField->default_value = 'null'; $customField->save(); } } $customOption->save(); $msg = ts('Your multiple choice option \'%1\' has been saved', array(1 => $customOption->label)); CRM_Core_Session::setStatus($msg, '', 'success'); $buttonName = $this->controller->getButtonName(); $session = CRM_Core_Session::singleton(); if ($buttonName == $this->getButtonName('next', 'new')) { CRM_Core_Session::setStatus(ts('You can add another option.'), '', 'info'); $session->replaceUserContext( CRM_Utils_System::url( 'civicrm/admin/custom/group/field/option', 'reset=1&action=add&fid=' . $this->_fid . '&gid=' . $this->_gid ) ); } } }
civicrm/drupal-screenshots
sites/all/modules/civicrm/CRM/Custom/Form/Option.php
PHP
gpl-2.0
16,554
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.commons.math.transform; import org.apache.commons.math.FunctionEvaluationException; import org.apache.commons.math.MathRuntimeException; import org.apache.commons.math.analysis.UnivariateRealFunction; /** * Implements the <a href="http://www.archive.chipcenter.com/dsp/DSP000517F1.html">Fast Hadamard Transform</a> (FHT). * Transformation of an input vector x to the output vector y. * <p>In addition to transformation of real vectors, the Hadamard transform can * transform integer vectors into integer vectors. However, this integer transform * cannot be inverted directly. Due to a scaling factor it may lead to rational results. 
* As an example, the inverse transform of integer vector (0, 1, 0, 1) is rational * vector (1/2, -1/2, 0, 0).</p> * @version $Revision$ $Date$ * @since 2.0 */ public class FastHadamardTransformer implements RealTransformer { /** {@inheritDoc} */ public double[] transform(double f[]) throws IllegalArgumentException { return fht(f); } /** {@inheritDoc} */ public double[] transform(UnivariateRealFunction f, double min, double max, int n) throws FunctionEvaluationException, IllegalArgumentException { return fht(FastFourierTransformer.sample(f, min, max, n)); } /** {@inheritDoc} */ public double[] inversetransform(double f[]) throws IllegalArgumentException { return FastFourierTransformer.scaleArray(fht(f), 1.0 / f.length); } /** {@inheritDoc} */ public double[] inversetransform(UnivariateRealFunction f, double min, double max, int n) throws FunctionEvaluationException, IllegalArgumentException { final double[] unscaled = fht(FastFourierTransformer.sample(f, min, max, n)); return FastFourierTransformer.scaleArray(unscaled, 1.0 / n); } /** * Transform the given real data set. * <p>The integer transform cannot be inverted directly, due to a scaling * factor it may lead to double results.</p> * @param f the integer data array to be transformed (signal) * @return the integer transformed array (spectrum) * @throws IllegalArgumentException if any parameters are invalid */ public int[] transform(int f[]) throws IllegalArgumentException { return fht(f); } /** * The FHT (Fast Hadamard Transformation) which uses only subtraction and addition. * <br> * Requires <b>Nlog2N = n2</b><sup>n</sup> additions. 
* <br> * <br> * <b><u>Short Table of manual calculation for N=8:</u></b> * <ol> * <li><b>x</b> is the input vector we want to transform</li> * <li><b>y</b> is the output vector which is our desired result</li> * <li>a and b are just helper rows</li> * </ol> * <pre> * <code> * +----+----------+---------+----------+ * | <b>x</b> | <b>a</b> | <b>b</b> | <b>y</b> | * +----+----------+---------+----------+ * | x<sub>0</sub> | a<sub>0</sub>=x<sub>0</sub>+x<sub>1</sub> | b<sub>0</sub>=a<sub>0</sub>+a<sub>1</sub> | y<sub>0</sub>=b<sub>0</sub>+b<sub>1</sub> | * +----+----------+---------+----------+ * | x<sub>1</sub> | a<sub>1</sub>=x<sub>2</sub>+x<sub>3</sub> | b<sub>0</sub>=a<sub>2</sub>+a<sub>3</sub> | y<sub>0</sub>=b<sub>2</sub>+b<sub>3</sub> | * +----+----------+---------+----------+ * | x<sub>2</sub> | a<sub>2</sub>=x<sub>4</sub>+x<sub>5</sub> | b<sub>0</sub>=a<sub>4</sub>+a<sub>5</sub> | y<sub>0</sub>=b<sub>4</sub>+b<sub>5</sub> | * +----+----------+---------+----------+ * | x<sub>3</sub> | a<sub>3</sub>=x<sub>6</sub>+x<sub>7</sub> | b<sub>0</sub>=a<sub>6</sub>+a<sub>7</sub> | y<sub>0</sub>=b<sub>6</sub>+b<sub>7</sub> | * +----+----------+---------+----------+ * | x<sub>4</sub> | a<sub>0</sub>=x<sub>0</sub>-x<sub>1</sub> | b<sub>0</sub>=a<sub>0</sub>-a<sub>1</sub> | y<sub>0</sub>=b<sub>0</sub>-b<sub>1</sub> | * +----+----------+---------+----------+ * | x<sub>5</sub> | a<sub>1</sub>=x<sub>2</sub>-x<sub>3</sub> | b<sub>0</sub>=a<sub>2</sub>-a<sub>3</sub> | y<sub>0</sub>=b<sub>2</sub>-b<sub>3</sub> | * +----+----------+---------+----------+ * | x<sub>6</sub> | a<sub>2</sub>=x<sub>4</sub>-x<sub>5</sub> | b<sub>0</sub>=a<sub>4</sub>-a<sub>5</sub> | y<sub>0</sub>=b<sub>4</sub>-b<sub>5</sub> | * +----+----------+---------+----------+ * | x<sub>7</sub> | a<sub>3</sub>=x<sub>6</sub>-x<sub>7</sub> | b<sub>0</sub>=a<sub>6</sub>-a<sub>7</sub> | y<sub>0</sub>=b<sub>6</sub>-b<sub>7</sub> | * +----+----------+---------+----------+ * </code> * </pre> * * <b><u>How it works</u></b> 
* <ol> * <li>Construct a matrix with N rows and n+1 columns<br> <b>hadm[n+1][N]</b> * <br><i>(If I use [x][y] it always means [row-offset][column-offset] of a Matrix with n rows and m columns. Its entries go from M[0][0] to M[n][m])</i></li> * <li>Place the input vector <b>x[N]</b> in the first column of the matrix <b>hadm</b></li> * <li>The entries of the submatrix D<sub>top</sub> are calculated as follows. * <br>D<sub>top</sub> goes from entry [0][1] to [N/2-1][n+1]. * <br>The columns of D<sub>top</sub> are the pairwise mutually exclusive sums of the previous column * </li> * <li>The entries of the submatrix D<sub>bottom</sub> are calculated as follows. * <br>D<sub>bottom</sub> goes from entry [N/2][1] to [N][n+1]. * <br>The columns of D<sub>bottom</sub> are the pairwise differences of the previous column * </li> * <li>How D<sub>top</sub> and D<sub>bottom</sub> you can understand best with the example for N=8 above. * <li>The output vector y is now in the last column of <b>hadm</b></li> * <li><i>Algorithm from: http://www.archive.chipcenter.com/dsp/DSP000517F1.html</i></li> * </ol> * <br> * <b><u>Visually</u></b> * <pre> * +--------+---+---+---+-----+---+ * | 0 | 1 | 2 | 3 | ... |n+1| * +------+--------+---+---+---+-----+---+ * |0 | x<sub>0</sub> | /\ | * |1 | x<sub>1</sub> | || | * |2 | x<sub>2</sub> | <= D<sub>top</sub> => | * |... | ... | || | * |N/2-1 | x<sub>N/2-1</sub> | \/ | * +------+--------+---+---+---+-----+---+ * |N/2 | x<sub>N/2</sub> | /\ | * |N/2+1 | x<sub>N/2+1</sub> | || | * |N/2+2 | x<sub>N/2+2</sub> | <= D<sub>bottom</sub> => | which is in the last column of the matrix * |... | ... 
| || | * |N | x<sub>N/2</sub> | \/ | * +------+--------+---+---+---+-----+---+ * </pre> * * @param x input vector * @return y output vector * @exception IllegalArgumentException if input array is not a power of 2 */ protected double[] fht(double x[]) throws IllegalArgumentException { // n is the row count of the input vector x final int n = x.length; final int halfN = n / 2; // n has to be of the form n = 2^p !! if (!FastFourierTransformer.isPowerOf2(n)) { throw MathRuntimeException.createIllegalArgumentException( "{0} is not a power of 2", n); } // Instead of creating a matrix with p+1 columns and n rows // we will use two single dimension arrays which we will use in an alternating way. double[] yPrevious = new double[n]; double[] yCurrent = x.clone(); // iterate from left to right (column) for (int j = 1; j < n; j <<= 1) { // switch columns final double[] yTmp = yCurrent; yCurrent = yPrevious; yPrevious = yTmp; // iterate from top to bottom (row) for (int i = 0; i < halfN; ++i) { // D<sub>top</sub> // The top part works with addition final int twoI = 2 * i; yCurrent[i] = yPrevious[twoI] + yPrevious[twoI + 1]; } for (int i = halfN; i < n; ++i) { // D<sub>bottom</sub> // The bottom part works with subtraction final int twoI = 2 * i; yCurrent[i] = yPrevious[twoI - n] - yPrevious[twoI - n + 1]; } } // return the last computed output vector y return yCurrent; } /** * The FHT (Fast Hadamard Transformation) which uses only subtraction and addition. * @param x input vector * @return y output vector * @exception IllegalArgumentException if input array is not a power of 2 */ protected int[] fht(int x[]) throws IllegalArgumentException { // n is the row count of the input vector x final int n = x.length; final int halfN = n / 2; // n has to be of the form n = 2^p !! 
if (!FastFourierTransformer.isPowerOf2(n)) { throw MathRuntimeException.createIllegalArgumentException( "{0} is not a power of 2", n); } // Instead of creating a matrix with p+1 columns and n rows // we will use two single dimension arrays which we will use in an alternating way. int[] yPrevious = new int[n]; int[] yCurrent = x.clone(); // iterate from left to right (column) for (int j = 1; j < n; j <<= 1) { // switch columns final int[] yTmp = yCurrent; yCurrent = yPrevious; yPrevious = yTmp; // iterate from top to bottom (row) for (int i = 0; i < halfN; ++i) { // D<sub>top</sub> // The top part works with addition final int twoI = 2 * i; yCurrent[i] = yPrevious[twoI] + yPrevious[twoI + 1]; } for (int i = halfN; i < n; ++i) { // D<sub>bottom</sub> // The bottom part works with subtraction final int twoI = 2 * i; yCurrent[i] = yPrevious[twoI - n] - yPrevious[twoI - n + 1]; } } // return the last computed output vector y return yCurrent; } }
SpoonLabs/astor
examples/Math-issue-309/src/main/java/org/apache/commons/math/transform/FastHadamardTransformer.java
Java
gpl-2.0
11,482
<?php $lan = array( 'done' => 'Klaar', 'bounces' => 'bounces', 'to' => 'aan', 'listing' => 'Lijst', 'are you sure you want to delete all bounces older than 2 months' => 'Ben je zeker dat je alle bounces ouder dan 2 maanden wilt verwijderen', 'delete all processed (&gt; 2 months old)' => 'Verwijder alle verwerkte ouder dan 2 maand', 'are you sure you want to delete all bounces,\\n even the ones that have not been processed' => 'Ben je zeker dat je alle bounces wilt verwijderen,\\n ook diegene die nog niet zijn uitgevoerd', 'delete all' => 'Verwijder alles', 'are you sure you want to reset all counters' => 'Ben je zeker dat je alle tellers wilt resetten', 'reset bounces' => 'Reset bounces', 'delete' => 'Verwijder', 'deleting' => 'Verwijderen', 'no unprocessed bounces available' => 'geen onuitgevoerde bounces beschikbaar', 'message' => 'Bericht', 'user' => 'Gebruiker', 'date' => 'Datum', 'show' => 'Toon', 'unknown' => 'Onbekend', 'system message' => 'Systeem bericht', ### new in 2.9.5 'are you sure you want to delete all unidentified bounces older than 2 months' => 'ben je zeker dat je alle niet geidentificeerde bounces ouder dan 2 maanden wilt verwijderen', 'delete all unidentified (&gt; 2 months old)' => 'verwijder alle niet geidentificeerde ouder dan 2 maand', ); ?>
Ramir1/phplist
public_html/lists/admin/lan/nl/bounces.php
PHP
gpl-2.0
1,331
/* Framework for MDIO devices, other than PHYs. * * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/errno.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/mdio.h> #include <linux/mii.h> #include <linux/module.h> #include <linux/phy.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/unistd.h> void mdio_device_free(struct mdio_device *mdiodev) { put_device(&mdiodev->dev); } EXPORT_SYMBOL(mdio_device_free); static void mdio_device_release(struct device *dev) { kfree(to_mdio_device(dev)); } struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr) { struct mdio_device *mdiodev; /* We allocate the device, and initialize the default values */ mdiodev = kzalloc(sizeof(*mdiodev), GFP_KERNEL); if (!mdiodev) return ERR_PTR(-ENOMEM); mdiodev->dev.release = mdio_device_release; mdiodev->dev.parent = &bus->dev; mdiodev->dev.bus = &mdio_bus_type; mdiodev->device_free = mdio_device_free; mdiodev->device_remove = mdio_device_remove; mdiodev->bus = bus; mdiodev->addr = addr; dev_set_name(&mdiodev->dev, PHY_ID_FMT, bus->id, addr); device_initialize(&mdiodev->dev); return mdiodev; } EXPORT_SYMBOL(mdio_device_create); /** * mdio_device_register - Register the mdio device on the MDIO bus * @mdiodev: mdio_device structure to be added to the MDIO bus */ int mdio_device_register(struct mdio_device *mdiodev) { int err; dev_info(&mdiodev->dev, "mdio_device_register\n"); err = mdiobus_register_device(mdiodev); if (err) return err; err = device_add(&mdiodev->dev); if (err) { pr_err("MDIO %d failed to add\n", mdiodev->addr); goto out; } return 0; out: mdiobus_unregister_device(mdiodev); return err; } 
EXPORT_SYMBOL(mdio_device_register); /** * mdio_device_remove - Remove a previously registered mdio device from the * MDIO bus * @mdiodev: mdio_device structure to remove * * This doesn't free the mdio_device itself, it merely reverses the effects * of mdio_device_register(). Use mdio_device_free() to free the device * after calling this function. */ void mdio_device_remove(struct mdio_device *mdiodev) { device_del(&mdiodev->dev); mdiobus_unregister_device(mdiodev); } EXPORT_SYMBOL(mdio_device_remove); /** * mdio_probe - probe an MDIO device * @dev: device to probe * * Description: Take care of setting up the mdio_device structure * and calling the driver to probe the device. */ static int mdio_probe(struct device *dev) { struct mdio_device *mdiodev = to_mdio_device(dev); struct device_driver *drv = mdiodev->dev.driver; struct mdio_driver *mdiodrv = to_mdio_driver(drv); int err = 0; if (mdiodrv->probe) err = mdiodrv->probe(mdiodev); return err; } static int mdio_remove(struct device *dev) { struct mdio_device *mdiodev = to_mdio_device(dev); struct device_driver *drv = mdiodev->dev.driver; struct mdio_driver *mdiodrv = to_mdio_driver(drv); if (mdiodrv->remove) mdiodrv->remove(mdiodev); return 0; } /** * mdio_driver_register - register an mdio_driver with the MDIO layer * @new_driver: new mdio_driver to register */ int mdio_driver_register(struct mdio_driver *drv) { struct mdio_driver_common *mdiodrv = &drv->mdiodrv; int retval; pr_info("mdio_driver_register: %s\n", mdiodrv->driver.name); mdiodrv->driver.bus = &mdio_bus_type; mdiodrv->driver.probe = mdio_probe; mdiodrv->driver.remove = mdio_remove; retval = driver_register(&mdiodrv->driver); if (retval) { pr_err("%s: Error %d in registering driver\n", mdiodrv->driver.name, retval); return retval; } return 0; } EXPORT_SYMBOL(mdio_driver_register); void mdio_driver_unregister(struct mdio_driver *drv) { struct mdio_driver_common *mdiodrv = &drv->mdiodrv; driver_unregister(&mdiodrv->driver); } 
EXPORT_SYMBOL(mdio_driver_unregister);
patjak/drm-gma500
drivers/net/phy/mdio_device.c
C
gpl-2.0
4,149
/* Copyright (c) 2012 Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/iommu.h> #include <linux/interrupt.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/atomic.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_device.h> #include <mach/iommu_hw-v2.h> #include <mach/iommu.h> static int msm_iommu_parse_dt(struct platform_device *pdev, struct msm_iommu_drvdata *drvdata) { struct device_node *child; int ret; ret = device_move(&pdev->dev, &msm_iommu_root_dev->dev, DPM_ORDER_NONE); if (ret) return ret; for_each_child_of_node(pdev->dev.of_node, child) { drvdata->ncb++; if (!of_platform_device_create(child, NULL, &pdev->dev)) pr_err("Failed to create %s device\n", child->name); } drvdata->name = dev_name(&pdev->dev); return 0; } static atomic_t msm_iommu_next_id = ATOMIC_INIT(-1); static int __devinit msm_iommu_probe(struct platform_device *pdev) { struct msm_iommu_drvdata *drvdata; struct resource *r; int ret; if (msm_iommu_root_dev == pdev) return 0; if (pdev->id == -1) pdev->id = atomic_inc_return(&msm_iommu_next_id) - 1; drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL); if (!drvdata) return -ENOMEM; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) return -EINVAL; drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r)); if (!drvdata->base) return -ENOMEM; 
drvdata->gdsc = devm_regulator_get(&pdev->dev, "vdd"); if (IS_ERR(drvdata->gdsc)) return -EINVAL; drvdata->pclk = clk_get(&pdev->dev, "iface_clk"); if (IS_ERR(drvdata->pclk)) return PTR_ERR(drvdata->pclk); ret = clk_prepare_enable(drvdata->pclk); if (ret) goto fail_enable; drvdata->clk = clk_get(&pdev->dev, "core_clk"); if (!IS_ERR(drvdata->clk)) { if (clk_get_rate(drvdata->clk) == 0) { ret = clk_round_rate(drvdata->clk, 1); clk_set_rate(drvdata->clk, ret); } ret = clk_prepare_enable(drvdata->clk); if (ret) { clk_put(drvdata->clk); goto fail_pclk; } } else drvdata->clk = NULL; ret = msm_iommu_parse_dt(pdev, drvdata); if (ret) goto fail_clk; pr_info("device %s mapped at %p, with %d ctx banks\n", drvdata->name, drvdata->base, drvdata->ncb); platform_set_drvdata(pdev, drvdata); if (drvdata->clk) clk_disable_unprepare(drvdata->clk); clk_disable_unprepare(drvdata->pclk); return 0; fail_clk: if (drvdata->clk) { clk_disable_unprepare(drvdata->clk); clk_put(drvdata->clk); } fail_pclk: clk_disable_unprepare(drvdata->pclk); fail_enable: clk_put(drvdata->pclk); return ret; } static int __devexit msm_iommu_remove(struct platform_device *pdev) { struct msm_iommu_drvdata *drv = NULL; drv = platform_get_drvdata(pdev); if (drv) { if (drv->clk) clk_put(drv->clk); clk_put(drv->pclk); platform_set_drvdata(pdev, NULL); } return 0; } static int msm_iommu_ctx_parse_dt(struct platform_device *pdev, struct msm_iommu_ctx_drvdata *ctx_drvdata) { struct resource *r, rp; int irq, ret; irq = platform_get_irq(pdev, 0); if (irq > 0) { ret = request_threaded_irq(irq, NULL, msm_iommu_fault_handler_v2, IRQF_ONESHOT | IRQF_SHARED, "msm_iommu_nonsecure_irq", pdev); if (ret) { pr_err("Request IRQ %d failed with ret=%d\n", irq, ret); return ret; } } r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) return -EINVAL; ret = of_address_to_resource(pdev->dev.parent->of_node, 0, &rp); if (ret) return -EINVAL; /* Calculate the context bank number using the base addresses. 
The * first 8 pages belong to the global address space which is followed * by the context banks, hence subtract by 8 to get the context bank * number. */ ctx_drvdata->num = ((r->start - rp.start) >> CTX_SHIFT) - 8; if (of_property_read_string(pdev->dev.of_node, "qcom,iommu-ctx-name", &ctx_drvdata->name)) ctx_drvdata->name = dev_name(&pdev->dev); return 0; } static int __devinit msm_iommu_ctx_probe(struct platform_device *pdev) { struct msm_iommu_ctx_drvdata *ctx_drvdata = NULL; int ret; if (!pdev->dev.parent) return -EINVAL; ctx_drvdata = devm_kzalloc(&pdev->dev, sizeof(*ctx_drvdata), GFP_KERNEL); if (!ctx_drvdata) return -ENOMEM; ctx_drvdata->pdev = pdev; INIT_LIST_HEAD(&ctx_drvdata->attached_elm); platform_set_drvdata(pdev, ctx_drvdata); ret = msm_iommu_ctx_parse_dt(pdev, ctx_drvdata); if (!ret) dev_info(&pdev->dev, "context %s using bank %d\n", dev_name(&pdev->dev), ctx_drvdata->num); return ret; } static int __devexit msm_iommu_ctx_remove(struct platform_device *pdev) { platform_set_drvdata(pdev, NULL); return 0; } static struct of_device_id msm_iommu_match_table[] = { { .compatible = "qcom,msm-smmu-v2", }, {} }; static struct platform_driver msm_iommu_driver = { .driver = { .name = "msm_iommu_v2", .of_match_table = msm_iommu_match_table, }, .probe = msm_iommu_probe, .remove = __devexit_p(msm_iommu_remove), }; static struct of_device_id msm_iommu_ctx_match_table[] = { { .name = "qcom,iommu-ctx", }, {} }; static struct platform_driver msm_iommu_ctx_driver = { .driver = { .name = "msm_iommu_ctx_v2", .of_match_table = msm_iommu_ctx_match_table, }, .probe = msm_iommu_ctx_probe, .remove = __devexit_p(msm_iommu_ctx_remove), }; static int __init msm_iommu_driver_init(void) { struct device_node *node; int ret; node = of_find_compatible_node(NULL, NULL, "qcom,msm-smmu-v2"); if (!node) return -ENODEV; of_node_put(node); msm_iommu_root_dev = platform_device_register_simple( "msm_iommu", -1, 0, 0); if (!msm_iommu_root_dev) { pr_err("Failed to create root IOMMU device\n"); 
ret = -ENODEV; goto error; } atomic_inc(&msm_iommu_next_id); ret = platform_driver_register(&msm_iommu_driver); if (ret != 0) { pr_err("Failed to register IOMMU driver\n"); goto error; } ret = platform_driver_register(&msm_iommu_ctx_driver); if (ret != 0) { pr_err("Failed to register IOMMU context driver\n"); goto error; } error: return ret; } static void __exit msm_iommu_driver_exit(void) { platform_driver_unregister(&msm_iommu_ctx_driver); platform_driver_unregister(&msm_iommu_driver); platform_device_unregister(msm_iommu_root_dev); } subsys_initcall(msm_iommu_driver_init); module_exit(msm_iommu_driver_exit); MODULE_LICENSE("GPL v2");
Flyhalf205/android_kernel_htc_t6
drivers/iommu/msm_iommu_dev-v2.c
C
gpl-2.0
6,940
/* * Copyright (C) 1996 Linus Torvalds & author (see below) */ /* * ALI M14xx chipset EIDE controller * * Works for ALI M1439/1443/1445/1487/1489 chipsets. * * Adapted from code developed by derekn@vw.ece.cmu.edu. -ml * Derek's notes follow: * * I think the code should be pretty understandable, * but I'll be happy to (try to) answer questions. * * The critical part is in the setupDrive function. The initRegisters * function doesn't seem to be necessary, but the DOS driver does it, so * I threw it in. * * I've only tested this on my system, which only has one disk. I posted * it to comp.sys.linux.hardware, so maybe some other people will try it * out. * * Derek Noonburg (derekn@ece.cmu.edu) * 95-sep-26 * * Update 96-jul-13: * * I've since upgraded to two disks and a CD-ROM, with no trouble, and * I've also heard from several others who have used it successfully. * This driver appears to work with both the 1443/1445 and the 1487/1489 * chipsets. I've added support for PIO mode 4 for the 1487. This * seems to work just fine on the 1443 also, although I'm not sure it's * advertised as supporting mode 4. (I've been running a WDC AC21200 in * mode 4 for a while now with no trouble.) 
-Derek */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/mm.h> #include <linux/ioport.h> #include <linux/blkdev.h> #include <linux/ide.h> #include <linux/init.h> #include <asm/io.h> #define DRV_NAME "ali14xx" /* port addresses for auto-detection */ #define ALI_NUM_PORTS 4 static const int ports[ALI_NUM_PORTS] __initdata = { 0x074, 0x0f4, 0x034, 0x0e4 }; /* register initialization data */ typedef struct { u8 reg, data; } RegInitializer; static const RegInitializer initData[] __initdata = { {0x01, 0x0f}, {0x02, 0x00}, {0x03, 0x00}, {0x04, 0x00}, {0x05, 0x00}, {0x06, 0x00}, {0x07, 0x2b}, {0x0a, 0x0f}, {0x25, 0x00}, {0x26, 0x00}, {0x27, 0x00}, {0x28, 0x00}, {0x29, 0x00}, {0x2a, 0x00}, {0x2f, 0x00}, {0x2b, 0x00}, {0x2c, 0x00}, {0x2d, 0x00}, {0x2e, 0x00}, {0x30, 0x00}, {0x31, 0x00}, {0x32, 0x00}, {0x33, 0x00}, {0x34, 0xff}, {0x35, 0x03}, {0x00, 0x00} }; /* timing parameter registers for each drive */ static struct { u8 reg1, reg2, reg3, reg4; } regTab[4] = { {0x03, 0x26, 0x04, 0x27}, /* drive 0 */ {0x05, 0x28, 0x06, 0x29}, /* drive 1 */ {0x2b, 0x30, 0x2c, 0x31}, /* drive 2 */ {0x2d, 0x32, 0x2e, 0x33}, /* drive 3 */ }; static int basePort; /* base port address */ static int regPort; /* port for register number */ static int dataPort; /* port for register data */ static u8 regOn; /* output to base port to access registers */ static u8 regOff; /* output to base port to close registers */ /*------------------------------------------------------------------------*/ /* * Read a controller register. */ static inline u8 inReg(u8 reg) { outb_p(reg, regPort); return inb(dataPort); } /* * Write a controller register. */ static void outReg(u8 data, u8 reg) { outb_p(reg, regPort); outb_p(data, dataPort); } static DEFINE_SPINLOCK(ali14xx_lock); /* * Set PIO mode for the specified drive. * This function computes timing parameters * and sets controller registers accordingly. 
*/ static void ali14xx_set_pio_mode(ide_drive_t *drive, const u8 pio) { int driveNum; int time1, time2; u8 param1, param2, param3, param4; unsigned long flags; int bus_speed = ide_vlb_clk ? ide_vlb_clk : 50; struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); /* calculate timing, according to PIO mode */ time1 = ide_pio_cycle_time(drive, pio); time2 = t->active; param3 = param1 = (time2 * bus_speed + 999) / 1000; param4 = param2 = (time1 * bus_speed + 999) / 1000 - param1; if (pio < 3) { param3 += 8; param4 += 8; } printk(KERN_DEBUG "%s: PIO mode%d, t1=%dns, t2=%dns, cycles = %d+%d, %d+%d\n", drive->name, pio, time1, time2, param1, param2, param3, param4); /* stuff timing parameters into controller registers */ driveNum = (drive->hwif->index << 1) + (drive->dn & 1); spin_lock_irqsave(&ali14xx_lock, flags); outb_p(regOn, basePort); outReg(param1, regTab[driveNum].reg1); outReg(param2, regTab[driveNum].reg2); outReg(param3, regTab[driveNum].reg3); outReg(param4, regTab[driveNum].reg4); outb_p(regOff, basePort); spin_unlock_irqrestore(&ali14xx_lock, flags); } /* * Auto-detect the IDE controller port. */ static int __init findPort(void) { int i; u8 t; unsigned long flags; local_irq_save(flags); for (i = 0; i < ALI_NUM_PORTS; ++i) { basePort = ports[i]; regOff = inb(basePort); for (regOn = 0x30; regOn <= 0x33; ++regOn) { outb_p(regOn, basePort); if (inb(basePort) == regOn) { regPort = basePort + 4; dataPort = basePort + 8; t = inReg(0) & 0xf0; outb_p(regOff, basePort); local_irq_restore(flags); if (t != 0x50) return 0; return 1; /* success */ } } outb_p(regOff, basePort); } local_irq_restore(flags); return 0; } /* * Initialize controller registers with default values. 
*/ static int __init initRegisters(void) { const RegInitializer *p; u8 t; unsigned long flags; local_irq_save(flags); outb_p(regOn, basePort); for (p = initData; p->reg != 0; ++p) outReg(p->data, p->reg); outb_p(0x01, regPort); t = inb(regPort) & 0x01; outb_p(regOff, basePort); local_irq_restore(flags); return t; } static const struct ide_port_ops ali14xx_port_ops = { .set_pio_mode = ali14xx_set_pio_mode, }; static const struct ide_port_info ali14xx_port_info = { .name = DRV_NAME, .chipset = ide_ali14xx, .port_ops = &ali14xx_port_ops, .host_flags = IDE_HFLAG_NO_DMA, .pio_mask = ATA_PIO4, }; static int __init ali14xx_probe(void) { printk(KERN_DEBUG "ali14xx: base=0x%03x, regOn=0x%02x.\n", basePort, regOn); /* initialize controller registers */ if (!initRegisters()) { printk(KERN_ERR "ali14xx: Chip initialization failed.\n"); return 1; } return ide_legacy_device_add(&ali14xx_port_info, 0); } static int probe_ali14xx; module_param_named(probe, probe_ali14xx, bool, 0); MODULE_PARM_DESC(probe, "probe for ALI M14xx chipsets"); static int __init ali14xx_init(void) { if (probe_ali14xx == 0) goto out; /* auto-detect IDE controller port */ if (findPort()) { if (ali14xx_probe()) return -ENODEV; return 0; } printk(KERN_ERR "ali14xx: not found.\n"); out: return -ENODEV; } module_init(ali14xx_init); MODULE_AUTHOR("see local file"); MODULE_DESCRIPTION("support of ALI 14XX IDE chipsets"); MODULE_LICENSE("GPL");
kfazz/android_kernel_motorola_sholes
drivers/ide/ali14xx.c
C
gpl-2.0
6,564
/* * Hypervisor filesystem for Linux on s390. z/VM implementation. * * Copyright (C) IBM Corp. 2006 * Author(s): Michael Holzheu <holzheu@de.ibm.com> */ #include <linux/types.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/vmalloc.h> #include <asm/ebcdic.h> #include "hypfs.h" #define NAME_LEN 8 static char local_guest[] = " "; static char all_guests[] = "* "; static char *guest_query; struct diag2fc_data { __u32 version; __u32 flags; __u64 used_cpu; __u64 el_time; __u64 mem_min_kb; __u64 mem_max_kb; __u64 mem_share_kb; __u64 mem_used_kb; __u32 pcpus; __u32 lcpus; __u32 vcpus; __u32 cpu_min; __u32 cpu_max; __u32 cpu_shares; __u32 cpu_use_samp; __u32 cpu_delay_samp; __u32 page_wait_samp; __u32 idle_samp; __u32 other_samp; __u32 total_samp; char guest_name[NAME_LEN]; }; struct diag2fc_parm_list { char userid[NAME_LEN]; char aci_grp[NAME_LEN]; __u64 addr; __u32 size; __u32 fmt; }; static int diag2fc(int size, char* query, void *addr) { unsigned long residual_cnt; unsigned long rc; struct diag2fc_parm_list parm_list; memcpy(parm_list.userid, query, NAME_LEN); ASCEBC(parm_list.userid, NAME_LEN); parm_list.addr = (unsigned long) addr ; parm_list.size = size; parm_list.fmt = 0x02; memset(parm_list.aci_grp, 0x40, NAME_LEN); rc = -1; asm volatile( " diag %0,%1,0x2fc\n" "0:\n" EX_TABLE(0b,0b) : "=d" (residual_cnt), "+d" (rc) : "0" (&parm_list) : "memory"); if ((rc != 0 ) && (rc != -2)) return rc; else return -residual_cnt; } static struct diag2fc_data *diag2fc_store(char *query, int *count) { int size; struct diag2fc_data *data; do { size = diag2fc(0, query, NULL); if (size < 0) return ERR_PTR(-EACCES); data = vmalloc(size); if (!data) return ERR_PTR(-ENOMEM); if (diag2fc(size, query, data) == 0) break; vfree(data); } while (1); *count = (size / sizeof(*data)); return data; } static void diag2fc_free(void *data) { vfree(data); } #define ATTRIBUTE(sb, dir, name, member) \ do { \ void *rc; \ rc = hypfs_create_u64(sb, dir, name, member); \ if (IS_ERR(rc)) \ 
return PTR_ERR(rc); \ } while(0) static int hpyfs_vm_create_guest(struct super_block *sb, struct dentry *systems_dir, struct diag2fc_data *data) { char guest_name[NAME_LEN + 1] = {}; struct dentry *guest_dir, *cpus_dir, *samples_dir, *mem_dir; int dedicated_flag, capped_value; capped_value = (data->flags & 0x00000006) >> 1; dedicated_flag = (data->flags & 0x00000008) >> 3; /* guest dir */ memcpy(guest_name, data->guest_name, NAME_LEN); EBCASC(guest_name, NAME_LEN); strstrip(guest_name); guest_dir = hypfs_mkdir(sb, systems_dir, guest_name); if (IS_ERR(guest_dir)) return PTR_ERR(guest_dir); ATTRIBUTE(sb, guest_dir, "onlinetime_us", data->el_time); /* logical cpu information */ cpus_dir = hypfs_mkdir(sb, guest_dir, "cpus"); if (IS_ERR(cpus_dir)) return PTR_ERR(cpus_dir); ATTRIBUTE(sb, cpus_dir, "cputime_us", data->used_cpu); ATTRIBUTE(sb, cpus_dir, "capped", capped_value); ATTRIBUTE(sb, cpus_dir, "dedicated", dedicated_flag); ATTRIBUTE(sb, cpus_dir, "count", data->vcpus); ATTRIBUTE(sb, cpus_dir, "weight_min", data->cpu_min); ATTRIBUTE(sb, cpus_dir, "weight_max", data->cpu_max); ATTRIBUTE(sb, cpus_dir, "weight_cur", data->cpu_shares); /* memory information */ mem_dir = hypfs_mkdir(sb, guest_dir, "mem"); if (IS_ERR(mem_dir)) return PTR_ERR(mem_dir); ATTRIBUTE(sb, mem_dir, "min_KiB", data->mem_min_kb); ATTRIBUTE(sb, mem_dir, "max_KiB", data->mem_max_kb); ATTRIBUTE(sb, mem_dir, "used_KiB", data->mem_used_kb); ATTRIBUTE(sb, mem_dir, "share_KiB", data->mem_share_kb); /* samples */ samples_dir = hypfs_mkdir(sb, guest_dir, "samples"); if (IS_ERR(samples_dir)) return PTR_ERR(samples_dir); ATTRIBUTE(sb, samples_dir, "cpu_using", data->cpu_use_samp); ATTRIBUTE(sb, samples_dir, "cpu_delay", data->cpu_delay_samp); ATTRIBUTE(sb, samples_dir, "mem_delay", data->page_wait_samp); ATTRIBUTE(sb, samples_dir, "idle", data->idle_samp); ATTRIBUTE(sb, samples_dir, "other", data->other_samp); ATTRIBUTE(sb, samples_dir, "total", data->total_samp); return 0; } int hypfs_vm_create_files(struct 
super_block *sb, struct dentry *root) { struct dentry *dir, *file; struct diag2fc_data *data; int rc, i, count = 0; data = diag2fc_store(guest_query, &count); if (IS_ERR(data)) return PTR_ERR(data); /* Hpervisor Info */ dir = hypfs_mkdir(sb, root, "hyp"); if (IS_ERR(dir)) { rc = PTR_ERR(dir); goto failed; } file = hypfs_create_str(sb, dir, "type", "z/VM Hypervisor"); if (IS_ERR(file)) { rc = PTR_ERR(file); goto failed; } /* physical cpus */ dir = hypfs_mkdir(sb, root, "cpus"); if (IS_ERR(dir)) { rc = PTR_ERR(dir); goto failed; } file = hypfs_create_u64(sb, dir, "count", data->lcpus); if (IS_ERR(file)) { rc = PTR_ERR(file); goto failed; } /* guests */ dir = hypfs_mkdir(sb, root, "systems"); if (IS_ERR(dir)) { rc = PTR_ERR(dir); goto failed; } for (i = 0; i < count; i++) { rc = hpyfs_vm_create_guest(sb, dir, &(data[i])); if (rc) goto failed; } diag2fc_free(data); return 0; failed: diag2fc_free(data); return rc; } int hypfs_vm_init(void) { if (diag2fc(0, all_guests, NULL) > 0) guest_query = all_guests; else if (diag2fc(0, local_guest, NULL) > 0) guest_query = local_guest; else return -EACCES; return 0; }
j0n3lson/linux-2.6.22.19-cs370
arch/s390/hypfs/hypfs_vm.c
C
gpl-2.0
5,387
/* * Copyright (c) 2012-2014, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/bug.h> #include <linux/device.h> #include <linux/kernel.h> #include <soc/tegra/fuse.h> #include "fuse.h" #define CPU_SPEEDO_LSBIT 20 #define CPU_SPEEDO_MSBIT 29 #define CPU_SPEEDO_REDUND_LSBIT 30 #define CPU_SPEEDO_REDUND_MSBIT 39 #define CPU_SPEEDO_REDUND_OFFS (CPU_SPEEDO_REDUND_MSBIT - CPU_SPEEDO_MSBIT) #define CORE_SPEEDO_LSBIT 40 #define CORE_SPEEDO_MSBIT 47 #define CORE_SPEEDO_REDUND_LSBIT 48 #define CORE_SPEEDO_REDUND_MSBIT 55 #define CORE_SPEEDO_REDUND_OFFS (CORE_SPEEDO_REDUND_MSBIT - CORE_SPEEDO_MSBIT) #define SPEEDO_MULT 4 #define PROCESS_CORNERS_NUM 4 #define SPEEDO_ID_SELECT_0(rev) ((rev) <= 2) #define SPEEDO_ID_SELECT_1(sku) \ (((sku) != 20) && ((sku) != 23) && ((sku) != 24) && \ ((sku) != 27) && ((sku) != 28)) enum { SPEEDO_ID_0, SPEEDO_ID_1, SPEEDO_ID_2, SPEEDO_ID_COUNT, }; static const u32 __initconst cpu_process_speedos[][PROCESS_CORNERS_NUM] = { {315, 366, 420, UINT_MAX}, {303, 368, 419, UINT_MAX}, {316, 331, 383, UINT_MAX}, }; static const u32 __initconst core_process_speedos[][PROCESS_CORNERS_NUM] = { {165, 195, 224, UINT_MAX}, {165, 195, 224, UINT_MAX}, {165, 195, 224, UINT_MAX}, }; void __init tegra20_init_speedo_data(struct tegra_sku_info *sku_info) { u32 reg; u32 val; int i; BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) != SPEEDO_ID_COUNT); 
BUILD_BUG_ON(ARRAY_SIZE(core_process_speedos) != SPEEDO_ID_COUNT); if (SPEEDO_ID_SELECT_0(sku_info->revision)) sku_info->soc_speedo_id = SPEEDO_ID_0; else if (SPEEDO_ID_SELECT_1(sku_info->sku_id)) sku_info->soc_speedo_id = SPEEDO_ID_1; else sku_info->soc_speedo_id = SPEEDO_ID_2; val = 0; for (i = CPU_SPEEDO_MSBIT; i >= CPU_SPEEDO_LSBIT; i--) { reg = tegra20_spare_fuse_early(i) | tegra20_spare_fuse_early(i + CPU_SPEEDO_REDUND_OFFS); val = (val << 1) | (reg & 0x1); } val = val * SPEEDO_MULT; pr_debug("Tegra CPU speedo value %u\n", val); for (i = 0; i < (PROCESS_CORNERS_NUM - 1); i++) { if (val <= cpu_process_speedos[sku_info->soc_speedo_id][i]) break; } sku_info->cpu_process_id = i; val = 0; for (i = CORE_SPEEDO_MSBIT; i >= CORE_SPEEDO_LSBIT; i--) { reg = tegra20_spare_fuse_early(i) | tegra20_spare_fuse_early(i + CORE_SPEEDO_REDUND_OFFS); val = (val << 1) | (reg & 0x1); } val = val * SPEEDO_MULT; pr_debug("Core speedo value %u\n", val); for (i = 0; i < (PROCESS_CORNERS_NUM - 1); i++) { if (val <= core_process_speedos[sku_info->soc_speedo_id][i]) break; } sku_info->core_process_id = i; }
snegovick/linux
drivers/soc/tegra/fuse/speedo-tegra20.c
C
gpl-2.0
3,170
/* * C-Brick Serial Port (and console) driver for SGI Altix machines. * * This driver is NOT suitable for talking to the l1-controller for * anything other than 'console activities' --- please use the l1 * driver for that. * * * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License * as published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * Further, this software is distributed without any warranty that it is * free of the rightful claim of any third person regarding infringement * or the like. Any license provided herein, whether implied or * otherwise, applies only to this software file. Patent licenses, if * any, provided herein do not apply to combinations of this program with * other software, or any other product whatsoever. * * You should have received a copy of the GNU General Public * License along with this program; if not, write the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 
* * Contact information: Silicon Graphics, Inc., 1500 Crittenden Lane, * Mountain View, CA 94043, or: * * http://www.sgi.com * * For further information regarding this notice, see: * * http://oss.sgi.com/projects/GenInfo/NoticeExplan */ #include <linux/interrupt.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial.h> #include <linux/console.h> #include <linux/module.h> #include <linux/sysrq.h> #include <linux/circ_buf.h> #include <linux/serial_reg.h> #include <linux/delay.h> /* for mdelay */ #include <linux/miscdevice.h> #include <linux/serial_core.h> #include <asm/io.h> #include <asm/sn/simulator.h> #include <asm/sn/sn_sal.h> /* number of characters we can transmit to the SAL console at a time */ #define SN_SAL_MAX_CHARS 120 /* 64K, when we're asynch, it must be at least printk's LOG_BUF_LEN to * avoid losing chars, (always has to be a power of 2) */ #define SN_SAL_BUFFER_SIZE (64 * (1 << 10)) #define SN_SAL_UART_FIFO_DEPTH 16 #define SN_SAL_UART_FIFO_SPEED_CPS (9600/10) /* sn_transmit_chars() calling args */ #define TRANSMIT_BUFFERED 0 #define TRANSMIT_RAW 1 /* To use dynamic numbers only and not use the assigned major and minor, * define the following.. 
*/ /* #define USE_DYNAMIC_MINOR 1 *//* use dynamic minor number */ #define USE_DYNAMIC_MINOR 0 /* Don't rely on misc_register dynamic minor */ /* Device name we're using */ #define DEVICE_NAME "ttySG" #define DEVICE_NAME_DYNAMIC "ttySG0" /* need full name for misc_register */ /* The major/minor we are using, ignored for USE_DYNAMIC_MINOR */ #define DEVICE_MAJOR 204 #define DEVICE_MINOR 40 #ifdef CONFIG_MAGIC_SYSRQ static char sysrq_serial_str[] = "\eSYS"; static char *sysrq_serial_ptr = sysrq_serial_str; static unsigned long sysrq_requested; #endif /* CONFIG_MAGIC_SYSRQ */ /* * Port definition - this kinda drives it all */ struct sn_cons_port { struct timer_list sc_timer; struct uart_port sc_port; struct sn_sal_ops { int (*sal_puts_raw) (const char *s, int len); int (*sal_puts) (const char *s, int len); int (*sal_getc) (void); int (*sal_input_pending) (void); void (*sal_wakeup_transmit) (struct sn_cons_port *, int); } *sc_ops; unsigned long sc_interrupt_timeout; int sc_is_asynch; }; static struct sn_cons_port sal_console_port; static int sn_process_input; /* Only used if USE_DYNAMIC_MINOR is set to 1 */ static struct miscdevice misc; /* used with misc_register for dynamic */ extern void early_sn_setup(void); #undef DEBUG #ifdef DEBUG static int sn_debug_printf(const char *fmt, ...); #define DPRINTF(x...) sn_debug_printf(x) #else #define DPRINTF(x...) 
do { } while (0) #endif /* Prototypes */ static int snt_hw_puts_raw(const char *, int); static int snt_hw_puts_buffered(const char *, int); static int snt_poll_getc(void); static int snt_poll_input_pending(void); static int snt_intr_getc(void); static int snt_intr_input_pending(void); static void sn_transmit_chars(struct sn_cons_port *, int); /* A table for polling: */ static struct sn_sal_ops poll_ops = { .sal_puts_raw = snt_hw_puts_raw, .sal_puts = snt_hw_puts_raw, .sal_getc = snt_poll_getc, .sal_input_pending = snt_poll_input_pending }; /* A table for interrupts enabled */ static struct sn_sal_ops intr_ops = { .sal_puts_raw = snt_hw_puts_raw, .sal_puts = snt_hw_puts_buffered, .sal_getc = snt_intr_getc, .sal_input_pending = snt_intr_input_pending, .sal_wakeup_transmit = sn_transmit_chars }; /* the console does output in two distinctly different ways: * synchronous (raw) and asynchronous (buffered). initially, early_printk * does synchronous output. any data written goes directly to the SAL * to be output (incidentally, it is internally buffered by the SAL) * after interrupts and timers are initialized and available for use, * the console init code switches to asynchronous output. this is * also the earliest opportunity to begin polling for console input. * after console initialization, console output and tty (serial port) * output is buffered and sent to the SAL asynchronously (either by * timer callback or by UART interrupt) */ /* routines for running the console in polling mode */ /** * snt_poll_getc - Get a character from the console in polling mode * */ static int snt_poll_getc(void) { int ch; ia64_sn_console_getc(&ch); return ch; } /** * snt_poll_input_pending - Check if any input is waiting - polling mode. 
* */ static int snt_poll_input_pending(void) { int status, input; status = ia64_sn_console_check(&input); return !status && input; } /* routines for an interrupt driven console (normal) */ /** * snt_intr_getc - Get a character from the console, interrupt mode * */ static int snt_intr_getc(void) { return ia64_sn_console_readc(); } /** * snt_intr_input_pending - Check if input is pending, interrupt mode * */ static int snt_intr_input_pending(void) { return ia64_sn_console_intr_status() & SAL_CONSOLE_INTR_RECV; } /* these functions are polled and interrupt */ /** * snt_hw_puts_raw - Send raw string to the console, polled or interrupt mode * @s: String * @len: Length * */ static int snt_hw_puts_raw(const char *s, int len) { /* this will call the PROM and not return until this is done */ return ia64_sn_console_putb(s, len); } /** * snt_hw_puts_buffered - Send string to console, polled or interrupt mode * @s: String * @len: Length * */ static int snt_hw_puts_buffered(const char *s, int len) { /* queue data to the PROM */ return ia64_sn_console_xmit_chars((char *)s, len); } /* uart interface structs * These functions are associated with the uart_port that the serial core * infrastructure calls. * * Note: Due to how the console works, many routines are no-ops. */ /** * snp_type - What type of console are we? * @port: Port to operate with (we ignore since we only have one port) * */ static const char *snp_type(struct uart_port *port) { return ("SGI SN L1"); } /** * snp_tx_empty - Is the transmitter empty? 
We pretend we're always empty * @port: Port to operate on (we ignore since we only have one port) * */ static unsigned int snp_tx_empty(struct uart_port *port) { return 1; } /** * snp_stop_tx - stop the transmitter - no-op for us * @port: Port to operat eon - we ignore - no-op function * */ static void snp_stop_tx(struct uart_port *port) { } /** * snp_release_port - Free i/o and resources for port - no-op for us * @port: Port to operate on - we ignore - no-op function * */ static void snp_release_port(struct uart_port *port) { } /** * snp_shutdown - shut down the port - free irq and disable - no-op for us * @port: Port to shut down - we ignore * */ static void snp_shutdown(struct uart_port *port) { } /** * snp_set_mctrl - set control lines (dtr, rts, etc) - no-op for our console * @port: Port to operate on - we ignore * @mctrl: Lines to set/unset - we ignore * */ static void snp_set_mctrl(struct uart_port *port, unsigned int mctrl) { } /** * snp_get_mctrl - get contorl line info, we just return a static value * @port: port to operate on - we only have one port so we ignore this * */ static unsigned int snp_get_mctrl(struct uart_port *port) { return TIOCM_CAR | TIOCM_RNG | TIOCM_DSR | TIOCM_CTS; } /** * snp_stop_rx - Stop the receiver - we ignor ethis * @port: Port to operate on - we ignore * */ static void snp_stop_rx(struct uart_port *port) { } /** * snp_start_tx - Start transmitter * @port: Port to operate on * */ static void snp_start_tx(struct uart_port *port) { if (sal_console_port.sc_ops->sal_wakeup_transmit) sal_console_port.sc_ops->sal_wakeup_transmit(&sal_console_port, TRANSMIT_BUFFERED); } /** * snp_break_ctl - handle breaks - ignored by us * @port: Port to operate on * @break_state: Break state * */ static void snp_break_ctl(struct uart_port *port, int break_state) { } /** * snp_startup - Start up the serial port - always return 0 (We're always on) * @port: Port to operate on * */ static int snp_startup(struct uart_port *port) { return 0; } /** * 
snp_set_termios - set termios stuff - we ignore these * @port: port to operate on * @termios: New settings * @termios: Old * */ static void snp_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { } /** * snp_request_port - allocate resources for port - ignored by us * @port: port to operate on * */ static int snp_request_port(struct uart_port *port) { return 0; } /** * snp_config_port - allocate resources, set up - we ignore, we're always on * @port: Port to operate on * @flags: flags used for port setup * */ static void snp_config_port(struct uart_port *port, int flags) { } /* Associate the uart functions above - given to serial core */ static struct uart_ops sn_console_ops = { .tx_empty = snp_tx_empty, .set_mctrl = snp_set_mctrl, .get_mctrl = snp_get_mctrl, .stop_tx = snp_stop_tx, .start_tx = snp_start_tx, .stop_rx = snp_stop_rx, .break_ctl = snp_break_ctl, .startup = snp_startup, .shutdown = snp_shutdown, .set_termios = snp_set_termios, .pm = NULL, .type = snp_type, .release_port = snp_release_port, .request_port = snp_request_port, .config_port = snp_config_port, .verify_port = NULL, }; /* End of uart struct functions and defines */ #ifdef DEBUG /** * sn_debug_printf - close to hardware debugging printf * @fmt: printf format * * This is as "close to the metal" as we can get, used when the driver * itself may be broken. * */ static int sn_debug_printf(const char *fmt, ...) { static char printk_buf[1024]; int printed_len; va_list args; va_start(args, fmt); printed_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args); if (!sal_console_port.sc_ops) { sal_console_port.sc_ops = &poll_ops; early_sn_setup(); } sal_console_port.sc_ops->sal_puts_raw(printk_buf, printed_len); va_end(args); return printed_len; } #endif /* DEBUG */ /* * Interrupt handling routines. 
*/ /** * sn_receive_chars - Grab characters, pass them to tty layer * @port: Port to operate on * @flags: irq flags * * Note: If we're not registered with the serial core infrastructure yet, * we don't try to send characters to it... * */ static void sn_receive_chars(struct sn_cons_port *port, unsigned long flags) { struct tty_port *tport = NULL; int ch; if (!port) { printk(KERN_ERR "sn_receive_chars - port NULL so can't receive\n"); return; } if (!port->sc_ops) { printk(KERN_ERR "sn_receive_chars - port->sc_ops NULL so can't receive\n"); return; } if (port->sc_port.state) { /* The serial_core stuffs are initialized, use them */ tport = &port->sc_port.state->port; } while (port->sc_ops->sal_input_pending()) { ch = port->sc_ops->sal_getc(); if (ch < 0) { printk(KERN_ERR "sn_console: An error occurred while " "obtaining data from the console (0x%0x)\n", ch); break; } #ifdef CONFIG_MAGIC_SYSRQ if (sysrq_requested) { unsigned long sysrq_timeout = sysrq_requested + HZ*5; sysrq_requested = 0; if (ch && time_before(jiffies, sysrq_timeout)) { spin_unlock_irqrestore(&port->sc_port.lock, flags); handle_sysrq(ch); spin_lock_irqsave(&port->sc_port.lock, flags); /* ignore actual sysrq command char */ continue; } } if (ch == *sysrq_serial_ptr) { if (!(*++sysrq_serial_ptr)) { sysrq_requested = jiffies; sysrq_serial_ptr = sysrq_serial_str; } /* * ignore the whole sysrq string except for the * leading escape */ if (ch != '\e') continue; } else sysrq_serial_ptr = sysrq_serial_str; #endif /* CONFIG_MAGIC_SYSRQ */ /* record the character to pass up to the tty layer */ if (tport) { if (tty_insert_flip_char(tport, ch, TTY_NORMAL) == 0) break; } port->sc_port.icount.rx++; } if (tport) tty_flip_buffer_push(tport); } /** * sn_transmit_chars - grab characters from serial core, send off * @port: Port to operate on * @raw: Transmit raw or buffered * * Note: If we're early, before we're registered with serial core, the * writes are going through sn_sal_console_write because that's how * 
register_console has been set up. We currently could have asynch * polls calling this function due to sn_sal_switch_to_asynch but we can * ignore them until we register with the serial core stuffs. * */ static void sn_transmit_chars(struct sn_cons_port *port, int raw) { int xmit_count, tail, head, loops, ii; int result; char *start; struct circ_buf *xmit; if (!port) return; BUG_ON(!port->sc_is_asynch); if (port->sc_port.state) { /* We're initialized, using serial core infrastructure */ xmit = &port->sc_port.state->xmit; } else { /* Probably sn_sal_switch_to_asynch has been run but serial core isn't * initialized yet. Just return. Writes are going through * sn_sal_console_write (due to register_console) at this time. */ return; } if (uart_circ_empty(xmit) || uart_tx_stopped(&port->sc_port)) { /* Nothing to do. */ ia64_sn_console_intr_disable(SAL_CONSOLE_INTR_XMIT); return; } head = xmit->head; tail = xmit->tail; start = &xmit->buf[tail]; /* twice around gets the tail to the end of the buffer and * then to the head, if needed */ loops = (head < tail) ? 2 : 1; for (ii = 0; ii < loops; ii++) { xmit_count = (head < tail) ? 
(UART_XMIT_SIZE - tail) : (head - tail); if (xmit_count > 0) { if (raw == TRANSMIT_RAW) result = port->sc_ops->sal_puts_raw(start, xmit_count); else result = port->sc_ops->sal_puts(start, xmit_count); #ifdef DEBUG if (!result) DPRINTF("`"); #endif if (result > 0) { xmit_count -= result; port->sc_port.icount.tx += result; tail += result; tail &= UART_XMIT_SIZE - 1; xmit->tail = tail; start = &xmit->buf[tail]; } } } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&port->sc_port); if (uart_circ_empty(xmit)) snp_stop_tx(&port->sc_port); /* no-op for us */ } /** * sn_sal_interrupt - Handle console interrupts * @irq: irq #, useful for debug statements * @dev_id: our pointer to our port (sn_cons_port which contains the uart port) * */ static irqreturn_t sn_sal_interrupt(int irq, void *dev_id) { struct sn_cons_port *port = (struct sn_cons_port *)dev_id; unsigned long flags; int status = ia64_sn_console_intr_status(); if (!port) return IRQ_NONE; spin_lock_irqsave(&port->sc_port.lock, flags); if (status & SAL_CONSOLE_INTR_RECV) { sn_receive_chars(port, flags); } if (status & SAL_CONSOLE_INTR_XMIT) { sn_transmit_chars(port, TRANSMIT_BUFFERED); } spin_unlock_irqrestore(&port->sc_port.lock, flags); return IRQ_HANDLED; } /** * sn_sal_timer_poll - this function handles polled console mode * @data: A pointer to our sn_cons_port (which contains the uart port) * * data is the pointer that init_timer will store for us. This function is * associated with init_timer to see if there is any console traffic. 
 * Obviously not used in interrupt mode
 *
 */
static void sn_sal_timer_poll(unsigned long data)
{
	struct sn_cons_port *port = (struct sn_cons_port *)data;
	unsigned long flags;

	if (!port)
		return;

	if (!port->sc_port.irq) {
		/* irq == 0 means we never made it to interrupt-driven
		 * mode, so keep polling */
		spin_lock_irqsave(&port->sc_port.lock, flags);
		if (sn_process_input)
			sn_receive_chars(port, flags);
		sn_transmit_chars(port, TRANSMIT_RAW);
		spin_unlock_irqrestore(&port->sc_port.lock, flags);
		/* re-arm ourselves for the next poll */
		mod_timer(&port->sc_timer,
			  jiffies + port->sc_interrupt_timeout);
	}
}

/*
 * Boot-time initialization code
 */

/**
 * sn_sal_switch_to_asynch - Switch to async mode (as opposed to synch)
 * @port: Our sn_cons_port (which contains the uart port)
 *
 * So this is used by sn_sal_serial_console_init (early on, before we're
 * registered with serial core).  It's also used by sn_sal_module_init
 * right after we've registered with serial core.  The latter only happens
 * if we didn't already come through here via sn_sal_serial_console_init.
 *
 */
static void __init sn_sal_switch_to_asynch(struct sn_cons_port *port)
{
	unsigned long flags;

	if (!port)
		return;

	DPRINTF("sn_console: about to switch to asynchronous console\n");

	/* without early_printk, we may be invoked late enough to race
	 * with other cpus doing console IO at this point, however
	 * console interrupts will never be enabled */
	spin_lock_irqsave(&port->sc_port.lock, flags);

	/* early_printk invocation may have done this for us */
	if (!port->sc_ops)
		port->sc_ops = &poll_ops;

	/* we can't turn on the console interrupt (as request_irq
	 * calls kmalloc, which isn't set up yet), so we rely on a
	 * timer to poll for input and push data from the console
	 * buffer.
*/ init_timer(&port->sc_timer); port->sc_timer.function = sn_sal_timer_poll; port->sc_timer.data = (unsigned long)port; if (IS_RUNNING_ON_SIMULATOR()) port->sc_interrupt_timeout = 6; else { /* 960cps / 16 char FIFO = 60HZ * HZ / (SN_SAL_FIFO_SPEED_CPS / SN_SAL_FIFO_DEPTH) */ port->sc_interrupt_timeout = HZ * SN_SAL_UART_FIFO_DEPTH / SN_SAL_UART_FIFO_SPEED_CPS; } mod_timer(&port->sc_timer, jiffies + port->sc_interrupt_timeout); port->sc_is_asynch = 1; spin_unlock_irqrestore(&port->sc_port.lock, flags); } /** * sn_sal_switch_to_interrupts - Switch to interrupt driven mode * @port: Our sn_cons_port (which contains the uart port) * * In sn_sal_module_init, after we're registered with serial core and * the port is added, this function is called to switch us to interrupt * mode. We were previously in asynch/polling mode (using init_timer). * * We attempt to switch to interrupt mode here by calling * request_irq. If that works out, we enable receive interrupts. */ static void __init sn_sal_switch_to_interrupts(struct sn_cons_port *port) { unsigned long flags; if (port) { DPRINTF("sn_console: switching to interrupt driven console\n"); if (request_irq(SGI_UART_VECTOR, sn_sal_interrupt, IRQF_SHARED, "SAL console driver", port) >= 0) { spin_lock_irqsave(&port->sc_port.lock, flags); port->sc_port.irq = SGI_UART_VECTOR; port->sc_ops = &intr_ops; irq_set_handler(port->sc_port.irq, handle_level_irq); /* turn on receive interrupts */ ia64_sn_console_intr_enable(SAL_CONSOLE_INTR_RECV); spin_unlock_irqrestore(&port->sc_port.lock, flags); } else { printk(KERN_INFO "sn_console: console proceeding in polled mode\n"); } } } /* * Kernel console definitions */ static void sn_sal_console_write(struct console *, const char *, unsigned); static int sn_sal_console_setup(struct console *, char *); static struct uart_driver sal_console_uart; extern struct tty_driver *uart_console_device(struct console *, int *); static struct console sal_console = { .name = DEVICE_NAME, .write = 
sn_sal_console_write, .device = uart_console_device, .setup = sn_sal_console_setup, .index = -1, /* unspecified */ .data = &sal_console_uart, }; #define SAL_CONSOLE &sal_console static struct uart_driver sal_console_uart = { .owner = THIS_MODULE, .driver_name = "sn_console", .dev_name = DEVICE_NAME, .major = 0, /* major/minor set at registration time per USE_DYNAMIC_MINOR */ .minor = 0, .nr = 1, /* one port */ .cons = SAL_CONSOLE, }; /** * sn_sal_module_init - When the kernel loads us, get us rolling w/ serial core * * Before this is called, we've been printing kernel messages in a special * early mode not making use of the serial core infrastructure. When our * driver is loaded for real, we register the driver and port with serial * core and try to enable interrupt driven mode. * */ static int __init sn_sal_module_init(void) { int retval; if (!ia64_platform_is("sn2")) return 0; printk(KERN_INFO "sn_console: Console driver init\n"); if (USE_DYNAMIC_MINOR == 1) { misc.minor = MISC_DYNAMIC_MINOR; misc.name = DEVICE_NAME_DYNAMIC; retval = misc_register(&misc); if (retval != 0) { printk(KERN_WARNING "Failed to register console " "device using misc_register.\n"); return -ENODEV; } sal_console_uart.major = MISC_MAJOR; sal_console_uart.minor = misc.minor; } else { sal_console_uart.major = DEVICE_MAJOR; sal_console_uart.minor = DEVICE_MINOR; } /* We register the driver and the port before switching to interrupts * or async above so the proper uart structures are populated */ if (uart_register_driver(&sal_console_uart) < 0) { printk ("ERROR sn_sal_module_init failed uart_register_driver, line %d\n", __LINE__); return -ENODEV; } spin_lock_init(&sal_console_port.sc_port.lock); /* Setup the port struct with the minimum needed */ sal_console_port.sc_port.membase = (char *)1; /* just needs to be non-zero */ sal_console_port.sc_port.type = PORT_16550A; sal_console_port.sc_port.fifosize = SN_SAL_MAX_CHARS; sal_console_port.sc_port.ops = &sn_console_ops; 
sal_console_port.sc_port.line = 0; if (uart_add_one_port(&sal_console_uart, &sal_console_port.sc_port) < 0) { /* error - not sure what I'd do - so I'll do nothing */ printk(KERN_ERR "%s: unable to add port\n", __func__); } /* when this driver is compiled in, the console initialization * will have already switched us into asynchronous operation * before we get here through the module initcalls */ if (!sal_console_port.sc_is_asynch) { sn_sal_switch_to_asynch(&sal_console_port); } /* at this point (module_init) we can try to turn on interrupts */ if (!IS_RUNNING_ON_SIMULATOR()) { sn_sal_switch_to_interrupts(&sal_console_port); } sn_process_input = 1; return 0; } /** * sn_sal_module_exit - When we're unloaded, remove the driver/port * */ static void __exit sn_sal_module_exit(void) { del_timer_sync(&sal_console_port.sc_timer); uart_remove_one_port(&sal_console_uart, &sal_console_port.sc_port); uart_unregister_driver(&sal_console_uart); misc_deregister(&misc); } module_init(sn_sal_module_init); module_exit(sn_sal_module_exit); /** * puts_raw_fixed - sn_sal_console_write helper for adding \r's as required * @puts_raw : puts function to do the writing * @s: input string * @count: length * * We need a \r ahead of every \n for direct writes through * ia64_sn_console_putb (what sal_puts_raw below actually does). * */ static void puts_raw_fixed(int (*puts_raw) (const char *s, int len), const char *s, int count) { const char *s1; /* Output '\r' before each '\n' */ while ((s1 = memchr(s, '\n', count)) != NULL) { puts_raw(s, s1 - s); puts_raw("\r\n", 2); count -= s1 + 1 - s; s = s1 + 1; } puts_raw(s, count); } /** * sn_sal_console_write - Print statements before serial core available * @console: Console to operate on - we ignore since we have just one * @s: String to send * @count: length * * This is referenced in the console struct. It is used for early * console printing before we register with serial core and for things * such as kdb. 
The console_lock must be held when we get here. * * This function has some code for trying to print output even if the lock * is held. We try to cover the case where a lock holder could have died. * We don't use this special case code if we're not registered with serial * core yet. After we're registered with serial core, the only time this * function would be used is for high level kernel output like magic sys req, * kdb, and printk's. */ static void sn_sal_console_write(struct console *co, const char *s, unsigned count) { unsigned long flags = 0; struct sn_cons_port *port = &sal_console_port; static int stole_lock = 0; BUG_ON(!port->sc_is_asynch); /* We can't look at the xmit buffer if we're not registered with serial core * yet. So only do the fancy recovery after registering */ if (!port->sc_port.state) { /* Not yet registered with serial core - simple case */ puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count); return; } /* somebody really wants this output, might be an * oops, kdb, panic, etc. make sure they get it. */ if (spin_is_locked(&port->sc_port.lock)) { int lhead = port->sc_port.state->xmit.head; int ltail = port->sc_port.state->xmit.tail; int counter, got_lock = 0; /* * We attempt to determine if someone has died with the * lock. We wait ~20 secs after the head and tail ptrs * stop moving and assume the lock holder is not functional * and plow ahead. If the lock is freed within the time out * period we re-get the lock and go ahead normally. We also * remember if we have plowed ahead so that we don't have * to wait out the time out period again - the asumption * is that we will time out again. 
*/ for (counter = 0; counter < 150; mdelay(125), counter++) { if (!spin_is_locked(&port->sc_port.lock) || stole_lock) { if (!stole_lock) { spin_lock_irqsave(&port->sc_port.lock, flags); got_lock = 1; } break; } else { /* still locked */ if ((lhead != port->sc_port.state->xmit.head) || (ltail != port->sc_port.state->xmit.tail)) { lhead = port->sc_port.state->xmit.head; ltail = port->sc_port.state->xmit.tail; counter = 0; } } } /* flush anything in the serial core xmit buffer, raw */ sn_transmit_chars(port, 1); if (got_lock) { spin_unlock_irqrestore(&port->sc_port.lock, flags); stole_lock = 0; } else { /* fell thru */ stole_lock = 1; } puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count); } else { stole_lock = 0; spin_lock_irqsave(&port->sc_port.lock, flags); sn_transmit_chars(port, 1); spin_unlock_irqrestore(&port->sc_port.lock, flags); puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count); } } /** * sn_sal_console_setup - Set up console for early printing * @co: Console to work with * @options: Options to set * * Altix console doesn't do anything with baud rates, etc, anyway. * * This isn't required since not providing the setup function in the * console struct is ok. However, other patches like KDB plop something * here so providing it is easier. * */ static int sn_sal_console_setup(struct console *co, char *options) { return 0; } /** * sn_sal_console_write_early - simple early output routine * @co - console struct * @s - string to print * @count - count * * Simple function to provide early output, before even * sn_sal_serial_console_init is called. Referenced in the * console struct registerd in sn_serial_console_early_setup. 
 *
 */
static void __init
sn_sal_console_write_early(struct console *co, const char *s, unsigned count)
{
	/* sc_ops is set up by sn_serial_console_early_setup() before this
	 * console is registered */
	puts_raw_fixed(sal_console_port.sc_ops->sal_puts_raw, s, count);
}

/* Used for very early console printing - again, before
 * sn_sal_serial_console_init is run */
static struct console sal_console_early __initdata = {
	.name = "sn_sal",
	.write = sn_sal_console_write_early,
	.flags = CON_PRINTBUFFER,
	.index = -1,
};

/**
 * sn_serial_console_early_setup - Sets up early console output support
 *
 * Register a console early on...  This is for output before even
 * sn_sal_serial_console_init is called.  This function is called from
 * setup.c.  This allows us to do really early polled writes.  When
 * sn_sal_serial_console_init is called, this console is unregistered
 * and a new one registered.
 */
int __init sn_serial_console_early_setup(void)
{
	if (!ia64_platform_is("sn2"))
		return -1;

	sal_console_port.sc_ops = &poll_ops;
	spin_lock_init(&sal_console_port.sc_port.lock);
	early_sn_setup();	/* Find SAL entry points */
	register_console(&sal_console_early);

	return 0;
}

/**
 * sn_sal_serial_console_init - Early console output - set up for register
 *
 * This function is called when regular console init happens.  Because we
 * support even earlier console output with sn_serial_console_early_setup
 * (called from setup.c directly), this function unregisters the really
 * early console.
 *
 * Note: Even if setup.c doesn't register sal_console_early, unregistering
 * it here doesn't hurt anything.
 *
 */
static int __init sn_sal_serial_console_init(void)
{
	if (ia64_platform_is("sn2")) {
		sn_sal_switch_to_asynch(&sal_console_port);
		DPRINTF("sn_sal_serial_console_init : register console\n");
		register_console(&sal_console);
		unregister_console(&sal_console_early);
	}
	return 0;
}

console_initcall(sn_sal_serial_console_init);
bheu/raspberrypi_linux
drivers/tty/serial/sn_console.c
C
gpl-2.0
29,946
/*
 * arch/arm/mach-at91/at91sam9260_devices.c
 *
 * Copyright (C) 2006 Atmel
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */
#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include <linux/dma-mapping.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/i2c-gpio.h>

#include <mach/board.h>
#include <mach/cpu.h>
#include <mach/at91sam9260.h>
#include <mach/at91sam9260_matrix.h>
#include <mach/at91_matrix.h>
#include <mach/at91sam9_smc.h>

#include "generic.h"


/* --------------------------------------------------------------------
 *  USB Host
 * -------------------------------------------------------------------- */

#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
static u64 ohci_dmamask = DMA_BIT_MASK(32);
static struct at91_usbh_data usbh_data;

static struct resource usbh_resources[] = {
	[0] = {
		.start	= AT91SAM9260_UHP_BASE,
		.end	= AT91SAM9260_UHP_BASE + SZ_1M - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91SAM9260_ID_UHP,
		.end	= AT91SAM9260_ID_UHP,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91_usbh_device = {
	.name		= "at91_ohci",
	.id		= -1,
	.dev		= {
				.dma_mask		= &ohci_dmamask,
				.coherent_dma_mask	= DMA_BIT_MASK(32),
				.platform_data		= &usbh_data,
	},
	.resource	= usbh_resources,
	.num_resources	= ARRAY_SIZE(usbh_resources),
};

/**
 * at91_add_device_usbh - register the OHCI USB host controller
 * @data: board-specific host configuration (port count, per-port
 *	  overcurrent pins); when NULL, registration is skipped
 *
 * Copies @data into the static platform data and registers the
 * at91_ohci platform device.
 */
void __init at91_add_device_usbh(struct at91_usbh_data *data)
{
	int i;

	if (!data)
		return;

	/* Enable overcurrent notification.  Check with gpio_is_valid()
	 * rather than a plain truth test: boards mark "no pin" with an
	 * invalid (negative) GPIO number, which is non-zero and would
	 * otherwise be configured by mistake.  This also matches every
	 * other pin check in this file (udc, eth, mmc, nand). */
	for (i = 0; i < data->ports; i++) {
		if (gpio_is_valid(data->overcurrent_pin[i]))
			at91_set_gpio_input(data->overcurrent_pin[i], 1);
	}

	usbh_data = *data;
	platform_device_register(&at91_usbh_device);
}
#else
void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
#endif


/* --------------------------------------------------------------------
 *  USB Device
(Gadget) * -------------------------------------------------------------------- */ #if defined(CONFIG_USB_AT91) || defined(CONFIG_USB_AT91_MODULE) static struct at91_udc_data udc_data; static struct resource udc_resources[] = { [0] = { .start = AT91SAM9260_BASE_UDP, .end = AT91SAM9260_BASE_UDP + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_UDP, .end = AT91SAM9260_ID_UDP, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91_udc_device = { .name = "at91_udc", .id = -1, .dev = { .platform_data = &udc_data, }, .resource = udc_resources, .num_resources = ARRAY_SIZE(udc_resources), }; void __init at91_add_device_udc(struct at91_udc_data *data) { if (!data) return; if (gpio_is_valid(data->vbus_pin)) { at91_set_gpio_input(data->vbus_pin, 0); at91_set_deglitch(data->vbus_pin, 1); } /* Pullup pin is handled internally by USB device peripheral */ udc_data = *data; platform_device_register(&at91_udc_device); } #else void __init at91_add_device_udc(struct at91_udc_data *data) {} #endif /* -------------------------------------------------------------------- * Ethernet * -------------------------------------------------------------------- */ #if defined(CONFIG_MACB) || defined(CONFIG_MACB_MODULE) static u64 eth_dmamask = DMA_BIT_MASK(32); static struct macb_platform_data eth_data; static struct resource eth_resources[] = { [0] = { .start = AT91SAM9260_BASE_EMAC, .end = AT91SAM9260_BASE_EMAC + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_EMAC, .end = AT91SAM9260_ID_EMAC, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9260_eth_device = { .name = "macb", .id = -1, .dev = { .dma_mask = &eth_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &eth_data, }, .resource = eth_resources, .num_resources = ARRAY_SIZE(eth_resources), }; void __init at91_add_device_eth(struct macb_platform_data *data) { if (!data) return; if (gpio_is_valid(data->phy_irq_pin)) { 
at91_set_gpio_input(data->phy_irq_pin, 0); at91_set_deglitch(data->phy_irq_pin, 1); } /* Pins used for MII and RMII */ at91_set_A_periph(AT91_PIN_PA19, 0); /* ETXCK_EREFCK */ at91_set_A_periph(AT91_PIN_PA17, 0); /* ERXDV */ at91_set_A_periph(AT91_PIN_PA14, 0); /* ERX0 */ at91_set_A_periph(AT91_PIN_PA15, 0); /* ERX1 */ at91_set_A_periph(AT91_PIN_PA18, 0); /* ERXER */ at91_set_A_periph(AT91_PIN_PA16, 0); /* ETXEN */ at91_set_A_periph(AT91_PIN_PA12, 0); /* ETX0 */ at91_set_A_periph(AT91_PIN_PA13, 0); /* ETX1 */ at91_set_A_periph(AT91_PIN_PA21, 0); /* EMDIO */ at91_set_A_periph(AT91_PIN_PA20, 0); /* EMDC */ if (!data->is_rmii) { at91_set_B_periph(AT91_PIN_PA28, 0); /* ECRS */ at91_set_B_periph(AT91_PIN_PA29, 0); /* ECOL */ at91_set_B_periph(AT91_PIN_PA25, 0); /* ERX2 */ at91_set_B_periph(AT91_PIN_PA26, 0); /* ERX3 */ at91_set_B_periph(AT91_PIN_PA27, 0); /* ERXCK */ at91_set_B_periph(AT91_PIN_PA23, 0); /* ETX2 */ at91_set_B_periph(AT91_PIN_PA24, 0); /* ETX3 */ at91_set_B_periph(AT91_PIN_PA22, 0); /* ETXER */ } eth_data = *data; platform_device_register(&at91sam9260_eth_device); } #else void __init at91_add_device_eth(struct macb_platform_data *data) {} #endif /* -------------------------------------------------------------------- * MMC / SD * -------------------------------------------------------------------- */ #if defined(CONFIG_MMC_AT91) || defined(CONFIG_MMC_AT91_MODULE) static u64 mmc_dmamask = DMA_BIT_MASK(32); static struct at91_mmc_data mmc_data; static struct resource mmc_resources[] = { [0] = { .start = AT91SAM9260_BASE_MCI, .end = AT91SAM9260_BASE_MCI + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_MCI, .end = AT91SAM9260_ID_MCI, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9260_mmc_device = { .name = "at91_mci", .id = -1, .dev = { .dma_mask = &mmc_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &mmc_data, }, .resource = mmc_resources, .num_resources = ARRAY_SIZE(mmc_resources), }; void __init 
at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data) { if (!data) return; /* input/irq */ if (gpio_is_valid(data->det_pin)) { at91_set_gpio_input(data->det_pin, 1); at91_set_deglitch(data->det_pin, 1); } if (gpio_is_valid(data->wp_pin)) at91_set_gpio_input(data->wp_pin, 1); if (gpio_is_valid(data->vcc_pin)) at91_set_gpio_output(data->vcc_pin, 0); /* CLK */ at91_set_A_periph(AT91_PIN_PA8, 0); if (data->slot_b) { /* CMD */ at91_set_B_periph(AT91_PIN_PA1, 1); /* DAT0, maybe DAT1..DAT3 */ at91_set_B_periph(AT91_PIN_PA0, 1); if (data->wire4) { at91_set_B_periph(AT91_PIN_PA5, 1); at91_set_B_periph(AT91_PIN_PA4, 1); at91_set_B_periph(AT91_PIN_PA3, 1); } } else { /* CMD */ at91_set_A_periph(AT91_PIN_PA7, 1); /* DAT0, maybe DAT1..DAT3 */ at91_set_A_periph(AT91_PIN_PA6, 1); if (data->wire4) { at91_set_A_periph(AT91_PIN_PA9, 1); at91_set_A_periph(AT91_PIN_PA10, 1); at91_set_A_periph(AT91_PIN_PA11, 1); } } mmc_data = *data; platform_device_register(&at91sam9260_mmc_device); } #else void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data) {} #endif /* -------------------------------------------------------------------- * MMC / SD Slot for Atmel MCI Driver * -------------------------------------------------------------------- */ #if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE) static u64 mmc_dmamask = DMA_BIT_MASK(32); static struct mci_platform_data mmc_data; static struct resource mmc_resources[] = { [0] = { .start = AT91SAM9260_BASE_MCI, .end = AT91SAM9260_BASE_MCI + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_MCI, .end = AT91SAM9260_ID_MCI, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9260_mmc_device = { .name = "atmel_mci", .id = -1, .dev = { .dma_mask = &mmc_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &mmc_data, }, .resource = mmc_resources, .num_resources = ARRAY_SIZE(mmc_resources), }; void __init at91_add_device_mci(short mmc_id, struct 
mci_platform_data *data) { unsigned int i; unsigned int slot_count = 0; if (!data) return; for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { if (data->slot[i].bus_width) { /* input/irq */ if (gpio_is_valid(data->slot[i].detect_pin)) { at91_set_gpio_input(data->slot[i].detect_pin, 1); at91_set_deglitch(data->slot[i].detect_pin, 1); } if (gpio_is_valid(data->slot[i].wp_pin)) at91_set_gpio_input(data->slot[i].wp_pin, 1); switch (i) { case 0: /* CMD */ at91_set_A_periph(AT91_PIN_PA7, 1); /* DAT0, maybe DAT1..DAT3 */ at91_set_A_periph(AT91_PIN_PA6, 1); if (data->slot[i].bus_width == 4) { at91_set_A_periph(AT91_PIN_PA9, 1); at91_set_A_periph(AT91_PIN_PA10, 1); at91_set_A_periph(AT91_PIN_PA11, 1); } slot_count++; break; case 1: /* CMD */ at91_set_B_periph(AT91_PIN_PA1, 1); /* DAT0, maybe DAT1..DAT3 */ at91_set_B_periph(AT91_PIN_PA0, 1); if (data->slot[i].bus_width == 4) { at91_set_B_periph(AT91_PIN_PA5, 1); at91_set_B_periph(AT91_PIN_PA4, 1); at91_set_B_periph(AT91_PIN_PA3, 1); } slot_count++; break; default: printk(KERN_ERR "AT91: SD/MMC slot %d not available\n", i); break; } } } if (slot_count) { /* CLK */ at91_set_A_periph(AT91_PIN_PA8, 0); mmc_data = *data; platform_device_register(&at91sam9260_mmc_device); } } #else void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data) {} #endif /* -------------------------------------------------------------------- * NAND / SmartMedia * -------------------------------------------------------------------- */ #if defined(CONFIG_MTD_NAND_ATMEL) || defined(CONFIG_MTD_NAND_ATMEL_MODULE) static struct atmel_nand_data nand_data; #define NAND_BASE AT91_CHIPSELECT_3 static struct resource nand_resources[] = { [0] = { .start = NAND_BASE, .end = NAND_BASE + SZ_256M - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_BASE_ECC, .end = AT91SAM9260_BASE_ECC + SZ_512 - 1, .flags = IORESOURCE_MEM, } }; static struct platform_device at91sam9260_nand_device = { .name = "atmel_nand", .id = -1, .dev = { .platform_data = 
&nand_data, }, .resource = nand_resources, .num_resources = ARRAY_SIZE(nand_resources), }; void __init at91_add_device_nand(struct atmel_nand_data *data) { unsigned long csa; if (!data) return; csa = at91_matrix_read(AT91_MATRIX_EBICSA); at91_matrix_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_CS3A_SMC_SMARTMEDIA); /* enable pin */ if (gpio_is_valid(data->enable_pin)) at91_set_gpio_output(data->enable_pin, 1); /* ready/busy pin */ if (gpio_is_valid(data->rdy_pin)) at91_set_gpio_input(data->rdy_pin, 1); /* card detect pin */ if (gpio_is_valid(data->det_pin)) at91_set_gpio_input(data->det_pin, 1); nand_data = *data; platform_device_register(&at91sam9260_nand_device); } #else void __init at91_add_device_nand(struct atmel_nand_data *data) {} #endif /* -------------------------------------------------------------------- * TWI (i2c) * -------------------------------------------------------------------- */ /* * Prefer the GPIO code since the TWI controller isn't robust * (gets overruns and underruns under load) and can only issue * repeated STARTs in one scenario (the driver doesn't yet handle them). 
*/ #if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE) static struct i2c_gpio_platform_data pdata = { .sda_pin = AT91_PIN_PA23, .sda_is_open_drain = 1, .scl_pin = AT91_PIN_PA24, .scl_is_open_drain = 1, .udelay = 2, /* ~100 kHz */ }; static struct platform_device at91sam9260_twi_device = { .name = "i2c-gpio", .id = 0, .dev.platform_data = &pdata, }; void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices) { at91_set_GPIO_periph(AT91_PIN_PA23, 1); /* TWD (SDA) */ at91_set_multi_drive(AT91_PIN_PA23, 1); at91_set_GPIO_periph(AT91_PIN_PA24, 1); /* TWCK (SCL) */ at91_set_multi_drive(AT91_PIN_PA24, 1); i2c_register_board_info(0, devices, nr_devices); platform_device_register(&at91sam9260_twi_device); } #elif defined(CONFIG_I2C_AT91) || defined(CONFIG_I2C_AT91_MODULE) static struct resource twi_resources[] = { [0] = { .start = AT91SAM9260_BASE_TWI, .end = AT91SAM9260_BASE_TWI + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_TWI, .end = AT91SAM9260_ID_TWI, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9260_twi_device = { .name = "at91_i2c", .id = -1, .resource = twi_resources, .num_resources = ARRAY_SIZE(twi_resources), }; void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices) { /* pins used for TWI interface */ at91_set_A_periph(AT91_PIN_PA23, 0); /* TWD */ at91_set_multi_drive(AT91_PIN_PA23, 1); at91_set_A_periph(AT91_PIN_PA24, 0); /* TWCK */ at91_set_multi_drive(AT91_PIN_PA24, 1); i2c_register_board_info(0, devices, nr_devices); platform_device_register(&at91sam9260_twi_device); } #else void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices) {} #endif /* -------------------------------------------------------------------- * SPI * -------------------------------------------------------------------- */ #if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE) static u64 spi_dmamask = DMA_BIT_MASK(32); static struct resource 
spi0_resources[] = { [0] = { .start = AT91SAM9260_BASE_SPI0, .end = AT91SAM9260_BASE_SPI0 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_SPI0, .end = AT91SAM9260_ID_SPI0, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9260_spi0_device = { .name = "atmel_spi", .id = 0, .dev = { .dma_mask = &spi_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = spi0_resources, .num_resources = ARRAY_SIZE(spi0_resources), }; static const unsigned spi0_standard_cs[4] = { AT91_PIN_PA3, AT91_PIN_PC11, AT91_PIN_PC16, AT91_PIN_PC17 }; static struct resource spi1_resources[] = { [0] = { .start = AT91SAM9260_BASE_SPI1, .end = AT91SAM9260_BASE_SPI1 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_SPI1, .end = AT91SAM9260_ID_SPI1, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9260_spi1_device = { .name = "atmel_spi", .id = 1, .dev = { .dma_mask = &spi_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = spi1_resources, .num_resources = ARRAY_SIZE(spi1_resources), }; static const unsigned spi1_standard_cs[4] = { AT91_PIN_PB3, AT91_PIN_PC5, AT91_PIN_PC4, AT91_PIN_PC3 }; void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices) { int i; unsigned long cs_pin; short enable_spi0 = 0; short enable_spi1 = 0; /* Choose SPI chip-selects */ for (i = 0; i < nr_devices; i++) { if (devices[i].controller_data) cs_pin = (unsigned long) devices[i].controller_data; else if (devices[i].bus_num == 0) cs_pin = spi0_standard_cs[devices[i].chip_select]; else cs_pin = spi1_standard_cs[devices[i].chip_select]; if (!gpio_is_valid(cs_pin)) continue; if (devices[i].bus_num == 0) enable_spi0 = 1; else enable_spi1 = 1; /* enable chip-select pin */ at91_set_gpio_output(cs_pin, 1); /* pass chip-select pin to driver */ devices[i].controller_data = (void *) cs_pin; } spi_register_board_info(devices, nr_devices); /* Configure SPI bus(es) */ if (enable_spi0) { at91_set_A_periph(AT91_PIN_PA0, 
						0);	/* SPI0_MISO */
		at91_set_A_periph(AT91_PIN_PA1, 0);	/* SPI0_MOSI */
		at91_set_A_periph(AT91_PIN_PA2, 0);	/* SPI0_SPCK */
		platform_device_register(&at91sam9260_spi0_device);
	}
	if (enable_spi1) {
		at91_set_A_periph(AT91_PIN_PB0, 0);	/* SPI1_MISO */
		at91_set_A_periph(AT91_PIN_PB1, 0);	/* SPI1_MOSI */
		at91_set_A_periph(AT91_PIN_PB2, 0);	/* SPI1_SPCK */
		platform_device_register(&at91sam9260_spi1_device);
	}
}
#else
void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices) {}
#endif


/* --------------------------------------------------------------------
 *  Timer/Counter blocks
 * -------------------------------------------------------------------- */

#ifdef CONFIG_ATMEL_TCLIB

/* TCB0 hosts channels TC0..TC2; one MEM resource plus one IRQ per channel */
static struct resource tcb0_resources[] = {
	[0] = {
		.start	= AT91SAM9260_BASE_TCB0,
		.end	= AT91SAM9260_BASE_TCB0 + SZ_256 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91SAM9260_ID_TC0,
		.end	= AT91SAM9260_ID_TC0,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		.start	= AT91SAM9260_ID_TC1,
		.end	= AT91SAM9260_ID_TC1,
		.flags	= IORESOURCE_IRQ,
	},
	[3] = {
		.start	= AT91SAM9260_ID_TC2,
		.end	= AT91SAM9260_ID_TC2,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91sam9260_tcb0_device = {
	.name		= "atmel_tcb",
	.id		= 0,
	.resource	= tcb0_resources,
	.num_resources	= ARRAY_SIZE(tcb0_resources),
};

/* TCB1 hosts channels TC3..TC5 */
static struct resource tcb1_resources[] = {
	[0] = {
		.start	= AT91SAM9260_BASE_TCB1,
		.end	= AT91SAM9260_BASE_TCB1 + SZ_256 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91SAM9260_ID_TC3,
		.end	= AT91SAM9260_ID_TC3,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		.start	= AT91SAM9260_ID_TC4,
		.end	= AT91SAM9260_ID_TC4,
		.flags	= IORESOURCE_IRQ,
	},
	[3] = {
		.start	= AT91SAM9260_ID_TC5,
		.end	= AT91SAM9260_ID_TC5,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91sam9260_tcb1_device = {
	.name		= "atmel_tcb",
	.id		= 1,
	.resource	= tcb1_resources,
	.num_resources	= ARRAY_SIZE(tcb1_resources),
};

#if defined(CONFIG_OF)
static struct of_device_id tcb_ids[] = {
	{ .compatible = "atmel,at91rm9200-tcb"
}, { /*sentinel*/ } }; #endif static void __init at91_add_device_tc(void) { #if defined(CONFIG_OF) struct device_node *np; np = of_find_matching_node(NULL, tcb_ids); if (np) { of_node_put(np); return; } #endif platform_device_register(&at91sam9260_tcb0_device); platform_device_register(&at91sam9260_tcb1_device); } #else static void __init at91_add_device_tc(void) { } #endif /* -------------------------------------------------------------------- * RTT * -------------------------------------------------------------------- */ static struct resource rtt_resources[] = { { .start = AT91SAM9260_BASE_RTT, .end = AT91SAM9260_BASE_RTT + SZ_16 - 1, .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_MEM, }, }; static struct platform_device at91sam9260_rtt_device = { .name = "at91_rtt", .id = 0, .resource = rtt_resources, }; #if IS_ENABLED(CONFIG_RTC_DRV_AT91SAM9) static void __init at91_add_device_rtt_rtc(void) { at91sam9260_rtt_device.name = "rtc-at91sam9"; /* * The second resource is needed: * GPBR will serve as the storage for RTC time offset */ at91sam9260_rtt_device.num_resources = 2; rtt_resources[1].start = AT91SAM9260_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR; rtt_resources[1].end = rtt_resources[1].start + 3; } #else static void __init at91_add_device_rtt_rtc(void) { /* Only one resource is needed: RTT not used as RTC */ at91sam9260_rtt_device.num_resources = 1; } #endif static void __init at91_add_device_rtt(void) { at91_add_device_rtt_rtc(); platform_device_register(&at91sam9260_rtt_device); } /* -------------------------------------------------------------------- * Watchdog * -------------------------------------------------------------------- */ #if defined(CONFIG_AT91SAM9X_WATCHDOG) || defined(CONFIG_AT91SAM9X_WATCHDOG_MODULE) static struct resource wdt_resources[] = { { .start = AT91SAM9260_BASE_WDT, .end = AT91SAM9260_BASE_WDT + SZ_16 - 1, .flags = IORESOURCE_MEM, } }; static struct platform_device at91sam9260_wdt_device = { .name = "at91_wdt", .id = -1, 
.resource = wdt_resources, .num_resources = ARRAY_SIZE(wdt_resources), }; static void __init at91_add_device_watchdog(void) { platform_device_register(&at91sam9260_wdt_device); } #else static void __init at91_add_device_watchdog(void) {} #endif /* -------------------------------------------------------------------- * SSC -- Synchronous Serial Controller * -------------------------------------------------------------------- */ #if defined(CONFIG_ATMEL_SSC) || defined(CONFIG_ATMEL_SSC_MODULE) static u64 ssc_dmamask = DMA_BIT_MASK(32); static struct resource ssc_resources[] = { [0] = { .start = AT91SAM9260_BASE_SSC, .end = AT91SAM9260_BASE_SSC + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_SSC, .end = AT91SAM9260_ID_SSC, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9260_ssc_device = { .name = "ssc", .id = 0, .dev = { .dma_mask = &ssc_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = ssc_resources, .num_resources = ARRAY_SIZE(ssc_resources), }; static inline void configure_ssc_pins(unsigned pins) { if (pins & ATMEL_SSC_TF) at91_set_A_periph(AT91_PIN_PB17, 1); if (pins & ATMEL_SSC_TK) at91_set_A_periph(AT91_PIN_PB16, 1); if (pins & ATMEL_SSC_TD) at91_set_A_periph(AT91_PIN_PB18, 1); if (pins & ATMEL_SSC_RD) at91_set_A_periph(AT91_PIN_PB19, 1); if (pins & ATMEL_SSC_RK) at91_set_A_periph(AT91_PIN_PB20, 1); if (pins & ATMEL_SSC_RF) at91_set_A_periph(AT91_PIN_PB21, 1); } /* * SSC controllers are accessed through library code, instead of any * kind of all-singing/all-dancing driver. For example one could be * used by a particular I2S audio codec's driver, while another one * on the same system might be used by a custom data capture driver. */ void __init at91_add_device_ssc(unsigned id, unsigned pins) { struct platform_device *pdev; /* * NOTE: caller is responsible for passing information matching * "pins" to whatever will be using each particular controller. 
*/ switch (id) { case AT91SAM9260_ID_SSC: pdev = &at91sam9260_ssc_device; configure_ssc_pins(pins); break; default: return; } platform_device_register(pdev); } #else void __init at91_add_device_ssc(unsigned id, unsigned pins) {} #endif /* -------------------------------------------------------------------- * UART * -------------------------------------------------------------------- */ #if defined(CONFIG_SERIAL_ATMEL) static struct resource dbgu_resources[] = { [0] = { .start = AT91SAM9260_BASE_DBGU, .end = AT91SAM9260_BASE_DBGU + SZ_512 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91_ID_SYS, .end = AT91_ID_SYS, .flags = IORESOURCE_IRQ, }, }; static struct atmel_uart_data dbgu_data = { .use_dma_tx = 0, .use_dma_rx = 0, /* DBGU not capable of receive DMA */ }; static u64 dbgu_dmamask = DMA_BIT_MASK(32); static struct platform_device at91sam9260_dbgu_device = { .name = "atmel_usart", .id = 0, .dev = { .dma_mask = &dbgu_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &dbgu_data, }, .resource = dbgu_resources, .num_resources = ARRAY_SIZE(dbgu_resources), }; static inline void configure_dbgu_pins(void) { at91_set_A_periph(AT91_PIN_PB14, 0); /* DRXD */ at91_set_A_periph(AT91_PIN_PB15, 1); /* DTXD */ } static struct resource uart0_resources[] = { [0] = { .start = AT91SAM9260_BASE_US0, .end = AT91SAM9260_BASE_US0 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_US0, .end = AT91SAM9260_ID_US0, .flags = IORESOURCE_IRQ, }, }; static struct atmel_uart_data uart0_data = { .use_dma_tx = 1, .use_dma_rx = 1, }; static u64 uart0_dmamask = DMA_BIT_MASK(32); static struct platform_device at91sam9260_uart0_device = { .name = "atmel_usart", .id = 1, .dev = { .dma_mask = &uart0_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &uart0_data, }, .resource = uart0_resources, .num_resources = ARRAY_SIZE(uart0_resources), }; static inline void configure_usart0_pins(unsigned pins) { at91_set_A_periph(AT91_PIN_PB4, 1); /* TXD0 */ 
at91_set_A_periph(AT91_PIN_PB5, 0); /* RXD0 */ if (pins & ATMEL_UART_RTS) at91_set_A_periph(AT91_PIN_PB26, 0); /* RTS0 */ if (pins & ATMEL_UART_CTS) at91_set_A_periph(AT91_PIN_PB27, 0); /* CTS0 */ if (pins & ATMEL_UART_DTR) at91_set_A_periph(AT91_PIN_PB24, 0); /* DTR0 */ if (pins & ATMEL_UART_DSR) at91_set_A_periph(AT91_PIN_PB22, 0); /* DSR0 */ if (pins & ATMEL_UART_DCD) at91_set_A_periph(AT91_PIN_PB23, 0); /* DCD0 */ if (pins & ATMEL_UART_RI) at91_set_A_periph(AT91_PIN_PB25, 0); /* RI0 */ } static struct resource uart1_resources[] = { [0] = { .start = AT91SAM9260_BASE_US1, .end = AT91SAM9260_BASE_US1 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_US1, .end = AT91SAM9260_ID_US1, .flags = IORESOURCE_IRQ, }, }; static struct atmel_uart_data uart1_data = { .use_dma_tx = 1, .use_dma_rx = 1, }; static u64 uart1_dmamask = DMA_BIT_MASK(32); static struct platform_device at91sam9260_uart1_device = { .name = "atmel_usart", .id = 2, .dev = { .dma_mask = &uart1_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &uart1_data, }, .resource = uart1_resources, .num_resources = ARRAY_SIZE(uart1_resources), }; static inline void configure_usart1_pins(unsigned pins) { at91_set_A_periph(AT91_PIN_PB6, 1); /* TXD1 */ at91_set_A_periph(AT91_PIN_PB7, 0); /* RXD1 */ if (pins & ATMEL_UART_RTS) at91_set_A_periph(AT91_PIN_PB28, 0); /* RTS1 */ if (pins & ATMEL_UART_CTS) at91_set_A_periph(AT91_PIN_PB29, 0); /* CTS1 */ } static struct resource uart2_resources[] = { [0] = { .start = AT91SAM9260_BASE_US2, .end = AT91SAM9260_BASE_US2 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_US2, .end = AT91SAM9260_ID_US2, .flags = IORESOURCE_IRQ, }, }; static struct atmel_uart_data uart2_data = { .use_dma_tx = 1, .use_dma_rx = 1, }; static u64 uart2_dmamask = DMA_BIT_MASK(32); static struct platform_device at91sam9260_uart2_device = { .name = "atmel_usart", .id = 3, .dev = { .dma_mask = &uart2_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), 
.platform_data = &uart2_data, }, .resource = uart2_resources, .num_resources = ARRAY_SIZE(uart2_resources), }; static inline void configure_usart2_pins(unsigned pins) { at91_set_A_periph(AT91_PIN_PB8, 1); /* TXD2 */ at91_set_A_periph(AT91_PIN_PB9, 0); /* RXD2 */ if (pins & ATMEL_UART_RTS) at91_set_A_periph(AT91_PIN_PA4, 0); /* RTS2 */ if (pins & ATMEL_UART_CTS) at91_set_A_periph(AT91_PIN_PA5, 0); /* CTS2 */ } static struct resource uart3_resources[] = { [0] = { .start = AT91SAM9260_BASE_US3, .end = AT91SAM9260_BASE_US3 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_US3, .end = AT91SAM9260_ID_US3, .flags = IORESOURCE_IRQ, }, }; static struct atmel_uart_data uart3_data = { .use_dma_tx = 1, .use_dma_rx = 1, }; static u64 uart3_dmamask = DMA_BIT_MASK(32); static struct platform_device at91sam9260_uart3_device = { .name = "atmel_usart", .id = 4, .dev = { .dma_mask = &uart3_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &uart3_data, }, .resource = uart3_resources, .num_resources = ARRAY_SIZE(uart3_resources), }; static inline void configure_usart3_pins(unsigned pins) { at91_set_A_periph(AT91_PIN_PB10, 1); /* TXD3 */ at91_set_A_periph(AT91_PIN_PB11, 0); /* RXD3 */ if (pins & ATMEL_UART_RTS) at91_set_B_periph(AT91_PIN_PC8, 0); /* RTS3 */ if (pins & ATMEL_UART_CTS) at91_set_B_periph(AT91_PIN_PC10, 0); /* CTS3 */ } static struct resource uart4_resources[] = { [0] = { .start = AT91SAM9260_BASE_US4, .end = AT91SAM9260_BASE_US4 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_US4, .end = AT91SAM9260_ID_US4, .flags = IORESOURCE_IRQ, }, }; static struct atmel_uart_data uart4_data = { .use_dma_tx = 1, .use_dma_rx = 1, }; static u64 uart4_dmamask = DMA_BIT_MASK(32); static struct platform_device at91sam9260_uart4_device = { .name = "atmel_usart", .id = 5, .dev = { .dma_mask = &uart4_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &uart4_data, }, .resource = uart4_resources, .num_resources = 
ARRAY_SIZE(uart4_resources), }; static inline void configure_usart4_pins(void) { at91_set_B_periph(AT91_PIN_PA31, 1); /* TXD4 */ at91_set_B_periph(AT91_PIN_PA30, 0); /* RXD4 */ } static struct resource uart5_resources[] = { [0] = { .start = AT91SAM9260_BASE_US5, .end = AT91SAM9260_BASE_US5 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_US5, .end = AT91SAM9260_ID_US5, .flags = IORESOURCE_IRQ, }, }; static struct atmel_uart_data uart5_data = { .use_dma_tx = 1, .use_dma_rx = 1, }; static u64 uart5_dmamask = DMA_BIT_MASK(32); static struct platform_device at91sam9260_uart5_device = { .name = "atmel_usart", .id = 6, .dev = { .dma_mask = &uart5_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &uart5_data, }, .resource = uart5_resources, .num_resources = ARRAY_SIZE(uart5_resources), }; static inline void configure_usart5_pins(void) { at91_set_A_periph(AT91_PIN_PB12, 1); /* TXD5 */ at91_set_A_periph(AT91_PIN_PB13, 0); /* RXD5 */ } static struct platform_device *__initdata at91_uarts[ATMEL_MAX_UART]; /* the UARTs to use */ void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) { struct platform_device *pdev; struct atmel_uart_data *pdata; switch (id) { case 0: /* DBGU */ pdev = &at91sam9260_dbgu_device; configure_dbgu_pins(); break; case AT91SAM9260_ID_US0: pdev = &at91sam9260_uart0_device; configure_usart0_pins(pins); break; case AT91SAM9260_ID_US1: pdev = &at91sam9260_uart1_device; configure_usart1_pins(pins); break; case AT91SAM9260_ID_US2: pdev = &at91sam9260_uart2_device; configure_usart2_pins(pins); break; case AT91SAM9260_ID_US3: pdev = &at91sam9260_uart3_device; configure_usart3_pins(pins); break; case AT91SAM9260_ID_US4: pdev = &at91sam9260_uart4_device; configure_usart4_pins(); break; case AT91SAM9260_ID_US5: pdev = &at91sam9260_uart5_device; configure_usart5_pins(); break; default: return; } pdata = pdev->dev.platform_data; pdata->num = portnr; /* update to mapped ID */ if (portnr < ATMEL_MAX_UART) 
at91_uarts[portnr] = pdev; } void __init at91_set_serial_console(unsigned portnr) { if (portnr < ATMEL_MAX_UART) { atmel_default_console_device = at91_uarts[portnr]; at91sam9260_set_console_clock(at91_uarts[portnr]->id); } } void __init at91_add_device_serial(void) { int i; for (i = 0; i < ATMEL_MAX_UART; i++) { if (at91_uarts[i]) platform_device_register(at91_uarts[i]); } if (!atmel_default_console_device) printk(KERN_INFO "AT91: No default serial console defined.\n"); } #else void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) {} void __init at91_set_serial_console(unsigned portnr) {} void __init at91_add_device_serial(void) {} #endif /* -------------------------------------------------------------------- * CF/IDE * -------------------------------------------------------------------- */ #if defined(CONFIG_PATA_AT91) || defined(CONFIG_PATA_AT91_MODULE) || \ defined(CONFIG_AT91_CF) || defined(CONFIG_AT91_CF_MODULE) static struct at91_cf_data cf0_data; static struct resource cf0_resources[] = { [0] = { .start = AT91_CHIPSELECT_4, .end = AT91_CHIPSELECT_4 + SZ_256M - 1, .flags = IORESOURCE_MEM, } }; static struct platform_device cf0_device = { .id = 0, .dev = { .platform_data = &cf0_data, }, .resource = cf0_resources, .num_resources = ARRAY_SIZE(cf0_resources), }; static struct at91_cf_data cf1_data; static struct resource cf1_resources[] = { [0] = { .start = AT91_CHIPSELECT_5, .end = AT91_CHIPSELECT_5 + SZ_256M - 1, .flags = IORESOURCE_MEM, } }; static struct platform_device cf1_device = { .id = 1, .dev = { .platform_data = &cf1_data, }, .resource = cf1_resources, .num_resources = ARRAY_SIZE(cf1_resources), }; void __init at91_add_device_cf(struct at91_cf_data *data) { struct platform_device *pdev; unsigned long csa; if (!data) return; csa = at91_matrix_read(AT91_MATRIX_EBICSA); switch (data->chipselect) { case 4: at91_set_multi_drive(AT91_PIN_PC8, 0); at91_set_A_periph(AT91_PIN_PC8, 0); csa |= AT91_MATRIX_CS4A_SMC_CF1; cf0_data = *data; pdev 
= &cf0_device; break; case 5: at91_set_multi_drive(AT91_PIN_PC9, 0); at91_set_A_periph(AT91_PIN_PC9, 0); csa |= AT91_MATRIX_CS5A_SMC_CF2; cf1_data = *data; pdev = &cf1_device; break; default: printk(KERN_ERR "AT91 CF: bad chip-select requested (%u)\n", data->chipselect); return; } at91_matrix_write(AT91_MATRIX_EBICSA, csa); if (gpio_is_valid(data->rst_pin)) { at91_set_multi_drive(data->rst_pin, 0); at91_set_gpio_output(data->rst_pin, 1); } if (gpio_is_valid(data->irq_pin)) { at91_set_gpio_input(data->irq_pin, 0); at91_set_deglitch(data->irq_pin, 1); } if (gpio_is_valid(data->det_pin)) { at91_set_gpio_input(data->det_pin, 0); at91_set_deglitch(data->det_pin, 1); } at91_set_B_periph(AT91_PIN_PC6, 0); /* CFCE1 */ at91_set_B_periph(AT91_PIN_PC7, 0); /* CFCE2 */ at91_set_A_periph(AT91_PIN_PC10, 0); /* CFRNW */ at91_set_A_periph(AT91_PIN_PC15, 1); /* NWAIT */ if (data->flags & AT91_CF_TRUE_IDE) #if defined(CONFIG_PATA_AT91) || defined(CONFIG_PATA_AT91_MODULE) pdev->name = "pata_at91"; #else #warning "board requires AT91_CF_TRUE_IDE: enable pata_at91" #endif else pdev->name = "at91_cf"; platform_device_register(pdev); } #else void __init at91_add_device_cf(struct at91_cf_data * data) {} #endif /* -------------------------------------------------------------------- */ /* * These devices are always present and don't need any board-specific * setup. */ static int __init at91_add_standard_devices(void) { at91_add_device_rtt(); at91_add_device_watchdog(); at91_add_device_tc(); return 0; } arch_initcall(at91_add_standard_devices);
3EleVen/kernel_common
arch/arm/mach-at91/at91sam9260_devices.c
C
gpl-2.0
34,302
/*
 * err_inject.c -
 *	1.) Inject errors to a processor.
 *	2.) Query error injection capabilities.
 * This driver along with user space code can be acting as an error
 * injection tool.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Written by: Fenghua Yu <fenghua.yu@intel.com>, Intel Corporation
 * Copyright (C) 2006, Intel Corp.  All rights reserved.
 *
 */
#include <linux/device.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/module.h>

#define ERR_INJ_DEBUG

#define ERR_DATA_BUFFER_SIZE 3 		// Three 8-byte;

/* Helper macros for the read-only / read-write sysfs attributes below. */
#define define_one_ro(name) 						\
static DEVICE_ATTR(name, 0444, show_##name, NULL)

#define define_one_rw(name) 						\
static DEVICE_ATTR(name, 0644, show_##name, store_##name)

/* Per-CPU state; each CPU's sysfs directory indexes these by dev->id. */
static u64 call_start[NR_CPUS];
static u64 phys_addr[NR_CPUS];
static u64 err_type_info[NR_CPUS];
static u64 err_struct_info[NR_CPUS];
/* 16-byte aligned buffer handed to the PAL firmware call. */
static struct {
	u64 data1;
	u64 data2;
	u64 data3;
} __attribute__((__aligned__(16))) err_data_buffer[NR_CPUS];
static s64 status[NR_CPUS];
static u64 capabilities[NR_CPUS];
static u64 resources[NR_CPUS];

/* Generic "show" for a per-CPU u64 array, printed as hex. */
#define show(name) 						\
static ssize_t 						\
show_##name(struct device *dev, struct device_attribute *attr,	\
		char *buf)					\
{								\
	u32 cpu=dev->id;					\
	return sprintf(buf, "%lx\n", name[cpu]);		\
}

/* Generic "store" for a per-CPU u64 array, parsed as hex. */
#define store(name)						\
static ssize_t 						\
store_##name(struct device *dev, struct device_attribute *attr,	\
					const char *buf, size_t size)	\
{								\
	unsigned int cpu=dev->id;				\
	name[cpu] = simple_strtoull(buf, NULL, 16);		\
	return size;						\
}

show(call_start)

/* It's user's responsibility to call the PAL procedure on a specific
 * processor. The cpu number in driver is only used for storing data.
 */
static ssize_t
store_call_start(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t size)
{
	unsigned int cpu=dev->id;
	unsigned long call_start = simple_strtoull(buf, NULL, 16);

#ifdef ERR_INJ_DEBUG
	printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu);
	printk(KERN_DEBUG "err_type_info=%lx,\n", err_type_info[cpu]);
	printk(KERN_DEBUG "err_struct_info=%lx,\n", err_struct_info[cpu]);
	printk(KERN_DEBUG "err_data_buffer=%lx, %lx, %lx.\n",
			  err_data_buffer[cpu].data1,
			  err_data_buffer[cpu].data2,
			  err_data_buffer[cpu].data3);
#endif
	switch (call_start) {
	    case 0: /* Do nothing. */
		break;
	    case 1: /* Call pal_mc_error_inject in physical mode. */
		status[cpu]=ia64_pal_mc_error_inject_phys(err_type_info[cpu],
					err_struct_info[cpu],
					ia64_tpa(&err_data_buffer[cpu]),
					&capabilities[cpu],
			 		&resources[cpu]);
		break;
	    case 2: /* Call pal_mc_error_inject in virtual mode. */
		status[cpu]=ia64_pal_mc_error_inject_virt(err_type_info[cpu],
					err_struct_info[cpu],
					ia64_tpa(&err_data_buffer[cpu]),
					&capabilities[cpu],
			 		&resources[cpu]);
		break;
	    default:
		status[cpu] = -EINVAL;
		break;
	}

#ifdef ERR_INJ_DEBUG
	printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]);
	printk(KERN_DEBUG "capapbilities=%lx,\n", capabilities[cpu]);
	printk(KERN_DEBUG "resources=%lx\n", resources[cpu]);
#endif
	return size;
}

show(err_type_info)
store(err_type_info)

static ssize_t
show_virtual_to_phys(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	unsigned int cpu=dev->id;
	return sprintf(buf, "%lx\n", phys_addr[cpu]);
}

/*
 * Translate a user virtual address to a physical address and cache it
 * in phys_addr[cpu].  get_user_pages() is used to fault the page in
 * first so ia64_tpa() sees a resident mapping.
 */
static ssize_t
store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t size)
{
	unsigned int cpu=dev->id;
	u64 virt_addr=simple_strtoull(buf, NULL, 16);
	int ret;

	ret = get_user_pages(current, current->mm, virt_addr,
			1, VM_READ, 0, NULL, NULL);
	if (ret<=0) {
#ifdef ERR_INJ_DEBUG
		printk("Virtual address %lx is not existing.\n",virt_addr);
#endif
		return -EINVAL;
	}

	phys_addr[cpu] = ia64_tpa(virt_addr);
	return size;
}

show(err_struct_info)
store(err_struct_info)

static ssize_t
show_err_data_buffer(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	unsigned int cpu=dev->id;

	return sprintf(buf, "%lx, %lx, %lx\n",
			err_data_buffer[cpu].data1,
			err_data_buffer[cpu].data2,
			err_data_buffer[cpu].data3);
}

static ssize_t
store_err_data_buffer(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t size)
{
	unsigned int cpu=dev->id;
	int ret;

#ifdef ERR_INJ_DEBUG
	printk("write err_data_buffer=[%lx,%lx,%lx] on cpu%d\n",
		 err_data_buffer[cpu].data1,
		 err_data_buffer[cpu].data2,
		 err_data_buffer[cpu].data3,
		 cpu);
#endif
	/* expects exactly three comma-separated hex values */
	ret=sscanf(buf, "%lx, %lx, %lx",
			&err_data_buffer[cpu].data1,
			&err_data_buffer[cpu].data2,
			&err_data_buffer[cpu].data3);
	if (ret!=ERR_DATA_BUFFER_SIZE)
		return -EINVAL;

	return size;
}

show(status)
show(capabilities)
show(resources)

define_one_rw(call_start);
define_one_rw(err_type_info);
define_one_rw(err_struct_info);
define_one_rw(err_data_buffer);
define_one_rw(virtual_to_phys);
define_one_ro(status);
define_one_ro(capabilities);
define_one_ro(resources);

static struct attribute *default_attrs[] = {
	&dev_attr_call_start.attr,
	&dev_attr_virtual_to_phys.attr,
	&dev_attr_err_type_info.attr,
	&dev_attr_err_struct_info.attr,
	&dev_attr_err_data_buffer.attr,
	&dev_attr_status.attr,
	&dev_attr_capabilities.attr,
	&dev_attr_resources.attr,
	NULL
};

static struct attribute_group err_inject_attr_group = {
	.attrs = default_attrs,
	.name = "err_inject"
};

/* Add/Remove err_inject interface for CPU device */
static int err_inject_add_dev(struct device *sys_dev)
{
	return sysfs_create_group(&sys_dev->kobj, &err_inject_attr_group);
}

static int err_inject_remove_dev(struct device *sys_dev)
{
	sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
	return 0;
}

/* Hotplug callback: create/tear down the sysfs group as CPUs come and go. */
static int err_inject_cpu_callback(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *sys_dev;

	sys_dev = get_cpu_device(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		err_inject_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		err_inject_remove_dev(sys_dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block err_inject_cpu_notifier =
{
	.notifier_call = err_inject_cpu_callback,
};

static int __init
err_inject_init(void)
{
	int i;

#ifdef ERR_INJ_DEBUG
	printk(KERN_INFO "Enter error injection driver.\n");
#endif

	/* register under the hotplug lock so no CPU is missed or doubled */
	cpu_notifier_register_begin();

	for_each_online_cpu(i) {
		err_inject_cpu_callback(&err_inject_cpu_notifier, CPU_ONLINE,
				(void *)(long)i);
	}

	__register_hotcpu_notifier(&err_inject_cpu_notifier);

	cpu_notifier_register_done();

	return 0;
}

static void __exit
err_inject_exit(void)
{
	int i;
	struct device *sys_dev;

#ifdef ERR_INJ_DEBUG
	printk(KERN_INFO "Exit error injection driver.\n");
#endif

	cpu_notifier_register_begin();

	for_each_online_cpu(i) {
		sys_dev = get_cpu_device(i);
		sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
	}

	__unregister_hotcpu_notifier(&err_inject_cpu_notifier);

	cpu_notifier_register_done();
}

module_init(err_inject_init);
module_exit(err_inject_exit);
MODULE_AUTHOR("Fenghua Yu <fenghua.yu@intel.com>");
MODULE_DESCRIPTION("MC error injection kernel sysfs interface");
MODULE_LICENSE("GPL");
apascual89/android_kernel_oneplus_msm8996-1
arch/ia64/kernel/err_inject.c
C
gpl-2.0
7,949
/*
 * ci13xxx_udc.c - MIPS USB IP core family device controller
 *
 * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
 *
 * Author: David Lopo
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * Description: MIPS USB IP core family device controller
 *              Currently it only supports IP part number CI13412
 *
 * This driver is composed of several blocks:
 * - HW:     hardware interface
 * - DBG:    debug facilities (optional)
 * - UTIL:   utilities
 * - ISR:    interrupts handling
 * - ENDPT:  endpoint operations (Gadget API)
 * - GADGET: gadget operations (Gadget API)
 * - BUS:    bus glue code, bus abstraction layer
 *
 * Compile Options
 * - CONFIG_USB_GADGET_DEBUG_FILES: enable debug facilities
 * - STALL_IN:  non-empty bulk-in pipes cannot be halted
 *              if defined mass storage compliance succeeds but with warnings
 *              => case 4: Hi >  Dn
 *              => case 5: Hi >  Di
 *              => case 8: Hi <> Do
 *              if undefined usbtest 13 fails
 * - TRACE:     enable function tracing (depends on DEBUG)
 *
 * Main Features
 * - Chapter 9 & Mass Storage Compliance with Gadget File Storage
 * - Chapter 9 Compliance with Gadget Zero (STALL_IN undefined)
 * - Normal & LPM support
 *
 * USBTEST Report
 * - OK: 0-12, 13 (STALL_IN defined) & 14
 * - Not Supported: 15 & 16 (ISO)
 *
 * TODO List
 * - OTG
 * - Isochronous & Interrupt Traffic
 * - Handle requests which spawns into several TDs
 * - GET_STATUS(device) - always reports 0
 * - Gadget API (majority of optional features)
 * - Suspend & Remote Wakeup
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>

#include "ci13xxx_udc.h"

/******************************************************************************
 * DEFINE
 *****************************************************************************/
/* ctrl register bank access */
static DEFINE_SPINLOCK(udc_lock);

/* control endpoint description */
static const struct usb_endpoint_descriptor
ctrl_endpt_out_desc = {
	.bLength         = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
};

static const struct usb_endpoint_descriptor
ctrl_endpt_in_desc = {
	.bLength         = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
};

/* UDC descriptor */
static struct ci13xxx *_udc;

/* Interrupt statistics */
#define ISR_MASK   0x1F
static struct {
	u32 test;
	u32 ui;
	u32 uei;
	u32 pci;
	u32 uri;
	u32 sli;
	u32 none;
	struct {
		u32 cnt;
		u32 buf[ISR_MASK+1];
		u32 idx;
	} hndl;
} isr_statistics;

/**
 * ffs_nr: find first (least significant) bit set
 * @x: the word to search
 *
 * This function returns bit number (instead of position)
 */
static int ffs_nr(u32 x)
{
	int n = ffs(x);

	/* 32 is the "no bit set" sentinel (ffs() returned 0) */
	return n ? n-1 : 32;
}

/******************************************************************************
 * HW block
 *****************************************************************************/
/* register bank descriptor */
static struct {
	unsigned      lpm;    /* is LPM? */
	void __iomem *abs;    /* bus map offset */
	void __iomem *cap;    /* bus map offset + CAP offset + CAP data */
	size_t        size;   /* bank size */
} hw_bank;

/* MSM specific */
#define ABS_AHBBURST        (0x0090UL)
#define ABS_AHBMODE         (0x0098UL)
/* UDC register map */
#define ABS_CAPLENGTH       (0x100UL)
#define ABS_HCCPARAMS       (0x108UL)
#define ABS_DCCPARAMS       (0x124UL)
#define ABS_TESTMODE        (hw_bank.lpm ? 0x0FCUL : 0x138UL)
/* offset to CAPLENTGH (addr + data) */
/* register offsets below differ between LPM and non-LPM banks */
#define CAP_USBCMD          (0x000UL)
#define CAP_USBSTS          (0x004UL)
#define CAP_USBINTR         (0x008UL)
#define CAP_DEVICEADDR      (0x014UL)
#define CAP_ENDPTLISTADDR   (0x018UL)
#define CAP_PORTSC          (0x044UL)
#define CAP_DEVLC           (0x084UL)
#define CAP_USBMODE         (hw_bank.lpm ? 0x0C8UL : 0x068UL)
#define CAP_ENDPTSETUPSTAT  (hw_bank.lpm ? 0x0D8UL : 0x06CUL)
#define CAP_ENDPTPRIME      (hw_bank.lpm ? 0x0DCUL : 0x070UL)
#define CAP_ENDPTFLUSH      (hw_bank.lpm ? 0x0E0UL : 0x074UL)
#define CAP_ENDPTSTAT       (hw_bank.lpm ? 0x0E4UL : 0x078UL)
#define CAP_ENDPTCOMPLETE   (hw_bank.lpm ? 0x0E8UL : 0x07CUL)
#define CAP_ENDPTCTRL       (hw_bank.lpm ? 0x0ECUL : 0x080UL)
#define CAP_LAST            (hw_bank.lpm ? 0x12CUL : 0x0C0UL)

/* maximum number of enpoints: valid only after hw_device_reset() */
static unsigned hw_ep_max;

/**
 * hw_ep_bit: calculates the bit number
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * This function returns bit number (TX endpoints occupy bits 16..31)
 */
static inline int hw_ep_bit(int num, int dir)
{
	return num + (dir ? 16 : 0);
}

/**
 * hw_aread: reads from register bitfield
 * @addr: address relative to bus map
 * @mask: bitfield mask
 *
 * This function returns register bitfield data
 */
static u32 hw_aread(u32 addr, u32 mask)
{
	return ioread32(addr + hw_bank.abs) & mask;
}

/**
 * hw_awrite: writes to register bitfield
 * @addr: address relative to bus map
 * @mask: bitfield mask
 * @data: new data
 */
static void hw_awrite(u32 addr, u32 mask, u32 data)
{
	/* read-modify-write: bits outside mask are preserved */
	iowrite32(hw_aread(addr, ~mask) | (data & mask),
		  addr + hw_bank.abs);
}

/**
 * hw_cread: reads from register bitfield
 * @addr: address relative to CAP offset plus content
 * @mask: bitfield mask
 *
 * This function returns register bitfield data
 */
static u32 hw_cread(u32 addr, u32 mask)
{
	return ioread32(addr + hw_bank.cap) & mask;
}

/**
 * hw_cwrite: writes to register bitfield
 * @addr: address relative to CAP offset plus content
 * @mask: bitfield mask
 * @data: new data
 */
static void hw_cwrite(u32 addr, u32 mask, u32 data)
{
	/* read-modify-write: bits outside mask are preserved */
	iowrite32(hw_cread(addr, ~mask) | (data & mask),
		  addr + hw_bank.cap);
}

/**
 * hw_ctest_and_clear: tests & clears register bitfield
 * @addr: address relative to CAP offset plus content
 * @mask: bitfield mask
 *
 * This function returns register bitfield data
 */
static u32 hw_ctest_and_clear(u32 addr, u32 mask)
{
	u32 reg = hw_cread(addr, mask);

	/* write-1-to-clear semantics assumed for these status registers */
	iowrite32(reg, addr + hw_bank.cap);
	return reg;
}

/**
 * hw_ctest_and_write: tests & writes register bitfield
 * @addr: address relative to CAP offset plus content
 * @mask: bitfield mask
 * @data: new data
 *
 * This function returns register bitfield data (old value, shifted down)
 */
static u32 hw_ctest_and_write(u32 addr, u32 mask, u32 data)
{
	u32 reg = hw_cread(addr, ~0);

	iowrite32((reg & ~mask) | (data & mask), addr + hw_bank.cap);
	return (reg & mask) >> ffs_nr(mask);
}

/*
 * Probe-time bank setup: locate the CAP register bank via CAPLENGTH,
 * detect LPM capability and the number of hardware endpoints.
 * Returns an error code.
 */
static int hw_device_init(void __iomem *base)
{
	u32 reg;

	/* bank is a module variable */
	hw_bank.abs = base;

	hw_bank.cap = hw_bank.abs;
	hw_bank.cap += ABS_CAPLENGTH;
	hw_bank.cap += ioread8(hw_bank.cap);

	reg = hw_aread(ABS_HCCPARAMS, HCCPARAMS_LEN) >> ffs_nr(HCCPARAMS_LEN);
	hw_bank.lpm  = reg;
	hw_bank.size = hw_bank.cap - hw_bank.abs;
	hw_bank.size += CAP_LAST;
	hw_bank.size /= sizeof(u32);

	reg = hw_aread(ABS_DCCPARAMS, DCCPARAMS_DEN) >> ffs_nr(DCCPARAMS_DEN);
	hw_ep_max = reg * 2;   /* cache hw ENDPT_MAX (DEN counts EP pairs) */

	if (hw_ep_max == 0 || hw_ep_max > ENDPT_MAX)
		return -ENODEV;

	/* setup lock mode ? */

	/* ENDPTSETUPSTAT is '0' by default */

	/* HCSPARAMS.bf.ppc SHOULD BE zero for device */

	return 0;
}

/**
 * hw_device_reset: resets chip (execute without interruption)
 * @udc: the controller to reset
 *
 * This function returns an error code
 */
static int hw_device_reset(struct ci13xxx *udc)
{
	/* should flush & stop before reset */
	hw_cwrite(CAP_ENDPTFLUSH, ~0, ~0);
	hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);

	hw_cwrite(CAP_USBCMD, USBCMD_RST, USBCMD_RST);
	while (hw_cread(CAP_USBCMD, USBCMD_RST))
		udelay(10);             /* not RTOS friendly */

	if (udc->udc_driver->notify_event)
		udc->udc_driver->notify_event(udc,
			CI13XXX_CONTROLLER_RESET_EVENT);

	if (udc->udc_driver->flags & CI13XXX_DISABLE_STREAMING)
		hw_cwrite(CAP_USBMODE, USBMODE_SDIS, USBMODE_SDIS);

	/* USBMODE should be configured step by step */
	hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_IDLE);
	hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_DEVICE);
	hw_cwrite(CAP_USBMODE, USBMODE_SLOM, USBMODE_SLOM); /* HW >= 2.3 */

	if (hw_cread(CAP_USBMODE, USBMODE_CM) != USBMODE_CM_DEVICE) {
		pr_err("cannot enter in device mode");
		pr_err("lpm = %i", hw_bank.lpm);
		return -ENODEV;
	}

	return 0;
}

/**
 * hw_device_state: enables/disables interrupts & starts/stops device (execute
 *                  without interruption)
 * @dma: 0 => disable, !0 => enable and set dma engine
 *
 * This function returns an error code
 */
static int hw_device_state(u32 dma)
{
	if (dma) {
		hw_cwrite(CAP_ENDPTLISTADDR, ~0, dma);
		/* interrupt, error, port change, reset, sleep/suspend */
		hw_cwrite(CAP_USBINTR, ~0,
			     USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
		hw_cwrite(CAP_USBCMD, USBCMD_RS, USBCMD_RS);
	} else {
		hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);
		hw_cwrite(CAP_USBINTR, ~0, 0);
	}
	return 0;
}

/**
 * hw_ep_flush: flush endpoint fifo (execute without interruption)
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * This function returns an error code
 */
static int hw_ep_flush(int num, int dir)
{
	int n = hw_ep_bit(num, dir);

	do {
		/* flush any pending transfer */
		hw_cwrite(CAP_ENDPTFLUSH, BIT(n), BIT(n));
		while (hw_cread(CAP_ENDPTFLUSH, BIT(n)))
			cpu_relax();
	} while (hw_cread(CAP_ENDPTSTAT, BIT(n)));

	return 0;
}

/**
 * hw_ep_disable: disables endpoint (execute without interruption)
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * This function returns an error code
 */
static int hw_ep_disable(int num, int dir)
{
	hw_ep_flush(num, dir);
	hw_cwrite(CAP_ENDPTCTRL + num * sizeof(u32),
		  dir ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
	return 0;
}

/**
 * hw_ep_enable: enables endpoint (execute without interruption)
 * @num:  endpoint number
 * @dir:  endpoint direction
 * @type: endpoint type
 *
 * This function returns an error code
 */
static int hw_ep_enable(int num, int dir, int type)
{
	u32 mask, data;

	if (dir) {
		mask  = ENDPTCTRL_TXT;  /* type    */
		data  = type << ffs_nr(mask);

		mask |= ENDPTCTRL_TXS;  /* unstall */
		mask |= ENDPTCTRL_TXR;  /* reset data toggle */
		data |= ENDPTCTRL_TXR;
		mask |= ENDPTCTRL_TXE;  /* enable  */
		data |= ENDPTCTRL_TXE;
	} else {
		mask  = ENDPTCTRL_RXT;  /* type    */
		data  = type << ffs_nr(mask);

		mask |= ENDPTCTRL_RXS;  /* unstall */
		mask |= ENDPTCTRL_RXR;  /* reset data toggle */
		data |= ENDPTCTRL_RXR;
		mask |= ENDPTCTRL_RXE;  /* enable  */
		data |= ENDPTCTRL_RXE;
	}
	hw_cwrite(CAP_ENDPTCTRL + num * sizeof(u32), mask, data);
	return 0;
}

/**
 * hw_ep_get_halt: return endpoint halt status
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * This function returns 1 if endpoint halted
 */
static int hw_ep_get_halt(int num, int dir)
{
	u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;

	return hw_cread(CAP_ENDPTCTRL + num * sizeof(u32), mask) ? 1 : 0;
}

/**
 * hw_test_and_clear_setup_status: test & clear setup status (execute without
 *                                 interruption)
 * @n: bit number (endpoint)
 *
 * This function returns setup status
 */
static int hw_test_and_clear_setup_status(int n)
{
	return hw_ctest_and_clear(CAP_ENDPTSETUPSTAT, BIT(n));
}

/**
 * hw_ep_prime: primes endpoint (execute without interruption)
 * @num:     endpoint number
 * @dir:     endpoint direction
 * @is_ctrl: true if control endpoint
 *
 * This function returns an error code (-EAGAIN if a SETUP arrived
 * before or while priming a control-OUT endpoint)
 */
static int hw_ep_prime(int num, int dir, int is_ctrl)
{
	int n = hw_ep_bit(num, dir);

	if (is_ctrl && dir == RX && hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
		return -EAGAIN;

	hw_cwrite(CAP_ENDPTPRIME, BIT(n), BIT(n));

	while (hw_cread(CAP_ENDPTPRIME, BIT(n)))
		cpu_relax();

	/* re-check: a SETUP may have raced with the prime above */
	if (is_ctrl && dir == RX  && hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
		return -EAGAIN;

	/* status should be tested according with manual but it doesn't work */
	return 0;
}

/**
 * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
 *                 without interruption)
 * @num:   endpoint number
 * @dir:   endpoint direction
 * @value: true => stall, false => unstall
 *
 * This function returns an error code
 */
static int hw_ep_set_halt(int num, int dir, int value)
{
	if (value != 0 && value != 1)
		return -EINVAL;

	do {
		u32 addr = CAP_ENDPTCTRL + num * sizeof(u32);
		u32 mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
		u32 mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;

		/* data toggle - reserved for EP0 but it's in ESS */
		hw_cwrite(addr, mask_xs|mask_xr, value ?
mask_xs : mask_xr); } while (value != hw_ep_get_halt(num, dir)); return 0; } /** * hw_intr_clear: disables interrupt & clears interrupt status (execute without * interruption) * @n: interrupt bit * * This function returns an error code */ static int hw_intr_clear(int n) { if (n >= REG_BITS) return -EINVAL; hw_cwrite(CAP_USBINTR, BIT(n), 0); hw_cwrite(CAP_USBSTS, BIT(n), BIT(n)); return 0; } /** * hw_intr_force: enables interrupt & forces interrupt status (execute without * interruption) * @n: interrupt bit * * This function returns an error code */ static int hw_intr_force(int n) { if (n >= REG_BITS) return -EINVAL; hw_awrite(ABS_TESTMODE, TESTMODE_FORCE, TESTMODE_FORCE); hw_cwrite(CAP_USBINTR, BIT(n), BIT(n)); hw_cwrite(CAP_USBSTS, BIT(n), BIT(n)); hw_awrite(ABS_TESTMODE, TESTMODE_FORCE, 0); return 0; } /** * hw_is_port_high_speed: test if port is high speed * * This function returns true if high speed port */ static int hw_port_is_high_speed(void) { return hw_bank.lpm ? hw_cread(CAP_DEVLC, DEVLC_PSPD) : hw_cread(CAP_PORTSC, PORTSC_HSP); } /** * hw_port_test_get: reads port test mode value * * This function returns port test mode value */ static u8 hw_port_test_get(void) { return hw_cread(CAP_PORTSC, PORTSC_PTC) >> ffs_nr(PORTSC_PTC); } /** * hw_port_test_set: writes port test mode (execute without interruption) * @mode: new value * * This function returns an error code */ static int hw_port_test_set(u8 mode) { const u8 TEST_MODE_MAX = 7; if (mode > TEST_MODE_MAX) return -EINVAL; hw_cwrite(CAP_PORTSC, PORTSC_PTC, mode << ffs_nr(PORTSC_PTC)); return 0; } /** * hw_read_intr_enable: returns interrupt enable register * * This function returns register data */ static u32 hw_read_intr_enable(void) { return hw_cread(CAP_USBINTR, ~0); } /** * hw_read_intr_status: returns interrupt status register * * This function returns register data */ static u32 hw_read_intr_status(void) { return hw_cread(CAP_USBSTS, ~0); } /** * hw_register_read: reads all device registers (execute 
without interruption) * @buf: destination buffer * @size: buffer size * * This function returns number of registers read */ static size_t hw_register_read(u32 *buf, size_t size) { unsigned i; if (size > hw_bank.size) size = hw_bank.size; for (i = 0; i < size; i++) buf[i] = hw_aread(i * sizeof(u32), ~0); return size; } /** * hw_register_write: writes to register * @addr: register address * @data: register value * * This function returns an error code */ static int hw_register_write(u16 addr, u32 data) { /* align */ addr /= sizeof(u32); if (addr >= hw_bank.size) return -EINVAL; /* align */ addr *= sizeof(u32); hw_awrite(addr, ~0, data); return 0; } /** * hw_test_and_clear_complete: test & clear complete status (execute without * interruption) * @n: bit number (endpoint) * * This function returns complete status */ static int hw_test_and_clear_complete(int n) { return hw_ctest_and_clear(CAP_ENDPTCOMPLETE, BIT(n)); } /** * hw_test_and_clear_intr_active: test & clear active interrupts (execute * without interruption) * * This function returns active interrutps */ static u32 hw_test_and_clear_intr_active(void) { u32 reg = hw_read_intr_status() & hw_read_intr_enable(); hw_cwrite(CAP_USBSTS, ~0, reg); return reg; } /** * hw_test_and_clear_setup_guard: test & clear setup guard (execute without * interruption) * * This function returns guard value */ static int hw_test_and_clear_setup_guard(void) { return hw_ctest_and_write(CAP_USBCMD, USBCMD_SUTW, 0); } /** * hw_test_and_set_setup_guard: test & set setup guard (execute without * interruption) * * This function returns guard value */ static int hw_test_and_set_setup_guard(void) { return hw_ctest_and_write(CAP_USBCMD, USBCMD_SUTW, USBCMD_SUTW); } /** * hw_usb_set_address: configures USB address (execute without interruption) * @value: new USB address * * This function returns an error code */ static int hw_usb_set_address(u8 value) { /* advance */ hw_cwrite(CAP_DEVICEADDR, DEVICEADDR_USBADR | DEVICEADDR_USBADRA, value << 
ffs_nr(DEVICEADDR_USBADR) | DEVICEADDR_USBADRA); return 0; } /** * hw_usb_reset: restart device after a bus reset (execute without * interruption) * * This function returns an error code */ static int hw_usb_reset(void) { hw_usb_set_address(0); /* ESS flushes only at end?!? */ hw_cwrite(CAP_ENDPTFLUSH, ~0, ~0); /* flush all EPs */ /* clear setup token semaphores */ hw_cwrite(CAP_ENDPTSETUPSTAT, 0, 0); /* writes its content */ /* clear complete status */ hw_cwrite(CAP_ENDPTCOMPLETE, 0, 0); /* writes its content */ /* wait until all bits cleared */ while (hw_cread(CAP_ENDPTPRIME, ~0)) udelay(10); /* not RTOS friendly */ /* reset all endpoints ? */ /* reset internal status and wait for further instructions no need to verify the port reset status (ESS does it) */ return 0; } /****************************************************************************** * DBG block *****************************************************************************/ /** * show_device: prints information about device capabilities and status * * Check "device.h" for details */ static ssize_t show_device(struct device *dev, struct device_attribute *attr, char *buf) { struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev); struct usb_gadget *gadget = &udc->gadget; int n = 0; dbg_trace("[%s] %p\n", __func__, buf); if (attr == NULL || buf == NULL) { dev_err(dev, "[%s] EINVAL\n", __func__); return 0; } n += scnprintf(buf + n, PAGE_SIZE - n, "speed = %d\n", gadget->speed); n += scnprintf(buf + n, PAGE_SIZE - n, "is_dualspeed = %d\n", gadget->is_dualspeed); n += scnprintf(buf + n, PAGE_SIZE - n, "is_otg = %d\n", gadget->is_otg); n += scnprintf(buf + n, PAGE_SIZE - n, "is_a_peripheral = %d\n", gadget->is_a_peripheral); n += scnprintf(buf + n, PAGE_SIZE - n, "b_hnp_enable = %d\n", gadget->b_hnp_enable); n += scnprintf(buf + n, PAGE_SIZE - n, "a_hnp_support = %d\n", gadget->a_hnp_support); n += scnprintf(buf + n, PAGE_SIZE - n, "a_alt_hnp_support = %d\n", gadget->a_alt_hnp_support); n += 
scnprintf(buf + n, PAGE_SIZE - n, "name = %s\n", (gadget->name ? gadget->name : "")); return n; } static DEVICE_ATTR(device, S_IRUSR, show_device, NULL); /** * show_driver: prints information about attached gadget (if any) * * Check "device.h" for details */ static ssize_t show_driver(struct device *dev, struct device_attribute *attr, char *buf) { struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev); struct usb_gadget_driver *driver = udc->driver; int n = 0; dbg_trace("[%s] %p\n", __func__, buf); if (attr == NULL || buf == NULL) { dev_err(dev, "[%s] EINVAL\n", __func__); return 0; } if (driver == NULL) return scnprintf(buf, PAGE_SIZE, "There is no gadget attached!\n"); n += scnprintf(buf + n, PAGE_SIZE - n, "function = %s\n", (driver->function ? driver->function : "")); n += scnprintf(buf + n, PAGE_SIZE - n, "max speed = %d\n", driver->speed); return n; } static DEVICE_ATTR(driver, S_IRUSR, show_driver, NULL); /* Maximum event message length */ #define DBG_DATA_MSG 64UL /* Maximum event messages */ #define DBG_DATA_MAX 128UL /* Event buffer descriptor */ static struct { char (buf[DBG_DATA_MAX])[DBG_DATA_MSG]; /* buffer */ unsigned idx; /* index */ unsigned tty; /* print to console? 
*/ rwlock_t lck; /* lock */ } dbg_data = { .idx = 0, .tty = 0, .lck = __RW_LOCK_UNLOCKED(lck) }; /** * dbg_dec: decrements debug event index * @idx: buffer index */ static void dbg_dec(unsigned *idx) { *idx = (*idx - 1) & (DBG_DATA_MAX-1); } /** * dbg_inc: increments debug event index * @idx: buffer index */ static void dbg_inc(unsigned *idx) { *idx = (*idx + 1) & (DBG_DATA_MAX-1); } /** * dbg_print: prints the common part of the event * @addr: endpoint address * @name: event name * @status: status * @extra: extra information */ static void dbg_print(u8 addr, const char *name, int status, const char *extra) { struct timeval tval; unsigned int stamp; unsigned long flags; write_lock_irqsave(&dbg_data.lck, flags); do_gettimeofday(&tval); stamp = tval.tv_sec & 0xFFFF; /* 2^32 = 4294967296. Limit to 4096s */ stamp = stamp * 1000000 + tval.tv_usec; scnprintf(dbg_data.buf[dbg_data.idx], DBG_DATA_MSG, "%04X\t» %02X %-7.7s %4i «\t%s\n", stamp, addr, name, status, extra); dbg_inc(&dbg_data.idx); write_unlock_irqrestore(&dbg_data.lck, flags); if (dbg_data.tty != 0) pr_notice("%04X\t» %02X %-7.7s %4i «\t%s\n", stamp, addr, name, status, extra); } /** * dbg_done: prints a DONE event * @addr: endpoint address * @td: transfer descriptor * @status: status */ static void dbg_done(u8 addr, const u32 token, int status) { char msg[DBG_DATA_MSG]; scnprintf(msg, sizeof(msg), "%d %02X", (int)(token & TD_TOTAL_BYTES) >> ffs_nr(TD_TOTAL_BYTES), (int)(token & TD_STATUS) >> ffs_nr(TD_STATUS)); dbg_print(addr, "DONE", status, msg); } /** * dbg_event: prints a generic event * @addr: endpoint address * @name: event name * @status: status */ static void dbg_event(u8 addr, const char *name, int status) { if (name != NULL) dbg_print(addr, name, status, ""); } /* * dbg_queue: prints a QUEUE event * @addr: endpoint address * @req: USB request * @status: status */ static void dbg_queue(u8 addr, const struct usb_request *req, int status) { char msg[DBG_DATA_MSG]; if (req != NULL) { scnprintf(msg, 
sizeof(msg), "%d %d", !req->no_interrupt, req->length); dbg_print(addr, "QUEUE", status, msg); } } /** * dbg_setup: prints a SETUP event * @addr: endpoint address * @req: setup request */ static void dbg_setup(u8 addr, const struct usb_ctrlrequest *req) { char msg[DBG_DATA_MSG]; if (req != NULL) { scnprintf(msg, sizeof(msg), "%02X %02X %04X %04X %d", req->bRequestType, req->bRequest, le16_to_cpu(req->wValue), le16_to_cpu(req->wIndex), le16_to_cpu(req->wLength)); dbg_print(addr, "SETUP", 0, msg); } } /** * show_events: displays the event buffer * * Check "device.h" for details */ static ssize_t show_events(struct device *dev, struct device_attribute *attr, char *buf) { unsigned long flags; unsigned i, j, n = 0; dbg_trace("[%s] %p\n", __func__, buf); if (attr == NULL || buf == NULL) { dev_err(dev, "[%s] EINVAL\n", __func__); return 0; } read_lock_irqsave(&dbg_data.lck, flags); i = dbg_data.idx; for (dbg_dec(&i); i != dbg_data.idx; dbg_dec(&i)) { n += strlen(dbg_data.buf[i]); if (n >= PAGE_SIZE) { n -= strlen(dbg_data.buf[i]); break; } } for (j = 0, dbg_inc(&i); j < n; dbg_inc(&i)) j += scnprintf(buf + j, PAGE_SIZE - j, "%s", dbg_data.buf[i]); read_unlock_irqrestore(&dbg_data.lck, flags); return n; } /** * store_events: configure if events are going to be also printed to console * * Check "device.h" for details */ static ssize_t store_events(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned tty; dbg_trace("[%s] %p, %d\n", __func__, buf, count); if (attr == NULL || buf == NULL) { dev_err(dev, "[%s] EINVAL\n", __func__); goto done; } if (sscanf(buf, "%u", &tty) != 1 || tty > 1) { dev_err(dev, "<1|0>: enable|disable console log\n"); goto done; } dbg_data.tty = tty; dev_info(dev, "tty = %u", dbg_data.tty); done: return count; } static DEVICE_ATTR(events, S_IRUSR | S_IWUSR, show_events, store_events); /** * show_inters: interrupt status, enable status and historic * * Check "device.h" for details */ static ssize_t 
show_inters(struct device *dev, struct device_attribute *attr, char *buf) { struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev); unsigned long flags; u32 intr; unsigned i, j, n = 0; dbg_trace("[%s] %p\n", __func__, buf); if (attr == NULL || buf == NULL) { dev_err(dev, "[%s] EINVAL\n", __func__); return 0; } spin_lock_irqsave(udc->lock, flags); n += scnprintf(buf + n, PAGE_SIZE - n, "status = %08x\n", hw_read_intr_status()); n += scnprintf(buf + n, PAGE_SIZE - n, "enable = %08x\n", hw_read_intr_enable()); n += scnprintf(buf + n, PAGE_SIZE - n, "*test = %d\n", isr_statistics.test); n += scnprintf(buf + n, PAGE_SIZE - n, "» ui = %d\n", isr_statistics.ui); n += scnprintf(buf + n, PAGE_SIZE - n, "» uei = %d\n", isr_statistics.uei); n += scnprintf(buf + n, PAGE_SIZE - n, "» pci = %d\n", isr_statistics.pci); n += scnprintf(buf + n, PAGE_SIZE - n, "» uri = %d\n", isr_statistics.uri); n += scnprintf(buf + n, PAGE_SIZE - n, "» sli = %d\n", isr_statistics.sli); n += scnprintf(buf + n, PAGE_SIZE - n, "*none = %d\n", isr_statistics.none); n += scnprintf(buf + n, PAGE_SIZE - n, "*hndl = %d\n", isr_statistics.hndl.cnt); for (i = isr_statistics.hndl.idx, j = 0; j <= ISR_MASK; j++, i++) { i &= ISR_MASK; intr = isr_statistics.hndl.buf[i]; if (USBi_UI & intr) n += scnprintf(buf + n, PAGE_SIZE - n, "ui "); intr &= ~USBi_UI; if (USBi_UEI & intr) n += scnprintf(buf + n, PAGE_SIZE - n, "uei "); intr &= ~USBi_UEI; if (USBi_PCI & intr) n += scnprintf(buf + n, PAGE_SIZE - n, "pci "); intr &= ~USBi_PCI; if (USBi_URI & intr) n += scnprintf(buf + n, PAGE_SIZE - n, "uri "); intr &= ~USBi_URI; if (USBi_SLI & intr) n += scnprintf(buf + n, PAGE_SIZE - n, "sli "); intr &= ~USBi_SLI; if (intr) n += scnprintf(buf + n, PAGE_SIZE - n, "??? 
"); if (isr_statistics.hndl.buf[i]) n += scnprintf(buf + n, PAGE_SIZE - n, "\n"); } spin_unlock_irqrestore(udc->lock, flags); return n; } /** * store_inters: enable & force or disable an individual interrutps * (to be used for test purposes only) * * Check "device.h" for details */ static ssize_t store_inters(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev); unsigned long flags; unsigned en, bit; dbg_trace("[%s] %p, %d\n", __func__, buf, count); if (attr == NULL || buf == NULL) { dev_err(dev, "[%s] EINVAL\n", __func__); goto done; } if (sscanf(buf, "%u %u", &en, &bit) != 2 || en > 1) { dev_err(dev, "<1|0> <bit>: enable|disable interrupt"); goto done; } spin_lock_irqsave(udc->lock, flags); if (en) { if (hw_intr_force(bit)) dev_err(dev, "invalid bit number\n"); else isr_statistics.test++; } else { if (hw_intr_clear(bit)) dev_err(dev, "invalid bit number\n"); } spin_unlock_irqrestore(udc->lock, flags); done: return count; } static DEVICE_ATTR(inters, S_IRUSR | S_IWUSR, show_inters, store_inters); /** * show_port_test: reads port test mode * * Check "device.h" for details */ static ssize_t show_port_test(struct device *dev, struct device_attribute *attr, char *buf) { struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev); unsigned long flags; unsigned mode; dbg_trace("[%s] %p\n", __func__, buf); if (attr == NULL || buf == NULL) { dev_err(dev, "[%s] EINVAL\n", __func__); return 0; } spin_lock_irqsave(udc->lock, flags); mode = hw_port_test_get(); spin_unlock_irqrestore(udc->lock, flags); return scnprintf(buf, PAGE_SIZE, "mode = %u\n", mode); } /** * store_port_test: writes port test mode * * Check "device.h" for details */ static ssize_t store_port_test(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev); unsigned long flags; unsigned mode; dbg_trace("[%s] 
%p, %d\n", __func__, buf, count); if (attr == NULL || buf == NULL) { dev_err(dev, "[%s] EINVAL\n", __func__); goto done; } if (sscanf(buf, "%u", &mode) != 1) { dev_err(dev, "<mode>: set port test mode"); goto done; } spin_lock_irqsave(udc->lock, flags); if (hw_port_test_set(mode)) dev_err(dev, "invalid mode\n"); spin_unlock_irqrestore(udc->lock, flags); done: return count; } static DEVICE_ATTR(port_test, S_IRUSR | S_IWUSR, show_port_test, store_port_test); /** * show_qheads: DMA contents of all queue heads * * Check "device.h" for details */ static ssize_t show_qheads(struct device *dev, struct device_attribute *attr, char *buf) { struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev); unsigned long flags; unsigned i, j, n = 0; dbg_trace("[%s] %p\n", __func__, buf); if (attr == NULL || buf == NULL) { dev_err(dev, "[%s] EINVAL\n", __func__); return 0; } spin_lock_irqsave(udc->lock, flags); for (i = 0; i < hw_ep_max/2; i++) { struct ci13xxx_ep *mEpRx = &udc->ci13xxx_ep[i]; struct ci13xxx_ep *mEpTx = &udc->ci13xxx_ep[i + hw_ep_max/2]; n += scnprintf(buf + n, PAGE_SIZE - n, "EP=%02i: RX=%08X TX=%08X\n", i, (u32)mEpRx->qh.dma, (u32)mEpTx->qh.dma); for (j = 0; j < (sizeof(struct ci13xxx_qh)/sizeof(u32)); j++) { n += scnprintf(buf + n, PAGE_SIZE - n, " %04X: %08X %08X\n", j, *((u32 *)mEpRx->qh.ptr + j), *((u32 *)mEpTx->qh.ptr + j)); } } spin_unlock_irqrestore(udc->lock, flags); return n; } static DEVICE_ATTR(qheads, S_IRUSR, show_qheads, NULL); /** * show_registers: dumps all registers * * Check "device.h" for details */ static ssize_t show_registers(struct device *dev, struct device_attribute *attr, char *buf) { struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev); unsigned long flags; u32 dump[512]; unsigned i, k, n = 0; dbg_trace("[%s] %p\n", __func__, buf); if (attr == NULL || buf == NULL) { dev_err(dev, "[%s] EINVAL\n", __func__); return 0; } spin_lock_irqsave(udc->lock, flags); k = hw_register_read(dump, sizeof(dump)/sizeof(u32)); 
spin_unlock_irqrestore(udc->lock, flags); for (i = 0; i < k; i++) { n += scnprintf(buf + n, PAGE_SIZE - n, "reg[0x%04X] = 0x%08X\n", i * (unsigned)sizeof(u32), dump[i]); } return n; } /** * store_registers: writes value to register address * * Check "device.h" for details */ static ssize_t store_registers(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev); unsigned long addr, data, flags; dbg_trace("[%s] %p, %d\n", __func__, buf, count); if (attr == NULL || buf == NULL) { dev_err(dev, "[%s] EINVAL\n", __func__); goto done; } if (sscanf(buf, "%li %li", &addr, &data) != 2) { dev_err(dev, "<addr> <data>: write data to register address"); goto done; } spin_lock_irqsave(udc->lock, flags); if (hw_register_write(addr, data)) dev_err(dev, "invalid address range\n"); spin_unlock_irqrestore(udc->lock, flags); done: return count; } static DEVICE_ATTR(registers, S_IRUSR | S_IWUSR, show_registers, store_registers); /** * show_requests: DMA contents of all requests currently queued (all endpts) * * Check "device.h" for details */ static ssize_t show_requests(struct device *dev, struct device_attribute *attr, char *buf) { struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev); unsigned long flags; struct list_head *ptr = NULL; struct ci13xxx_req *req = NULL; unsigned i, j, n = 0, qSize = sizeof(struct ci13xxx_td)/sizeof(u32); dbg_trace("[%s] %p\n", __func__, buf); if (attr == NULL || buf == NULL) { dev_err(dev, "[%s] EINVAL\n", __func__); return 0; } spin_lock_irqsave(udc->lock, flags); for (i = 0; i < hw_ep_max; i++) list_for_each(ptr, &udc->ci13xxx_ep[i].qh.queue) { req = list_entry(ptr, struct ci13xxx_req, queue); n += scnprintf(buf + n, PAGE_SIZE - n, "EP=%02i: TD=%08X %s\n", i % hw_ep_max/2, (u32)req->dma, ((i < hw_ep_max/2) ? 
"RX" : "TX")); for (j = 0; j < qSize; j++) n += scnprintf(buf + n, PAGE_SIZE - n, " %04X: %08X\n", j, *((u32 *)req->ptr + j)); } spin_unlock_irqrestore(udc->lock, flags); return n; } static DEVICE_ATTR(requests, S_IRUSR, show_requests, NULL); /** * dbg_create_files: initializes the attribute interface * @dev: device * * This function returns an error code */ __maybe_unused static int dbg_create_files(struct device *dev) { int retval = 0; if (dev == NULL) return -EINVAL; retval = device_create_file(dev, &dev_attr_device); if (retval) goto done; retval = device_create_file(dev, &dev_attr_driver); if (retval) goto rm_device; retval = device_create_file(dev, &dev_attr_events); if (retval) goto rm_driver; retval = device_create_file(dev, &dev_attr_inters); if (retval) goto rm_events; retval = device_create_file(dev, &dev_attr_port_test); if (retval) goto rm_inters; retval = device_create_file(dev, &dev_attr_qheads); if (retval) goto rm_port_test; retval = device_create_file(dev, &dev_attr_registers); if (retval) goto rm_qheads; retval = device_create_file(dev, &dev_attr_requests); if (retval) goto rm_registers; return 0; rm_registers: device_remove_file(dev, &dev_attr_registers); rm_qheads: device_remove_file(dev, &dev_attr_qheads); rm_port_test: device_remove_file(dev, &dev_attr_port_test); rm_inters: device_remove_file(dev, &dev_attr_inters); rm_events: device_remove_file(dev, &dev_attr_events); rm_driver: device_remove_file(dev, &dev_attr_driver); rm_device: device_remove_file(dev, &dev_attr_device); done: return retval; } /** * dbg_remove_files: destroys the attribute interface * @dev: device * * This function returns an error code */ __maybe_unused static int dbg_remove_files(struct device *dev) { if (dev == NULL) return -EINVAL; device_remove_file(dev, &dev_attr_requests); device_remove_file(dev, &dev_attr_registers); device_remove_file(dev, &dev_attr_qheads); device_remove_file(dev, &dev_attr_port_test); device_remove_file(dev, &dev_attr_inters); 
device_remove_file(dev, &dev_attr_events); device_remove_file(dev, &dev_attr_driver); device_remove_file(dev, &dev_attr_device); return 0; } /****************************************************************************** * UTIL block *****************************************************************************/ /** * _usb_addr: calculates endpoint address from direction & number * @ep: endpoint */ static inline u8 _usb_addr(struct ci13xxx_ep *ep) { return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num; } /** * _hardware_queue: configures a request at hardware level * @gadget: gadget * @mEp: endpoint * * This function returns an error code */ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq) { unsigned i; int ret = 0; unsigned length = mReq->req.length; trace("%p, %p", mEp, mReq); /* don't queue twice */ if (mReq->req.status == -EALREADY) return -EALREADY; mReq->req.status = -EALREADY; if (length && !mReq->req.dma) { mReq->req.dma = \ dma_map_single(mEp->device, mReq->req.buf, length, mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE); if (mReq->req.dma == 0) return -ENOMEM; mReq->map = 1; } if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0)) { mReq->zptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC, &mReq->zdma); if (mReq->zptr == NULL) { if (mReq->map) { dma_unmap_single(mEp->device, mReq->req.dma, length, mEp->dir ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); mReq->req.dma = 0; mReq->map = 0; } return -ENOMEM; } memset(mReq->zptr, 0, sizeof(*mReq->zptr)); mReq->zptr->next = TD_TERMINATE; mReq->zptr->token = TD_STATUS_ACTIVE; if (!mReq->req.no_interrupt) mReq->zptr->token |= TD_IOC; } /* * TD configuration * TODO - handle requests which spawns into several TDs */ memset(mReq->ptr, 0, sizeof(*mReq->ptr)); mReq->ptr->token = length << ffs_nr(TD_TOTAL_BYTES); mReq->ptr->token &= TD_TOTAL_BYTES; mReq->ptr->token |= TD_STATUS_ACTIVE; if (mReq->zptr) { mReq->ptr->next = mReq->zdma; } else { mReq->ptr->next = TD_TERMINATE; if (!mReq->req.no_interrupt) mReq->ptr->token |= TD_IOC; } mReq->ptr->page[0] = mReq->req.dma; for (i = 1; i < 5; i++) mReq->ptr->page[i] = (mReq->req.dma + i * CI13XXX_PAGE_SIZE) & ~TD_RESERVED_MASK; if (!list_empty(&mEp->qh.queue)) { struct ci13xxx_req *mReqPrev; int n = hw_ep_bit(mEp->num, mEp->dir); int tmp_stat; mReqPrev = list_entry(mEp->qh.queue.prev, struct ci13xxx_req, queue); if (mReqPrev->zptr) mReqPrev->zptr->next = mReq->dma & TD_ADDR_MASK; else mReqPrev->ptr->next = mReq->dma & TD_ADDR_MASK; wmb(); if (hw_cread(CAP_ENDPTPRIME, BIT(n))) goto done; do { hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW); tmp_stat = hw_cread(CAP_ENDPTSTAT, BIT(n)); } while (!hw_cread(CAP_USBCMD, USBCMD_ATDTW)); hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, 0); if (tmp_stat) goto done; } /* QH configuration */ mEp->qh.ptr->td.next = mReq->dma; /* TERMINATE = 0 */ mEp->qh.ptr->td.token &= ~TD_STATUS; /* clear status */ mEp->qh.ptr->cap |= QH_ZLT; wmb(); /* synchronize before ep prime */ ret = hw_ep_prime(mEp->num, mEp->dir, mEp->type == USB_ENDPOINT_XFER_CONTROL); done: return ret; } /** * _hardware_dequeue: handles a request at hardware level * @gadget: gadget * @mEp: endpoint * * This function returns an error code */ static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq) { trace("%p, %p", mEp, mReq); if (mReq->req.status != -EALREADY) return -EINVAL; if 
((TD_STATUS_ACTIVE & mReq->ptr->token) != 0) return -EBUSY; if (mReq->zptr) { if ((TD_STATUS_ACTIVE & mReq->zptr->token) != 0) return -EBUSY; dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma); mReq->zptr = NULL; } mReq->req.status = 0; if (mReq->map) { dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length, mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE); mReq->req.dma = 0; mReq->map = 0; } mReq->req.status = mReq->ptr->token & TD_STATUS; if ((TD_STATUS_HALTED & mReq->req.status) != 0) mReq->req.status = -1; else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0) mReq->req.status = -1; else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0) mReq->req.status = -1; mReq->req.actual = mReq->ptr->token & TD_TOTAL_BYTES; mReq->req.actual >>= ffs_nr(TD_TOTAL_BYTES); mReq->req.actual = mReq->req.length - mReq->req.actual; mReq->req.actual = mReq->req.status ? 0 : mReq->req.actual; return mReq->req.actual; } /** * _ep_nuke: dequeues all endpoint requests * @mEp: endpoint * * This function returns an error code * Caller must hold lock */ static int _ep_nuke(struct ci13xxx_ep *mEp) __releases(mEp->lock) __acquires(mEp->lock) { trace("%p", mEp); if (mEp == NULL) return -EINVAL; hw_ep_flush(mEp->num, mEp->dir); while (!list_empty(&mEp->qh.queue)) { /* pop oldest request */ struct ci13xxx_req *mReq = \ list_entry(mEp->qh.queue.next, struct ci13xxx_req, queue); list_del_init(&mReq->queue); mReq->req.status = -ESHUTDOWN; if (mReq->req.complete != NULL) { spin_unlock(mEp->lock); mReq->req.complete(&mEp->ep, &mReq->req); spin_lock(mEp->lock); } } return 0; } /** * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts * @gadget: gadget * * This function returns an error code * Caller must hold lock */ static int _gadget_stop_activity(struct usb_gadget *gadget) { struct usb_ep *ep; struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget); unsigned long flags; trace("%p", gadget); if (gadget == NULL) return -EINVAL; spin_lock_irqsave(udc->lock, flags); 
udc->gadget.speed = USB_SPEED_UNKNOWN; udc->remote_wakeup = 0; udc->suspended = 0; spin_unlock_irqrestore(udc->lock, flags); /* flush all endpoints */ gadget_for_each_ep(ep, gadget) { usb_ep_fifo_flush(ep); } usb_ep_fifo_flush(&udc->ep0out.ep); usb_ep_fifo_flush(&udc->ep0in.ep); udc->driver->disconnect(gadget); /* make sure to disable all endpoints */ gadget_for_each_ep(ep, gadget) { usb_ep_disable(ep); } if (udc->status != NULL) { usb_ep_free_request(&udc->ep0in.ep, udc->status); udc->status = NULL; } return 0; } /****************************************************************************** * ISR block *****************************************************************************/ /** * isr_reset_handler: USB reset interrupt handler * @udc: UDC device * * This function resets USB engine after a bus reset occurred */ static void isr_reset_handler(struct ci13xxx *udc) __releases(udc->lock) __acquires(udc->lock) { int retval; trace("%p", udc); if (udc == NULL) { err("EINVAL"); return; } dbg_event(0xFF, "BUS RST", 0); spin_unlock(udc->lock); retval = _gadget_stop_activity(&udc->gadget); if (retval) goto done; retval = hw_usb_reset(); if (retval) goto done; udc->status = usb_ep_alloc_request(&udc->ep0in.ep, GFP_ATOMIC); if (udc->status == NULL) retval = -ENOMEM; spin_lock(udc->lock); done: if (retval) err("error: %i", retval); } /** * isr_get_status_complete: get_status request complete function * @ep: endpoint * @req: request handled * * Caller must release lock */ static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req) { trace("%p, %p", ep, req); if (ep == NULL || req == NULL) { err("EINVAL"); return; } kfree(req->buf); usb_ep_free_request(ep, req); } /** * isr_get_status_response: get_status request response * @udc: udc struct * @setup: setup request packet * * This function returns an error code */ static int isr_get_status_response(struct ci13xxx *udc, struct usb_ctrlrequest *setup) __releases(mEp->lock) __acquires(mEp->lock) { struct 
ci13xxx_ep *mEp = &udc->ep0in; struct usb_request *req = NULL; gfp_t gfp_flags = GFP_ATOMIC; int dir, num, retval; trace("%p, %p", mEp, setup); if (mEp == NULL || setup == NULL) return -EINVAL; spin_unlock(mEp->lock); req = usb_ep_alloc_request(&mEp->ep, gfp_flags); spin_lock(mEp->lock); if (req == NULL) return -ENOMEM; req->complete = isr_get_status_complete; req->length = 2; req->buf = kzalloc(req->length, gfp_flags); if (req->buf == NULL) { retval = -ENOMEM; goto err_free_req; } if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) { /* Assume that device is bus powered for now. */ *((u16 *)req->buf) = _udc->remote_wakeup << 1; retval = 0; } else if ((setup->bRequestType & USB_RECIP_MASK) \ == USB_RECIP_ENDPOINT) { dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ? TX : RX; num = le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK; *((u16 *)req->buf) = hw_ep_get_halt(num, dir); } /* else do nothing; reserved for future use */ spin_unlock(mEp->lock); retval = usb_ep_queue(&mEp->ep, req, gfp_flags); spin_lock(mEp->lock); if (retval) goto err_free_buf; return 0; err_free_buf: kfree(req->buf); err_free_req: spin_unlock(mEp->lock); usb_ep_free_request(&mEp->ep, req); spin_lock(mEp->lock); return retval; } /** * isr_setup_status_complete: setup_status request complete function * @ep: endpoint * @req: request handled * * Caller must release lock. Put the port in test mode if test mode * feature is selected. 
*/ static void isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req) { struct ci13xxx *udc = req->context; unsigned long flags; trace("%p, %p", ep, req); spin_lock_irqsave(udc->lock, flags); if (udc->test_mode) hw_port_test_set(udc->test_mode); spin_unlock_irqrestore(udc->lock, flags); } /** * isr_setup_status_phase: queues the status phase of a setup transation * @udc: udc struct * * This function returns an error code */ static int isr_setup_status_phase(struct ci13xxx *udc) __releases(mEp->lock) __acquires(mEp->lock) { int retval; struct ci13xxx_ep *mEp; trace("%p", udc); mEp = (udc->ep0_dir == TX) ? &udc->ep0out : &udc->ep0in; udc->status->context = udc; udc->status->complete = isr_setup_status_complete; spin_unlock(mEp->lock); retval = usb_ep_queue(&mEp->ep, udc->status, GFP_ATOMIC); spin_lock(mEp->lock); return retval; } /** * isr_tr_complete_low: transaction complete low level handler * @mEp: endpoint * * This function returns an error code * Caller must hold lock */ static int isr_tr_complete_low(struct ci13xxx_ep *mEp) __releases(mEp->lock) __acquires(mEp->lock) { struct ci13xxx_req *mReq, *mReqTemp; struct ci13xxx_ep *mEpTemp = mEp; int uninitialized_var(retval); trace("%p", mEp); if (list_empty(&mEp->qh.queue)) return -EINVAL; list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue, queue) { retval = _hardware_dequeue(mEp, mReq); if (retval < 0) break; list_del_init(&mReq->queue); dbg_done(_usb_addr(mEp), mReq->ptr->token, retval); if (mReq->req.complete != NULL) { spin_unlock(mEp->lock); if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) && mReq->req.length) mEpTemp = &_udc->ep0in; mReq->req.complete(&mEpTemp->ep, &mReq->req); spin_lock(mEp->lock); } } if (retval == -EBUSY) retval = 0; if (retval < 0) dbg_event(_usb_addr(mEp), "DONE", retval); return retval; } /** * isr_tr_complete_handler: transaction complete interrupt handler * @udc: UDC descriptor * * This function handles traffic events */ static void isr_tr_complete_handler(struct 
ci13xxx *udc) __releases(udc->lock) __acquires(udc->lock) { unsigned i; u8 tmode = 0; trace("%p", udc); if (udc == NULL) { err("EINVAL"); return; } for (i = 0; i < hw_ep_max; i++) { struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i]; int type, num, dir, err = -EINVAL; struct usb_ctrlrequest req; if (mEp->desc == NULL) continue; /* not configured */ if (hw_test_and_clear_complete(i)) { err = isr_tr_complete_low(mEp); if (mEp->type == USB_ENDPOINT_XFER_CONTROL) { if (err > 0) /* needs status phase */ err = isr_setup_status_phase(udc); if (err < 0) { dbg_event(_usb_addr(mEp), "ERROR", err); spin_unlock(udc->lock); if (usb_ep_set_halt(&mEp->ep)) err("error: ep_set_halt"); spin_lock(udc->lock); } } } if (mEp->type != USB_ENDPOINT_XFER_CONTROL || !hw_test_and_clear_setup_status(i)) continue; if (i != 0) { warn("ctrl traffic received at endpoint"); continue; } /* * Flush data and handshake transactions of previous * setup packet. */ _ep_nuke(&udc->ep0out); _ep_nuke(&udc->ep0in); /* read_setup_packet */ do { hw_test_and_set_setup_guard(); memcpy(&req, &mEp->qh.ptr->setup, sizeof(req)); } while (!hw_test_and_clear_setup_guard()); type = req.bRequestType; udc->ep0_dir = (type & USB_DIR_IN) ? 
TX : RX; dbg_setup(_usb_addr(mEp), &req); switch (req.bRequest) { case USB_REQ_CLEAR_FEATURE: if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) && le16_to_cpu(req.wValue) == USB_ENDPOINT_HALT) { if (req.wLength != 0) break; num = le16_to_cpu(req.wIndex); dir = num & USB_ENDPOINT_DIR_MASK; num &= USB_ENDPOINT_NUMBER_MASK; if (dir) /* TX */ num += hw_ep_max/2; if (!udc->ci13xxx_ep[num].wedge) { spin_unlock(udc->lock); err = usb_ep_clear_halt( &udc->ci13xxx_ep[num].ep); spin_lock(udc->lock); if (err) break; } err = isr_setup_status_phase(udc); } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) && le16_to_cpu(req.wValue) == USB_DEVICE_REMOTE_WAKEUP) { if (req.wLength != 0) break; udc->remote_wakeup = 0; err = isr_setup_status_phase(udc); } else { goto delegate; } break; case USB_REQ_GET_STATUS: if (type != (USB_DIR_IN|USB_RECIP_DEVICE) && type != (USB_DIR_IN|USB_RECIP_ENDPOINT) && type != (USB_DIR_IN|USB_RECIP_INTERFACE)) goto delegate; if (le16_to_cpu(req.wLength) != 2 || le16_to_cpu(req.wValue) != 0) break; err = isr_get_status_response(udc, &req); break; case USB_REQ_SET_ADDRESS: if (type != (USB_DIR_OUT|USB_RECIP_DEVICE)) goto delegate; if (le16_to_cpu(req.wLength) != 0 || le16_to_cpu(req.wIndex) != 0) break; err = hw_usb_set_address((u8)le16_to_cpu(req.wValue)); if (err) break; err = isr_setup_status_phase(udc); break; case USB_REQ_SET_FEATURE: if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) && le16_to_cpu(req.wValue) == USB_ENDPOINT_HALT) { if (req.wLength != 0) break; num = le16_to_cpu(req.wIndex); dir = num & USB_ENDPOINT_DIR_MASK; num &= USB_ENDPOINT_NUMBER_MASK; if (dir) /* TX */ num += hw_ep_max/2; spin_unlock(udc->lock); err = usb_ep_set_halt(&udc->ci13xxx_ep[num].ep); spin_lock(udc->lock); if (!err) isr_setup_status_phase(udc); } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) { if (req.wLength != 0) break; switch (le16_to_cpu(req.wValue)) { case USB_DEVICE_REMOTE_WAKEUP: udc->remote_wakeup = 1; err = isr_setup_status_phase(udc); break; case USB_DEVICE_TEST_MODE: 
tmode = le16_to_cpu(req.wIndex) >> 8; switch (tmode) { case TEST_J: case TEST_K: case TEST_SE0_NAK: case TEST_PACKET: case TEST_FORCE_EN: udc->test_mode = tmode; err = isr_setup_status_phase( udc); break; default: break; } default: goto delegate; } } else { goto delegate; } break; default: delegate: if (req.wLength == 0) /* no data phase */ udc->ep0_dir = TX; spin_unlock(udc->lock); err = udc->driver->setup(&udc->gadget, &req); spin_lock(udc->lock); break; } if (err < 0) { dbg_event(_usb_addr(mEp), "ERROR", err); spin_unlock(udc->lock); if (usb_ep_set_halt(&mEp->ep)) err("error: ep_set_halt"); spin_lock(udc->lock); } } } /****************************************************************************** * ENDPT block *****************************************************************************/ /** * ep_enable: configure endpoint, making it usable * * Check usb_ep_enable() at "usb_gadget.h" for details */ static int ep_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc) { struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep); int retval = 0; unsigned long flags; trace("%p, %p", ep, desc); if (ep == NULL || desc == NULL) return -EINVAL; spin_lock_irqsave(mEp->lock, flags); /* only internal SW should enable ctrl endpts */ mEp->desc = desc; if (!list_empty(&mEp->qh.queue)) warn("enabling a non-empty endpoint!"); mEp->dir = usb_endpoint_dir_in(desc) ? TX : RX; mEp->num = usb_endpoint_num(desc); mEp->type = usb_endpoint_type(desc); mEp->ep.maxpacket = __constant_le16_to_cpu(desc->wMaxPacketSize); dbg_event(_usb_addr(mEp), "ENABLE", 0); mEp->qh.ptr->cap = 0; if (mEp->type == USB_ENDPOINT_XFER_CONTROL) mEp->qh.ptr->cap |= QH_IOS; else if (mEp->type == USB_ENDPOINT_XFER_ISOC) mEp->qh.ptr->cap &= ~QH_MULT; else mEp->qh.ptr->cap &= ~QH_ZLT; mEp->qh.ptr->cap |= (mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT; mEp->qh.ptr->td.next |= TD_TERMINATE; /* needed? 
*/ /* * Enable endpoints in the HW other than ep0 as ep0 * is always enabled */ if (mEp->num) retval |= hw_ep_enable(mEp->num, mEp->dir, mEp->type); spin_unlock_irqrestore(mEp->lock, flags); return retval; } /** * ep_disable: endpoint is no longer usable * * Check usb_ep_disable() at "usb_gadget.h" for details */ static int ep_disable(struct usb_ep *ep) { struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep); int direction, retval = 0; unsigned long flags; trace("%p", ep); if (ep == NULL) return -EINVAL; else if (mEp->desc == NULL) return -EBUSY; spin_lock_irqsave(mEp->lock, flags); /* only internal SW should disable ctrl endpts */ direction = mEp->dir; do { dbg_event(_usb_addr(mEp), "DISABLE", 0); retval |= _ep_nuke(mEp); retval |= hw_ep_disable(mEp->num, mEp->dir); if (mEp->type == USB_ENDPOINT_XFER_CONTROL) mEp->dir = (mEp->dir == TX) ? RX : TX; } while (mEp->dir != direction); mEp->desc = NULL; spin_unlock_irqrestore(mEp->lock, flags); return retval; } /** * ep_alloc_request: allocate a request object to use with this endpoint * * Check usb_ep_alloc_request() at "usb_gadget.h" for details */ static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) { struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep); struct ci13xxx_req *mReq = NULL; trace("%p, %i", ep, gfp_flags); if (ep == NULL) { err("EINVAL"); return NULL; } mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags); if (mReq != NULL) { INIT_LIST_HEAD(&mReq->queue); mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags, &mReq->dma); if (mReq->ptr == NULL) { kfree(mReq); mReq = NULL; } } dbg_event(_usb_addr(mEp), "ALLOC", mReq == NULL); return (mReq == NULL) ? 
NULL : &mReq->req; } /** * ep_free_request: frees a request object * * Check usb_ep_free_request() at "usb_gadget.h" for details */ static void ep_free_request(struct usb_ep *ep, struct usb_request *req) { struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep); struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req); unsigned long flags; trace("%p, %p", ep, req); if (ep == NULL || req == NULL) { err("EINVAL"); return; } else if (!list_empty(&mReq->queue)) { err("EBUSY"); return; } spin_lock_irqsave(mEp->lock, flags); if (mReq->ptr) dma_pool_free(mEp->td_pool, mReq->ptr, mReq->dma); kfree(mReq); dbg_event(_usb_addr(mEp), "FREE", 0); spin_unlock_irqrestore(mEp->lock, flags); } /** * ep_queue: queues (submits) an I/O request to an endpoint * * Check usb_ep_queue()* at usb_gadget.h" for details */ static int ep_queue(struct usb_ep *ep, struct usb_request *req, gfp_t __maybe_unused gfp_flags) { struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep); struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req); int retval = 0; unsigned long flags; trace("%p, %p, %X", ep, req, gfp_flags); if (ep == NULL || req == NULL || mEp->desc == NULL) return -EINVAL; spin_lock_irqsave(mEp->lock, flags); if (mEp->type == USB_ENDPOINT_XFER_CONTROL) { if (req->length) mEp = (_udc->ep0_dir == RX) ? &_udc->ep0out : &_udc->ep0in; if (!list_empty(&mEp->qh.queue)) { _ep_nuke(mEp); retval = -EOVERFLOW; warn("endpoint ctrl %X nuked", _usb_addr(mEp)); } } /* first nuke then test link, e.g. 
previous status has not sent */ if (!list_empty(&mReq->queue)) { retval = -EBUSY; err("request already in queue"); goto done; } if (req->length > (4 * CI13XXX_PAGE_SIZE)) { req->length = (4 * CI13XXX_PAGE_SIZE); retval = -EMSGSIZE; warn("request length truncated"); } dbg_queue(_usb_addr(mEp), req, retval); /* push request */ mReq->req.status = -EINPROGRESS; mReq->req.actual = 0; retval = _hardware_enqueue(mEp, mReq); if (retval == -EALREADY) { dbg_event(_usb_addr(mEp), "QUEUE", retval); retval = 0; } if (!retval) list_add_tail(&mReq->queue, &mEp->qh.queue); done: spin_unlock_irqrestore(mEp->lock, flags); return retval; } /** * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint * * Check usb_ep_dequeue() at "usb_gadget.h" for details */ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req) { struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep); struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req); unsigned long flags; trace("%p, %p", ep, req); if (ep == NULL || req == NULL || mReq->req.status != -EALREADY || mEp->desc == NULL || list_empty(&mReq->queue) || list_empty(&mEp->qh.queue)) return -EINVAL; spin_lock_irqsave(mEp->lock, flags); dbg_event(_usb_addr(mEp), "DEQUEUE", 0); hw_ep_flush(mEp->num, mEp->dir); /* pop request */ list_del_init(&mReq->queue); if (mReq->map) { dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length, mEp->dir ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); mReq->req.dma = 0; mReq->map = 0; } req->status = -ECONNRESET; if (mReq->req.complete != NULL) { spin_unlock(mEp->lock); mReq->req.complete(&mEp->ep, &mReq->req); spin_lock(mEp->lock); } spin_unlock_irqrestore(mEp->lock, flags); return 0; } /** * ep_set_halt: sets the endpoint halt feature * * Check usb_ep_set_halt() at "usb_gadget.h" for details */ static int ep_set_halt(struct usb_ep *ep, int value) { struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep); int direction, retval = 0; unsigned long flags; trace("%p, %i", ep, value); if (ep == NULL || mEp->desc == NULL) return -EINVAL; spin_lock_irqsave(mEp->lock, flags); #ifndef STALL_IN /* g_file_storage MS compliant but g_zero fails chapter 9 compliance */ if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX && !list_empty(&mEp->qh.queue)) { spin_unlock_irqrestore(mEp->lock, flags); return -EAGAIN; } #endif direction = mEp->dir; do { dbg_event(_usb_addr(mEp), "HALT", value); retval |= hw_ep_set_halt(mEp->num, mEp->dir, value); if (!value) mEp->wedge = 0; if (mEp->type == USB_ENDPOINT_XFER_CONTROL) mEp->dir = (mEp->dir == TX) ? 
RX : TX; } while (mEp->dir != direction); spin_unlock_irqrestore(mEp->lock, flags); return retval; } /** * ep_set_wedge: sets the halt feature and ignores clear requests * * Check usb_ep_set_wedge() at "usb_gadget.h" for details */ static int ep_set_wedge(struct usb_ep *ep) { struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep); unsigned long flags; trace("%p", ep); if (ep == NULL || mEp->desc == NULL) return -EINVAL; spin_lock_irqsave(mEp->lock, flags); dbg_event(_usb_addr(mEp), "WEDGE", 0); mEp->wedge = 1; spin_unlock_irqrestore(mEp->lock, flags); return usb_ep_set_halt(ep); } /** * ep_fifo_flush: flushes contents of a fifo * * Check usb_ep_fifo_flush() at "usb_gadget.h" for details */ static void ep_fifo_flush(struct usb_ep *ep) { struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep); unsigned long flags; trace("%p", ep); if (ep == NULL) { err("%02X: -EINVAL", _usb_addr(mEp)); return; } spin_lock_irqsave(mEp->lock, flags); dbg_event(_usb_addr(mEp), "FFLUSH", 0); hw_ep_flush(mEp->num, mEp->dir); spin_unlock_irqrestore(mEp->lock, flags); } /** * Endpoint-specific part of the API to the USB controller hardware * Check "usb_gadget.h" for details */ static const struct usb_ep_ops usb_ep_ops = { .enable = ep_enable, .disable = ep_disable, .alloc_request = ep_alloc_request, .free_request = ep_free_request, .queue = ep_queue, .dequeue = ep_dequeue, .set_halt = ep_set_halt, .set_wedge = ep_set_wedge, .fifo_flush = ep_fifo_flush, }; /****************************************************************************** * GADGET block *****************************************************************************/ static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active) { struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget); unsigned long flags; int gadget_ready = 0; if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS)) return -EOPNOTSUPP; spin_lock_irqsave(udc->lock, flags); udc->vbus_active = is_active; if (udc->driver) 
gadget_ready = 1; spin_unlock_irqrestore(udc->lock, flags); if (gadget_ready) { if (is_active) { pm_runtime_get_sync(&_gadget->dev); hw_device_reset(udc); hw_device_state(udc->ep0out.qh.dma); } else { hw_device_state(0); if (udc->udc_driver->notify_event) udc->udc_driver->notify_event(udc, CI13XXX_CONTROLLER_STOPPED_EVENT); _gadget_stop_activity(&udc->gadget); pm_runtime_put_sync(&_gadget->dev); } } return 0; } static int ci13xxx_wakeup(struct usb_gadget *_gadget) { struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget); unsigned long flags; int ret = 0; trace(); spin_lock_irqsave(udc->lock, flags); if (!udc->remote_wakeup) { ret = -EOPNOTSUPP; dbg_trace("remote wakeup feature is not enabled\n"); goto out; } if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) { ret = -EINVAL; dbg_trace("port is not suspended\n"); goto out; } hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR); out: spin_unlock_irqrestore(udc->lock, flags); return ret; } static int ci13xxx_vbus_draw(struct usb_gadget *_gadget, unsigned mA) { struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget); if (udc->transceiver) return otg_set_power(udc->transceiver, mA); return -ENOTSUPP; } /** * Device operations part of the API to the USB controller hardware, * which don't involve endpoints (or i/o) * Check "usb_gadget.h" for details */ static const struct usb_gadget_ops usb_gadget_ops = { .vbus_session = ci13xxx_vbus_session, .wakeup = ci13xxx_wakeup, .vbus_draw = ci13xxx_vbus_draw, }; /** * usb_gadget_probe_driver: register a gadget driver * @driver: the driver being registered * @bind: the driver's bind callback * * Check usb_gadget_probe_driver() at <linux/usb/gadget.h> for details. * Interrupts are enabled here. 
*/ int usb_gadget_probe_driver(struct usb_gadget_driver *driver, int (*bind)(struct usb_gadget *)) { struct ci13xxx *udc = _udc; unsigned long flags; int i, j; int retval = -ENOMEM; trace("%p", driver); if (driver == NULL || bind == NULL || driver->setup == NULL || driver->disconnect == NULL || driver->suspend == NULL || driver->resume == NULL) return -EINVAL; else if (udc == NULL) return -ENODEV; else if (udc->driver != NULL) return -EBUSY; /* alloc resources */ udc->qh_pool = dma_pool_create("ci13xxx_qh", &udc->gadget.dev, sizeof(struct ci13xxx_qh), 64, CI13XXX_PAGE_SIZE); if (udc->qh_pool == NULL) return -ENOMEM; udc->td_pool = dma_pool_create("ci13xxx_td", &udc->gadget.dev, sizeof(struct ci13xxx_td), 64, CI13XXX_PAGE_SIZE); if (udc->td_pool == NULL) { dma_pool_destroy(udc->qh_pool); udc->qh_pool = NULL; return -ENOMEM; } spin_lock_irqsave(udc->lock, flags); info("hw_ep_max = %d", hw_ep_max); udc->gadget.dev.driver = NULL; retval = 0; for (i = 0; i < hw_ep_max/2; i++) { for (j = RX; j <= TX; j++) { int k = i + j * hw_ep_max/2; struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[k]; scnprintf(mEp->name, sizeof(mEp->name), "ep%i%s", i, (j == TX) ? 
"in" : "out"); mEp->lock = udc->lock; mEp->device = &udc->gadget.dev; mEp->td_pool = udc->td_pool; mEp->ep.name = mEp->name; mEp->ep.ops = &usb_ep_ops; mEp->ep.maxpacket = CTRL_PAYLOAD_MAX; INIT_LIST_HEAD(&mEp->qh.queue); spin_unlock_irqrestore(udc->lock, flags); mEp->qh.ptr = dma_pool_alloc(udc->qh_pool, GFP_KERNEL, &mEp->qh.dma); spin_lock_irqsave(udc->lock, flags); if (mEp->qh.ptr == NULL) retval = -ENOMEM; else memset(mEp->qh.ptr, 0, sizeof(*mEp->qh.ptr)); /* skip ep0 out and in endpoints */ if (i == 0) continue; list_add_tail(&mEp->ep.ep_list, &udc->gadget.ep_list); } } if (retval) goto done; spin_unlock_irqrestore(udc->lock, flags); retval = usb_ep_enable(&udc->ep0out.ep, &ctrl_endpt_out_desc); if (retval) return retval; retval = usb_ep_enable(&udc->ep0in.ep, &ctrl_endpt_in_desc); if (retval) return retval; spin_lock_irqsave(udc->lock, flags); udc->gadget.ep0 = &udc->ep0in.ep; /* bind gadget */ driver->driver.bus = NULL; udc->gadget.dev.driver = &driver->driver; spin_unlock_irqrestore(udc->lock, flags); retval = bind(&udc->gadget); /* MAY SLEEP */ spin_lock_irqsave(udc->lock, flags); if (retval) { udc->gadget.dev.driver = NULL; goto done; } udc->driver = driver; pm_runtime_get_sync(&udc->gadget.dev); if (udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) { if (udc->vbus_active) { if (udc->udc_driver->flags & CI13XXX_REGS_SHARED) hw_device_reset(udc); } else { pm_runtime_put_sync(&udc->gadget.dev); goto done; } } retval = hw_device_state(udc->ep0out.qh.dma); if (retval) pm_runtime_put_sync(&udc->gadget.dev); done: spin_unlock_irqrestore(udc->lock, flags); return retval; } EXPORT_SYMBOL(usb_gadget_probe_driver); /** * usb_gadget_unregister_driver: unregister a gadget driver * * Check usb_gadget_unregister_driver() at "usb_gadget.h" for details */ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) { struct ci13xxx *udc = _udc; unsigned long i, flags; trace("%p", driver); if (driver == NULL || driver->unbind == NULL || driver->setup == NULL || 
driver->disconnect == NULL || driver->suspend == NULL || driver->resume == NULL || driver != udc->driver) return -EINVAL; spin_lock_irqsave(udc->lock, flags); if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) || udc->vbus_active) { hw_device_state(0); if (udc->udc_driver->notify_event) udc->udc_driver->notify_event(udc, CI13XXX_CONTROLLER_STOPPED_EVENT); _gadget_stop_activity(&udc->gadget); pm_runtime_put(&udc->gadget.dev); } /* unbind gadget */ spin_unlock_irqrestore(udc->lock, flags); driver->unbind(&udc->gadget); /* MAY SLEEP */ spin_lock_irqsave(udc->lock, flags); udc->gadget.dev.driver = NULL; /* free resources */ for (i = 0; i < hw_ep_max; i++) { struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i]; if (!list_empty(&mEp->ep.ep_list)) list_del_init(&mEp->ep.ep_list); if (mEp->qh.ptr != NULL) dma_pool_free(udc->qh_pool, mEp->qh.ptr, mEp->qh.dma); } udc->gadget.ep0 = NULL; udc->driver = NULL; spin_unlock_irqrestore(udc->lock, flags); if (udc->td_pool != NULL) { dma_pool_destroy(udc->td_pool); udc->td_pool = NULL; } if (udc->qh_pool != NULL) { dma_pool_destroy(udc->qh_pool); udc->qh_pool = NULL; } return 0; } EXPORT_SYMBOL(usb_gadget_unregister_driver); /****************************************************************************** * BUS block *****************************************************************************/ /** * udc_irq: global interrupt handler * * This function returns IRQ_HANDLED if the IRQ has been handled * It locks access to registers */ static irqreturn_t udc_irq(void) { struct ci13xxx *udc = _udc; irqreturn_t retval; u32 intr; trace(); if (udc == NULL) { err("ENODEV"); return IRQ_HANDLED; } spin_lock(udc->lock); if (udc->udc_driver->flags & CI13XXX_REGS_SHARED) { if (hw_cread(CAP_USBMODE, USBMODE_CM) != USBMODE_CM_DEVICE) { spin_unlock(udc->lock); return IRQ_NONE; } } intr = hw_test_and_clear_intr_active(); if (intr) { isr_statistics.hndl.buf[isr_statistics.hndl.idx++] = intr; isr_statistics.hndl.idx &= ISR_MASK; isr_statistics.hndl.cnt++; /* 
order defines priority - do NOT change it */ if (USBi_URI & intr) { isr_statistics.uri++; isr_reset_handler(udc); } if (USBi_PCI & intr) { isr_statistics.pci++; udc->gadget.speed = hw_port_is_high_speed() ? USB_SPEED_HIGH : USB_SPEED_FULL; if (udc->suspended) { spin_unlock(udc->lock); udc->driver->resume(&udc->gadget); spin_lock(udc->lock); udc->suspended = 0; } } if (USBi_UEI & intr) isr_statistics.uei++; if (USBi_UI & intr) { isr_statistics.ui++; isr_tr_complete_handler(udc); } if (USBi_SLI & intr) { if (udc->gadget.speed != USB_SPEED_UNKNOWN) { udc->suspended = 1; spin_unlock(udc->lock); udc->driver->suspend(&udc->gadget); spin_lock(udc->lock); } isr_statistics.sli++; } retval = IRQ_HANDLED; } else { isr_statistics.none++; retval = IRQ_NONE; } spin_unlock(udc->lock); return retval; } /** * udc_release: driver release function * @dev: device * * Currently does nothing */ static void udc_release(struct device *dev) { trace("%p", dev); if (dev == NULL) err("EINVAL"); } /** * udc_probe: parent probe must call this to initialize UDC * @dev: parent device * @regs: registers base address * @name: driver name * * This function returns an error code * No interrupts active, the IRQ has not been requested yet * Kernel assumes 32-bit DMA operations by default, no need to dma_set_mask */ static int udc_probe(struct ci13xxx_udc_driver *driver, struct device *dev, void __iomem *regs) { struct ci13xxx *udc; int retval = 0; trace("%p, %p, %p", dev, regs, name); if (dev == NULL || regs == NULL || driver == NULL || driver->name == NULL) return -EINVAL; udc = kzalloc(sizeof(struct ci13xxx), GFP_KERNEL); if (udc == NULL) return -ENOMEM; udc->lock = &udc_lock; udc->regs = regs; udc->udc_driver = driver; udc->gadget.ops = &usb_gadget_ops; udc->gadget.speed = USB_SPEED_UNKNOWN; udc->gadget.is_dualspeed = 1; udc->gadget.is_otg = 0; udc->gadget.name = driver->name; INIT_LIST_HEAD(&udc->gadget.ep_list); udc->gadget.ep0 = NULL; dev_set_name(&udc->gadget.dev, "gadget"); 
udc->gadget.dev.dma_mask = dev->dma_mask; udc->gadget.dev.coherent_dma_mask = dev->coherent_dma_mask; udc->gadget.dev.parent = dev; udc->gadget.dev.release = udc_release; retval = hw_device_init(regs); if (retval < 0) goto free_udc; udc->transceiver = otg_get_transceiver(); if (udc->udc_driver->flags & CI13XXX_REQUIRE_TRANSCEIVER) { if (udc->transceiver == NULL) { retval = -ENODEV; goto free_udc; } } if (!(udc->udc_driver->flags & CI13XXX_REGS_SHARED)) { retval = hw_device_reset(udc); if (retval) goto put_transceiver; } retval = device_register(&udc->gadget.dev); if (retval) { put_device(&udc->gadget.dev); goto put_transceiver; } #ifdef CONFIG_USB_GADGET_DEBUG_FILES retval = dbg_create_files(&udc->gadget.dev); #endif if (retval) goto unreg_device; if (udc->transceiver) { retval = otg_set_peripheral(udc->transceiver, &udc->gadget); if (retval) goto remove_dbg; } pm_runtime_no_callbacks(&udc->gadget.dev); pm_runtime_enable(&udc->gadget.dev); _udc = udc; return retval; err("error = %i", retval); remove_dbg: #ifdef CONFIG_USB_GADGET_DEBUG_FILES dbg_remove_files(&udc->gadget.dev); #endif unreg_device: device_unregister(&udc->gadget.dev); put_transceiver: if (udc->transceiver) otg_put_transceiver(udc->transceiver); free_udc: kfree(udc); _udc = NULL; return retval; } /** * udc_remove: parent remove must call this to remove UDC * * No interrupts active, the IRQ has been released */ static void udc_remove(void) { struct ci13xxx *udc = _udc; if (udc == NULL) { err("EINVAL"); return; } if (udc->transceiver) { otg_set_peripheral(udc->transceiver, &udc->gadget); otg_put_transceiver(udc->transceiver); } #ifdef CONFIG_USB_GADGET_DEBUG_FILES dbg_remove_files(&udc->gadget.dev); #endif device_unregister(&udc->gadget.dev); kfree(udc); _udc = NULL; }
CyanogenMod/android_kernel_samsung_aries
drivers/usb/gadget/ci13xxx_udc.c
C
gpl-2.0
72,589
/* -----------------------------------------------------------------------------
 * Copyright (c) 2011 Ozmo Inc
 * Released under the GNU General Public License Version 2 (GPLv2).
 * -----------------------------------------------------------------------------
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/errno.h>
#include <linux/ieee80211.h>
#include "ozconfig.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozcdev.h"
#include "oztrace.h"
#include "ozevent.h"
/*------------------------------------------------------------------------------
 * The name of the 802.11 mac device. Empty string is the default value but a
 * value can be supplied as a parameter to the module. An empty string means
 * bind to nothing. '*' means bind to all netcards - this includes non-802.11
 * netcards. Bindings can be added later using an IOCTL.
 */
static char *g_net_dev = "";
/*------------------------------------------------------------------------------
 * Module entry point: brings up event tracing, the character device, the
 * Ozmo protocol stack (bound to g_net_dev), the USB application, and the
 * optional debugfs interface, in that order.
 * Context: process
 */
static int __init ozwpan_init(void)
{
	oz_event_init();
	/* NOTE(review): the result of oz_cdev_register() is ignored here, so a
	 * character-device registration failure is silently swallowed and init
	 * still reports success — verify against ozcdev.h whether it can fail. */
	oz_cdev_register();
	oz_protocol_init(g_net_dev);
	/* Enable the USB application before initialising the apps layer. */
	oz_app_enable(OZ_APPID_USB, 1);
	oz_apps_init();
#ifdef CONFIG_DEBUG_FS
	oz_debugfs_init();
#endif
	return 0;
}
/*------------------------------------------------------------------------------
 * Module exit point: tears down in roughly the reverse order of ozwpan_init()
 * (protocol and apps first, then the character device and event tracing).
 * Context: process
 */
static void __exit ozwpan_exit(void)
{
	oz_protocol_term();
	oz_apps_term();
	oz_cdev_deregister();
	oz_event_term();
#ifdef CONFIG_DEBUG_FS
	oz_debugfs_remove();
#endif
}
/*------------------------------------------------------------------------------
 */
/* Read-only module parameter selecting the netdev binding (see comment on
 * g_net_dev above). */
module_param(g_net_dev, charp, S_IRUGO);
module_init(ozwpan_init);
module_exit(ozwpan_exit);
MODULE_AUTHOR("Chris Kelly");
MODULE_DESCRIPTION("Ozmo Devices USB over WiFi hcd driver");
MODULE_VERSION("1.0.13");
MODULE_LICENSE("GPL");
boa19861105/android_kernel_htc_b3uhl-JP
drivers/staging/ozwpan/ozmain.c
C
gpl-2.0
1,897
/* * xfrm6_policy.c: based on xfrm4_policy.c * * Authors: * Mitsuru KANDA @USAGI * Kazunori MIYAZAWA @USAGI * Kunihiro Ishiguro <kunihiro@ipinfusion.com> * IPv6 support * YOSHIFUJI Hideaki * Split up af-specific portion * */ #include <linux/err.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <net/addrconf.h> #include <net/dst.h> #include <net/xfrm.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/ip6_route.h> #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) #include <net/mip6.h> #endif static struct xfrm_policy_afinfo xfrm6_policy_afinfo; static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, const xfrm_address_t *saddr, const xfrm_address_t *daddr) { struct flowi6 fl6; struct dst_entry *dst; int err; memset(&fl6, 0, sizeof(fl6)); memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr)); if (saddr) memcpy(&fl6.saddr, saddr, sizeof(fl6.saddr)); dst = ip6_route_output(net, NULL, &fl6); err = dst->error; if (dst->error) { dst_release(dst); dst = ERR_PTR(err); } return dst; } static int xfrm6_get_saddr(struct net *net, xfrm_address_t *saddr, xfrm_address_t *daddr) { struct dst_entry *dst; struct net_device *dev; dst = xfrm6_dst_lookup(net, 0, NULL, daddr); if (IS_ERR(dst)) return -EHOSTUNREACH; dev = ip6_dst_idev(dst)->dev; ipv6_dev_get_saddr(dev_net(dev), dev, (struct in6_addr *)&daddr->a6, 0, (struct in6_addr *)&saddr->a6); dst_release(dst); return 0; } static int xfrm6_get_tos(const struct flowi *fl) { return 0; } static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst, int nfheader_len) { if (dst->ops->family == AF_INET6) { struct rt6_info *rt = (struct rt6_info*)dst; if (rt->rt6i_node) path->path_cookie = rt->rt6i_node->fn_sernum; } path->u.rt6.rt6i_nfheader_len = nfheader_len; return 0; } static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, const struct flowi *fl) { struct rt6_info *rt = (struct rt6_info*)xdst->route; xdst->u.dst.dev = dev; dev_hold(dev); xdst->u.rt6.rt6i_idev = 
in6_dev_get(dev); if (!xdst->u.rt6.rt6i_idev) return -ENODEV; xdst->u.rt6.rt6i_peer = rt->rt6i_peer; if (rt->rt6i_peer) atomic_inc(&rt->rt6i_peer->refcnt); /* Sheit... I remember I did this right. Apparently, * it was magically lost, so this code needs audit */ xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST | RTF_LOCAL); xdst->u.rt6.rt6i_metric = rt->rt6i_metric; xdst->u.rt6.rt6i_node = rt->rt6i_node; if (rt->rt6i_node) xdst->route_cookie = rt->rt6i_node->fn_sernum; xdst->u.rt6.rt6i_gateway = rt->rt6i_gateway; xdst->u.rt6.rt6i_dst = rt->rt6i_dst; xdst->u.rt6.rt6i_src = rt->rt6i_src; return 0; } static inline void _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) { struct flowi6 *fl6 = &fl->u.ip6; int onlyproto = 0; u16 offset = skb_network_header_len(skb); const struct ipv6hdr *hdr = ipv6_hdr(skb); struct ipv6_opt_hdr *exthdr; const unsigned char *nh = skb_network_header(skb); u8 nexthdr = nh[IP6CB(skb)->nhoff]; memset(fl6, 0, sizeof(struct flowi6)); fl6->flowi6_mark = skb->mark; ipv6_addr_copy(&fl6->daddr, reverse ? &hdr->saddr : &hdr->daddr); ipv6_addr_copy(&fl6->saddr, reverse ? 
&hdr->daddr : &hdr->saddr); while (nh + offset + 1 < skb->data || pskb_may_pull(skb, nh + offset + 1 - skb->data)) { nh = skb_network_header(skb); exthdr = (struct ipv6_opt_hdr *)(nh + offset); switch (nexthdr) { case NEXTHDR_FRAGMENT: onlyproto = 1; case NEXTHDR_ROUTING: case NEXTHDR_HOP: case NEXTHDR_DEST: offset += ipv6_optlen(exthdr); nexthdr = exthdr->nexthdr; exthdr = (struct ipv6_opt_hdr *)(nh + offset); break; case IPPROTO_UDP: case IPPROTO_UDPLITE: case IPPROTO_TCP: case IPPROTO_SCTP: case IPPROTO_DCCP: if (!onlyproto && (nh + offset + 4 < skb->data || pskb_may_pull(skb, nh + offset + 4 - skb->data))) { __be16 *ports = (__be16 *)exthdr; fl6->fl6_sport = ports[!!reverse]; fl6->fl6_dport = ports[!reverse]; } fl6->flowi6_proto = nexthdr; return; case IPPROTO_ICMPV6: if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) { u8 *icmp = (u8 *)exthdr; fl6->fl6_icmp_type = icmp[0]; fl6->fl6_icmp_code = icmp[1]; } fl6->flowi6_proto = nexthdr; return; #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) case IPPROTO_MH: if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) { struct ip6_mh *mh; mh = (struct ip6_mh *)exthdr; fl6->fl6_mh_type = mh->ip6mh_type; } fl6->flowi6_proto = nexthdr; return; #endif /* XXX Why are there these headers? 
*/ case IPPROTO_AH: case IPPROTO_ESP: case IPPROTO_COMP: default: fl6->fl6_ipsec_spi = 0; fl6->flowi6_proto = nexthdr; return; } } } static inline int xfrm6_garbage_collect(struct dst_ops *ops) { struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops); xfrm6_policy_afinfo.garbage_collect(net); return dst_entries_get_fast(ops) > ops->gc_thresh * 2; } static void xfrm6_update_pmtu(struct dst_entry *dst, u32 mtu) { struct xfrm_dst *xdst = (struct xfrm_dst *)dst; struct dst_entry *path = xdst->route; path->ops->update_pmtu(path, mtu); } static void xfrm6_dst_destroy(struct dst_entry *dst) { struct xfrm_dst *xdst = (struct xfrm_dst *)dst; if (likely(xdst->u.rt6.rt6i_idev)) in6_dev_put(xdst->u.rt6.rt6i_idev); dst_destroy_metrics_generic(dst); if (likely(xdst->u.rt6.rt6i_peer)) inet_putpeer(xdst->u.rt6.rt6i_peer); xfrm_dst_destroy(xdst); } static void xfrm6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, int unregister) { struct xfrm_dst *xdst; if (!unregister) return; xdst = (struct xfrm_dst *)dst; if (xdst->u.rt6.rt6i_idev->dev == dev) { struct inet6_dev *loopback_idev = in6_dev_get(dev_net(dev)->loopback_dev); BUG_ON(!loopback_idev); do { in6_dev_put(xdst->u.rt6.rt6i_idev); xdst->u.rt6.rt6i_idev = loopback_idev; in6_dev_hold(loopback_idev); xdst = (struct xfrm_dst *)xdst->u.dst.child; } while (xdst->u.dst.xfrm); __in6_dev_put(loopback_idev); } xfrm_dst_ifdown(dst, dev); } static struct dst_ops xfrm6_dst_ops = { .family = AF_INET6, .protocol = cpu_to_be16(ETH_P_IPV6), .gc = xfrm6_garbage_collect, .update_pmtu = xfrm6_update_pmtu, .cow_metrics = dst_cow_metrics_generic, .destroy = xfrm6_dst_destroy, .ifdown = xfrm6_dst_ifdown, .local_out = __ip6_local_out, .gc_thresh = 1024, }; static struct xfrm_policy_afinfo xfrm6_policy_afinfo = { .family = AF_INET6, .dst_ops = &xfrm6_dst_ops, .dst_lookup = xfrm6_dst_lookup, .get_saddr = xfrm6_get_saddr, .decode_session = _decode_session6, .get_tos = xfrm6_get_tos, .init_path = xfrm6_init_path, .fill_dst = 
xfrm6_fill_dst, .blackhole_route = ip6_blackhole_route, }; static int __init xfrm6_policy_init(void) { return xfrm_policy_register_afinfo(&xfrm6_policy_afinfo); } static void xfrm6_policy_fini(void) { xfrm_policy_unregister_afinfo(&xfrm6_policy_afinfo); } #ifdef CONFIG_SYSCTL static struct ctl_table xfrm6_policy_table[] = { { .procname = "xfrm6_gc_thresh", .data = &init_net.xfrm.xfrm6_dst_ops.gc_thresh, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { } }; static struct ctl_table_header *sysctl_hdr; #endif int __init xfrm6_init(void) { int ret; unsigned int gc_thresh; /* * We need a good default value for the xfrm6 gc threshold. * In ipv4 we set it to the route hash table size * 8, which * is half the size of the maximaum route cache for ipv4. It * would be good to do the same thing for v6, except the table is * constructed differently here. Here each table for a net namespace * can have FIB_TABLE_HASHSZ entries, so lets go with the same * computation that we used for ipv4 here. Also, lets keep the initial * gc_thresh to a minimum of 1024, since, the ipv6 route cache defaults * to that as a minimum as well */ gc_thresh = FIB6_TABLE_HASHSZ * 8; xfrm6_dst_ops.gc_thresh = (gc_thresh < 1024) ? 1024 : gc_thresh; dst_entries_init(&xfrm6_dst_ops); ret = xfrm6_policy_init(); if (ret) { dst_entries_destroy(&xfrm6_dst_ops); goto out; } ret = xfrm6_state_init(); if (ret) goto out_policy; #ifdef CONFIG_SYSCTL sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv6_ctl_path, xfrm6_policy_table); #endif out: return ret; out_policy: xfrm6_policy_fini(); goto out; } void xfrm6_fini(void) { #ifdef CONFIG_SYSCTL if (sysctl_hdr) unregister_net_sysctl_table(sysctl_hdr); #endif //xfrm6_input_fini(); xfrm6_policy_fini(); xfrm6_state_fini(); dst_entries_destroy(&xfrm6_dst_ops); }
DirtyUnicorns/android_kernel_asus_grouper
net/ipv6/xfrm6_policy.c
C
gpl-2.0
8,759
/* fp_arith.c: floating-point math routines for the Linux-m68k floating point emulator. Copyright (c) 1998-1999 David Huggins-Daines. Somewhat based on the AlphaLinux floating point emulator, by David Mosberger-Tang. You may copy, modify, and redistribute this file under the terms of the GNU General Public License, version 2, or any later version, at your convenience. */ #include "fp_emu.h" #include "multi_arith.h" #include "fp_arith.h" const struct fp_ext fp_QNaN = { .exp = 0x7fff, .mant = { .m64 = ~0 } }; const struct fp_ext fp_Inf = { .exp = 0x7fff, }; /* let's start with the easy ones */ struct fp_ext * fp_fabs(struct fp_ext *dest, struct fp_ext *src) { dprint(PINSTR, "fabs\n"); fp_monadic_check(dest, src); dest->sign = 0; return dest; } struct fp_ext * fp_fneg(struct fp_ext *dest, struct fp_ext *src) { dprint(PINSTR, "fneg\n"); fp_monadic_check(dest, src); dest->sign = !dest->sign; return dest; } /* Now, the slightly harder ones */ /* fp_fadd: Implements the kernel of the FADD, FSADD, FDADD, FSUB, FDSUB, and FCMP instructions. 
*/ struct fp_ext * fp_fadd(struct fp_ext *dest, struct fp_ext *src) { int diff; dprint(PINSTR, "fadd\n"); fp_dyadic_check(dest, src); if (IS_INF(dest)) { /* infinity - infinity == NaN */ if (IS_INF(src) && (src->sign != dest->sign)) fp_set_nan(dest); return dest; } if (IS_INF(src)) { fp_copy_ext(dest, src); return dest; } if (IS_ZERO(dest)) { if (IS_ZERO(src)) { if (src->sign != dest->sign) { if (FPDATA->rnd == FPCR_ROUND_RM) dest->sign = 1; else dest->sign = 0; } } else fp_copy_ext(dest, src); return dest; } dest->lowmant = src->lowmant = 0; if ((diff = dest->exp - src->exp) > 0) fp_denormalize(src, diff); else if ((diff = -diff) > 0) fp_denormalize(dest, diff); if (dest->sign == src->sign) { if (fp_addmant(dest, src)) if (!fp_addcarry(dest)) return dest; } else { if (dest->mant.m64 < src->mant.m64) { fp_submant(dest, src, dest); dest->sign = !dest->sign; } else fp_submant(dest, dest, src); } return dest; } /* fp_fsub: Implements the kernel of the FSUB, FSSUB, and FDSUB instructions. Remember that the arguments are in assembler-syntax order! 
*/ struct fp_ext * fp_fsub(struct fp_ext *dest, struct fp_ext *src) { dprint(PINSTR, "fsub "); src->sign = !src->sign; return fp_fadd(dest, src); } struct fp_ext * fp_fcmp(struct fp_ext *dest, struct fp_ext *src) { dprint(PINSTR, "fcmp "); FPDATA->temp[1] = *dest; src->sign = !src->sign; return fp_fadd(&FPDATA->temp[1], src); } struct fp_ext * fp_ftst(struct fp_ext *dest, struct fp_ext *src) { dprint(PINSTR, "ftst\n"); (void)dest; return src; } struct fp_ext * fp_fmul(struct fp_ext *dest, struct fp_ext *src) { union fp_mant128 temp; int exp; dprint(PINSTR, "fmul\n"); fp_dyadic_check(dest, src); /* calculate the correct sign now, as it's necessary for infinities */ dest->sign = src->sign ^ dest->sign; /* Handle infinities */ if (IS_INF(dest)) { if (IS_ZERO(src)) fp_set_nan(dest); return dest; } if (IS_INF(src)) { if (IS_ZERO(dest)) fp_set_nan(dest); else fp_copy_ext(dest, src); return dest; } /* Of course, as we all know, zero * anything = zero. You may not have known that it might be a positive or negative zero... */ if (IS_ZERO(dest) || IS_ZERO(src)) { dest->exp = 0; dest->mant.m64 = 0; dest->lowmant = 0; return dest; } exp = dest->exp + src->exp - 0x3ffe; /* shift up the mantissa for denormalized numbers, so that the highest bit is set, this makes the shift of the result below easier */ if ((long)dest->mant.m32[0] >= 0) exp -= fp_overnormalize(dest); if ((long)src->mant.m32[0] >= 0) exp -= fp_overnormalize(src); /* now, do a 64-bit multiply with expansion */ fp_multiplymant(&temp, dest, src); /* normalize it back to 64 bits and stuff it back into the destination struct */ if ((long)temp.m32[0] > 0) { exp--; fp_putmant128(dest, &temp, 1); } else fp_putmant128(dest, &temp, 0); if (exp >= 0x7fff) { fp_set_ovrflw(dest); return dest; } dest->exp = exp; if (exp < 0) { fp_set_sr(FPSR_EXC_UNFL); fp_denormalize(dest, -exp); } return dest; } /* fp_fdiv: Implements the "kernel" of the FDIV, FSDIV, FDDIV and FSGLDIV instructions. 
Note that the order of the operands is counter-intuitive: instead of src / dest, the result is actually dest / src. */ struct fp_ext * fp_fdiv(struct fp_ext *dest, struct fp_ext *src) { union fp_mant128 temp; int exp; dprint(PINSTR, "fdiv\n"); fp_dyadic_check(dest, src); /* calculate the correct sign now, as it's necessary for infinities */ dest->sign = src->sign ^ dest->sign; /* Handle infinities */ if (IS_INF(dest)) { /* infinity / infinity = NaN (quiet, as always) */ if (IS_INF(src)) fp_set_nan(dest); /* infinity / anything else = infinity (with approprate sign) */ return dest; } if (IS_INF(src)) { /* anything / infinity = zero (with appropriate sign) */ dest->exp = 0; dest->mant.m64 = 0; dest->lowmant = 0; return dest; } /* zeroes */ if (IS_ZERO(dest)) { /* zero / zero = NaN */ if (IS_ZERO(src)) fp_set_nan(dest); /* zero / anything else = zero */ return dest; } if (IS_ZERO(src)) { /* anything / zero = infinity (with appropriate sign) */ fp_set_sr(FPSR_EXC_DZ); dest->exp = 0x7fff; dest->mant.m64 = 0; return dest; } exp = dest->exp - src->exp + 0x3fff; /* shift up the mantissa for denormalized numbers, so that the highest bit is set, this makes lots of things below easier */ if ((long)dest->mant.m32[0] >= 0) exp -= fp_overnormalize(dest); if ((long)src->mant.m32[0] >= 0) exp -= fp_overnormalize(src); /* now, do the 64-bit divide */ fp_dividemant(&temp, dest, src); /* normalize it back to 64 bits and stuff it back into the destination struct */ if (!temp.m32[0]) { exp--; fp_putmant128(dest, &temp, 32); } else fp_putmant128(dest, &temp, 31); if (exp >= 0x7fff) { fp_set_ovrflw(dest); return dest; } dest->exp = exp; if (exp < 0) { fp_set_sr(FPSR_EXC_UNFL); fp_denormalize(dest, -exp); } return dest; } struct fp_ext * fp_fsglmul(struct fp_ext *dest, struct fp_ext *src) { int exp; dprint(PINSTR, "fsglmul\n"); fp_dyadic_check(dest, src); /* calculate the correct sign now, as it's necessary for infinities */ dest->sign = src->sign ^ dest->sign; /* Handle infinities */ if 
(IS_INF(dest)) { if (IS_ZERO(src)) fp_set_nan(dest); return dest; } if (IS_INF(src)) { if (IS_ZERO(dest)) fp_set_nan(dest); else fp_copy_ext(dest, src); return dest; } /* Of course, as we all know, zero * anything = zero. You may not have known that it might be a positive or negative zero... */ if (IS_ZERO(dest) || IS_ZERO(src)) { dest->exp = 0; dest->mant.m64 = 0; dest->lowmant = 0; return dest; } exp = dest->exp + src->exp - 0x3ffe; /* do a 32-bit multiply */ fp_mul64(dest->mant.m32[0], dest->mant.m32[1], dest->mant.m32[0] & 0xffffff00, src->mant.m32[0] & 0xffffff00); if (exp >= 0x7fff) { fp_set_ovrflw(dest); return dest; } dest->exp = exp; if (exp < 0) { fp_set_sr(FPSR_EXC_UNFL); fp_denormalize(dest, -exp); } return dest; } struct fp_ext * fp_fsgldiv(struct fp_ext *dest, struct fp_ext *src) { int exp; unsigned long quot, rem; dprint(PINSTR, "fsgldiv\n"); fp_dyadic_check(dest, src); /* calculate the correct sign now, as it's necessary for infinities */ dest->sign = src->sign ^ dest->sign; /* Handle infinities */ if (IS_INF(dest)) { /* infinity / infinity = NaN (quiet, as always) */ if (IS_INF(src)) fp_set_nan(dest); /* infinity / anything else = infinity (with approprate sign) */ return dest; } if (IS_INF(src)) { /* anything / infinity = zero (with appropriate sign) */ dest->exp = 0; dest->mant.m64 = 0; dest->lowmant = 0; return dest; } /* zeroes */ if (IS_ZERO(dest)) { /* zero / zero = NaN */ if (IS_ZERO(src)) fp_set_nan(dest); /* zero / anything else = zero */ return dest; } if (IS_ZERO(src)) { /* anything / zero = infinity (with appropriate sign) */ fp_set_sr(FPSR_EXC_DZ); dest->exp = 0x7fff; dest->mant.m64 = 0; return dest; } exp = dest->exp - src->exp + 0x3fff; dest->mant.m32[0] &= 0xffffff00; src->mant.m32[0] &= 0xffffff00; /* do the 32-bit divide */ if (dest->mant.m32[0] >= src->mant.m32[0]) { fp_sub64(dest->mant, src->mant); fp_div64(quot, rem, dest->mant.m32[0], 0, src->mant.m32[0]); dest->mant.m32[0] = 0x80000000 | (quot >> 1); dest->mant.m32[1] = (quot 
& 1) | rem; /* only for rounding */ } else { fp_div64(quot, rem, dest->mant.m32[0], 0, src->mant.m32[0]); dest->mant.m32[0] = quot; dest->mant.m32[1] = rem; /* only for rounding */ exp--; } if (exp >= 0x7fff) { fp_set_ovrflw(dest); return dest; } dest->exp = exp; if (exp < 0) { fp_set_sr(FPSR_EXC_UNFL); fp_denormalize(dest, -exp); } return dest; } /* fp_roundint: Internal rounding function for use by several of these emulated instructions. This one rounds off the fractional part using the rounding mode specified. */ static void fp_roundint(struct fp_ext *dest, int mode) { union fp_mant64 oldmant; unsigned long mask; if (!fp_normalize_ext(dest)) return; /* infinities and zeroes */ if (IS_INF(dest) || IS_ZERO(dest)) return; /* first truncate the lower bits */ oldmant = dest->mant; switch (dest->exp) { case 0 ... 0x3ffe: dest->mant.m64 = 0; break; case 0x3fff ... 0x401e: dest->mant.m32[0] &= 0xffffffffU << (0x401e - dest->exp); dest->mant.m32[1] = 0; if (oldmant.m64 == dest->mant.m64) return; break; case 0x401f ... 0x403e: dest->mant.m32[1] &= 0xffffffffU << (0x403e - dest->exp); if (oldmant.m32[1] == dest->mant.m32[1]) return; break; default: return; } fp_set_sr(FPSR_EXC_INEX2); /* We might want to normalize upwards here... however, since we know that this is only called on the output of fp_fdiv, or with the input to fp_fint or fp_fintrz, and the inputs to all these functions are either normal or denormalized (no subnormals allowed!), there's really no need. In the case of fp_fdiv, observe that 0x80000000 / 0xffff = 0xffff8000, and the same holds for 128-bit / 64-bit. (i.e. the smallest possible normal dividend and the largest possible normal divisor will still produce a normal quotient, therefore, (normal << 64) / normal is normal in all cases) */ switch (mode) { case FPCR_ROUND_RN: switch (dest->exp) { case 0 ... 0x3ffd: return; case 0x3ffe: /* As noted above, the input is always normal, so the guard bit (bit 63) is always set. 
therefore, the only case in which we will NOT round to 1.0 is when the input is exactly 0.5. */ if (oldmant.m64 == (1ULL << 63)) return; break; case 0x3fff ... 0x401d: mask = 1 << (0x401d - dest->exp); if (!(oldmant.m32[0] & mask)) return; if (oldmant.m32[0] & (mask << 1)) break; if (!(oldmant.m32[0] << (dest->exp - 0x3ffd)) && !oldmant.m32[1]) return; break; case 0x401e: if (oldmant.m32[1] & 0x80000000) return; if (oldmant.m32[0] & 1) break; if (!(oldmant.m32[1] << 1)) return; break; case 0x401f ... 0x403d: mask = 1 << (0x403d - dest->exp); if (!(oldmant.m32[1] & mask)) return; if (oldmant.m32[1] & (mask << 1)) break; if (!(oldmant.m32[1] << (dest->exp - 0x401d))) return; break; default: return; } break; case FPCR_ROUND_RZ: return; default: if (dest->sign ^ (mode - FPCR_ROUND_RM)) break; return; } switch (dest->exp) { case 0 ... 0x3ffe: dest->exp = 0x3fff; dest->mant.m64 = 1ULL << 63; break; case 0x3fff ... 0x401e: mask = 1 << (0x401e - dest->exp); if (dest->mant.m32[0] += mask) break; dest->mant.m32[0] = 0x80000000; dest->exp++; break; case 0x401f ... 
0x403e: mask = 1 << (0x403e - dest->exp); if (dest->mant.m32[1] += mask) break; if (dest->mant.m32[0] += 1) break; dest->mant.m32[0] = 0x80000000; dest->exp++; break; } } /* modrem_kernel: Implementation of the FREM and FMOD instructions (which are exactly the same, except for the rounding used on the intermediate value) */ static struct fp_ext * modrem_kernel(struct fp_ext *dest, struct fp_ext *src, int mode) { struct fp_ext tmp; fp_dyadic_check(dest, src); /* Infinities and zeros */ if (IS_INF(dest) || IS_ZERO(src)) { fp_set_nan(dest); return dest; } if (IS_ZERO(dest) || IS_INF(src)) return dest; /* FIXME: there is almost certainly a smarter way to do this */ fp_copy_ext(&tmp, dest); fp_fdiv(&tmp, src); /* NOTE: src might be modified */ fp_roundint(&tmp, mode); fp_fmul(&tmp, src); fp_fsub(dest, &tmp); /* set the quotient byte */ fp_set_quotient((dest->mant.m64 & 0x7f) | (dest->sign << 7)); return dest; } /* fp_fmod: Implements the kernel of the FMOD instruction. Again, the argument order is backwards. The result, as defined in the Motorola manuals, is: fmod(src,dest) = (dest - (src * floor(dest / src))) */ struct fp_ext * fp_fmod(struct fp_ext *dest, struct fp_ext *src) { dprint(PINSTR, "fmod\n"); return modrem_kernel(dest, src, FPCR_ROUND_RZ); } /* fp_frem: Implements the kernel of the FREM instruction. 
frem(src,dest) = (dest - (src * round(dest / src))) */ struct fp_ext * fp_frem(struct fp_ext *dest, struct fp_ext *src) { dprint(PINSTR, "frem\n"); return modrem_kernel(dest, src, FPCR_ROUND_RN); } struct fp_ext * fp_fint(struct fp_ext *dest, struct fp_ext *src) { dprint(PINSTR, "fint\n"); fp_copy_ext(dest, src); fp_roundint(dest, FPDATA->rnd); return dest; } struct fp_ext * fp_fintrz(struct fp_ext *dest, struct fp_ext *src) { dprint(PINSTR, "fintrz\n"); fp_copy_ext(dest, src); fp_roundint(dest, FPCR_ROUND_RZ); return dest; } struct fp_ext * fp_fscale(struct fp_ext *dest, struct fp_ext *src) { int scale, oldround; dprint(PINSTR, "fscale\n"); fp_dyadic_check(dest, src); /* Infinities */ if (IS_INF(src)) { fp_set_nan(dest); return dest; } if (IS_INF(dest)) return dest; /* zeroes */ if (IS_ZERO(src) || IS_ZERO(dest)) return dest; /* Source exponent out of range */ if (src->exp >= 0x400c) { fp_set_ovrflw(dest); return dest; } /* src must be rounded with round to zero. */ oldround = FPDATA->rnd; FPDATA->rnd = FPCR_ROUND_RZ; scale = fp_conv_ext2long(src); FPDATA->rnd = oldround; /* new exponent */ scale += dest->exp; if (scale >= 0x7fff) { fp_set_ovrflw(dest); } else if (scale <= 0) { fp_set_sr(FPSR_EXC_UNFL); fp_denormalize(dest, -scale); } else dest->exp = scale; return dest; }
qizy09/PMFS-forked
arch/m68k/math-emu/fp_arith.c
C
gpl-2.0
14,782
/* devices.c: Initial scan of the prom device tree for important * Sparc device nodes which we need to find. * * This is based on the sparc64 version, but sun4m doesn't always use * the hardware MIDs, so be careful. * * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) */ #include <linux/kernel.h> #include <linux/threads.h> #include <linux/string.h> #include <linux/init.h> #include <linux/errno.h> #include <asm/page.h> #include <asm/oplib.h> #include <asm/prom.h> #include <asm/smp.h> #include <asm/cpudata.h> #include <asm/cpu_type.h> extern void clock_stop_probe(void); /* tadpole.c */ static char *cpu_mid_prop(void) { if (sparc_cpu_model == sun4d) return "cpu-id"; return "mid"; } static int check_cpu_node(phandle nd, int *cur_inst, int (*compare)(phandle, int, void *), void *compare_arg, phandle *prom_node, int *mid) { if (!compare(nd, *cur_inst, compare_arg)) { if (prom_node) *prom_node = nd; if (mid) { *mid = prom_getintdefault(nd, cpu_mid_prop(), 0); if (sparc_cpu_model == sun4m) *mid &= 3; } return 0; } (*cur_inst)++; return -ENODEV; } static int __cpu_find_by(int (*compare)(phandle, int, void *), void *compare_arg, phandle *prom_node, int *mid) { struct device_node *dp; int cur_inst; cur_inst = 0; for_each_node_by_type(dp, "cpu") { int err = check_cpu_node(dp->phandle, &cur_inst, compare, compare_arg, prom_node, mid); if (!err) { of_node_put(dp); return 0; } } return -ENODEV; } static int cpu_instance_compare(phandle nd, int instance, void *_arg) { int desired_instance = (int) _arg; if (instance == desired_instance) return 0; return -ENODEV; } int cpu_find_by_instance(int instance, phandle *prom_node, int *mid) { return __cpu_find_by(cpu_instance_compare, (void *)instance, prom_node, mid); } static int cpu_mid_compare(phandle nd, int instance, void *_arg) { int desired_mid = (int) _arg; int this_mid; this_mid = prom_getintdefault(nd, cpu_mid_prop(), 0); if (this_mid == desired_mid || (sparc_cpu_model == sun4m && (this_mid & 3) == desired_mid)) return 
0; return -ENODEV; } int cpu_find_by_mid(int mid, phandle *prom_node) { return __cpu_find_by(cpu_mid_compare, (void *)mid, prom_node, NULL); } /* sun4m uses truncated mids since we base the cpuid on the ttable/irqset * address (0-3). This gives us the true hardware mid, which might have * some other bits set. On 4d hardware and software mids are the same. */ int cpu_get_hwmid(phandle prom_node) { return prom_getintdefault(prom_node, cpu_mid_prop(), -ENODEV); } void __init device_scan(void) { printk(KERN_NOTICE "Booting Linux...\n"); #ifndef CONFIG_SMP { phandle cpu_node; int err; err = cpu_find_by_instance(0, &cpu_node, NULL); if (err) { /* Probably a sun4e, Sun is trying to trick us ;-) */ prom_printf("No cpu nodes, cannot continue\n"); prom_halt(); } cpu_data(0).clock_tick = prom_getintdefault(cpu_node, "clock-frequency", 0); } #endif /* !CONFIG_SMP */ { extern void auxio_probe(void); extern void auxio_power_probe(void); auxio_probe(); auxio_power_probe(); } clock_stop_probe(); }
SM-G920P/G920P-MM
arch/sparc/kernel/devices.c
C
gpl-2.0
3,193
/* * Driver for S3 SonicVibes soundcard * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * BUGS: * It looks like 86c617 rev 3 doesn't supports DDMA buffers above 16MB? * Driver sometimes hangs... Nobody knows why at this moment... * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/gameport.h> #include <linux/moduleparam.h> #include <linux/dma-mapping.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/info.h> #include <sound/control.h> #include <sound/mpu401.h> #include <sound/opl3.h> #include <sound/initval.h> #include <asm/io.h> MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("S3 SonicVibes PCI"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{S3,SonicVibes PCI}}"); #if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE)) #define SUPPORT_JOYSTICK 1 #endif static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */ static int reverb[SNDRV_CARDS]; static int mge[SNDRV_CARDS]; static unsigned int dmaio = 0x7a00; /* DDMA i/o address */ 
module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for S3 SonicVibes soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for S3 SonicVibes soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable S3 SonicVibes soundcard."); module_param_array(reverb, bool, NULL, 0444); MODULE_PARM_DESC(reverb, "Enable reverb (SRAM is present) for S3 SonicVibes soundcard."); module_param_array(mge, bool, NULL, 0444); MODULE_PARM_DESC(mge, "MIC Gain Enable for S3 SonicVibes soundcard."); module_param(dmaio, uint, 0444); MODULE_PARM_DESC(dmaio, "DDMA i/o base address for S3 SonicVibes soundcard."); /* * Enhanced port direct registers */ #define SV_REG(sonic, x) ((sonic)->enh_port + SV_REG_##x) #define SV_REG_CONTROL 0x00 /* R/W: CODEC/Mixer control register */ #define SV_ENHANCED 0x01 /* audio mode select - enhanced mode */ #define SV_TEST 0x02 /* test bit */ #define SV_REVERB 0x04 /* reverb enable */ #define SV_WAVETABLE 0x08 /* wavetable active / FM active if not set */ #define SV_INTA 0x20 /* INTA driving - should be always 1 */ #define SV_RESET 0x80 /* reset chip */ #define SV_REG_IRQMASK 0x01 /* R/W: CODEC/Mixer interrupt mask register */ #define SV_DMAA_MASK 0x01 /* mask DMA-A interrupt */ #define SV_DMAC_MASK 0x04 /* mask DMA-C interrupt */ #define SV_SPEC_MASK 0x08 /* special interrupt mask - should be always masked */ #define SV_UD_MASK 0x40 /* Up/Down button interrupt mask */ #define SV_MIDI_MASK 0x80 /* mask MIDI interrupt */ #define SV_REG_STATUS 0x02 /* R/O: CODEC/Mixer status register */ #define SV_DMAA_IRQ 0x01 /* DMA-A interrupt */ #define SV_DMAC_IRQ 0x04 /* DMA-C interrupt */ #define SV_SPEC_IRQ 0x08 /* special interrupt */ #define SV_UD_IRQ 0x40 /* Up/Down interrupt */ #define SV_MIDI_IRQ 0x80 /* MIDI interrupt */ #define SV_REG_INDEX 0x04 /* R/W: CODEC/Mixer index address register */ #define SV_MCE 0x40 /* mode change enable */ #define SV_TRD 0x80 /* DMA transfer 
request disabled */ #define SV_REG_DATA 0x05 /* R/W: CODEC/Mixer index data register */ /* * Enhanced port indirect registers */ #define SV_IREG_LEFT_ADC 0x00 /* Left ADC Input Control */ #define SV_IREG_RIGHT_ADC 0x01 /* Right ADC Input Control */ #define SV_IREG_LEFT_AUX1 0x02 /* Left AUX1 Input Control */ #define SV_IREG_RIGHT_AUX1 0x03 /* Right AUX1 Input Control */ #define SV_IREG_LEFT_CD 0x04 /* Left CD Input Control */ #define SV_IREG_RIGHT_CD 0x05 /* Right CD Input Control */ #define SV_IREG_LEFT_LINE 0x06 /* Left Line Input Control */ #define SV_IREG_RIGHT_LINE 0x07 /* Right Line Input Control */ #define SV_IREG_MIC 0x08 /* MIC Input Control */ #define SV_IREG_GAME_PORT 0x09 /* Game Port Control */ #define SV_IREG_LEFT_SYNTH 0x0a /* Left Synth Input Control */ #define SV_IREG_RIGHT_SYNTH 0x0b /* Right Synth Input Control */ #define SV_IREG_LEFT_AUX2 0x0c /* Left AUX2 Input Control */ #define SV_IREG_RIGHT_AUX2 0x0d /* Right AUX2 Input Control */ #define SV_IREG_LEFT_ANALOG 0x0e /* Left Analog Mixer Output Control */ #define SV_IREG_RIGHT_ANALOG 0x0f /* Right Analog Mixer Output Control */ #define SV_IREG_LEFT_PCM 0x10 /* Left PCM Input Control */ #define SV_IREG_RIGHT_PCM 0x11 /* Right PCM Input Control */ #define SV_IREG_DMA_DATA_FMT 0x12 /* DMA Data Format */ #define SV_IREG_PC_ENABLE 0x13 /* Playback/Capture Enable Register */ #define SV_IREG_UD_BUTTON 0x14 /* Up/Down Button Register */ #define SV_IREG_REVISION 0x15 /* Revision */ #define SV_IREG_ADC_OUTPUT_CTRL 0x16 /* ADC Output Control */ #define SV_IREG_DMA_A_UPPER 0x18 /* DMA A Upper Base Count */ #define SV_IREG_DMA_A_LOWER 0x19 /* DMA A Lower Base Count */ #define SV_IREG_DMA_C_UPPER 0x1c /* DMA C Upper Base Count */ #define SV_IREG_DMA_C_LOWER 0x1d /* DMA C Lower Base Count */ #define SV_IREG_PCM_RATE_LOW 0x1e /* PCM Sampling Rate Low Byte */ #define SV_IREG_PCM_RATE_HIGH 0x1f /* PCM Sampling Rate High Byte */ #define SV_IREG_SYNTH_RATE_LOW 0x20 /* Synthesizer Sampling Rate Low Byte */ #define 
SV_IREG_SYNTH_RATE_HIGH 0x21 /* Synthesizer Sampling Rate High Byte */ #define SV_IREG_ADC_CLOCK 0x22 /* ADC Clock Source Selection */ #define SV_IREG_ADC_ALT_RATE 0x23 /* ADC Alternative Sampling Rate Selection */ #define SV_IREG_ADC_PLL_M 0x24 /* ADC PLL M Register */ #define SV_IREG_ADC_PLL_N 0x25 /* ADC PLL N Register */ #define SV_IREG_SYNTH_PLL_M 0x26 /* Synthesizer PLL M Register */ #define SV_IREG_SYNTH_PLL_N 0x27 /* Synthesizer PLL N Register */ #define SV_IREG_MPU401 0x2a /* MPU-401 UART Operation */ #define SV_IREG_DRIVE_CTRL 0x2b /* Drive Control */ #define SV_IREG_SRS_SPACE 0x2c /* SRS Space Control */ #define SV_IREG_SRS_CENTER 0x2d /* SRS Center Control */ #define SV_IREG_WAVE_SOURCE 0x2e /* Wavetable Sample Source Select */ #define SV_IREG_ANALOG_POWER 0x30 /* Analog Power Down Control */ #define SV_IREG_DIGITAL_POWER 0x31 /* Digital Power Down Control */ #define SV_IREG_ADC_PLL SV_IREG_ADC_PLL_M #define SV_IREG_SYNTH_PLL SV_IREG_SYNTH_PLL_M /* * DMA registers */ #define SV_DMA_ADDR0 0x00 #define SV_DMA_ADDR1 0x01 #define SV_DMA_ADDR2 0x02 #define SV_DMA_ADDR3 0x03 #define SV_DMA_COUNT0 0x04 #define SV_DMA_COUNT1 0x05 #define SV_DMA_COUNT2 0x06 #define SV_DMA_MODE 0x0b #define SV_DMA_RESET 0x0d #define SV_DMA_MASK 0x0f /* * Record sources */ #define SV_RECSRC_RESERVED (0x00<<5) #define SV_RECSRC_CD (0x01<<5) #define SV_RECSRC_DAC (0x02<<5) #define SV_RECSRC_AUX2 (0x03<<5) #define SV_RECSRC_LINE (0x04<<5) #define SV_RECSRC_AUX1 (0x05<<5) #define SV_RECSRC_MIC (0x06<<5) #define SV_RECSRC_OUT (0x07<<5) /* * constants */ #define SV_FULLRATE 48000 #define SV_REFFREQUENCY 24576000 #define SV_ADCMULT 512 #define SV_MODE_PLAY 1 #define SV_MODE_CAPTURE 2 /* */ struct sonicvibes { unsigned long dma1size; unsigned long dma2size; int irq; unsigned long sb_port; unsigned long enh_port; unsigned long synth_port; unsigned long midi_port; unsigned long game_port; unsigned int dmaa_port; struct resource *res_dmaa; unsigned int dmac_port; struct resource *res_dmac; 
unsigned char enable; unsigned char irqmask; unsigned char revision; unsigned char format; unsigned char srs_space; unsigned char srs_center; unsigned char mpu_switch; unsigned char wave_source; unsigned int mode; struct pci_dev *pci; struct snd_card *card; struct snd_pcm *pcm; struct snd_pcm_substream *playback_substream; struct snd_pcm_substream *capture_substream; struct snd_rawmidi *rmidi; struct snd_hwdep *fmsynth; /* S3FM */ spinlock_t reg_lock; unsigned int p_dma_size; unsigned int c_dma_size; struct snd_kcontrol *master_mute; struct snd_kcontrol *master_volume; #ifdef SUPPORT_JOYSTICK struct gameport *gameport; #endif }; static DEFINE_PCI_DEVICE_TABLE(snd_sonic_ids) = { { PCI_VDEVICE(S3, 0xca00), 0, }, { 0, } }; MODULE_DEVICE_TABLE(pci, snd_sonic_ids); static struct snd_ratden sonicvibes_adc_clock = { .num_min = 4000 * 65536, .num_max = 48000UL * 65536, .num_step = 1, .den = 65536, }; static struct snd_pcm_hw_constraint_ratdens snd_sonicvibes_hw_constraints_adc_clock = { .nrats = 1, .rats = &sonicvibes_adc_clock, }; /* * common I/O routines */ static inline void snd_sonicvibes_setdmaa(struct sonicvibes * sonic, unsigned int addr, unsigned int count) { count--; outl(addr, sonic->dmaa_port + SV_DMA_ADDR0); outl(count, sonic->dmaa_port + SV_DMA_COUNT0); outb(0x18, sonic->dmaa_port + SV_DMA_MODE); #if 0 printk(KERN_DEBUG "program dmaa: addr = 0x%x, paddr = 0x%x\n", addr, inl(sonic->dmaa_port + SV_DMA_ADDR0)); #endif } static inline void snd_sonicvibes_setdmac(struct sonicvibes * sonic, unsigned int addr, unsigned int count) { /* note: dmac is working in word mode!!! 
*/ count >>= 1; count--; outl(addr, sonic->dmac_port + SV_DMA_ADDR0); outl(count, sonic->dmac_port + SV_DMA_COUNT0); outb(0x14, sonic->dmac_port + SV_DMA_MODE); #if 0 printk(KERN_DEBUG "program dmac: addr = 0x%x, paddr = 0x%x\n", addr, inl(sonic->dmac_port + SV_DMA_ADDR0)); #endif } static inline unsigned int snd_sonicvibes_getdmaa(struct sonicvibes * sonic) { return (inl(sonic->dmaa_port + SV_DMA_COUNT0) & 0xffffff) + 1; } static inline unsigned int snd_sonicvibes_getdmac(struct sonicvibes * sonic) { /* note: dmac is working in word mode!!! */ return ((inl(sonic->dmac_port + SV_DMA_COUNT0) & 0xffffff) + 1) << 1; } static void snd_sonicvibes_out1(struct sonicvibes * sonic, unsigned char reg, unsigned char value) { outb(reg, SV_REG(sonic, INDEX)); udelay(10); outb(value, SV_REG(sonic, DATA)); udelay(10); } static void snd_sonicvibes_out(struct sonicvibes * sonic, unsigned char reg, unsigned char value) { unsigned long flags; spin_lock_irqsave(&sonic->reg_lock, flags); outb(reg, SV_REG(sonic, INDEX)); udelay(10); outb(value, SV_REG(sonic, DATA)); udelay(10); spin_unlock_irqrestore(&sonic->reg_lock, flags); } static unsigned char snd_sonicvibes_in1(struct sonicvibes * sonic, unsigned char reg) { unsigned char value; outb(reg, SV_REG(sonic, INDEX)); udelay(10); value = inb(SV_REG(sonic, DATA)); udelay(10); return value; } static unsigned char snd_sonicvibes_in(struct sonicvibes * sonic, unsigned char reg) { unsigned long flags; unsigned char value; spin_lock_irqsave(&sonic->reg_lock, flags); outb(reg, SV_REG(sonic, INDEX)); udelay(10); value = inb(SV_REG(sonic, DATA)); udelay(10); spin_unlock_irqrestore(&sonic->reg_lock, flags); return value; } #if 0 static void snd_sonicvibes_debug(struct sonicvibes * sonic) { printk(KERN_DEBUG "SV REGS: INDEX = 0x%02x ", inb(SV_REG(sonic, INDEX))); printk(" STATUS = 0x%02x\n", inb(SV_REG(sonic, STATUS))); printk(KERN_DEBUG " 0x00: left input = 0x%02x ", snd_sonicvibes_in(sonic, 0x00)); printk(" 0x20: synth rate low = 0x%02x\n", 
snd_sonicvibes_in(sonic, 0x20));
	printk(KERN_DEBUG " 0x01: right input = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x01));
	printk(" 0x21: synth rate high = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x21));
	printk(KERN_DEBUG " 0x02: left AUX1 = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x02));
	printk(" 0x22: ADC clock = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x22));
	printk(KERN_DEBUG " 0x03: right AUX1 = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x03));
	printk(" 0x23: ADC alt rate = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x23));
	printk(KERN_DEBUG " 0x04: left CD = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x04));
	printk(" 0x24: ADC pll M = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x24));
	printk(KERN_DEBUG " 0x05: right CD = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x05));
	printk(" 0x25: ADC pll N = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x25));
	printk(KERN_DEBUG " 0x06: left line = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x06));
	printk(" 0x26: Synth pll M = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x26));
	printk(KERN_DEBUG " 0x07: right line = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x07));
	printk(" 0x27: Synth pll N = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x27));
	printk(KERN_DEBUG " 0x08: MIC = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x08));
	printk(" 0x28: --- = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x28));
	printk(KERN_DEBUG " 0x09: Game port = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x09));
	printk(" 0x29: --- = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x29));
	printk(KERN_DEBUG " 0x0a: left synth = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x0a));
	printk(" 0x2a: MPU401 = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x2a));
	printk(KERN_DEBUG " 0x0b: right synth = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x0b));
	printk(" 0x2b: drive ctrl = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x2b));
	printk(KERN_DEBUG " 0x0c: left AUX2 = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x0c));
	printk(" 0x2c: SRS space = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x2c));
	printk(KERN_DEBUG " 0x0d: right AUX2 = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x0d));
	printk(" 0x2d: SRS center = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x2d));
	printk(KERN_DEBUG " 0x0e: left analog = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x0e));
	printk(" 0x2e: wave source = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x2e));
	printk(KERN_DEBUG " 0x0f: right analog = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x0f));
	printk(" 0x2f: --- = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x2f));
	printk(KERN_DEBUG " 0x10: left PCM = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x10));
	printk(" 0x30: analog power = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x30));
	printk(KERN_DEBUG " 0x11: right PCM = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x11));
	printk(" 0x31: analog power = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x31));
	printk(KERN_DEBUG " 0x12: DMA data format = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x12));
	printk(" 0x32: --- = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x32));
	printk(KERN_DEBUG " 0x13: P/C enable = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x13));
	printk(" 0x33: --- = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x33));
	printk(KERN_DEBUG " 0x14: U/D button = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x14));
	printk(" 0x34: --- = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x34));
	printk(KERN_DEBUG " 0x15: revision = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x15));
	printk(" 0x35: --- = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x35));
	printk(KERN_DEBUG " 0x16: ADC output ctrl = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x16));
	printk(" 0x36: --- = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x36));
	printk(KERN_DEBUG " 0x17: --- = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x17));
	printk(" 0x37: --- = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x37));
	printk(KERN_DEBUG " 0x18: DMA A upper cnt = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x18));
	printk(" 0x38: --- = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x38));
	printk(KERN_DEBUG " 0x19: DMA A lower cnt = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x19));
	printk(" 0x39: --- = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x39));
	printk(KERN_DEBUG " 0x1a: --- = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x1a));
	printk(" 0x3a: --- = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x3a));
	printk(KERN_DEBUG " 0x1b: --- = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x1b));
	printk(" 0x3b: --- = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x3b));
	printk(KERN_DEBUG " 0x1c: DMA C upper cnt = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x1c));
	printk(" 0x3c: --- = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x3c));
	printk(KERN_DEBUG " 0x1d: DMA C upper cnt = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x1d));
	printk(" 0x3d: --- = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x3d));
	printk(KERN_DEBUG " 0x1e: PCM rate low = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x1e));
	printk(" 0x3e: --- = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x3e));
	printk(KERN_DEBUG " 0x1f: PCM rate high = 0x%02x ",
	       snd_sonicvibes_in(sonic, 0x1f));
	printk(" 0x3f: --- = 0x%02x\n",
	       snd_sonicvibes_in(sonic, 0x3f));
}
#endif

/*
 * Update the DMA data format register under the MCE (mode change enable)
 * bit.  With a non-zero mask the current value is read back first and the
 * masked bits preserved.
 */
static void snd_sonicvibes_setfmt(struct sonicvibes * sonic,
				  unsigned char mask,
				  unsigned char value)
{
	unsigned long flags;

	spin_lock_irqsave(&sonic->reg_lock, flags);
	outb(SV_MCE | SV_IREG_DMA_DATA_FMT, SV_REG(sonic, INDEX));
	if (mask) {
		sonic->format = inb(SV_REG(sonic, DATA));
		udelay(10);
	}
	sonic->format = (sonic->format & mask) | value;
	outb(sonic->format, SV_REG(sonic, DATA));
	udelay(10);
	outb(0, SV_REG(sonic, INDEX));
	udelay(10);
	spin_unlock_irqrestore(&sonic->reg_lock, flags);
}

/*
 * Search PLL dividers (r, m, n) approximating the requested sample rate.
 * Exhaustive scan over m/n; r is the power-of-two prescaler.
 */
static void snd_sonicvibes_pll(unsigned int rate,
			       unsigned int *res_r,
			       unsigned int *res_m,
			       unsigned int *res_n)
{
	unsigned int r, m = 0, n = 0;
	unsigned int xm, xn, xr, xd, metric = ~0U;

	if (rate < 625000 / SV_ADCMULT)
		rate = 625000 / SV_ADCMULT;
	if (rate > 150000000 / SV_ADCMULT)
		rate = 150000000 / SV_ADCMULT;
	/* slight violation of specs, needed for continuous sampling rates */
	for (r = 0; rate < 75000000 / SV_ADCMULT; r += 0x20, rate <<= 1)
		;
	for (xn = 3; xn < 33; xn++)	/* 35 */
		for (xm = 3; xm < 257; xm++) {
			xr = ((SV_REFFREQUENCY / SV_ADCMULT) * xm) / xn;
			if (xr >= rate)
				xd = xr - rate;
			else
				xd = rate - xr;
			if (xd < metric) {
				metric = xd;
				m = xm - 2;
				n = xn - 2;
			}
		}
	*res_r = r;
	*res_m = m;
	*res_n = n;
#if 0
	printk(KERN_DEBUG "metric = %i, xm = %i, xn = %i\n", metric, xm, xn);
printk(KERN_DEBUG "pll: m = 0x%x, r = 0x%x, n = 0x%x\n", reg, m, r, n); #endif } static void snd_sonicvibes_setpll(struct sonicvibes * sonic, unsigned char reg, unsigned int rate) { unsigned long flags; unsigned int r, m, n; snd_sonicvibes_pll(rate, &r, &m, &n); if (sonic != NULL) { spin_lock_irqsave(&sonic->reg_lock, flags); snd_sonicvibes_out1(sonic, reg, m); snd_sonicvibes_out1(sonic, reg + 1, r | n); spin_unlock_irqrestore(&sonic->reg_lock, flags); } } static void snd_sonicvibes_set_adc_rate(struct sonicvibes * sonic, unsigned int rate) { unsigned long flags; unsigned int div; unsigned char clock; div = 48000 / rate; if (div > 8) div = 8; if ((48000 / div) == rate) { /* use the alternate clock */ clock = 0x10; } else { /* use the PLL source */ clock = 0x00; snd_sonicvibes_setpll(sonic, SV_IREG_ADC_PLL, rate); } spin_lock_irqsave(&sonic->reg_lock, flags); snd_sonicvibes_out1(sonic, SV_IREG_ADC_ALT_RATE, (div - 1) << 4); snd_sonicvibes_out1(sonic, SV_IREG_ADC_CLOCK, clock); spin_unlock_irqrestore(&sonic->reg_lock, flags); } static int snd_sonicvibes_hw_constraint_dac_rate(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { unsigned int rate, div, r, m, n; if (hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE)->min == hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE)->max) { rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE)->min; div = 48000 / rate; if (div > 8) div = 8; if ((48000 / div) == rate) { params->rate_num = rate; params->rate_den = 1; } else { snd_sonicvibes_pll(rate, &r, &m, &n); snd_BUG_ON(SV_REFFREQUENCY % 16); snd_BUG_ON(SV_ADCMULT % 512); params->rate_num = (SV_REFFREQUENCY/16) * (n+2) * r; params->rate_den = (SV_ADCMULT/512) * (m+2); } } return 0; } static void snd_sonicvibes_set_dac_rate(struct sonicvibes * sonic, unsigned int rate) { unsigned int div; unsigned long flags; div = (rate * 65536 + SV_FULLRATE / 2) / SV_FULLRATE; if (div > 65535) div = 65535; spin_lock_irqsave(&sonic->reg_lock, flags); 
snd_sonicvibes_out1(sonic, SV_IREG_PCM_RATE_HIGH, div >> 8); snd_sonicvibes_out1(sonic, SV_IREG_PCM_RATE_LOW, div); spin_unlock_irqrestore(&sonic->reg_lock, flags); } static int snd_sonicvibes_trigger(struct sonicvibes * sonic, int what, int cmd) { int result = 0; spin_lock(&sonic->reg_lock); if (cmd == SNDRV_PCM_TRIGGER_START) { if (!(sonic->enable & what)) { sonic->enable |= what; snd_sonicvibes_out1(sonic, SV_IREG_PC_ENABLE, sonic->enable); } } else if (cmd == SNDRV_PCM_TRIGGER_STOP) { if (sonic->enable & what) { sonic->enable &= ~what; snd_sonicvibes_out1(sonic, SV_IREG_PC_ENABLE, sonic->enable); } } else { result = -EINVAL; } spin_unlock(&sonic->reg_lock); return result; } static irqreturn_t snd_sonicvibes_interrupt(int irq, void *dev_id) { struct sonicvibes *sonic = dev_id; unsigned char status; status = inb(SV_REG(sonic, STATUS)); if (!(status & (SV_DMAA_IRQ | SV_DMAC_IRQ | SV_MIDI_IRQ))) return IRQ_NONE; if (status == 0xff) { /* failure */ outb(sonic->irqmask = ~0, SV_REG(sonic, IRQMASK)); snd_printk(KERN_ERR "IRQ failure - interrupts disabled!!\n"); return IRQ_HANDLED; } if (sonic->pcm) { if (status & SV_DMAA_IRQ) snd_pcm_period_elapsed(sonic->playback_substream); if (status & SV_DMAC_IRQ) snd_pcm_period_elapsed(sonic->capture_substream); } if (sonic->rmidi) { if (status & SV_MIDI_IRQ) snd_mpu401_uart_interrupt(irq, sonic->rmidi->private_data); } if (status & SV_UD_IRQ) { unsigned char udreg; int vol, oleft, oright, mleft, mright; spin_lock(&sonic->reg_lock); udreg = snd_sonicvibes_in1(sonic, SV_IREG_UD_BUTTON); vol = udreg & 0x3f; if (!(udreg & 0x40)) vol = -vol; oleft = mleft = snd_sonicvibes_in1(sonic, SV_IREG_LEFT_ANALOG); oright = mright = snd_sonicvibes_in1(sonic, SV_IREG_RIGHT_ANALOG); oleft &= 0x1f; oright &= 0x1f; oleft += vol; if (oleft < 0) oleft = 0; if (oleft > 0x1f) oleft = 0x1f; oright += vol; if (oright < 0) oright = 0; if (oright > 0x1f) oright = 0x1f; if (udreg & 0x80) { mleft ^= 0x80; mright ^= 0x80; } oleft |= mleft & 0x80; oright |= 
mright & 0x80; snd_sonicvibes_out1(sonic, SV_IREG_LEFT_ANALOG, oleft); snd_sonicvibes_out1(sonic, SV_IREG_RIGHT_ANALOG, oright); spin_unlock(&sonic->reg_lock); snd_ctl_notify(sonic->card, SNDRV_CTL_EVENT_MASK_VALUE, &sonic->master_mute->id); snd_ctl_notify(sonic->card, SNDRV_CTL_EVENT_MASK_VALUE, &sonic->master_volume->id); } return IRQ_HANDLED; } /* * PCM part */ static int snd_sonicvibes_playback_trigger(struct snd_pcm_substream *substream, int cmd) { struct sonicvibes *sonic = snd_pcm_substream_chip(substream); return snd_sonicvibes_trigger(sonic, 1, cmd); } static int snd_sonicvibes_capture_trigger(struct snd_pcm_substream *substream, int cmd) { struct sonicvibes *sonic = snd_pcm_substream_chip(substream); return snd_sonicvibes_trigger(sonic, 2, cmd); } static int snd_sonicvibes_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); } static int snd_sonicvibes_hw_free(struct snd_pcm_substream *substream) { return snd_pcm_lib_free_pages(substream); } static int snd_sonicvibes_playback_prepare(struct snd_pcm_substream *substream) { struct sonicvibes *sonic = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; unsigned char fmt = 0; unsigned int size = snd_pcm_lib_buffer_bytes(substream); unsigned int count = snd_pcm_lib_period_bytes(substream); sonic->p_dma_size = size; count--; if (runtime->channels > 1) fmt |= 1; if (snd_pcm_format_width(runtime->format) == 16) fmt |= 2; snd_sonicvibes_setfmt(sonic, ~3, fmt); snd_sonicvibes_set_dac_rate(sonic, runtime->rate); spin_lock_irq(&sonic->reg_lock); snd_sonicvibes_setdmaa(sonic, runtime->dma_addr, size); snd_sonicvibes_out1(sonic, SV_IREG_DMA_A_UPPER, count >> 8); snd_sonicvibes_out1(sonic, SV_IREG_DMA_A_LOWER, count); spin_unlock_irq(&sonic->reg_lock); return 0; } static int snd_sonicvibes_capture_prepare(struct snd_pcm_substream *substream) { struct sonicvibes *sonic = 
snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; unsigned char fmt = 0; unsigned int size = snd_pcm_lib_buffer_bytes(substream); unsigned int count = snd_pcm_lib_period_bytes(substream); sonic->c_dma_size = size; count >>= 1; count--; if (runtime->channels > 1) fmt |= 0x10; if (snd_pcm_format_width(runtime->format) == 16) fmt |= 0x20; snd_sonicvibes_setfmt(sonic, ~0x30, fmt); snd_sonicvibes_set_adc_rate(sonic, runtime->rate); spin_lock_irq(&sonic->reg_lock); snd_sonicvibes_setdmac(sonic, runtime->dma_addr, size); snd_sonicvibes_out1(sonic, SV_IREG_DMA_C_UPPER, count >> 8); snd_sonicvibes_out1(sonic, SV_IREG_DMA_C_LOWER, count); spin_unlock_irq(&sonic->reg_lock); return 0; } static snd_pcm_uframes_t snd_sonicvibes_playback_pointer(struct snd_pcm_substream *substream) { struct sonicvibes *sonic = snd_pcm_substream_chip(substream); size_t ptr; if (!(sonic->enable & 1)) return 0; ptr = sonic->p_dma_size - snd_sonicvibes_getdmaa(sonic); return bytes_to_frames(substream->runtime, ptr); } static snd_pcm_uframes_t snd_sonicvibes_capture_pointer(struct snd_pcm_substream *substream) { struct sonicvibes *sonic = snd_pcm_substream_chip(substream); size_t ptr; if (!(sonic->enable & 2)) return 0; ptr = sonic->c_dma_size - snd_sonicvibes_getdmac(sonic); return bytes_to_frames(substream->runtime, ptr); } static struct snd_pcm_hardware snd_sonicvibes_playback = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID), .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 4000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (128*1024), .period_bytes_min = 32, .period_bytes_max = (128*1024), .periods_min = 1, .periods_max = 1024, .fifo_size = 0, }; static struct snd_pcm_hardware snd_sonicvibes_capture = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | 
SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID), .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 4000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (128*1024), .period_bytes_min = 32, .period_bytes_max = (128*1024), .periods_min = 1, .periods_max = 1024, .fifo_size = 0, }; static int snd_sonicvibes_playback_open(struct snd_pcm_substream *substream) { struct sonicvibes *sonic = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; sonic->mode |= SV_MODE_PLAY; sonic->playback_substream = substream; runtime->hw = snd_sonicvibes_playback; snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, snd_sonicvibes_hw_constraint_dac_rate, NULL, SNDRV_PCM_HW_PARAM_RATE, -1); return 0; } static int snd_sonicvibes_capture_open(struct snd_pcm_substream *substream) { struct sonicvibes *sonic = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; sonic->mode |= SV_MODE_CAPTURE; sonic->capture_substream = substream; runtime->hw = snd_sonicvibes_capture; snd_pcm_hw_constraint_ratdens(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &snd_sonicvibes_hw_constraints_adc_clock); return 0; } static int snd_sonicvibes_playback_close(struct snd_pcm_substream *substream) { struct sonicvibes *sonic = snd_pcm_substream_chip(substream); sonic->playback_substream = NULL; sonic->mode &= ~SV_MODE_PLAY; return 0; } static int snd_sonicvibes_capture_close(struct snd_pcm_substream *substream) { struct sonicvibes *sonic = snd_pcm_substream_chip(substream); sonic->capture_substream = NULL; sonic->mode &= ~SV_MODE_CAPTURE; return 0; } static struct snd_pcm_ops snd_sonicvibes_playback_ops = { .open = snd_sonicvibes_playback_open, .close = snd_sonicvibes_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_sonicvibes_hw_params, .hw_free = snd_sonicvibes_hw_free, .prepare = snd_sonicvibes_playback_prepare, 
.trigger = snd_sonicvibes_playback_trigger, .pointer = snd_sonicvibes_playback_pointer, }; static struct snd_pcm_ops snd_sonicvibes_capture_ops = { .open = snd_sonicvibes_capture_open, .close = snd_sonicvibes_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_sonicvibes_hw_params, .hw_free = snd_sonicvibes_hw_free, .prepare = snd_sonicvibes_capture_prepare, .trigger = snd_sonicvibes_capture_trigger, .pointer = snd_sonicvibes_capture_pointer, }; static int __devinit snd_sonicvibes_pcm(struct sonicvibes * sonic, int device, struct snd_pcm ** rpcm) { struct snd_pcm *pcm; int err; if ((err = snd_pcm_new(sonic->card, "s3_86c617", device, 1, 1, &pcm)) < 0) return err; if (snd_BUG_ON(!pcm)) return -EINVAL; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_sonicvibes_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_sonicvibes_capture_ops); pcm->private_data = sonic; pcm->info_flags = 0; strcpy(pcm->name, "S3 SonicVibes"); sonic->pcm = pcm; snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(sonic->pci), 64*1024, 128*1024); if (rpcm) *rpcm = pcm; return 0; } /* * Mixer part */ #define SONICVIBES_MUX(xname, xindex) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \ .info = snd_sonicvibes_info_mux, \ .get = snd_sonicvibes_get_mux, .put = snd_sonicvibes_put_mux } static int snd_sonicvibes_info_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { static char *texts[7] = { "CD", "PCM", "Aux1", "Line", "Aux0", "Mic", "Mix" }; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 2; uinfo->value.enumerated.items = 7; if (uinfo->value.enumerated.item >= 7) uinfo->value.enumerated.item = 6; strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); return 0; } static int snd_sonicvibes_get_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct sonicvibes *sonic = snd_kcontrol_chip(kcontrol); spin_lock_irq(&sonic->reg_lock); 
ucontrol->value.enumerated.item[0] = ((snd_sonicvibes_in1(sonic, SV_IREG_LEFT_ADC) & SV_RECSRC_OUT) >> 5) - 1; ucontrol->value.enumerated.item[1] = ((snd_sonicvibes_in1(sonic, SV_IREG_RIGHT_ADC) & SV_RECSRC_OUT) >> 5) - 1; spin_unlock_irq(&sonic->reg_lock); return 0; } static int snd_sonicvibes_put_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct sonicvibes *sonic = snd_kcontrol_chip(kcontrol); unsigned short left, right, oval1, oval2; int change; if (ucontrol->value.enumerated.item[0] >= 7 || ucontrol->value.enumerated.item[1] >= 7) return -EINVAL; left = (ucontrol->value.enumerated.item[0] + 1) << 5; right = (ucontrol->value.enumerated.item[1] + 1) << 5; spin_lock_irq(&sonic->reg_lock); oval1 = snd_sonicvibes_in1(sonic, SV_IREG_LEFT_ADC); oval2 = snd_sonicvibes_in1(sonic, SV_IREG_RIGHT_ADC); left = (oval1 & ~SV_RECSRC_OUT) | left; right = (oval2 & ~SV_RECSRC_OUT) | right; change = left != oval1 || right != oval2; snd_sonicvibes_out1(sonic, SV_IREG_LEFT_ADC, left); snd_sonicvibes_out1(sonic, SV_IREG_RIGHT_ADC, right); spin_unlock_irq(&sonic->reg_lock); return change; } #define SONICVIBES_SINGLE(xname, xindex, reg, shift, mask, invert) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \ .info = snd_sonicvibes_info_single, \ .get = snd_sonicvibes_get_single, .put = snd_sonicvibes_put_single, \ .private_value = reg | (shift << 8) | (mask << 16) | (invert << 24) } static int snd_sonicvibes_info_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { int mask = (kcontrol->private_value >> 16) & 0xff; uinfo->type = mask == 1 ? 
SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = mask; return 0; } static int snd_sonicvibes_get_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct sonicvibes *sonic = snd_kcontrol_chip(kcontrol); int reg = kcontrol->private_value & 0xff; int shift = (kcontrol->private_value >> 8) & 0xff; int mask = (kcontrol->private_value >> 16) & 0xff; int invert = (kcontrol->private_value >> 24) & 0xff; spin_lock_irq(&sonic->reg_lock); ucontrol->value.integer.value[0] = (snd_sonicvibes_in1(sonic, reg)>> shift) & mask; spin_unlock_irq(&sonic->reg_lock); if (invert) ucontrol->value.integer.value[0] = mask - ucontrol->value.integer.value[0]; return 0; } static int snd_sonicvibes_put_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct sonicvibes *sonic = snd_kcontrol_chip(kcontrol); int reg = kcontrol->private_value & 0xff; int shift = (kcontrol->private_value >> 8) & 0xff; int mask = (kcontrol->private_value >> 16) & 0xff; int invert = (kcontrol->private_value >> 24) & 0xff; int change; unsigned short val, oval; val = (ucontrol->value.integer.value[0] & mask); if (invert) val = mask - val; val <<= shift; spin_lock_irq(&sonic->reg_lock); oval = snd_sonicvibes_in1(sonic, reg); val = (oval & ~(mask << shift)) | val; change = val != oval; snd_sonicvibes_out1(sonic, reg, val); spin_unlock_irq(&sonic->reg_lock); return change; } #define SONICVIBES_DOUBLE(xname, xindex, left_reg, right_reg, shift_left, shift_right, mask, invert) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \ .info = snd_sonicvibes_info_double, \ .get = snd_sonicvibes_get_double, .put = snd_sonicvibes_put_double, \ .private_value = left_reg | (right_reg << 8) | (shift_left << 16) | (shift_right << 19) | (mask << 24) | (invert << 22) } static int snd_sonicvibes_info_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { int mask = 
(kcontrol->private_value >> 24) & 0xff; uinfo->type = mask == 1 ? SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = mask; return 0; } static int snd_sonicvibes_get_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct sonicvibes *sonic = snd_kcontrol_chip(kcontrol); int left_reg = kcontrol->private_value & 0xff; int right_reg = (kcontrol->private_value >> 8) & 0xff; int shift_left = (kcontrol->private_value >> 16) & 0x07; int shift_right = (kcontrol->private_value >> 19) & 0x07; int mask = (kcontrol->private_value >> 24) & 0xff; int invert = (kcontrol->private_value >> 22) & 1; spin_lock_irq(&sonic->reg_lock); ucontrol->value.integer.value[0] = (snd_sonicvibes_in1(sonic, left_reg) >> shift_left) & mask; ucontrol->value.integer.value[1] = (snd_sonicvibes_in1(sonic, right_reg) >> shift_right) & mask; spin_unlock_irq(&sonic->reg_lock); if (invert) { ucontrol->value.integer.value[0] = mask - ucontrol->value.integer.value[0]; ucontrol->value.integer.value[1] = mask - ucontrol->value.integer.value[1]; } return 0; } static int snd_sonicvibes_put_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct sonicvibes *sonic = snd_kcontrol_chip(kcontrol); int left_reg = kcontrol->private_value & 0xff; int right_reg = (kcontrol->private_value >> 8) & 0xff; int shift_left = (kcontrol->private_value >> 16) & 0x07; int shift_right = (kcontrol->private_value >> 19) & 0x07; int mask = (kcontrol->private_value >> 24) & 0xff; int invert = (kcontrol->private_value >> 22) & 1; int change; unsigned short val1, val2, oval1, oval2; val1 = ucontrol->value.integer.value[0] & mask; val2 = ucontrol->value.integer.value[1] & mask; if (invert) { val1 = mask - val1; val2 = mask - val2; } val1 <<= shift_left; val2 <<= shift_right; spin_lock_irq(&sonic->reg_lock); oval1 = snd_sonicvibes_in1(sonic, left_reg); oval2 = snd_sonicvibes_in1(sonic, right_reg); val1 = 
(oval1 & ~(mask << shift_left)) | val1; val2 = (oval2 & ~(mask << shift_right)) | val2; change = val1 != oval1 || val2 != oval2; snd_sonicvibes_out1(sonic, left_reg, val1); snd_sonicvibes_out1(sonic, right_reg, val2); spin_unlock_irq(&sonic->reg_lock); return change; } static struct snd_kcontrol_new snd_sonicvibes_controls[] __devinitdata = { SONICVIBES_DOUBLE("Capture Volume", 0, SV_IREG_LEFT_ADC, SV_IREG_RIGHT_ADC, 0, 0, 15, 0), SONICVIBES_DOUBLE("Aux Playback Switch", 0, SV_IREG_LEFT_AUX1, SV_IREG_RIGHT_AUX1, 7, 7, 1, 1), SONICVIBES_DOUBLE("Aux Playback Volume", 0, SV_IREG_LEFT_AUX1, SV_IREG_RIGHT_AUX1, 0, 0, 31, 1), SONICVIBES_DOUBLE("CD Playback Switch", 0, SV_IREG_LEFT_CD, SV_IREG_RIGHT_CD, 7, 7, 1, 1), SONICVIBES_DOUBLE("CD Playback Volume", 0, SV_IREG_LEFT_CD, SV_IREG_RIGHT_CD, 0, 0, 31, 1), SONICVIBES_DOUBLE("Line Playback Switch", 0, SV_IREG_LEFT_LINE, SV_IREG_RIGHT_LINE, 7, 7, 1, 1), SONICVIBES_DOUBLE("Line Playback Volume", 0, SV_IREG_LEFT_LINE, SV_IREG_RIGHT_LINE, 0, 0, 31, 1), SONICVIBES_SINGLE("Mic Playback Switch", 0, SV_IREG_MIC, 7, 1, 1), SONICVIBES_SINGLE("Mic Playback Volume", 0, SV_IREG_MIC, 0, 15, 1), SONICVIBES_SINGLE("Mic Boost", 0, SV_IREG_LEFT_ADC, 4, 1, 0), SONICVIBES_DOUBLE("Synth Playback Switch", 0, SV_IREG_LEFT_SYNTH, SV_IREG_RIGHT_SYNTH, 7, 7, 1, 1), SONICVIBES_DOUBLE("Synth Playback Volume", 0, SV_IREG_LEFT_SYNTH, SV_IREG_RIGHT_SYNTH, 0, 0, 31, 1), SONICVIBES_DOUBLE("Aux Playback Switch", 1, SV_IREG_LEFT_AUX2, SV_IREG_RIGHT_AUX2, 7, 7, 1, 1), SONICVIBES_DOUBLE("Aux Playback Volume", 1, SV_IREG_LEFT_AUX2, SV_IREG_RIGHT_AUX2, 0, 0, 31, 1), SONICVIBES_DOUBLE("Master Playback Switch", 0, SV_IREG_LEFT_ANALOG, SV_IREG_RIGHT_ANALOG, 7, 7, 1, 1), SONICVIBES_DOUBLE("Master Playback Volume", 0, SV_IREG_LEFT_ANALOG, SV_IREG_RIGHT_ANALOG, 0, 0, 31, 1), SONICVIBES_DOUBLE("PCM Playback Switch", 0, SV_IREG_LEFT_PCM, SV_IREG_RIGHT_PCM, 7, 7, 1, 1), SONICVIBES_DOUBLE("PCM Playback Volume", 0, SV_IREG_LEFT_PCM, SV_IREG_RIGHT_PCM, 0, 0, 63, 1), 
SONICVIBES_SINGLE("Loopback Capture Switch", 0, SV_IREG_ADC_OUTPUT_CTRL, 0, 1, 0),
SONICVIBES_SINGLE("Loopback Capture Volume", 0, SV_IREG_ADC_OUTPUT_CTRL, 2, 63, 1),
SONICVIBES_MUX("Capture Source", 0)
};

/*
 * private_free callback for the two "master" kcontrols (indices 0 and 1 of
 * snd_sonicvibes_controls): clear the cached pointers on the chip struct so
 * nothing dereferences the kcontrols after the control core frees them.
 */
static void snd_sonicvibes_master_free(struct snd_kcontrol *kcontrol)
{
	struct sonicvibes *sonic = snd_kcontrol_chip(kcontrol);
	sonic->master_mute = NULL;
	sonic->master_volume = NULL;
}

/*
 * Build the mixer: name the card's mixer and register every control in
 * snd_sonicvibes_controls.  Controls 0 and 1 (master mute/volume) get the
 * private_free hook above.  Returns 0 or a negative error from snd_ctl_add().
 */
static int __devinit snd_sonicvibes_mixer(struct sonicvibes * sonic)
{
	struct snd_card *card;
	struct snd_kcontrol *kctl;
	unsigned int idx;
	int err;

	if (snd_BUG_ON(!sonic || !sonic->card))
		return -EINVAL;
	card = sonic->card;
	strcpy(card->mixername, "S3 SonicVibes");
	for (idx = 0; idx < ARRAY_SIZE(snd_sonicvibes_controls); idx++) {
		if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_sonicvibes_controls[idx], sonic))) < 0)
			return err;
		switch (idx) {
		case 0:
		case 1:
			/* master controls cache pointers that must be
			 * invalidated on free */
			kctl->private_free = snd_sonicvibes_master_free;
			break;
		}
	}
	return 0;
}

/*
 * /proc interface
 */

/*
 * Dump the cached copies of the SRS 3D / wavetable / MPU-401 routing
 * registers in human-readable form.  Only shadow state kept in *sonic is
 * read; no hardware access happens here.
 */
static void snd_sonicvibes_proc_read(struct snd_info_entry *entry,
				     struct snd_info_buffer *buffer)
{
	struct sonicvibes *sonic = entry->private_data;
	unsigned char tmp;

	tmp = sonic->srs_space & 0x0f;
	snd_iprintf(buffer, "SRS 3D : %s\n",
		    sonic->srs_space & 0x80 ? "off" : "on");
	snd_iprintf(buffer, "SRS Space : %s\n",
		    tmp == 0x00 ? "100%" : tmp == 0x01 ? "75%" :
		    tmp == 0x02 ? "50%" : tmp == 0x03 ? "25%" : "0%");
	tmp = sonic->srs_center & 0x0f;
	snd_iprintf(buffer, "SRS Center : %s\n",
		    tmp == 0x00 ? "100%" : tmp == 0x01 ? "75%" :
		    tmp == 0x02 ? "50%" : tmp == 0x03 ? "25%" : "0%");
	tmp = sonic->wave_source & 0x03;
	snd_iprintf(buffer, "WaveTable Source : %s\n",
		    tmp == 0x00 ? "on-board ROM" :
		    tmp == 0x01 ? "PCI bus" : "on-board ROM + PCI bus");
	tmp = sonic->mpu_switch;
	snd_iprintf(buffer, "Onboard synth : %s\n", tmp & 0x01 ? "on" : "off");
	snd_iprintf(buffer, "Ext. Rx to synth : %s\n", tmp & 0x02 ? "on" : "off");
	snd_iprintf(buffer, "MIDI to ext. Tx : %s\n", tmp & 0x04 ? "on" : "off");
}

/* Register the "sonicvibes" proc entry; failure is silently ignored
 * (the entry is optional). */
static void __devinit snd_sonicvibes_proc_init(struct sonicvibes * sonic)
{
	struct snd_info_entry *entry;

	if (! snd_card_proc_new(sonic->card, "sonicvibes", &entry))
		snd_info_set_text_ops(entry, sonic, snd_sonicvibes_proc_read);
}

/*
 * Game port
 */

#ifdef SUPPORT_JOYSTICK
static struct snd_kcontrol_new snd_sonicvibes_game_control __devinitdata =
SONICVIBES_SINGLE("Joystick Speed", 0, SV_IREG_GAME_PORT, 1, 15, 0);

/*
 * Allocate and register the gameport at the chip's game-port I/O range and
 * add the "Joystick Speed" control.  Returns 0 or -ENOMEM; the kcontrol
 * registration result is deliberately not checked (best effort).
 */
static int __devinit snd_sonicvibes_create_gameport(struct sonicvibes *sonic)
{
	struct gameport *gp;

	sonic->gameport = gp = gameport_allocate_port();
	if (!gp) {
		printk(KERN_ERR "sonicvibes: cannot allocate memory for gameport\n");
		return -ENOMEM;
	}

	gameport_set_name(gp, "SonicVibes Gameport");
	gameport_set_phys(gp, "pci%s/gameport0", pci_name(sonic->pci));
	gameport_set_dev_parent(gp, &sonic->pci->dev);
	gp->io = sonic->game_port;

	gameport_register_port(gp);

	snd_ctl_add(sonic->card, snd_ctl_new1(&snd_sonicvibes_game_control, sonic));

	return 0;
}

/* Unregister the gameport if one was created; safe to call repeatedly. */
static void snd_sonicvibes_free_gameport(struct sonicvibes *sonic)
{
	if (sonic->gameport) {
		gameport_unregister_port(sonic->gameport);
		sonic->gameport = NULL;
	}
}
#else
/* Stubs when joystick support is compiled out. */
static inline int snd_sonicvibes_create_gameport(struct sonicvibes *sonic) { return -ENOSYS; }
static inline void snd_sonicvibes_free_gameport(struct sonicvibes *sonic) { }
#endif

/*
 * Tear down everything snd_sonicvibes_create() acquired, in reverse order:
 * gameport, saved DDMA base addresses restored to PCI config space, IRQ,
 * DDMA I/O regions, PCI regions/device, and finally the chip struct itself.
 * Safe to call on a partially-constructed *sonic (irq == -1, NULL resources).
 */
static int snd_sonicvibes_free(struct sonicvibes *sonic)
{
	snd_sonicvibes_free_gameport(sonic);
	/* restore the BIOS-programmed DDMA base addresses */
	pci_write_config_dword(sonic->pci, 0x40, sonic->dmaa_port);
	pci_write_config_dword(sonic->pci, 0x48, sonic->dmac_port);
	if (sonic->irq >= 0)
		free_irq(sonic->irq, sonic);
	release_and_free_resource(sonic->res_dmaa);
	release_and_free_resource(sonic->res_dmac);
	pci_release_regions(sonic->pci);
	pci_disable_device(sonic->pci);
	kfree(sonic);
	return 0;
}

/* snd_device dev_free hook: forwards to snd_sonicvibes_free(). */
static int snd_sonicvibes_dev_free(struct snd_device *device)
{
	struct sonicvibes *sonic = device->device_data;
	return snd_sonicvibes_free(sonic);
}

/*
 * Probe-time constructor for the chip: enables the PCI device, restricts
 * DMA to 24 bits, claims the five I/O BARs and the shared IRQ, programs the
 * two DDMA channel windows (allocating them from the module-level `dmaio`
 * pool when the BIOS left them at 0 -- NOTE(review): `dmaio` is a module
 * parameter declared outside this chunk; confirm at the top of the file),
 * then resets the chip and writes the documented power-on defaults to the
 * mixer/rate registers.  On success *rsonic holds the new chip object,
 * registered as an SNDRV_DEV_LOWLEVEL snd_device so the card frees it.
 * Every error path unwinds exactly what was acquired so far.
 */
static int __devinit snd_sonicvibes_create(struct snd_card *card,
					   struct pci_dev *pci,
					   int reverb,
					   int mge,
					   struct sonicvibes ** rsonic)
{
	struct sonicvibes *sonic;
	unsigned int dmaa, dmac;
	int err;
	static struct snd_device_ops ops = {
		.dev_free = snd_sonicvibes_dev_free,
	};

	*rsonic = NULL;
	/* enable PCI device */
	if ((err = pci_enable_device(pci)) < 0)
		return err;
	/* check, if we can restrict PCI DMA transfers to 24 bits */
	if (pci_set_dma_mask(pci, DMA_BIT_MASK(24)) < 0 ||
	    pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(24)) < 0) {
		snd_printk(KERN_ERR "architecture does not support 24bit PCI busmaster DMA\n");
		pci_disable_device(pci);
		return -ENXIO;
	}

	sonic = kzalloc(sizeof(*sonic), GFP_KERNEL);
	if (sonic == NULL) {
		pci_disable_device(pci);
		return -ENOMEM;
	}
	spin_lock_init(&sonic->reg_lock);
	sonic->card = card;
	sonic->pci = pci;
	sonic->irq = -1;	/* marker: no IRQ claimed yet (see free path) */

	if ((err = pci_request_regions(pci, "S3 SonicVibes")) < 0) {
		kfree(sonic);
		pci_disable_device(pci);
		return err;
	}
	/* the five I/O BARs: SB-compat, enhanced, synth (OPL3), MPU-401, game */
	sonic->sb_port = pci_resource_start(pci, 0);
	sonic->enh_port = pci_resource_start(pci, 1);
	sonic->synth_port = pci_resource_start(pci, 2);
	sonic->midi_port = pci_resource_start(pci, 3);
	sonic->game_port = pci_resource_start(pci, 4);

	if (request_irq(pci->irq, snd_sonicvibes_interrupt, IRQF_SHARED,
			"S3 SonicVibes", sonic)) {
		snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq);
		snd_sonicvibes_free(sonic);
		return -EBUSY;
	}
	sonic->irq = pci->irq;

	/* DDMA channel windows live in PCI config regs 0x40 (A) / 0x48 (C) */
	pci_read_config_dword(pci, 0x40, &dmaa);
	pci_read_config_dword(pci, 0x48, &dmac);
	dmaio &= ~0x0f;
	dmaa &= ~0x0f;
	dmac &= ~0x0f;
	if (!dmaa) {
		/* BIOS left channel A unassigned -- carve 16 bytes from dmaio */
		dmaa = dmaio;
		dmaio += 0x10;
		snd_printk(KERN_INFO "BIOS did not allocate DDMA channel A i/o, allocated at 0x%x\n", dmaa);
	}
	if (!dmac) {
		dmac = dmaio;
		dmaio += 0x10;
		snd_printk(KERN_INFO "BIOS did not allocate DDMA channel C i/o, allocated at 0x%x\n", dmac);
	}
	pci_write_config_dword(pci, 0x40, dmaa);
	pci_write_config_dword(pci, 0x48, dmac);

	if ((sonic->res_dmaa = request_region(dmaa, 0x10, "S3 SonicVibes DDMA-A")) == NULL) {
		snd_sonicvibes_free(sonic);
		snd_printk(KERN_ERR "unable to grab DDMA-A port at 0x%x-0x%x\n", dmaa, dmaa + 0x10 - 1);
		return -EBUSY;
	}
	if ((sonic->res_dmac = request_region(dmac, 0x10, "S3 SonicVibes DDMA-C")) == NULL) {
		snd_sonicvibes_free(sonic);
		snd_printk(KERN_ERR "unable to grab DDMA-C port at 0x%x-0x%x\n", dmac, dmac + 0x10 - 1);
		return -EBUSY;
	}

	/* remember the programmed bases so snd_sonicvibes_free() can restore
	 * them, then enable both channels */
	pci_read_config_dword(pci, 0x40, &sonic->dmaa_port);
	pci_read_config_dword(pci, 0x48, &sonic->dmac_port);
	sonic->dmaa_port &= ~0x0f;
	sonic->dmac_port &= ~0x0f;
	pci_write_config_dword(pci, 0x40, sonic->dmaa_port | 9);	/* enable + enhanced */
	pci_write_config_dword(pci, 0x48, sonic->dmac_port | 9);	/* enable */
	/* ok.. initialize S3 SonicVibes chip */
	outb(SV_RESET, SV_REG(sonic, CONTROL));		/* reset chip */
	udelay(100);
	outb(0, SV_REG(sonic, CONTROL));		/* release reset */
	udelay(100);
	outb(SV_ENHANCED | SV_INTA | (reverb ? SV_REVERB : 0), SV_REG(sonic, CONTROL));
	inb(SV_REG(sonic, STATUS));			/* clear IRQs */
#if 1
	snd_sonicvibes_out(sonic, SV_IREG_DRIVE_CTRL, 0);	/* drive current 16mA */
#else
	snd_sonicvibes_out(sonic, SV_IREG_DRIVE_CTRL, 0x40);	/* drive current 8mA */
#endif
	snd_sonicvibes_out(sonic, SV_IREG_PC_ENABLE, sonic->enable = 0);	/* disable playback & capture */
	outb(sonic->irqmask = ~(SV_DMAA_MASK | SV_DMAC_MASK | SV_UD_MASK), SV_REG(sonic, IRQMASK));
	inb(SV_REG(sonic, STATUS));			/* clear IRQs */
	snd_sonicvibes_out(sonic, SV_IREG_ADC_CLOCK, 0);	/* use PLL as clock source */
	snd_sonicvibes_out(sonic, SV_IREG_ANALOG_POWER, 0);	/* power up analog parts */
	snd_sonicvibes_out(sonic, SV_IREG_DIGITAL_POWER, 0);	/* power up digital parts */
	snd_sonicvibes_setpll(sonic, SV_IREG_ADC_PLL, 8000);
	snd_sonicvibes_out(sonic, SV_IREG_SRS_SPACE, sonic->srs_space = 0x80);	/* SRS space off */
	snd_sonicvibes_out(sonic, SV_IREG_SRS_CENTER, sonic->srs_center = 0x00);/* SRS center off */
	snd_sonicvibes_out(sonic, SV_IREG_MPU401, sonic->mpu_switch = 0x05);	/* MPU-401 switch */
	snd_sonicvibes_out(sonic, SV_IREG_WAVE_SOURCE, sonic->wave_source = 0x00);	/* onboard ROM */
	/* default PCM rate: 8000 Hz, expressed as a 16-bit fraction of the
	 * full rate */
	snd_sonicvibes_out(sonic, SV_IREG_PCM_RATE_LOW, (8000 * 65536 / SV_FULLRATE) & 0xff);
	snd_sonicvibes_out(sonic, SV_IREG_PCM_RATE_HIGH, ((8000 * 65536 / SV_FULLRATE) >> 8) & 0xff);
	/* mixer register power-on defaults; mge selects mic gain enable on
	 * the left ADC path */
	snd_sonicvibes_out(sonic, SV_IREG_LEFT_ADC, mge ? 0xd0 : 0xc0);
	snd_sonicvibes_out(sonic, SV_IREG_RIGHT_ADC, 0xc0);
	snd_sonicvibes_out(sonic, SV_IREG_LEFT_AUX1, 0x9f);
	snd_sonicvibes_out(sonic, SV_IREG_RIGHT_AUX1, 0x9f);
	snd_sonicvibes_out(sonic, SV_IREG_LEFT_CD, 0x9f);
	snd_sonicvibes_out(sonic, SV_IREG_RIGHT_CD, 0x9f);
	snd_sonicvibes_out(sonic, SV_IREG_LEFT_LINE, 0x9f);
	snd_sonicvibes_out(sonic, SV_IREG_RIGHT_LINE, 0x9f);
	snd_sonicvibes_out(sonic, SV_IREG_MIC, 0x8f);
	snd_sonicvibes_out(sonic, SV_IREG_LEFT_SYNTH, 0x9f);
	snd_sonicvibes_out(sonic, SV_IREG_RIGHT_SYNTH, 0x9f);
	snd_sonicvibes_out(sonic, SV_IREG_LEFT_AUX2, 0x9f);
	snd_sonicvibes_out(sonic, SV_IREG_RIGHT_AUX2, 0x9f);
	snd_sonicvibes_out(sonic, SV_IREG_LEFT_ANALOG, 0x9f);
	snd_sonicvibes_out(sonic, SV_IREG_RIGHT_ANALOG, 0x9f);
	snd_sonicvibes_out(sonic, SV_IREG_LEFT_PCM, 0xbf);
	snd_sonicvibes_out(sonic, SV_IREG_RIGHT_PCM, 0xbf);
	snd_sonicvibes_out(sonic, SV_IREG_ADC_OUTPUT_CTRL, 0xfc);
#if 0
	snd_sonicvibes_debug(sonic);
#endif
	sonic->revision = snd_sonicvibes_in(sonic, SV_IREG_REVISION);

	if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, sonic, &ops)) < 0) {
		snd_sonicvibes_free(sonic);
		return err;
	}

	snd_sonicvibes_proc_init(sonic);

	snd_card_set_dev(card, &pci->dev);

	*rsonic = sonic;
	return 0;
}

/*
 * MIDI section
 */

static struct snd_kcontrol_new snd_sonicvibes_midi_controls[] __devinitdata = {
SONICVIBES_SINGLE("SonicVibes Wave Source RAM", 0, SV_IREG_WAVE_SOURCE, 0, 1, 0),
SONICVIBES_SINGLE("SonicVibes Wave Source RAM+ROM", 0, SV_IREG_WAVE_SOURCE, 1, 1, 0),
SONICVIBES_SINGLE("SonicVibes Onboard Synth", 0, SV_IREG_MPU401, 0, 1, 0),
SONICVIBES_SINGLE("SonicVibes External Rx to Synth", 0, SV_IREG_MPU401, 1, 1, 0),
SONICVIBES_SINGLE("SonicVibes External Tx", 0, SV_IREG_MPU401, 2, 1, 0)
};

/* Unmask the MIDI interrupt when the MPU-401 input stream opens. */
static int snd_sonicvibes_midi_input_open(struct snd_mpu401 * mpu)
{
	struct sonicvibes *sonic = mpu->private_data;
	outb(sonic->irqmask &= ~SV_MIDI_MASK, SV_REG(sonic, IRQMASK));
	return 0;
}

/* Re-mask the MIDI interrupt when the MPU-401 input stream closes. */
static void snd_sonicvibes_midi_input_close(struct snd_mpu401 * mpu)
{
	struct sonicvibes *sonic = mpu->private_data;
	outb(sonic->irqmask |= SV_MIDI_MASK, SV_REG(sonic, IRQMASK));
}

/*
 * Hook the chip into the MPU-401 rawmidi device (open/close callbacks) and
 * add the MIDI routing kcontrols.  NOTE(review): `dir` is assigned but
 * never read afterwards -- looks like leftover code; confirm before removing.
 */
static int __devinit snd_sonicvibes_midi(struct sonicvibes * sonic,
					 struct snd_rawmidi *rmidi)
{
	struct snd_mpu401 * mpu = rmidi->private_data;
	struct snd_card *card = sonic->card;
	struct snd_rawmidi_str *dir;
	unsigned int idx;
	int err;

	mpu->private_data = sonic;
	mpu->open_input = snd_sonicvibes_midi_input_open;
	mpu->close_input = snd_sonicvibes_midi_input_close;
	dir = &rmidi->streams[SNDRV_RAWMIDI_STREAM_OUTPUT];
	for (idx = 0; idx < ARRAY_SIZE(snd_sonicvibes_midi_controls); idx++)
		if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_sonicvibes_midi_controls[idx], sonic))) < 0)
			return err;
	return 0;
}

/*
 * PCI probe: create the card, require all five I/O BARs, construct the chip,
 * then attach PCM, mixer, MPU-401 UART, OPL3 (synth_port..synth_port+2) and
 * the gameport, and finally register the card.  `dev` is a static cursor
 * over the module's index[]/id[]/enable[] parameter arrays.  Every error
 * path frees the card, which in turn tears down the chip via dev_free.
 */
static int __devinit snd_sonic_probe(struct pci_dev *pci,
				     const struct pci_device_id *pci_id)
{
	static int dev;
	struct snd_card *card;
	struct sonicvibes *sonic;
	struct snd_rawmidi *midi_uart;
	struct snd_opl3 *opl3;
	int idx, err;

	if (dev >= SNDRV_CARDS)
		return -ENODEV;
	if (!enable[dev]) {
		dev++;
		return -ENOENT;
	}

	err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card);
	if (err < 0)
		return err;
	/* all five BARs must be present I/O resources */
	for (idx = 0; idx < 5; idx++) {
		if (pci_resource_start(pci, idx) == 0 ||
		    !(pci_resource_flags(pci, idx) & IORESOURCE_IO)) {
			snd_card_free(card);
			return -ENODEV;
		}
	}
	if ((err = snd_sonicvibes_create(card, pci,
					 reverb[dev] ? 1 : 0,
					 mge[dev] ? 1 : 0,
					 &sonic)) < 0) {
		snd_card_free(card);
		return err;
	}

	strcpy(card->driver, "SonicVibes");
	strcpy(card->shortname, "S3 SonicVibes");
	sprintf(card->longname, "%s rev %i at 0x%llx, irq %i",
		card->shortname,
		sonic->revision,
		(unsigned long long)pci_resource_start(pci, 1),
		sonic->irq);

	if ((err = snd_sonicvibes_pcm(sonic, 0, NULL)) < 0) {
		snd_card_free(card);
		return err;
	}
	if ((err = snd_sonicvibes_mixer(sonic)) < 0) {
		snd_card_free(card);
		return err;
	}
	if ((err = snd_mpu401_uart_new(card, 0, MPU401_HW_SONICVIBES,
				       sonic->midi_port,
				       MPU401_INFO_INTEGRATED,
				       sonic->irq, 0, &midi_uart)) < 0) {
		snd_card_free(card);
		return err;
	}
	snd_sonicvibes_midi(sonic, midi_uart);
	if ((err = snd_opl3_create(card, sonic->synth_port,
				   sonic->synth_port + 2,
				   OPL3_HW_OPL3_SV, 1, &opl3)) < 0) {
		snd_card_free(card);
		return err;
	}
	if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) {
		snd_card_free(card);
		return err;
	}
	/* gameport is optional; failure here is deliberately ignored */
	snd_sonicvibes_create_gameport(sonic);
	if ((err = snd_card_register(card)) < 0) {
		snd_card_free(card);
		return err;
	}
	pci_set_drvdata(pci, card);
	dev++;
	return 0;
}

/* PCI remove: free the card (which tears down the chip) and clear drvdata. */
static void __devexit snd_sonic_remove(struct pci_dev *pci)
{
	snd_card_free(pci_get_drvdata(pci));
	pci_set_drvdata(pci, NULL);
}

static struct pci_driver driver = {
	.name = "S3 SonicVibes",
	.id_table = snd_sonic_ids,
	.probe = snd_sonic_probe,
	.remove = __devexit_p(snd_sonic_remove),
};

/* Module init/exit: plain pci_register_driver wrappers. */
static int __init alsa_card_sonicvibes_init(void)
{
	return pci_register_driver(&driver);
}

static void __exit alsa_card_sonicvibes_exit(void)
{
	pci_unregister_driver(&driver);
}

module_init(alsa_card_sonicvibes_init)
module_exit(alsa_card_sonicvibes_exit)
HONO/lge-kernel-iproj-mod
sound/pci/sonicvibes.c
C
gpl-2.0
52,997
/* * c 2001 PPC 64 Team, IBM Corp * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * scan-log-data driver for PPC64 Todd Inglett <tinglett@vnet.ibm.com> * * When ppc64 hardware fails the service processor dumps internal state * of the system. After a reboot the operating system can access a dump * of this data using this driver. A dump exists if the device-tree * /chosen/ibm,scan-log-data property exists. * * This driver exports /proc/powerpc/scan-log-dump which can be read. * The driver supports only sequential reads. * * The driver looks at a write to the driver for the single word "reset". * If given, the driver will reset the scanlog so the platform can free it. */ #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/proc_fs.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <asm/rtas.h> #include <asm/prom.h> #define MODULE_VERS "1.0" #define MODULE_NAME "scanlog" /* Status returns from ibm,scan-log-dump */ #define SCANLOG_COMPLETE 0 #define SCANLOG_HWERROR -1 #define SCANLOG_CONTINUE 1 static unsigned int ibm_scan_log_dump; /* RTAS token */ static unsigned int *scanlog_buffer; /* The data buffer */ static ssize_t scanlog_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { unsigned int *data = scanlog_buffer; int status; unsigned long len, off; unsigned int wait_time; if (count > RTAS_DATA_BUF_SIZE) count = RTAS_DATA_BUF_SIZE; if (count < 1024) { /* This is the min supported by this RTAS call. Rather * than do all the buffering we insist the user code handle * larger reads. As long as cp works... 
:) */ printk(KERN_ERR "scanlog: cannot perform a small read (%ld)\n", count); return -EINVAL; } if (!access_ok(VERIFY_WRITE, buf, count)) return -EFAULT; for (;;) { wait_time = 500; /* default wait if no data */ spin_lock(&rtas_data_buf_lock); memcpy(rtas_data_buf, data, RTAS_DATA_BUF_SIZE); status = rtas_call(ibm_scan_log_dump, 2, 1, NULL, (u32) __pa(rtas_data_buf), (u32) count); memcpy(data, rtas_data_buf, RTAS_DATA_BUF_SIZE); spin_unlock(&rtas_data_buf_lock); pr_debug("scanlog: status=%d, data[0]=%x, data[1]=%x, " \ "data[2]=%x\n", status, data[0], data[1], data[2]); switch (status) { case SCANLOG_COMPLETE: pr_debug("scanlog: hit eof\n"); return 0; case SCANLOG_HWERROR: pr_debug("scanlog: hardware error reading data\n"); return -EIO; case SCANLOG_CONTINUE: /* We may or may not have data yet */ len = data[1]; off = data[2]; if (len > 0) { if (copy_to_user(buf, ((char *)data)+off, len)) return -EFAULT; return len; } /* Break to sleep default time */ break; default: /* Assume extended busy */ wait_time = rtas_busy_delay_time(status); if (!wait_time) { printk(KERN_ERR "scanlog: unknown error " \ "from rtas: %d\n", status); return -EIO; } } /* Apparently no data yet. Wait and try again. 
*/ msleep_interruptible(wait_time); } /*NOTREACHED*/ } static ssize_t scanlog_write(struct file * file, const char __user * buf, size_t count, loff_t *ppos) { char stkbuf[20]; int status; if (count > 19) count = 19; if (copy_from_user (stkbuf, buf, count)) { return -EFAULT; } stkbuf[count] = 0; if (buf) { if (strncmp(stkbuf, "reset", 5) == 0) { pr_debug("scanlog: reset scanlog\n"); status = rtas_call(ibm_scan_log_dump, 2, 1, NULL, 0, 0); pr_debug("scanlog: rtas returns %d\n", status); } } return count; } static int scanlog_open(struct inode * inode, struct file * file) { unsigned int *data = scanlog_buffer; if (data[0] != 0) { /* This imperfect test stops a second copy of the * data (or a reset while data is being copied) */ return -EBUSY; } data[0] = 0; /* re-init so we restart the scan */ return 0; } static int scanlog_release(struct inode * inode, struct file * file) { unsigned int *data = scanlog_buffer; data[0] = 0; return 0; } const struct file_operations scanlog_fops = { .owner = THIS_MODULE, .read = scanlog_read, .write = scanlog_write, .open = scanlog_open, .release = scanlog_release, .llseek = noop_llseek, }; static int __init scanlog_init(void) { struct proc_dir_entry *ent; int err = -ENOMEM; ibm_scan_log_dump = rtas_token("ibm,scan-log-dump"); if (ibm_scan_log_dump == RTAS_UNKNOWN_SERVICE) return -ENODEV; /* Ideally we could allocate a buffer < 4G */ scanlog_buffer = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL); if (!scanlog_buffer) goto err; ent = proc_create("powerpc/rtas/scan-log-dump", S_IRUSR, NULL, &scanlog_fops); if (!ent) goto err; return 0; err: kfree(scanlog_buffer); return err; } static void __exit scanlog_cleanup(void) { remove_proc_entry("powerpc/rtas/scan-log-dump", NULL); kfree(scanlog_buffer); } module_init(scanlog_init); module_exit(scanlog_cleanup); MODULE_LICENSE("GPL");
wpandroidios/android_kernel_htc_b3uhl_TWRP
arch/powerpc/platforms/pseries/scanlog.c
C
gpl-2.0
5,214