Record schema: repo_name (string), path (string), copies (string), size (string), content (string), license (string)
googyanas/GoogyMax-S6
drivers/pwm/core.c
2072
19372
/* * Generic pwmlib implementation * * Copyright (C) 2011 Sascha Hauer <s.hauer@pengutronix.de> * Copyright (C) 2011-2012 Avionic Design GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/pwm.h> #include <linux/radix-tree.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #define MAX_PWMS 1024 /* flags in the third cell of the DT PWM specifier */ #define PWM_SPEC_POLARITY (1 << 0) static DEFINE_MUTEX(pwm_lookup_lock); static LIST_HEAD(pwm_lookup_list); static DEFINE_MUTEX(pwm_lock); static LIST_HEAD(pwm_chips); static DECLARE_BITMAP(allocated_pwms, MAX_PWMS); static RADIX_TREE(pwm_tree, GFP_KERNEL); static struct pwm_device *pwm_to_device(unsigned int pwm) { return radix_tree_lookup(&pwm_tree, pwm); } static int alloc_pwms(int pwm, unsigned int count) { unsigned int from = 0; unsigned int start; if (pwm >= MAX_PWMS) return -EINVAL; if (pwm >= 0) from = pwm; start = bitmap_find_next_zero_area(allocated_pwms, MAX_PWMS, from, count, 0); if (pwm >= 0 && start != pwm) return -EEXIST; if (start + count > MAX_PWMS) return -ENOSPC; return start; } static void free_pwms(struct pwm_chip *chip) { unsigned int i; for (i = 0; i < chip->npwm; i++) { struct pwm_device *pwm = &chip->pwms[i]; radix_tree_delete(&pwm_tree, pwm->pwm); } bitmap_clear(allocated_pwms, chip->base, chip->npwm); kfree(chip->pwms); chip->pwms = NULL; } static struct pwm_chip *pwmchip_find_by_name(const char *name) { struct pwm_chip *chip; if (!name) return NULL; mutex_lock(&pwm_lock); list_for_each_entry(chip, &pwm_chips, list) { const char *chip_name = dev_name(chip->dev); if (chip_name && strcmp(chip_name, name) == 0) { mutex_unlock(&pwm_lock); return chip; } } mutex_unlock(&pwm_lock); return NULL; } static int pwm_device_request(struct pwm_device *pwm, const char *label) { int err; if (test_bit(PWMF_REQUESTED, &pwm->flags)) return -EBUSY; if (!try_module_get(pwm->chip->ops->owner)) return -ENODEV; if (pwm->chip->ops->request) { err = pwm->chip->ops->request(pwm->chip, pwm); if (err) { module_put(pwm->chip->ops->owner); return err; } } set_bit(PWMF_REQUESTED, &pwm->flags); pwm->label = label; return 0; } struct pwm_device * of_pwm_xlate_with_flags(struct pwm_chip *pc, const struct of_phandle_args *args) { struct pwm_device *pwm; if (pc->of_pwm_n_cells < 3) return ERR_PTR(-EINVAL); if (args->args[0] >= pc->npwm) return ERR_PTR(-EINVAL); pwm = pwm_request_from_chip(pc, args->args[0], NULL); if (IS_ERR(pwm)) return pwm; pwm_set_period(pwm, args->args[1]); if (args->args[2] & PWM_SPEC_POLARITY) pwm_set_polarity(pwm, PWM_POLARITY_INVERSED); else pwm_set_polarity(pwm, PWM_POLARITY_NORMAL); return pwm; } EXPORT_SYMBOL_GPL(of_pwm_xlate_with_flags); static struct pwm_device * of_pwm_simple_xlate(struct pwm_chip *pc, const struct 
of_phandle_args *args) { struct pwm_device *pwm; if (pc->of_pwm_n_cells < 2) return ERR_PTR(-EINVAL); if (args->args[0] >= pc->npwm) return ERR_PTR(-EINVAL); pwm = pwm_request_from_chip(pc, args->args[0], NULL); if (IS_ERR(pwm)) return pwm; pwm_set_period(pwm, args->args[1]); return pwm; } static void of_pwmchip_add(struct pwm_chip *chip) { if (!chip->dev || !chip->dev->of_node) return; if (!chip->of_xlate) { chip->of_xlate = of_pwm_simple_xlate; chip->of_pwm_n_cells = 2; } of_node_get(chip->dev->of_node); } static void of_pwmchip_remove(struct pwm_chip *chip) { if (chip->dev && chip->dev->of_node) of_node_put(chip->dev->of_node); } /** * pwm_set_chip_data() - set private chip data for a PWM * @pwm: PWM device * @data: pointer to chip-specific data */ int pwm_set_chip_data(struct pwm_device *pwm, void *data) { if (!pwm) return -EINVAL; pwm->chip_data = data; return 0; } EXPORT_SYMBOL_GPL(pwm_set_chip_data); /** * pwm_get_chip_data() - get private chip data for a PWM * @pwm: PWM device */ void *pwm_get_chip_data(struct pwm_device *pwm) { return pwm ? pwm->chip_data : NULL; } EXPORT_SYMBOL_GPL(pwm_get_chip_data); /** * pwmchip_add() - register a new PWM chip * @chip: the PWM chip to add * * Register a new PWM chip. If chip->base < 0 then a dynamically assigned base * will be used. */ int pwmchip_add(struct pwm_chip *chip) { struct pwm_device *pwm; unsigned int i; int ret; if (!chip || !chip->dev || !chip->ops || !chip->ops->config || !chip->ops->enable || !chip->ops->disable) return -EINVAL; mutex_lock(&pwm_lock); ret = alloc_pwms(chip->base, chip->npwm); if (ret < 0) goto out; chip->pwms = kzalloc(chip->npwm * sizeof(*pwm), GFP_KERNEL); if (!chip->pwms) { ret = -ENOMEM; goto out; } chip->base = ret; for (i = 0; i < chip->npwm; i++) { pwm = &chip->pwms[i]; pwm->chip = chip; pwm->pwm = chip->base + i; pwm->hwpwm = i; radix_tree_insert(&pwm_tree, pwm->pwm, pwm); } bitmap_set(allocated_pwms, chip->base, chip->npwm); INIT_LIST_HEAD(&chip->list); list_add(&chip->list, &pwm_chips); ret = 0; if (IS_ENABLED(CONFIG_OF)) of_pwmchip_add(chip); out: mutex_unlock(&pwm_lock); return ret; } EXPORT_SYMBOL_GPL(pwmchip_add); /** * pwmchip_remove() - remove a PWM chip * @chip: the PWM chip to remove * * Removes a PWM chip. This function may return busy if the PWM chip provides * a PWM device that is still requested. */ int pwmchip_remove(struct pwm_chip *chip) { unsigned int i; int ret = 0; mutex_lock(&pwm_lock); for (i = 0; i < chip->npwm; i++) { struct pwm_device *pwm = &chip->pwms[i]; if (test_bit(PWMF_REQUESTED, &pwm->flags)) { ret = -EBUSY; goto out; } } list_del_init(&chip->list); if (IS_ENABLED(CONFIG_OF)) of_pwmchip_remove(chip); free_pwms(chip); out: mutex_unlock(&pwm_lock); return ret; } EXPORT_SYMBOL_GPL(pwmchip_remove); /** * pwm_request() - request a PWM device * @pwm_id: global PWM device index * @label: PWM device label * * This function is deprecated, use pwm_get() instead. 
*/ struct pwm_device *pwm_request(int pwm, const char *label) { struct pwm_device *dev; int err; if (pwm < 0 || pwm >= MAX_PWMS) return ERR_PTR(-EINVAL); mutex_lock(&pwm_lock); dev = pwm_to_device(pwm); if (!dev) { dev = ERR_PTR(-EPROBE_DEFER); goto out; } err = pwm_device_request(dev, label); if (err < 0) dev = ERR_PTR(err); out: mutex_unlock(&pwm_lock); return dev; } EXPORT_SYMBOL_GPL(pwm_request); /** * pwm_request_from_chip() - request a PWM device relative to a PWM chip * @chip: PWM chip * @index: per-chip index of the PWM to request * @label: a literal description string of this PWM * * Returns the PWM at the given index of the given PWM chip. A negative error * code is returned if the index is not valid for the specified PWM chip or * if the PWM device cannot be requested. */ struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, unsigned int index, const char *label) { struct pwm_device *pwm; int err; if (!chip || index >= chip->npwm) return ERR_PTR(-EINVAL); mutex_lock(&pwm_lock); pwm = &chip->pwms[index]; err = pwm_device_request(pwm, label); if (err < 0) pwm = ERR_PTR(err); mutex_unlock(&pwm_lock); return pwm; } EXPORT_SYMBOL_GPL(pwm_request_from_chip); /** * pwm_free() - free a PWM device * @pwm: PWM device * * This function is deprecated, use pwm_put() instead. */ void pwm_free(struct pwm_device *pwm) { pwm_put(pwm); } EXPORT_SYMBOL_GPL(pwm_free); /** * pwm_config() - change a PWM device configuration * @pwm: PWM device * @duty_ns: "on" time (in nanoseconds) * @period_ns: duration (in nanoseconds) of one cycle */ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) { if (!pwm || duty_ns < 0 || period_ns <= 0 || duty_ns > period_ns) return -EINVAL; return pwm->chip->ops->config(pwm->chip, pwm, duty_ns, period_ns); } EXPORT_SYMBOL_GPL(pwm_config); /** * pwm_set_polarity() - configure the polarity of a PWM signal * @pwm: PWM device * @polarity: new polarity of the PWM signal * * Note that the polarity cannot be configured while the PWM device is enabled */ int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity) { if (!pwm || !pwm->chip->ops) return -EINVAL; if (!pwm->chip->ops->set_polarity) return -ENOSYS; if (test_bit(PWMF_ENABLED, &pwm->flags)) return -EBUSY; return pwm->chip->ops->set_polarity(pwm->chip, pwm, polarity); } EXPORT_SYMBOL_GPL(pwm_set_polarity); /** * pwm_enable() - start a PWM output toggling * @pwm: PWM device */ int pwm_enable(struct pwm_device *pwm) { if (pwm && !test_and_set_bit(PWMF_ENABLED, &pwm->flags)) return pwm->chip->ops->enable(pwm->chip, pwm); return pwm ? 0 : -EINVAL; } EXPORT_SYMBOL_GPL(pwm_enable); /** * pwm_disable() - stop a PWM output toggling * @pwm: PWM device */ void pwm_disable(struct pwm_device *pwm) { if (pwm && test_and_clear_bit(PWMF_ENABLED, &pwm->flags)) pwm->chip->ops->disable(pwm->chip, pwm); } EXPORT_SYMBOL_GPL(pwm_disable); static struct pwm_chip *of_node_to_pwmchip(struct device_node *np) { struct pwm_chip *chip; mutex_lock(&pwm_lock); list_for_each_entry(chip, &pwm_chips, list) if (chip->dev && chip->dev->of_node == np) { mutex_unlock(&pwm_lock); return chip; } mutex_unlock(&pwm_lock); return ERR_PTR(-EPROBE_DEFER); } /** * of_pwm_get() - request a PWM via the PWM framework * @np: device node to get the PWM from * @con_id: consumer name * * Returns the PWM device parsed from the phandle and index specified in the * "pwms" property of a device tree node or a negative error-code on failure. * Values parsed from the device tree are stored in the returned PWM device * object. 
* * If con_id is NULL, the first PWM device listed in the "pwms" property will * be requested. Otherwise the "pwm-names" property is used to do a reverse * lookup of the PWM index. This also means that the "pwm-names" property * becomes mandatory for devices that look up the PWM device via the con_id * parameter. */ struct pwm_device *of_pwm_get(struct device_node *np, const char *con_id) { struct pwm_device *pwm = NULL; struct of_phandle_args args; struct pwm_chip *pc; int index = 0; int err; if (con_id) { index = of_property_match_string(np, "pwm-names", con_id); if (index < 0) return ERR_PTR(index); } err = of_parse_phandle_with_args(np, "pwms", "#pwm-cells", index, &args); if (err) { pr_debug("%s(): can't parse \"pwms\" property\n", __func__); return ERR_PTR(err); } pc = of_node_to_pwmchip(args.np); if (IS_ERR(pc)) { pr_debug("%s(): PWM chip not found\n", __func__); pwm = ERR_CAST(pc); goto put; } if (args.args_count != pc->of_pwm_n_cells) { pr_debug("%s: wrong #pwm-cells for %s\n", np->full_name, args.np->full_name); pwm = ERR_PTR(-EINVAL); goto put; } pwm = pc->of_xlate(pc, &args); if (IS_ERR(pwm)) goto put; /* * If a consumer name was not given, try to look it up from the * "pwm-names" property if it exists. Otherwise use the name of * the user device node. */ if (!con_id) { err = of_property_read_string_index(np, "pwm-names", index, &con_id); if (err < 0) con_id = np->name; } pwm->label = con_id; put: of_node_put(args.np); return pwm; } EXPORT_SYMBOL_GPL(of_pwm_get); /** * pwm_add_table() - register PWM device consumers * @table: array of consumers to register * @num: number of consumers in table */ void __init pwm_add_table(struct pwm_lookup *table, size_t num) { mutex_lock(&pwm_lookup_lock); while (num--) { list_add_tail(&table->list, &pwm_lookup_list); table++; } mutex_unlock(&pwm_lookup_lock); } /** * pwm_get() - look up and request a PWM device * @dev: device for PWM consumer * @con_id: consumer name * * Lookup is first attempted using DT. If the device was not instantiated from * a device tree, a PWM chip and a relative index is looked up via a table * supplied by board setup code (see pwm_add_table()). * * Once a PWM chip has been found the specified PWM device will be requested * and is ready to be used. */ struct pwm_device *pwm_get(struct device *dev, const char *con_id) { struct pwm_device *pwm = ERR_PTR(-EPROBE_DEFER); const char *dev_id = dev ? dev_name(dev) : NULL; struct pwm_chip *chip = NULL; unsigned int index = 0; unsigned int best = 0; struct pwm_lookup *p; unsigned int match; /* look up via DT first */ if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node) return of_pwm_get(dev->of_node, con_id); /* * We look up the provider in the static table typically provided by * board setup code. We first try to lookup the consumer device by * name. If the consumer device was passed in as NULL or if no match * was found, we try to find the consumer by directly looking it up * by name. * * If a match is found, the provider PWM chip is looked up by name * and a PWM device is requested using the PWM device per-chip index. * * The lookup algorithm was shamelessly taken from the clock * framework: * * We do slightly fuzzy matching here: * An entry with a NULL ID is assumed to be a wildcard. * If an entry has a device ID, it must match * If an entry has a connection ID, it must match * Then we take the most specific entry - with the following order * of precedence: dev+con > dev only > con only. 
*/ mutex_lock(&pwm_lookup_lock); list_for_each_entry(p, &pwm_lookup_list, list) { match = 0; if (p->dev_id) { if (!dev_id || strcmp(p->dev_id, dev_id)) continue; match += 2; } if (p->con_id) { if (!con_id || strcmp(p->con_id, con_id)) continue; match += 1; } if (match > best) { chip = pwmchip_find_by_name(p->provider); index = p->index; if (match != 3) best = match; else break; } } if (chip) pwm = pwm_request_from_chip(chip, index, con_id ?: dev_id); mutex_unlock(&pwm_lookup_lock); return pwm; } EXPORT_SYMBOL_GPL(pwm_get); /** * pwm_put() - release a PWM device * @pwm: PWM device */ void pwm_put(struct pwm_device *pwm) { if (!pwm) return; mutex_lock(&pwm_lock); if (!test_and_clear_bit(PWMF_REQUESTED, &pwm->flags)) { pr_warn("PWM device already freed\n"); goto out; } if (pwm->chip->ops->free) pwm->chip->ops->free(pwm->chip, pwm); pwm->label = NULL; module_put(pwm->chip->ops->owner); out: mutex_unlock(&pwm_lock); } EXPORT_SYMBOL_GPL(pwm_put); static void devm_pwm_release(struct device *dev, void *res) { pwm_put(*(struct pwm_device **)res); } /** * devm_pwm_get() - resource managed pwm_get() * @dev: device for PWM consumer * @con_id: consumer name * * This function performs like pwm_get() but the acquired PWM device will * automatically be released on driver detach. */ struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id) { struct pwm_device **ptr, *pwm; ptr = devres_alloc(devm_pwm_release, sizeof(**ptr), GFP_KERNEL); if (!ptr) return ERR_PTR(-ENOMEM); pwm = pwm_get(dev, con_id); if (!IS_ERR(pwm)) { *ptr = pwm; devres_add(dev, ptr); } else { devres_free(ptr); } return pwm; } EXPORT_SYMBOL_GPL(devm_pwm_get); /** * devm_of_pwm_get() - resource managed of_pwm_get() * @dev: device for PWM consumer * @np: device node to get the PWM from * @con_id: consumer name * * This function performs like of_pwm_get() but the acquired PWM device will * automatically be released on driver detach. */ struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np, const char *con_id) { struct pwm_device **ptr, *pwm; ptr = devres_alloc(devm_pwm_release, sizeof(**ptr), GFP_KERNEL); if (!ptr) return ERR_PTR(-ENOMEM); pwm = of_pwm_get(np, con_id); if (!IS_ERR(pwm)) { *ptr = pwm; devres_add(dev, ptr); } else { devres_free(ptr); } return pwm; } EXPORT_SYMBOL_GPL(devm_of_pwm_get); static int devm_pwm_match(struct device *dev, void *res, void *data) { struct pwm_device **p = res; if (WARN_ON(!p || !*p)) return 0; return *p == data; } /** * devm_pwm_put() - resource managed pwm_put() * @dev: device for PWM consumer * @pwm: PWM device * * Release a PWM previously allocated using devm_pwm_get(). Calling this * function is usually not needed because devm-allocated resources are * automatically released on driver detach. */ void devm_pwm_put(struct device *dev, struct pwm_device *pwm) { WARN_ON(devres_release(dev, devm_pwm_release, devm_pwm_match, pwm)); } EXPORT_SYMBOL_GPL(devm_pwm_put); /** * pwm_can_sleep() - report whether PWM access will sleep * @pwm: PWM device * * It returns true if accessing the PWM can sleep, false otherwise. 
*/ bool pwm_can_sleep(struct pwm_device *pwm) { return pwm->chip->can_sleep; } EXPORT_SYMBOL_GPL(pwm_can_sleep); #ifdef CONFIG_DEBUG_FS static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s) { unsigned int i; for (i = 0; i < chip->npwm; i++) { struct pwm_device *pwm = &chip->pwms[i]; seq_printf(s, " pwm-%-3d (%-20.20s):", i, pwm->label); if (test_bit(PWMF_REQUESTED, &pwm->flags)) seq_printf(s, " requested"); if (test_bit(PWMF_ENABLED, &pwm->flags)) seq_printf(s, " enabled"); seq_printf(s, "\n"); } } static void *pwm_seq_start(struct seq_file *s, loff_t *pos) { mutex_lock(&pwm_lock); s->private = ""; return seq_list_start(&pwm_chips, *pos); } static void *pwm_seq_next(struct seq_file *s, void *v, loff_t *pos) { s->private = "\n"; return seq_list_next(v, &pwm_chips, pos); } static void pwm_seq_stop(struct seq_file *s, void *v) { mutex_unlock(&pwm_lock); } static int pwm_seq_show(struct seq_file *s, void *v) { struct pwm_chip *chip = list_entry(v, struct pwm_chip, list); seq_printf(s, "%s%s/%s, %d PWM device%s\n", (char *)s->private, chip->dev->bus ? chip->dev->bus->name : "no-bus", dev_name(chip->dev), chip->npwm, (chip->npwm != 1) ? "s" : ""); if (chip->ops->dbg_show) chip->ops->dbg_show(chip, s); else pwm_dbg_show(chip, s); return 0; } static const struct seq_operations pwm_seq_ops = { .start = pwm_seq_start, .next = pwm_seq_next, .stop = pwm_seq_stop, .show = pwm_seq_show, }; static int pwm_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &pwm_seq_ops); } static const struct file_operations pwm_debugfs_ops = { .owner = THIS_MODULE, .open = pwm_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int __init pwm_debugfs_init(void) { debugfs_create_file("pwm", S_IFREG | S_IRUGO, NULL, NULL, &pwm_debugfs_ops); return 0; } subsys_initcall(pwm_debugfs_init); #endif /* CONFIG_DEBUG_FS */
gpl-2.0
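A minimal consumer-side sketch of the API this pwm/core.c exports (pwm_get(), pwm_config(), pwm_enable(), pwm_put()); the driver name and the use of a NULL con_id are hypothetical and error handling is abbreviated:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>

static struct pwm_device *beeper;	/* hypothetical consumer */

static int beeper_probe(struct platform_device *pdev)
{
	int ret;

	/* Resolved via the DT "pwms" property or a pwm_lookup table. */
	beeper = pwm_get(&pdev->dev, NULL);
	if (IS_ERR(beeper))
		return PTR_ERR(beeper);

	/* 1 ms period, 50% duty cycle; both values in nanoseconds. */
	ret = pwm_config(beeper, 500000, 1000000);
	if (!ret)
		ret = pwm_enable(beeper);
	if (ret)
		pwm_put(beeper);
	return ret;
}

static int beeper_remove(struct platform_device *pdev)
{
	pwm_disable(beeper);
	pwm_put(beeper);
	return 0;
}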
haldric/I9300
arch/x86/kernel/module.c
2328
6386
/*  Kernel module help for x86.
    Copyright (C) 2001 Rusty Russell.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/jump_label.h>

#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt...)
#endif

void *module_alloc(unsigned long size)
{
	if (PAGE_ALIGN(size) > MODULES_LEN)
		return NULL;
	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
				GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
				-1, __builtin_return_address(0));
}

/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
{
	vfree(module_region);
}

/* We don't need anything special. */
int module_frob_arch_sections(Elf_Ehdr *hdr,
			      Elf_Shdr *sechdrs,
			      char *secstrings,
			      struct module *mod)
{
	return 0;
}

#ifdef CONFIG_X86_32
int apply_relocate(Elf32_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	unsigned int i;
	Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	uint32_t *location;

	DEBUGP("Applying relocate section %u to %u\n", relsec,
	       sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_386_32:
			/* We add the value into the location given */
			*location += sym->st_value;
			break;
		case R_386_PC32:
			/* Add the value, subtract its position */
			*location += sym->st_value - (uint32_t)location;
			break;
		default:
			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}

int apply_relocate_add(Elf32_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n",
	       me->name);
	return -ENOEXEC;
}
#else /*X86_64*/
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	void *loc;
	u64 val;

	DEBUGP("Applying relocate section %u to %u\n", relsec,
	       sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
		       (int)ELF64_R_TYPE(rel[i].r_info),
		       sym->st_value, rel[i].r_addend, (u64)loc);

		val = sym->st_value + rel[i].r_addend;

		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_X86_64_NONE:
			break;
		case R_X86_64_64:
			*(u64 *)loc = val;
			break;
		case R_X86_64_32:
			*(u32 *)loc = val;
			if (val != *(u32 *)loc)
				goto overflow;
			break;
		case R_X86_64_32S:
			*(s32 *)loc = val;
			if ((s64)val != *(s32 *)loc)
				goto overflow;
			break;
		case R_X86_64_PC32:
			val -= (u64)loc;
			*(u32 *)loc = val;
#if 0
			if ((s64)val != *(s32 *)loc)
				goto overflow;
#endif
			break;
		default:
			printk(KERN_ERR "module %s: Unknown rela relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;

overflow:
	printk(KERN_ERR "overflow in relocation type %d val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), val);
	printk(KERN_ERR "`%s' likely not compiled with -mcmodel=kernel\n",
	       me->name);
	return -ENOEXEC;
}

int apply_relocate(Elf_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	printk(KERN_ERR "non add relocation not supported\n");
	return -ENOSYS;
}
#endif

int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
		*para = NULL;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
		if (!strcmp(".text", secstrings + s->sh_name))
			text = s;
		if (!strcmp(".altinstructions", secstrings + s->sh_name))
			alt = s;
		if (!strcmp(".smp_locks", secstrings + s->sh_name))
			locks = s;
		if (!strcmp(".parainstructions", secstrings + s->sh_name))
			para = s;
	}

	if (alt) {
		/* patch .altinstructions */
		void *aseg = (void *)alt->sh_addr;
		apply_alternatives(aseg, aseg + alt->sh_size);
	}
	if (locks && text) {
		void *lseg = (void *)locks->sh_addr;
		void *tseg = (void *)text->sh_addr;
		alternatives_smp_module_add(me, me->name,
					    lseg, lseg + locks->sh_size,
					    tseg, tseg + text->sh_size);
	}

	if (para) {
		void *pseg = (void *)para->sh_addr;
		apply_paravirt(pseg, pseg + para->sh_size);
	}

	/* make jump label nops */
	jump_label_apply_nops(me);

	return 0;
}

void module_arch_cleanup(struct module *mod)
{
	alternatives_smp_module_del(mod);
}
gpl-2.0
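The two 32-bit relocation kinds handled in apply_relocate() above reduce to simple arithmetic on the patched word. A standalone sketch of that arithmetic; the addresses in main() are made up for the example:

#include <stdint.h>
#include <stdio.h>

/* R_386_32: the word gains the symbol's absolute address. */
static uint32_t reloc_abs32(uint32_t word, uint32_t sym)
{
	return word + sym;
}

/* R_386_PC32: the word gains the symbol's address minus the address of
 * the word itself, yielding a PC-relative displacement. */
static uint32_t reloc_pc32(uint32_t word, uint32_t sym, uint32_t place)
{
	return word + sym - place;
}

int main(void)
{
	/* patch site at 0x1000 with initial addend -4, symbol at 0x2000 */
	printf("abs32: 0x%x\n", (unsigned)reloc_abs32(0, 0x2000u));
	printf("pc32:  0x%x\n",
	       (unsigned)reloc_pc32((uint32_t)-4, 0x2000u, 0x1000u));
	return 0;
}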
omnirom/android_kernel_asus_grouper
arch/arm/plat-s5p/dev-fimc2.c
2584
1042
/* linux/arch/arm/plat-s5p/dev-fimc2.c
 *
 * Copyright (c) 2010 Samsung Electronics
 *
 * Base S5P FIMC2 resource and device definitions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <mach/map.h>

static struct resource s5p_fimc2_resource[] = {
	[0] = {
		.start	= S5P_PA_FIMC2,
		.end	= S5P_PA_FIMC2 + SZ_4K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= IRQ_FIMC2,
		.end	= IRQ_FIMC2,
		.flags	= IORESOURCE_IRQ,
	},
};

static u64 s5p_fimc2_dma_mask = DMA_BIT_MASK(32);

struct platform_device s5p_device_fimc2 = {
	.name		= "s5p-fimc",
	.id		= 2,
	.num_resources	= ARRAY_SIZE(s5p_fimc2_resource),
	.resource	= s5p_fimc2_resource,
	.dev		= {
		.dma_mask		= &s5p_fimc2_dma_mask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
};
gpl-2.0
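The same static-resource pattern, sketched for a hypothetical device; the name, MMIO base, and IRQ number are placeholders:

#include <linux/dma-mapping.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static u64 foo_dma_mask = DMA_BIT_MASK(32);

static struct resource foo_resource[] = {
	[0] = {
		.start	= 0x12000000,		/* hypothetical MMIO base */
		.end	= 0x12000000 + SZ_4K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 42,			/* hypothetical IRQ line */
		.end	= 42,
		.flags	= IORESOURCE_IRQ,
	},
};

struct platform_device foo_device = {
	.name		= "foo",
	.id		= -1,			/* single instance */
	.num_resources	= ARRAY_SIZE(foo_resource),
	.resource	= foo_resource,
	.dev		= {
		.dma_mask		= &foo_dma_mask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
};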
goodhanrry/G9250_goodhanrry_kernel
arch/arm/mach-omap2/am35xx-emac.c
2584
3293
/*
 * Copyright (C) 2011 Ilya Yanok, Emcraft Systems
 *
 * Based on mach-omap2/board-am3517evm.c
 * Copyright (C) 2009 Texas Instruments Incorporated
 * Author: Ranjith Lohithakshan <ranjithl@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
 * whether express or implied; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/err.h>
#include <linux/davinci_emac.h>
#include <asm/system.h>
#include "omap_device.h"
#include "am35xx.h"
#include "control.h"
#include "am35xx-emac.h"

static void am35xx_enable_emac_int(void)
{
	u32 v;

	v = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
	v |= (AM35XX_CPGMAC_C0_RX_PULSE_CLR | AM35XX_CPGMAC_C0_TX_PULSE_CLR |
	      AM35XX_CPGMAC_C0_MISC_PULSE_CLR | AM35XX_CPGMAC_C0_RX_THRESH_CLR);
	omap_ctrl_writel(v, AM35XX_CONTROL_LVL_INTR_CLEAR);
	omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR); /* OCP barrier */
}

static void am35xx_disable_emac_int(void)
{
	u32 v;

	v = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
	v |= (AM35XX_CPGMAC_C0_RX_PULSE_CLR | AM35XX_CPGMAC_C0_TX_PULSE_CLR);
	omap_ctrl_writel(v, AM35XX_CONTROL_LVL_INTR_CLEAR);
	omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR); /* OCP barrier */
}

static struct emac_platform_data am35xx_emac_pdata = {
	.ctrl_reg_offset	= AM35XX_EMAC_CNTRL_OFFSET,
	.ctrl_mod_reg_offset	= AM35XX_EMAC_CNTRL_MOD_OFFSET,
	.ctrl_ram_offset	= AM35XX_EMAC_CNTRL_RAM_OFFSET,
	.ctrl_ram_size		= AM35XX_EMAC_CNTRL_RAM_SIZE,
	.hw_ram_addr		= AM35XX_EMAC_HW_RAM_ADDR,
	.version		= EMAC_VERSION_2,
	.interrupt_enable	= am35xx_enable_emac_int,
	.interrupt_disable	= am35xx_disable_emac_int,
};

static struct mdio_platform_data am35xx_mdio_pdata;

static int __init omap_davinci_emac_dev_init(struct omap_hwmod *oh,
		void *pdata, int pdata_len)
{
	struct platform_device *pdev;

	pdev = omap_device_build(oh->class->name, 0, oh, pdata, pdata_len);
	if (IS_ERR(pdev)) {
		WARN(1, "Can't build omap_device for %s:%s.\n",
		     oh->class->name, oh->name);
		return PTR_ERR(pdev);
	}

	return 0;
}

void __init am35xx_emac_init(unsigned long mdio_bus_freq, u8 rmii_en)
{
	struct omap_hwmod *oh;
	u32 v;
	int ret;

	oh = omap_hwmod_lookup("davinci_mdio");
	if (!oh) {
		pr_err("Could not find davinci_mdio hwmod\n");
		return;
	}

	am35xx_mdio_pdata.bus_freq = mdio_bus_freq;
	ret = omap_davinci_emac_dev_init(oh, &am35xx_mdio_pdata,
					 sizeof(am35xx_mdio_pdata));
	if (ret) {
		pr_err("Could not build davinci_mdio hwmod device\n");
		return;
	}

	oh = omap_hwmod_lookup("davinci_emac");
	if (!oh) {
		pr_err("Could not find davinci_emac hwmod\n");
		return;
	}

	am35xx_emac_pdata.rmii_en = rmii_en;
	ret = omap_davinci_emac_dev_init(oh, &am35xx_emac_pdata,
					 sizeof(am35xx_emac_pdata));
	if (ret) {
		pr_err("Could not build davinci_emac hwmod device\n");
		return;
	}

	v = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
	v &= ~AM35XX_CPGMACSS_SW_RST;
	omap_ctrl_writel(v, AM35XX_CONTROL_IP_SW_RESET);
	omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET); /* OCP barrier */
}
gpl-2.0
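The interrupt enable/disable helpers above share one idiom: read-modify-write a control register, then read it back so the posted write reaches the device before the function returns. A generic sketch of that idiom against a hypothetical ioremap()ed register block:

#include <linux/io.h>
#include <linux/types.h>

static void __iomem *ctrl_base;	/* hypothetical, set up by ioremap() */

static void ctrl_set_bits(unsigned int offset, u32 bits)
{
	u32 v;

	v = readl(ctrl_base + offset);
	v |= bits;
	writel(v, ctrl_base + offset);
	readl(ctrl_base + offset);	/* flush posted write (OCP barrier) */
}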
javelinanddart/kernel_samsung_msm8660
drivers/usb/mon/mon_stat.c
3096
1543
/*
 * The USB Monitor, inspired by Dave Harding's USBMon.
 *
 * This is the 's' or 'stat' reader which debugs usbmon itself.
 * Note that this code blows through locks, so make sure that
 * /dbg/usbmon/0s is well protected from non-root users.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/fs.h>
#include <asm/uaccess.h>

#include "usb_mon.h"

#define STAT_BUF_SIZE 80

struct snap {
	int slen;
	char str[STAT_BUF_SIZE];
};

static int mon_stat_open(struct inode *inode, struct file *file)
{
	struct mon_bus *mbus;
	struct snap *sp;

	if ((sp = kmalloc(sizeof(struct snap), GFP_KERNEL)) == NULL)
		return -ENOMEM;

	mbus = inode->i_private;

	sp->slen = snprintf(sp->str, STAT_BUF_SIZE,
	    "nreaders %d events %u text_lost %u\n",
	    mbus->nreaders, mbus->cnt_events, mbus->cnt_text_lost);

	file->private_data = sp;
	return 0;
}

static ssize_t mon_stat_read(struct file *file, char __user *buf,
				size_t nbytes, loff_t *ppos)
{
	struct snap *sp = file->private_data;

	return simple_read_from_buffer(buf, nbytes, ppos, sp->str, sp->slen);
}

static int mon_stat_release(struct inode *inode, struct file *file)
{
	struct snap *sp = file->private_data;
	file->private_data = NULL;
	kfree(sp);
	return 0;
}

const struct file_operations mon_fops_stat = {
	.owner =	THIS_MODULE,
	.open =		mon_stat_open,
	.llseek =	no_llseek,
	.read =		mon_stat_read,
	/* .write =	mon_stat_write, */
	/* .poll =	mon_stat_poll, */
	/* .unlocked_ioctl = mon_stat_ioctl, */
	.release =	mon_stat_release,
};
gpl-2.0
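The snapshot-at-open pattern above generalizes to any small statistics file: format once in open(), serve reads from the snapshot, free it in release(). A sketch for a hypothetical debugfs counter; my_counter and the file name are illustrative:

#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>

static atomic_t my_counter;	/* hypothetical statistic */

static int stat_open(struct inode *inode, struct file *file)
{
	char *buf = kmalloc(64, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* Snapshot now; later reads all see one consistent value. */
	snprintf(buf, 64, "count %d\n", atomic_read(&my_counter));
	file->private_data = buf;
	return 0;
}

static ssize_t stat_read(struct file *file, char __user *ubuf,
			 size_t nbytes, loff_t *ppos)
{
	char *buf = file->private_data;

	return simple_read_from_buffer(ubuf, nbytes, ppos, buf, strlen(buf));
}

static int stat_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct file_operations stat_fops = {
	.owner		= THIS_MODULE,
	.open		= stat_open,
	.read		= stat_read,
	.llseek		= default_llseek,
	.release	= stat_release,
};

/* In module init: debugfs_create_file("mystat", 0400, NULL, NULL, &stat_fops); */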
TREX-ROM/android_kernel_asus_grouper
drivers/video/q40fb.c
3352
3460
/*
 * linux/drivers/video/q40fb.c -- Q40 frame buffer device
 *
 * Copyright (C) 2001
 *
 *      Richard Zidlicky <rz@linux-m68k.org>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive for
 * more details.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

#include <asm/uaccess.h>
#include <asm/setup.h>
#include <asm/system.h>
#include <asm/q40_master.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <asm/pgtable.h>

#define Q40_PHYS_SCREEN_ADDR 0xFE800000

static struct fb_fix_screeninfo q40fb_fix __devinitdata = {
	.id		= "Q40",
	.smem_len	= 1024*1024,
	.type		= FB_TYPE_PACKED_PIXELS,
	.visual		= FB_VISUAL_TRUECOLOR,
	.line_length	= 1024*2,
	.accel		= FB_ACCEL_NONE,
};

static struct fb_var_screeninfo q40fb_var __devinitdata = {
	.xres		= 1024,
	.yres		= 512,
	.xres_virtual	= 1024,
	.yres_virtual	= 512,
	.bits_per_pixel	= 16,
	.red		= {6, 5, 0},
	.green		= {11, 5, 0},
	.blue		= {0, 6, 0},
	.activate	= FB_ACTIVATE_NOW,
	.height		= 230,
	.width		= 300,
	.vmode		= FB_VMODE_NONINTERLACED,
};

static int q40fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			   unsigned blue, unsigned transp,
			   struct fb_info *info)
{
	/*
	 * Set a single color register. The values supplied have a 16 bit
	 * magnitude.
	 * Return != 0 for invalid regno.
	 */

	if (regno > 255)
		return 1;
	red >>= 11;
	green >>= 11;
	blue >>= 10;

	if (regno < 16) {
		((u32 *)info->pseudo_palette)[regno] = ((red & 31) << 6) |
						       ((green & 31) << 11) |
						       (blue & 63);
	}
	return 0;
}

static struct fb_ops q40fb_ops = {
	.owner		= THIS_MODULE,
	.fb_setcolreg	= q40fb_setcolreg,
	.fb_fillrect	= cfb_fillrect,
	.fb_copyarea	= cfb_copyarea,
	.fb_imageblit	= cfb_imageblit,
};

static int __devinit q40fb_probe(struct platform_device *dev)
{
	struct fb_info *info;

	if (!MACH_IS_Q40)
		return -ENXIO;

	/* mapped in q40/config.c */
	q40fb_fix.smem_start = Q40_PHYS_SCREEN_ADDR;

	info = framebuffer_alloc(sizeof(u32) * 16, &dev->dev);
	if (!info)
		return -ENOMEM;

	info->var = q40fb_var;
	info->fix = q40fb_fix;
	info->fbops = &q40fb_ops;
	info->flags = FBINFO_DEFAULT;  /* not as module for now */
	info->pseudo_palette = info->par;
	info->par = NULL;
	info->screen_base = (char *) q40fb_fix.smem_start;

	if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
		framebuffer_release(info);
		return -ENOMEM;
	}

	master_outb(3, DISPLAY_CONTROL_REG);

	if (register_framebuffer(info) < 0) {
		printk(KERN_ERR "Unable to register Q40 frame buffer\n");
		fb_dealloc_cmap(&info->cmap);
		framebuffer_release(info);
		return -EINVAL;
	}

	printk(KERN_INFO "fb%d: Q40 frame buffer alive and kicking !\n",
	       info->node);
	return 0;
}

static struct platform_driver q40fb_driver = {
	.probe	= q40fb_probe,
	.driver	= {
		.name	= "q40fb",
	},
};

static struct platform_device q40fb_device = {
	.name	= "q40fb",
};

int __init q40fb_init(void)
{
	int ret = 0;

	if (fb_get_options("q40fb", NULL))
		return -ENODEV;

	ret = platform_driver_register(&q40fb_driver);

	if (!ret) {
		ret = platform_device_register(&q40fb_device);
		if (ret)
			platform_driver_unregister(&q40fb_driver);
	}
	return ret;
}

module_init(q40fb_init);
MODULE_LICENSE("GPL");
gpl-2.0
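The packing in q40fb_setcolreg() scales each 16-bit channel down and packs it into the Q40's 16-bit pixel layout (five bits of green in the top bits, five of red below them, six of blue at the bottom). The same math as a standalone helper, with a worked example:

#include <stdint.h>
#include <stdio.h>

/* Pack 16-bit-magnitude R/G/B into a Q40 16-bit pixel, mirroring
 * q40fb_setcolreg(): 5 bits green << 11, 5 bits red << 6, 6 bits blue. */
static uint16_t q40_pack(unsigned red, unsigned green, unsigned blue)
{
	red >>= 11;	/* 16 -> 5 bits */
	green >>= 11;	/* 16 -> 5 bits */
	blue >>= 10;	/* 16 -> 6 bits */
	return ((red & 31) << 6) | ((green & 31) << 11) | (blue & 63);
}

int main(void)
{
	/* full-scale channels pack to all bits set */
	printf("white = %#06x\n", q40_pack(0xffff, 0xffff, 0xffff));
	return 0;
}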
junkie2100/android_kernel_zte_quantum
drivers/target/target_core_file.c
3864
16955
/******************************************************************************* * Filename: target_core_file.c * * This file contains the Storage Engine <-> FILEIO transport specific functions * * Copyright (c) 2005 PyX Technologies, Inc. * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved. * Copyright (c) 2007-2010 Rising Tide Systems * Copyright (c) 2008-2010 Linux-iSCSI.org * * Nicholas A. Bellinger <nab@kernel.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * ******************************************************************************/ #include <linux/string.h> #include <linux/parser.h> #include <linux/timer.h> #include <linux/blkdev.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/module.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <target/target_core_base.h> #include <target/target_core_backend.h> #include "target_core_file.h" static struct se_subsystem_api fileio_template; /* fd_attach_hba(): (Part of se_subsystem_api_t template) * * */ static int fd_attach_hba(struct se_hba *hba, u32 host_id) { struct fd_host *fd_host; fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL); if (!fd_host) { pr_err("Unable to allocate memory for struct fd_host\n"); return -ENOMEM; } fd_host->fd_host_id = host_id; hba->hba_ptr = fd_host; pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" " Target Core Stack %s\n", hba->hba_id, FD_VERSION, TARGET_CORE_MOD_VERSION); pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" " MaxSectors: %u\n", hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS); return 0; } static void fd_detach_hba(struct se_hba *hba) { struct fd_host *fd_host = hba->hba_ptr; pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic" " Target Core\n", hba->hba_id, fd_host->fd_host_id); kfree(fd_host); hba->hba_ptr = NULL; } static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name) { struct fd_dev *fd_dev; struct fd_host *fd_host = hba->hba_ptr; fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL); if (!fd_dev) { pr_err("Unable to allocate memory for struct fd_dev\n"); return NULL; } fd_dev->fd_host = fd_host; pr_debug("FILEIO: Allocated fd_dev for %p\n", name); return fd_dev; } /* fd_create_virtdevice(): (Part of se_subsystem_api_t template) * * */ static struct se_device *fd_create_virtdevice( struct se_hba *hba, struct se_subsystem_dev *se_dev, void *p) { char *dev_p = NULL; struct se_device *dev; struct se_dev_limits dev_limits; struct queue_limits *limits; struct fd_dev *fd_dev = p; struct fd_host *fd_host = hba->hba_ptr; mm_segment_t old_fs; struct file *file; struct inode *inode = NULL; int dev_flags = 0, flags, ret = -EINVAL; memset(&dev_limits, 0, sizeof(struct se_dev_limits)); old_fs = get_fs(); set_fs(get_ds()); dev_p = getname(fd_dev->fd_dev_name); set_fs(old_fs); if (IS_ERR(dev_p)) { pr_err("getname(%s) failed: %lu\n", fd_dev->fd_dev_name, 
IS_ERR(dev_p)); ret = PTR_ERR(dev_p); goto fail; } #if 0 if (di->no_create_file) flags = O_RDWR | O_LARGEFILE; else flags = O_RDWR | O_CREAT | O_LARGEFILE; #else flags = O_RDWR | O_CREAT | O_LARGEFILE; #endif /* flags |= O_DIRECT; */ /* * If fd_buffered_io=1 has not been set explicitly (the default), * use O_SYNC to force FILEIO writes to disk. */ if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO)) flags |= O_SYNC; file = filp_open(dev_p, flags, 0600); if (IS_ERR(file)) { pr_err("filp_open(%s) failed\n", dev_p); ret = PTR_ERR(file); goto fail; } if (!file || !file->f_dentry) { pr_err("filp_open(%s) failed\n", dev_p); goto fail; } fd_dev->fd_file = file; /* * If using a block backend with this struct file, we extract * fd_dev->fd_[block,dev]_size from struct block_device. * * Otherwise, we use the passed fd_size= from configfs */ inode = file->f_mapping->host; if (S_ISBLK(inode->i_mode)) { struct request_queue *q; unsigned long long dev_size; /* * Setup the local scope queue_limits from struct request_queue->limits * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. */ q = bdev_get_queue(inode->i_bdev); limits = &dev_limits.limits; limits->logical_block_size = bdev_logical_block_size(inode->i_bdev); limits->max_hw_sectors = queue_max_hw_sectors(q); limits->max_sectors = queue_max_sectors(q); /* * Determine the number of bytes from i_size_read() minus * one (1) logical sector from underlying struct block_device */ fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev); dev_size = (i_size_read(file->f_mapping->host) - fd_dev->fd_block_size); pr_debug("FILEIO: Using size: %llu bytes from struct" " block_device blocks: %llu logical_block_size: %d\n", dev_size, div_u64(dev_size, fd_dev->fd_block_size), fd_dev->fd_block_size); } else { if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) { pr_err("FILEIO: Missing fd_dev_size=" " parameter, and no backing struct" " block_device\n"); goto fail; } limits = &dev_limits.limits; limits->logical_block_size = FD_BLOCKSIZE; limits->max_hw_sectors = FD_MAX_SECTORS; limits->max_sectors = FD_MAX_SECTORS; fd_dev->fd_block_size = FD_BLOCKSIZE; } dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH; dev = transport_add_device_to_core_hba(hba, &fileio_template, se_dev, dev_flags, fd_dev, &dev_limits, "FILEIO", FD_VERSION); if (!dev) goto fail; fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++; fd_dev->fd_queue_depth = dev->queue_depth; pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s," " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id, fd_dev->fd_dev_name, fd_dev->fd_dev_size); putname(dev_p); return dev; fail: if (fd_dev->fd_file) { filp_close(fd_dev->fd_file, NULL); fd_dev->fd_file = NULL; } putname(dev_p); return ERR_PTR(ret); } /* fd_free_device(): (Part of se_subsystem_api_t template) * * */ static void fd_free_device(void *p) { struct fd_dev *fd_dev = p; if (fd_dev->fd_file) { filp_close(fd_dev->fd_file, NULL); fd_dev->fd_file = NULL; } kfree(fd_dev); } static inline struct fd_request *FILE_REQ(struct se_task *task) { return container_of(task, struct fd_request, fd_task); } static struct se_task * fd_alloc_task(unsigned char *cdb) { struct fd_request *fd_req; fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL); if (!fd_req) { pr_err("Unable to allocate struct fd_request\n"); return NULL; } return &fd_req->fd_task; } static int fd_do_readv(struct se_task *task) { struct fd_request *req = FILE_REQ(task); struct se_device *se_dev = 
req->fd_task.task_se_cmd->se_dev; struct fd_dev *dev = se_dev->dev_ptr; struct file *fd = dev->fd_file; struct scatterlist *sg = task->task_sg; struct iovec *iov; mm_segment_t old_fs; loff_t pos = (task->task_lba * se_dev->se_sub_dev->se_dev_attrib.block_size); int ret = 0, i; iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL); if (!iov) { pr_err("Unable to allocate fd_do_readv iov[]\n"); return -ENOMEM; } for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { iov[i].iov_len = sg->length; iov[i].iov_base = sg_virt(sg); } old_fs = get_fs(); set_fs(get_ds()); ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos); set_fs(old_fs); kfree(iov); /* * Return zeros and GOOD status even if the READ did not return * the expected virt_size for struct file w/o a backing struct * block_device. */ if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) { if (ret < 0 || ret != task->task_size) { pr_err("vfs_readv() returned %d," " expecting %d for S_ISBLK\n", ret, (int)task->task_size); return (ret < 0 ? ret : -EINVAL); } } else { if (ret < 0) { pr_err("vfs_readv() returned %d for non" " S_ISBLK\n", ret); return ret; } } return 1; } static int fd_do_writev(struct se_task *task) { struct fd_request *req = FILE_REQ(task); struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev; struct fd_dev *dev = se_dev->dev_ptr; struct file *fd = dev->fd_file; struct scatterlist *sg = task->task_sg; struct iovec *iov; mm_segment_t old_fs; loff_t pos = (task->task_lba * se_dev->se_sub_dev->se_dev_attrib.block_size); int ret, i = 0; iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL); if (!iov) { pr_err("Unable to allocate fd_do_writev iov[]\n"); return -ENOMEM; } for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { iov[i].iov_len = sg->length; iov[i].iov_base = sg_virt(sg); } old_fs = get_fs(); set_fs(get_ds()); ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos); set_fs(old_fs); kfree(iov); if (ret < 0 || ret != task->task_size) { pr_err("vfs_writev() returned %d\n", ret); return (ret < 0 ? ret : -EINVAL); } return 1; } static void fd_emulate_sync_cache(struct se_task *task) { struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; struct fd_dev *fd_dev = dev->dev_ptr; int immed = (cmd->t_task_cdb[1] & 0x2); loff_t start, end; int ret; /* * If the Immediate bit is set, queue up the GOOD response * for this SYNCHRONIZE_CACHE op */ if (immed) transport_complete_sync_cache(cmd, 1); /* * Determine if we will be flushing the entire device. */ if (cmd->t_task_lba == 0 && cmd->data_length == 0) { start = 0; end = LLONG_MAX; } else { start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size; if (cmd->data_length) end = start + cmd->data_length; else end = LLONG_MAX; } ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); if (ret != 0) pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); if (!immed) transport_complete_sync_cache(cmd, ret == 0); } /* * WRITE Force Unit Access (FUA) emulation on a per struct se_task * LBA range basis.. 
*/ static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task) { struct se_device *dev = cmd->se_dev; struct fd_dev *fd_dev = dev->dev_ptr; loff_t start = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size; loff_t end = start + task->task_size; int ret; pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n", task->task_lba, task->task_size); ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); if (ret != 0) pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); } static int fd_do_task(struct se_task *task) { struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; int ret = 0; /* * Call vectorized fileio functions to map struct scatterlist * physical memory addresses to struct iovec virtual memory. */ if (task->task_data_direction == DMA_FROM_DEVICE) { ret = fd_do_readv(task); } else { ret = fd_do_writev(task); if (ret > 0 && dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 && dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && (cmd->se_cmd_flags & SCF_FUA)) { /* * We might need to be a bit smarter here * and return some sense data to let the initiator * know the FUA WRITE cache sync failed..? */ fd_emulate_write_fua(cmd, task); } } if (ret < 0) { cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; return ret; } if (ret) { task->task_scsi_status = GOOD; transport_complete_task(task, 1); } return 0; } /* fd_free_task(): (Part of se_subsystem_api_t template) * * */ static void fd_free_task(struct se_task *task) { struct fd_request *req = FILE_REQ(task); kfree(req); } enum { Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err }; static match_table_t tokens = { {Opt_fd_dev_name, "fd_dev_name=%s"}, {Opt_fd_dev_size, "fd_dev_size=%s"}, {Opt_fd_buffered_io, "fd_buffered_io=%d"}, {Opt_err, NULL} }; static ssize_t fd_set_configfs_dev_params( struct se_hba *hba, struct se_subsystem_dev *se_dev, const char *page, ssize_t count) { struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; char *orig, *ptr, *arg_p, *opts; substring_t args[MAX_OPT_ARGS]; int ret = 0, arg, token; opts = kstrdup(page, GFP_KERNEL); if (!opts) return -ENOMEM; orig = opts; while ((ptr = strsep(&opts, ",\n")) != NULL) { if (!*ptr) continue; token = match_token(ptr, tokens, args); switch (token) { case Opt_fd_dev_name: arg_p = match_strdup(&args[0]); if (!arg_p) { ret = -ENOMEM; break; } snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME, "%s", arg_p); kfree(arg_p); pr_debug("FILEIO: Referencing Path: %s\n", fd_dev->fd_dev_name); fd_dev->fbd_flags |= FBDF_HAS_PATH; break; case Opt_fd_dev_size: arg_p = match_strdup(&args[0]); if (!arg_p) { ret = -ENOMEM; break; } ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size); kfree(arg_p); if (ret < 0) { pr_err("strict_strtoull() failed for" " fd_dev_size=\n"); goto out; } pr_debug("FILEIO: Referencing Size: %llu" " bytes\n", fd_dev->fd_dev_size); fd_dev->fbd_flags |= FBDF_HAS_SIZE; break; case Opt_fd_buffered_io: match_int(args, &arg); if (arg != 1) { pr_err("bogus fd_buffered_io=%d value\n", arg); ret = -EINVAL; goto out; } pr_debug("FILEIO: Using buffered I/O" " operations for struct fd_dev\n"); fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO; break; default: break; } } out: kfree(orig); return (!ret) ? 
count : ret; } static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev) { struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) { pr_err("Missing fd_dev_name=\n"); return -EINVAL; } return 0; } static ssize_t fd_show_configfs_dev_params( struct se_hba *hba, struct se_subsystem_dev *se_dev, char *b) { struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; ssize_t bl = 0; bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id); bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n", fd_dev->fd_dev_name, fd_dev->fd_dev_size, (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ? "Buffered" : "Synchronous"); return bl; } /* fd_get_device_rev(): (Part of se_subsystem_api_t template) * * */ static u32 fd_get_device_rev(struct se_device *dev) { return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */ } /* fd_get_device_type(): (Part of se_subsystem_api_t template) * * */ static u32 fd_get_device_type(struct se_device *dev) { return TYPE_DISK; } static sector_t fd_get_blocks(struct se_device *dev) { struct fd_dev *fd_dev = dev->dev_ptr; struct file *f = fd_dev->fd_file; struct inode *i = f->f_mapping->host; unsigned long long dev_size; /* * When using a file that references an underlying struct block_device, * ensure dev_size is always based on the current inode size in order * to handle underlying block_device resize operations. */ if (S_ISBLK(i->i_mode)) dev_size = (i_size_read(i) - fd_dev->fd_block_size); else dev_size = fd_dev->fd_dev_size; return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size); } static struct se_subsystem_api fileio_template = { .name = "fileio", .owner = THIS_MODULE, .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, .write_cache_emulated = 1, .fua_write_emulated = 1, .attach_hba = fd_attach_hba, .detach_hba = fd_detach_hba, .allocate_virtdevice = fd_allocate_virtdevice, .create_virtdevice = fd_create_virtdevice, .free_device = fd_free_device, .alloc_task = fd_alloc_task, .do_task = fd_do_task, .do_sync_cache = fd_emulate_sync_cache, .free_task = fd_free_task, .check_configfs_dev_params = fd_check_configfs_dev_params, .set_configfs_dev_params = fd_set_configfs_dev_params, .show_configfs_dev_params = fd_show_configfs_dev_params, .get_device_rev = fd_get_device_rev, .get_device_type = fd_get_device_type, .get_blocks = fd_get_blocks, }; static int __init fileio_module_init(void) { return transport_subsystem_register(&fileio_template); } static void fileio_module_exit(void) { transport_subsystem_release(&fileio_template); } MODULE_DESCRIPTION("TCM FILEIO subsystem plugin"); MODULE_AUTHOR("nab@Linux-iSCSI.org"); MODULE_LICENSE("GPL"); module_init(fileio_module_init); module_exit(fileio_module_exit);
gpl-2.0
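One detail worth pulling out of fd_create_virtdevice() and fd_get_blocks() above: for a block-device backend the usable size is the inode size minus one logical block, and the exported LBA count is that size divided by the configured block size. As plain arithmetic, in a sketch with made-up sizes:

#include <stdint.h>
#include <stdio.h>

/* Usable bytes for a block-device backend: i_size_read() minus one
 * logical block, as computed in fd_create_virtdevice(). */
static uint64_t fd_usable_bytes(uint64_t i_size, uint32_t lb_size)
{
	return i_size - lb_size;
}

/* Exported LBA count; the kernel code uses div_u64() for this. */
static uint64_t fd_lba_count(uint64_t dev_bytes, uint32_t block_size)
{
	return dev_bytes / block_size;
}

int main(void)
{
	uint64_t bytes = fd_usable_bytes(1048576, 512);	/* 1 MiB backing */
	printf("%llu blocks\n",
	       (unsigned long long)fd_lba_count(bytes, 512));	/* 2047 */
	return 0;
}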
ElKowak/android_kernel_motorola_msm8610
drivers/mtd/maps/impa7.c
5144
2968
/*
 * Handle mapping of the NOR flash on implementa A7 boards
 *
 * Copyright 2002 SYSGO Real-Time Solutions GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>

#define WINDOW_ADDR0 0x00000000      /* physical properties of flash */
#define WINDOW_SIZE0 0x00800000
#define WINDOW_ADDR1 0x10000000      /* physical properties of flash */
#define WINDOW_SIZE1 0x00800000
#define NUM_FLASHBANKS 2
#define BUSWIDTH     4

/* can be { "cfi_probe", "jedec_probe", "map_rom", NULL } */
#define PROBETYPES { "jedec_probe", NULL }

#define MSG_PREFIX "impA7:"   /* prefix for our printk()'s  */
#define MTDID      "impa7-%d" /* for mtdparts= partitioning */

static struct mtd_info *impa7_mtd[NUM_FLASHBANKS];

static struct map_info impa7_map[NUM_FLASHBANKS] = {
	{
		.name = "impA7 NOR Flash Bank #0",
		.size = WINDOW_SIZE0,
		.bankwidth = BUSWIDTH,
	},
	{
		.name = "impA7 NOR Flash Bank #1",
		.size = WINDOW_SIZE1,
		.bankwidth = BUSWIDTH,
	},
};

/*
 * MTD partitioning stuff
 */
static struct mtd_partition partitions[] =
{
	{
		.name = "FileSystem",
		.size = 0x800000,
		.offset = 0x00000000
	},
};

static int __init init_impa7(void)
{
	static const char *rom_probe_types[] = PROBETYPES;
	const char **type;
	int i;
	static struct { u_long addr; u_long size; } pt[NUM_FLASHBANKS] = {
		{ WINDOW_ADDR0, WINDOW_SIZE0 },
		{ WINDOW_ADDR1, WINDOW_SIZE1 },
	};
	int devicesfound = 0;

	for (i = 0; i < NUM_FLASHBANKS; i++) {
		printk(KERN_NOTICE MSG_PREFIX "probing 0x%08lx at 0x%08lx\n",
		       pt[i].size, pt[i].addr);

		impa7_map[i].phys = pt[i].addr;
		impa7_map[i].virt = ioremap(pt[i].addr, pt[i].size);
		if (!impa7_map[i].virt) {
			printk(MSG_PREFIX "failed to ioremap\n");
			return -EIO;
		}
		simple_map_init(&impa7_map[i]);

		impa7_mtd[i] = 0;
		type = rom_probe_types;
		for (; !impa7_mtd[i] && *type; type++) {
			impa7_mtd[i] = do_map_probe(*type, &impa7_map[i]);
		}

		if (impa7_mtd[i]) {
			impa7_mtd[i]->owner = THIS_MODULE;
			devicesfound++;
			mtd_device_parse_register(impa7_mtd[i], NULL, NULL,
						  partitions,
						  ARRAY_SIZE(partitions));
		} else
			iounmap((void *)impa7_map[i].virt);
	}
	return devicesfound == 0 ? -ENXIO : 0;
}

static void __exit cleanup_impa7(void)
{
	int i;

	for (i = 0; i < NUM_FLASHBANKS; i++) {
		if (impa7_mtd[i]) {
			mtd_device_unregister(impa7_mtd[i]);
			map_destroy(impa7_mtd[i]);
			iounmap((void *)impa7_map[i].virt);
			impa7_map[i].virt = 0;
		}
	}
}

module_init(init_impa7);
module_exit(cleanup_impa7);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pavel Bartusek <pba@sysgo.de>");
MODULE_DESCRIPTION("MTD map driver for implementa impA7");
gpl-2.0
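The probe loop in init_impa7() is a first-match-wins scan over a NULL-terminated list of probe names. The same shape in a standalone sketch, with a stub standing in for do_map_probe():

#include <stdio.h>
#include <string.h>

/* Stub standing in for do_map_probe(): "recognizes" only jedec_probe. */
static void *probe_stub(const char *type)
{
	return strcmp(type, "jedec_probe") == 0 ? (void *)1 : NULL;
}

int main(void)
{
	static const char *probe_types[] = { "jedec_probe", NULL };
	const char **type;
	void *mtd = NULL;

	/* Try each probe in order until one claims the chip. */
	for (type = probe_types; !mtd && *type; type++)
		mtd = probe_stub(*type);

	printf("probe %s\n", mtd ? "succeeded" : "failed");
	return 0;
}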
jose51197/Infernal_3.4
drivers/mtd/maps/physmap.c
5144
6956
/*
 * Normal mappings of chips in physical memory
 *
 * Copyright (C) 2003 MontaVista Software Inc.
 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
 *
 * 031022 - [jsun] add run-time configure and partition setup
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/concat.h>
#include <linux/io.h>

#define MAX_RESOURCES		4

struct physmap_flash_info {
	struct mtd_info		*mtd[MAX_RESOURCES];
	struct mtd_info		*cmtd;
	struct map_info		map[MAX_RESOURCES];
	spinlock_t		vpp_lock;
	int			vpp_refcnt;
};

static int physmap_flash_remove(struct platform_device *dev)
{
	struct physmap_flash_info *info;
	struct physmap_flash_data *physmap_data;
	int i;

	info = platform_get_drvdata(dev);
	if (info == NULL)
		return 0;
	platform_set_drvdata(dev, NULL);

	physmap_data = dev->dev.platform_data;

	if (info->cmtd) {
		mtd_device_unregister(info->cmtd);
		if (info->cmtd != info->mtd[0])
			mtd_concat_destroy(info->cmtd);
	}

	for (i = 0; i < MAX_RESOURCES; i++) {
		if (info->mtd[i] != NULL)
			map_destroy(info->mtd[i]);
	}

	if (physmap_data->exit)
		physmap_data->exit(dev);

	return 0;
}

static void physmap_set_vpp(struct map_info *map, int state)
{
	struct platform_device *pdev;
	struct physmap_flash_data *physmap_data;
	struct physmap_flash_info *info;
	unsigned long flags;

	pdev = (struct platform_device *)map->map_priv_1;
	physmap_data = pdev->dev.platform_data;

	if (!physmap_data->set_vpp)
		return;

	info = platform_get_drvdata(pdev);

	spin_lock_irqsave(&info->vpp_lock, flags);
	if (state) {
		if (++info->vpp_refcnt == 1)	/* first nested 'on' */
			physmap_data->set_vpp(pdev, 1);
	} else {
		if (--info->vpp_refcnt == 0)	/* last nested 'off' */
			physmap_data->set_vpp(pdev, 0);
	}
	spin_unlock_irqrestore(&info->vpp_lock, flags);
}

static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe",
					 "qinfo_probe", "map_rom", NULL };
static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", "afs",
					  NULL };

static int physmap_flash_probe(struct platform_device *dev)
{
	struct physmap_flash_data *physmap_data;
	struct physmap_flash_info *info;
	const char **probe_type;
	const char **part_types;
	int err = 0;
	int i;
	int devices_found = 0;

	physmap_data = dev->dev.platform_data;
	if (physmap_data == NULL)
		return -ENODEV;

	info = devm_kzalloc(&dev->dev, sizeof(struct physmap_flash_info),
			    GFP_KERNEL);
	if (info == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	if (physmap_data->init) {
		err = physmap_data->init(dev);
		if (err)
			goto err_out;
	}

	platform_set_drvdata(dev, info);

	for (i = 0; i < dev->num_resources; i++) {
		printk(KERN_NOTICE "physmap platform flash device: %.8llx at %.8llx\n",
		       (unsigned long long)resource_size(&dev->resource[i]),
		       (unsigned long long)dev->resource[i].start);

		if (!devm_request_mem_region(&dev->dev,
			dev->resource[i].start,
			resource_size(&dev->resource[i]),
			dev_name(&dev->dev))) {
			dev_err(&dev->dev, "Could not reserve memory region\n");
			err = -ENOMEM;
			goto err_out;
		}

		info->map[i].name = dev_name(&dev->dev);
		info->map[i].phys = dev->resource[i].start;
		info->map[i].size = resource_size(&dev->resource[i]);
		info->map[i].bankwidth = physmap_data->width;
		info->map[i].set_vpp = physmap_set_vpp;
		info->map[i].pfow_base = physmap_data->pfow_base;
		info->map[i].map_priv_1 = (unsigned long)dev;

		info->map[i].virt = devm_ioremap(&dev->dev, info->map[i].phys,
						 info->map[i].size);
		if (info->map[i].virt == NULL) {
			dev_err(&dev->dev, "Failed to ioremap flash region\n");
			err = -EIO;
			goto err_out;
		}

		simple_map_init(&info->map[i]);

		probe_type = rom_probe_types;
		if (physmap_data->probe_type == NULL) {
			for (; info->mtd[i] == NULL && *probe_type != NULL;
			     probe_type++)
				info->mtd[i] = do_map_probe(*probe_type,
							    &info->map[i]);
		} else
			info->mtd[i] = do_map_probe(physmap_data->probe_type,
						    &info->map[i]);

		if (info->mtd[i] == NULL) {
			dev_err(&dev->dev, "map_probe failed\n");
			err = -ENXIO;
			goto err_out;
		} else {
			devices_found++;
		}
		info->mtd[i]->owner = THIS_MODULE;
		info->mtd[i]->dev.parent = &dev->dev;
	}

	if (devices_found == 1) {
		info->cmtd = info->mtd[0];
	} else if (devices_found > 1) {
		/*
		 * We detected multiple devices. Concatenate them together.
		 */
		info->cmtd = mtd_concat_create(info->mtd, devices_found,
					       dev_name(&dev->dev));
		if (info->cmtd == NULL)
			err = -ENXIO;
	}
	if (err)
		goto err_out;

	spin_lock_init(&info->vpp_lock);

	part_types = physmap_data->part_probe_types ? : part_probe_types;

	mtd_device_parse_register(info->cmtd, part_types, NULL,
				  physmap_data->parts,
				  physmap_data->nr_parts);
	return 0;

err_out:
	physmap_flash_remove(dev);
	return err;
}

#ifdef CONFIG_PM
static void physmap_flash_shutdown(struct platform_device *dev)
{
	struct physmap_flash_info *info = platform_get_drvdata(dev);
	int i;

	for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
		if (mtd_suspend(info->mtd[i]) == 0)
			mtd_resume(info->mtd[i]);
}
#else
#define physmap_flash_shutdown NULL
#endif

static struct platform_driver physmap_flash_driver = {
	.probe		= physmap_flash_probe,
	.remove		= physmap_flash_remove,
	.shutdown	= physmap_flash_shutdown,
	.driver		= {
		.name	= "physmap-flash",
		.owner	= THIS_MODULE,
	},
};

#ifdef CONFIG_MTD_PHYSMAP_COMPAT
static struct physmap_flash_data physmap_flash_data = {
	.width		= CONFIG_MTD_PHYSMAP_BANKWIDTH,
};

static struct resource physmap_flash_resource = {
	.start		= CONFIG_MTD_PHYSMAP_START,
	.end		= CONFIG_MTD_PHYSMAP_START + CONFIG_MTD_PHYSMAP_LEN - 1,
	.flags		= IORESOURCE_MEM,
};

static struct platform_device physmap_flash = {
	.name		= "physmap-flash",
	.id		= 0,
	.dev		= {
		.platform_data	= &physmap_flash_data,
	},
	.num_resources	= 1,
	.resource	= &physmap_flash_resource,
};
#endif

static int __init physmap_init(void)
{
	int err;

	err = platform_driver_register(&physmap_flash_driver);
#ifdef CONFIG_MTD_PHYSMAP_COMPAT
	if (err == 0) {
		err = platform_device_register(&physmap_flash);
		if (err)
			platform_driver_unregister(&physmap_flash_driver);
	}
#endif

	return err;
}

static void __exit physmap_exit(void)
{
#ifdef CONFIG_MTD_PHYSMAP_COMPAT
	platform_device_unregister(&physmap_flash);
#endif
	platform_driver_unregister(&physmap_flash_driver);
}

module_init(physmap_init);
module_exit(physmap_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Generic configurable MTD map driver");

/* legacy platform drivers can't hotplug or coldplug */
#ifndef CONFIG_MTD_PHYSMAP_COMPAT
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:physmap-flash");
#endif
gpl-2.0
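For context, a minimal board-file sketch of how a platform typically hands a NOR window to the physmap-flash driver above; it mirrors the driver's own CONFIG_MTD_PHYSMAP_COMPAT block. The base address, bank width and partition layout are invented for illustration only.

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/platform_device.h>

static struct mtd_partition example_parts[] = {
	{ .name = "bootloader", .offset = 0,                  .size = 0x00040000 },
	{ .name = "kernel",     .offset = MTDPART_OFS_APPEND, .size = 0x00400000 },
	{ .name = "rootfs",     .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
};

static struct physmap_flash_data example_flash_data = {
	.width    = 2,				/* 16-bit bank */
	.parts    = example_parts,
	.nr_parts = ARRAY_SIZE(example_parts),
};

static struct resource example_flash_resource = {
	.start = 0x08000000,			/* hypothetical chip-select window */
	.end   = 0x08000000 + 0x02000000 - 1,	/* 32 MiB */
	.flags = IORESOURCE_MEM,
};

static struct platform_device example_flash_device = {
	.name          = "physmap-flash",	/* must match the driver name */
	.id            = 0,
	.dev           = { .platform_data = &example_flash_data },
	.num_resources = 1,
	.resource      = &example_flash_resource,
};

/* board init would call platform_device_register(&example_flash_device) */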
slayher/android_kernel_samsung_hlte
lib/halfmd4.c
7960
2028
#include <linux/kernel.h> #include <linux/export.h> #include <linux/cryptohash.h> /* F, G and H are basic MD4 functions: selection, majority, parity */ #define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z)))) #define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z))) #define H(x, y, z) ((x) ^ (y) ^ (z)) /* * The generic round function. The application is so specific that * we don't bother protecting all the arguments with parens, as is generally * good macro practice, in favor of extra legibility. * Rotation is separate from addition to prevent recomputation */ #define ROUND(f, a, b, c, d, x, s) \ (a += f(b, c, d) + x, a = (a << s) | (a >> (32 - s))) #define K1 0 #define K2 013240474631UL #define K3 015666365641UL /* * Basic cut-down MD4 transform. Returns only 32 bits of result. */ __u32 half_md4_transform(__u32 buf[4], __u32 const in[8]) { __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3]; /* Round 1 */ ROUND(F, a, b, c, d, in[0] + K1, 3); ROUND(F, d, a, b, c, in[1] + K1, 7); ROUND(F, c, d, a, b, in[2] + K1, 11); ROUND(F, b, c, d, a, in[3] + K1, 19); ROUND(F, a, b, c, d, in[4] + K1, 3); ROUND(F, d, a, b, c, in[5] + K1, 7); ROUND(F, c, d, a, b, in[6] + K1, 11); ROUND(F, b, c, d, a, in[7] + K1, 19); /* Round 2 */ ROUND(G, a, b, c, d, in[1] + K2, 3); ROUND(G, d, a, b, c, in[3] + K2, 5); ROUND(G, c, d, a, b, in[5] + K2, 9); ROUND(G, b, c, d, a, in[7] + K2, 13); ROUND(G, a, b, c, d, in[0] + K2, 3); ROUND(G, d, a, b, c, in[2] + K2, 5); ROUND(G, c, d, a, b, in[4] + K2, 9); ROUND(G, b, c, d, a, in[6] + K2, 13); /* Round 3 */ ROUND(H, a, b, c, d, in[3] + K3, 3); ROUND(H, d, a, b, c, in[7] + K3, 9); ROUND(H, c, d, a, b, in[2] + K3, 11); ROUND(H, b, c, d, a, in[6] + K3, 15); ROUND(H, a, b, c, d, in[1] + K3, 3); ROUND(H, d, a, b, c, in[5] + K3, 9); ROUND(H, c, d, a, b, in[0] + K3, 11); ROUND(H, b, c, d, a, in[4] + K3, 15); buf[0] += a; buf[1] += b; buf[2] += c; buf[3] += d; return buf[1]; /* "most hashed" word */ } EXPORT_SYMBOL(half_md4_transform);
gpl-2.0
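A usage sketch for half_md4_transform(): callers keep a four-word state and feed exactly eight 32-bit words per call. The seed below is the standard MD4 initialisation vector, picked purely for illustration; real in-kernel users (e.g. the ext3/4 htree directory hash) seed the state with their own secrets.

#include <linux/cryptohash.h>

static __u32 demo_half_md4(const __u32 block[8])
{
	/* standard MD4 IV; illustrative seed only */
	__u32 state[4] = { 0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476 };

	/* folds the 8-word block into state[] and returns state[1] */
	return half_md4_transform(state, block);
}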
scanno/android_kernel_oneplus_msm8974
arch/arm/mach-s3c2440/dsc.c
9240
1195
/* linux/arch/arm/mach-s3c2440/dsc.c * * Copyright (c) 2004-2005 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * Samsung S3C2440 Drive Strength Control support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/module.h> #include <linux/io.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include <asm/irq.h> #include <mach/regs-gpio.h> #include <mach/regs-dsc.h> #include <plat/cpu.h> #include <plat/s3c244x.h> int s3c2440_set_dsc(unsigned int pin, unsigned int value) { void __iomem *base; unsigned long val; unsigned long flags; unsigned long mask; base = (pin & S3C2440_SELECT_DSC1) ? S3C2440_DSC1 : S3C2440_DSC0; mask = 3 << S3C2440_DSC_GETSHIFT(pin); local_irq_save(flags); val = __raw_readl(base); val &= ~mask; val |= value & mask; __raw_writel(val, base); local_irq_restore(flags); return 0; } EXPORT_SYMBOL(s3c2440_set_dsc);
gpl-2.0
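One subtlety worth noting: s3c2440_set_dsc() masks with "value & mask" but never shifts, so callers must pass the two-bit strength code already aligned to the pin's field. A hedged wrapper sketch, assuming only the S3C2440_DSC_GETSHIFT() macro from <mach/regs-dsc.h> that the driver itself uses:

#include <mach/regs-dsc.h>

/* place a raw 0..3 strength code into the field selected by "pin" */
static inline int example_set_strength(unsigned int pin, unsigned int strength)
{
	return s3c2440_set_dsc(pin, (strength & 3) << S3C2440_DSC_GETSHIFT(pin));
}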
felipesanches/linux-sunxi
arch/mips/wrppmc/time.c
11544
1084
/* * time.c: MIPS CPU Count/Compare timer hookup * * Author: Mark.Zhan, <rongkai.zhan@windriver.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1996, 1997, 2004 by Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 2006, Wind River System Inc. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <asm/gt64120.h> #include <asm/time.h> #define WRPPMC_CPU_CLK_FREQ 40000000 /* 40 MHz */ /* * Set up the kernel clock source. Sets mips_hpt_frequency to the fixed * 40 MHz CPU clock as a side effect; no run-time estimation is done. * * NOTE: We disable all GT64120 timers and use the MIPS processor's internal * Count/Compare timer as the source of the kernel clock tick. */ void __init plat_time_init(void) { /* Disable GT64120 timers */ GT_WRITE(GT_TC_CONTROL_OFS, 0x00); GT_WRITE(GT_TC0_OFS, 0x00); GT_WRITE(GT_TC1_OFS, 0x00); GT_WRITE(GT_TC2_OFS, 0x00); GT_WRITE(GT_TC3_OFS, 0x00); /* Use MIPS compare/count internal timer */ mips_hpt_frequency = WRPPMC_CPU_CLK_FREQ; }
gpl-2.0
nitrogen-os-devices/nitrogen_kernel_lge_hammerhead
arch/cris/arch-v32/mach-fs/io.c
13080
5208
/* * Helper functions for I/O pins. * * Copyright (c) 2004-2007 Axis Communications AB. */ #include <linux/types.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/kernel.h> #include <linux/module.h> #include <asm/io.h> #include <mach/pinmux.h> #include <hwregs/gio_defs.h> #ifndef DEBUG #define DEBUG(x) #endif struct crisv32_ioport crisv32_ioports[] = { { (unsigned long *)REG_ADDR(gio, regi_gio, rw_pa_oe), (unsigned long *)REG_ADDR(gio, regi_gio, rw_pa_dout), (unsigned long *)REG_ADDR(gio, regi_gio, r_pa_din), 8 }, { (unsigned long *)REG_ADDR(gio, regi_gio, rw_pb_oe), (unsigned long *)REG_ADDR(gio, regi_gio, rw_pb_dout), (unsigned long *)REG_ADDR(gio, regi_gio, r_pb_din), 18 }, { (unsigned long *)REG_ADDR(gio, regi_gio, rw_pc_oe), (unsigned long *)REG_ADDR(gio, regi_gio, rw_pc_dout), (unsigned long *)REG_ADDR(gio, regi_gio, r_pc_din), 18 }, { (unsigned long *)REG_ADDR(gio, regi_gio, rw_pd_oe), (unsigned long *)REG_ADDR(gio, regi_gio, rw_pd_dout), (unsigned long *)REG_ADDR(gio, regi_gio, r_pd_din), 18 }, { (unsigned long *)REG_ADDR(gio, regi_gio, rw_pe_oe), (unsigned long *)REG_ADDR(gio, regi_gio, rw_pe_dout), (unsigned long *)REG_ADDR(gio, regi_gio, r_pe_din), 18 } }; #define NBR_OF_PORTS ARRAY_SIZE(crisv32_ioports) struct crisv32_iopin crisv32_led_net0_green; struct crisv32_iopin crisv32_led_net0_red; struct crisv32_iopin crisv32_led_net1_green; struct crisv32_iopin crisv32_led_net1_red; struct crisv32_iopin crisv32_led2_green; struct crisv32_iopin crisv32_led2_red; struct crisv32_iopin crisv32_led3_green; struct crisv32_iopin crisv32_led3_red; /* Dummy port used when green LED and red LED is on the same bit */ static unsigned long io_dummy; static struct crisv32_ioport dummy_port = { &io_dummy, &io_dummy, &io_dummy, 18 }; static struct crisv32_iopin dummy_led = { &dummy_port, 0 }; static int __init crisv32_io_init(void) { int ret = 0; u32 i; /* Locks *should* be dynamically initialized. 
*/ for (i = 0; i < ARRAY_SIZE(crisv32_ioports); i++) spin_lock_init(&crisv32_ioports[i].lock); spin_lock_init(&dummy_port.lock); /* Initialize LEDs */ #if (defined(CONFIG_ETRAX_NBR_LED_GRP_ONE) || defined(CONFIG_ETRAX_NBR_LED_GRP_TWO)) ret += crisv32_io_get_name(&crisv32_led_net0_green, CONFIG_ETRAX_LED_G_NET0); crisv32_io_set_dir(&crisv32_led_net0_green, crisv32_io_dir_out); if (strcmp(CONFIG_ETRAX_LED_G_NET0, CONFIG_ETRAX_LED_R_NET0)) { ret += crisv32_io_get_name(&crisv32_led_net0_red, CONFIG_ETRAX_LED_R_NET0); crisv32_io_set_dir(&crisv32_led_net0_red, crisv32_io_dir_out); } else crisv32_led_net0_red = dummy_led; #endif #ifdef CONFIG_ETRAX_NBR_LED_GRP_TWO ret += crisv32_io_get_name(&crisv32_led_net1_green, CONFIG_ETRAX_LED_G_NET1); crisv32_io_set_dir(&crisv32_led_net1_green, crisv32_io_dir_out); if (strcmp(CONFIG_ETRAX_LED_G_NET1, CONFIG_ETRAX_LED_R_NET1)) { crisv32_io_get_name(&crisv32_led_net1_red, CONFIG_ETRAX_LED_R_NET1); crisv32_io_set_dir(&crisv32_led_net1_red, crisv32_io_dir_out); } else crisv32_led_net1_red = dummy_led; #endif ret += crisv32_io_get_name(&crisv32_led2_green, CONFIG_ETRAX_V32_LED2G); ret += crisv32_io_get_name(&crisv32_led2_red, CONFIG_ETRAX_V32_LED2R); ret += crisv32_io_get_name(&crisv32_led3_green, CONFIG_ETRAX_V32_LED3G); ret += crisv32_io_get_name(&crisv32_led3_red, CONFIG_ETRAX_V32_LED3R); crisv32_io_set_dir(&crisv32_led2_green, crisv32_io_dir_out); crisv32_io_set_dir(&crisv32_led2_red, crisv32_io_dir_out); crisv32_io_set_dir(&crisv32_led3_green, crisv32_io_dir_out); crisv32_io_set_dir(&crisv32_led3_red, crisv32_io_dir_out); return ret; } __initcall(crisv32_io_init); int crisv32_io_get(struct crisv32_iopin *iopin, unsigned int port, unsigned int pin) { if (port >= NBR_OF_PORTS) return -EINVAL; if (pin >= crisv32_ioports[port].pin_count) return -EINVAL; iopin->bit = 1 << pin; iopin->port = &crisv32_ioports[port]; /* Only allocate pinmux gpiopins if port != PORT_A (port 0) */ /* NOTE! crisv32_pinmux_alloc thinks PORT_B is port 0 */ if (port != 0 && crisv32_pinmux_alloc(port - 1, pin, pin, pinmux_gpio)) return -EIO; DEBUG(printk(KERN_DEBUG "crisv32_io_get: Allocated pin %d on port %d\n", pin, port)); return 0; } int crisv32_io_get_name(struct crisv32_iopin *iopin, const char *name) { int port; int pin; if (toupper(*name) == 'P') name++; if (toupper(*name) < 'A' || toupper(*name) > 'E') return -EINVAL; port = toupper(*name) - 'A'; name++; pin = simple_strtoul(name, NULL, 10); if (pin < 0 || pin >= crisv32_ioports[port].pin_count) return -EINVAL; iopin->bit = 1 << pin; iopin->port = &crisv32_ioports[port]; /* Only allocate pinmux gpiopins if port != PORT_A (port 0) */ /* NOTE! crisv32_pinmux_alloc thinks PORT_B is port 0 */ if (port != 0 && crisv32_pinmux_alloc(port - 1, pin, pin, pinmux_gpio)) return -EIO; DEBUG(printk(KERN_DEBUG "crisv32_io_get_name: Allocated pin %d on port %d\n", pin, port)); return 0; } #ifdef CONFIG_PCI /* PCI I/O access stuff */ struct cris_io_operations *cris_iops = NULL; EXPORT_SYMBOL(cris_iops); #endif
gpl-2.0
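A usage sketch for the name-based pin API above: claim a pin such as "pc3" (the leading "p" is optional, as crisv32_io_get_name() shows), configure it as an output and drive it high. crisv32_io_set() is assumed to be the usual arch inline helper; the pin name is illustrative.

static int example_claim_and_drive(void)
{
	struct crisv32_iopin pin;
	int ret;

	ret = crisv32_io_get_name(&pin, "pc3");	/* parses port 'C', pin 3 */
	if (ret)
		return ret;

	crisv32_io_set_dir(&pin, crisv32_io_dir_out);
	crisv32_io_set(&pin, 1);		/* drive high */
	return 0;
}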
athurg/busybox
libbb/restricted_shell.c
25
1981
/* vi: set sw=4 ts=4: */ /* * Copyright 1989 - 1991, Julianne Frances Haugh <jockgrrl@austin.rr.com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of Julianne F. Haugh nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JULIE HAUGH AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL JULIE HAUGH OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "libbb.h" /* Return 1 if SHELL is a restricted shell (one not returned by getusershell), else 0, meaning it is a standard shell. */ int FAST_FUNC restricted_shell(const char *shell) { char *line; int result = 1; setusershell(); while ((line = getusershell())) { if (*line != '#' && strcmp(line, shell) == 0) { result = 0; break; } } endusershell(); return result; }
gpl-2.0
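The same test as a standalone host program, for clarity (assumes a libc where setusershell()/getusershell()/endusershell() are declared in <unistd.h>, as on glibc and the BSDs): a shell is "restricted" exactly when /etc/shells does not list it.

#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int is_restricted(const char *shell)
{
	char *line;
	int result = 1;			/* restricted until proven listed */

	setusershell();
	while ((line = getusershell()) != NULL) {
		if (line[0] != '#' && strcmp(line, shell) == 0) {
			result = 0;	/* found in /etc/shells */
			break;
		}
	}
	endusershell();
	return result;
}

int main(void)
{
	printf("/bin/sh restricted? %d\n", is_restricted("/bin/sh"));
	return 0;
}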
totalspectrum/gcc-propeller
gcc/graphds.c
25
10645
/* Graph representation and manipulation functions. Copyright (C) 2007-2015 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "obstack.h" #include "bitmap.h" #include "vec.h" #include "graphds.h" /* Dumps graph G into F. */ void dump_graph (FILE *f, struct graph *g) { int i; struct graph_edge *e; for (i = 0; i < g->n_vertices; i++) { if (!g->vertices[i].pred && !g->vertices[i].succ) continue; fprintf (f, "%d (%d)\t<-", i, g->vertices[i].component); for (e = g->vertices[i].pred; e; e = e->pred_next) fprintf (f, " %d", e->src); fprintf (f, "\n"); fprintf (f, "\t->"); for (e = g->vertices[i].succ; e; e = e->succ_next) fprintf (f, " %d", e->dest); fprintf (f, "\n"); } } /* Creates a new graph with N_VERTICES vertices. */ struct graph * new_graph (int n_vertices) { struct graph *g = XNEW (struct graph); gcc_obstack_init (&g->ob); g->n_vertices = n_vertices; g->vertices = XOBNEWVEC (&g->ob, struct vertex, n_vertices); memset (g->vertices, 0, sizeof (struct vertex) * n_vertices); return g; } /* Adds an edge from F to T to graph G. The new edge is returned. */ struct graph_edge * add_edge (struct graph *g, int f, int t) { struct graph_edge *e = XOBNEW (&g->ob, struct graph_edge); struct vertex *vf = &g->vertices[f], *vt = &g->vertices[t]; e->src = f; e->dest = t; e->pred_next = vt->pred; vt->pred = e; e->succ_next = vf->succ; vf->succ = e; return e; } /* Moves all the edges incident with U to V. */ void identify_vertices (struct graph *g, int v, int u) { struct vertex *vv = &g->vertices[v]; struct vertex *uu = &g->vertices[u]; struct graph_edge *e, *next; for (e = uu->succ; e; e = next) { next = e->succ_next; e->src = v; e->succ_next = vv->succ; vv->succ = e; } uu->succ = NULL; for (e = uu->pred; e; e = next) { next = e->pred_next; e->dest = v; e->pred_next = vv->pred; vv->pred = e; } uu->pred = NULL; } /* Helper function for graphds_dfs. Returns the source vertex of E, in the direction given by FORWARD. */ static inline int dfs_edge_src (struct graph_edge *e, bool forward) { return forward ? e->src : e->dest; } /* Helper function for graphds_dfs. Returns the destination vertex of E, in the direction given by FORWARD. */ static inline int dfs_edge_dest (struct graph_edge *e, bool forward) { return forward ? e->dest : e->src; } /* Helper function for graphds_dfs. Returns the first edge after E (including E), in the graph direction given by FORWARD, that belongs to SUBGRAPH. */ static inline struct graph_edge * foll_in_subgraph (struct graph_edge *e, bool forward, bitmap subgraph) { int d; if (!subgraph) return e; while (e) { d = dfs_edge_dest (e, forward); if (bitmap_bit_p (subgraph, d)) return e; e = forward ? e->succ_next : e->pred_next; } return e; } /* Helper function for graphds_dfs. Select the first edge from V in G, in the direction given by FORWARD, that belongs to SUBGRAPH. 
*/ static inline struct graph_edge * dfs_fst_edge (struct graph *g, int v, bool forward, bitmap subgraph) { struct graph_edge *e; e = (forward ? g->vertices[v].succ : g->vertices[v].pred); return foll_in_subgraph (e, forward, subgraph); } /* Helper function for graphds_dfs. Returns the next edge after E, in the graph direction given by FORWARD, that belongs to SUBGRAPH. */ static inline struct graph_edge * dfs_next_edge (struct graph_edge *e, bool forward, bitmap subgraph) { return foll_in_subgraph (forward ? e->succ_next : e->pred_next, forward, subgraph); } /* Runs dfs search over vertices of G, from NQ vertices in queue QS. The vertices in postorder are stored into QT. If FORWARD is false, backward dfs is run. If SUBGRAPH is not NULL, it specifies the subgraph of G to run DFS on. Returns the number of the components of the graph (number of the restarts of DFS). */ int graphds_dfs (struct graph *g, int *qs, int nq, vec<int> *qt, bool forward, bitmap subgraph) { int i, tick = 0, v, comp = 0, top; struct graph_edge *e; struct graph_edge **stack = XNEWVEC (struct graph_edge *, g->n_vertices); bitmap_iterator bi; unsigned av; if (subgraph) { EXECUTE_IF_SET_IN_BITMAP (subgraph, 0, av, bi) { g->vertices[av].component = -1; g->vertices[av].post = -1; } } else { for (i = 0; i < g->n_vertices; i++) { g->vertices[i].component = -1; g->vertices[i].post = -1; } } for (i = 0; i < nq; i++) { v = qs[i]; if (g->vertices[v].post != -1) continue; g->vertices[v].component = comp++; e = dfs_fst_edge (g, v, forward, subgraph); top = 0; while (1) { while (e) { if (g->vertices[dfs_edge_dest (e, forward)].component == -1) break; e = dfs_next_edge (e, forward, subgraph); } if (!e) { if (qt) qt->safe_push (v); g->vertices[v].post = tick++; if (!top) break; e = stack[--top]; v = dfs_edge_src (e, forward); e = dfs_next_edge (e, forward, subgraph); continue; } stack[top++] = e; v = dfs_edge_dest (e, forward); e = dfs_fst_edge (g, v, forward, subgraph); g->vertices[v].component = comp - 1; } } free (stack); return comp; } /* Determines the strongly connected components of G, using the algorithm of Tarjan -- first determine the postorder dfs numbering in reversed graph, then run the dfs on the original graph in the order given by decreasing numbers assigned by the previous pass. If SUBGRAPH is not NULL, it specifies the subgraph of G whose strongly connected components we want to determine. After running this function, v->component is the number of the strongly connected component for each vertex of G. Returns the number of the sccs of G. */ int graphds_scc (struct graph *g, bitmap subgraph) { int *queue = XNEWVEC (int, g->n_vertices); vec<int> postorder = vNULL; int nq, i, comp; unsigned v; bitmap_iterator bi; if (subgraph) { nq = 0; EXECUTE_IF_SET_IN_BITMAP (subgraph, 0, v, bi) { queue[nq++] = v; } } else { for (i = 0; i < g->n_vertices; i++) queue[i] = i; nq = g->n_vertices; } graphds_dfs (g, queue, nq, &postorder, false, subgraph); gcc_assert (postorder.length () == (unsigned) nq); for (i = 0; i < nq; i++) queue[i] = postorder[nq - i - 1]; comp = graphds_dfs (g, queue, nq, NULL, true, subgraph); free (queue); postorder.release (); return comp; } /* Runs CALLBACK for all edges in G. */ void for_each_edge (struct graph *g, graphds_edge_callback callback) { struct graph_edge *e; int i; for (i = 0; i < g->n_vertices; i++) for (e = g->vertices[i].succ; e; e = e->succ_next) callback (g, e); } /* Releases the memory occupied by G. 
*/ void free_graph (struct graph *g) { obstack_free (&g->ob, NULL); free (g); } /* Returns the nearest common ancestor of X and Y in tree whose parent links are given by PARENT. MARKS is the array used to mark the vertices of the tree, and MARK is the number currently used as a mark. */ static int tree_nca (int x, int y, int *parent, int *marks, int mark) { if (x == -1 || x == y) return y; /* We climb with X and Y up the tree, marking the visited nodes. When we first arrive to a marked node, it is the common ancestor. */ marks[x] = mark; marks[y] = mark; while (1) { x = parent[x]; if (x == -1) break; if (marks[x] == mark) return x; marks[x] = mark; y = parent[y]; if (y == -1) break; if (marks[y] == mark) return y; marks[y] = mark; } /* If we reached the root with one of the vertices, continue with the other one till we reach the marked part of the tree. */ if (x == -1) { for (y = parent[y]; marks[y] != mark; y = parent[y]) continue; return y; } else { for (x = parent[x]; marks[x] != mark; x = parent[x]) continue; return x; } } /* Determines the dominance tree of G (stored in the PARENT, SON and BROTHER arrays), where the entry node is ENTRY. */ void graphds_domtree (struct graph *g, int entry, int *parent, int *son, int *brother) { vec<int> postorder = vNULL; int *marks = XCNEWVEC (int, g->n_vertices); int mark = 1, i, v, idom; bool changed = true; struct graph_edge *e; /* We use a slight modification of the standard iterative algorithm, as described in K. D. Cooper, T. J. Harvey and K. Kennedy: A Simple, Fast Dominance Algorithm sort vertices in reverse postorder foreach v dom(v) = everything dom(entry) = entry; while (anything changes) foreach v dom(v) = {v} union (intersection of dom(p) over all predecessors of v) The sets dom(v) are represented by the parent links in the current version of the dominance tree. */ for (i = 0; i < g->n_vertices; i++) { parent[i] = -1; son[i] = -1; brother[i] = -1; } graphds_dfs (g, &entry, 1, &postorder, true, NULL); gcc_assert (postorder.length () == (unsigned) g->n_vertices); gcc_assert (postorder[g->n_vertices - 1] == entry); while (changed) { changed = false; for (i = g->n_vertices - 2; i >= 0; i--) { v = postorder[i]; idom = -1; for (e = g->vertices[v].pred; e; e = e->pred_next) { if (e->src != entry && parent[e->src] == -1) continue; idom = tree_nca (idom, e->src, parent, marks, mark++); } if (idom != parent[v]) { parent[v] = idom; changed = true; } } } free (marks); postorder.release (); for (i = 0; i < g->n_vertices; i++) if (parent[i] != -1) { brother[i] = son[parent[i]]; son[parent[i]] = i; } }
gpl-2.0
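A small usage sketch in GCC's own style (assuming the same headers as above): build a four-vertex graph whose vertices 0, 1 and 2 form a cycle, then count the strongly connected components.

static void
example_scc (void)
{
  struct graph *g = new_graph (4);
  int nscc;

  add_edge (g, 0, 1);
  add_edge (g, 1, 2);
  add_edge (g, 2, 0);		/* 0, 1 and 2 form one cycle */
  add_edge (g, 2, 3);		/* 3 is a component of its own */

  nscc = graphds_scc (g, NULL);
  gcc_assert (nscc == 2);	/* {0,1,2} and {3} */

  free_graph (g);
}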
allr/r-instrumented
src/nmath/ppois.c
25
1308
/* * Mathlib : A C Library of Special Functions * Copyright (C) 1998 Ross Ihaka * Copyright (C) 2000 The R Core Team * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, a copy is available at * https://www.R-project.org/Licenses/ * * DESCRIPTION * * The distribution function of the Poisson distribution. */ #include "nmath.h" #include "dpq.h" double ppois(double x, double lambda, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(lambda)) return x + lambda; #endif if(lambda < 0.) ML_ERR_return_NAN; if (x < 0) return R_DT_0; if (lambda == 0.) return R_DT_1; if (!R_FINITE(x)) return R_DT_1; x = floor(x + 1e-7); return pgamma(lambda, x + 1, 1., !lower_tail, log_p); }
gpl-2.0
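The final line of ppois() leans on the classical identity between the Poisson lower tail and the upper tail of a Gamma distribution, which is why lower_tail is negated in the pgamma() call. For integer k >= 0:

\Pr[X \le k]
  = \sum_{j=0}^{k} e^{-\lambda}\,\frac{\lambda^{j}}{j!}
  = \frac{\Gamma(k+1,\lambda)}{\Gamma(k+1)}
  = \Pr[G > \lambda],
\qquad X \sim \mathrm{Poisson}(\lambda),\ G \sim \mathrm{Gamma}(k+1,\,1).

So a lower-tail Poisson probability is exactly an upper-tail Gamma probability, and pgamma(lambda, x + 1, 1., !lower_tail, log_p) evaluates it without summing the series term by term.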
dan82840/Netgear-RBR50
git_home/samba.git/source/lib/version.c
25
1517
/* Unix SMB/CIFS implementation. Samba Version functions Copyright (C) Stefan Metzmacher 2003 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "includes.h" const char *samba_version_string(void) { #ifndef SAMBA_VERSION_VENDOR_SUFFIX return SAMBA_VERSION_OFFICIAL_STRING; #else static fstring samba_version; fstring tmp_version; static BOOL init_samba_version; size_t remaining; if (init_samba_version) return samba_version; snprintf(samba_version,sizeof(samba_version),"%s-%s", SAMBA_VERSION_OFFICIAL_STRING, SAMBA_VERSION_VENDOR_SUFFIX); #ifdef SAMBA_VENDOR_PATCH remaining = sizeof(samba_version)-strlen(samba_version); snprintf( tmp_version, sizeof(tmp_version), "-%d", SAMBA_VENDOR_PATCH ); strlcat( samba_version, tmp_version, remaining-1 ); #endif init_samba_version = True; return samba_version; #endif }
gpl-2.0
karltsou/u-boot_next
board/armltd/integrator/timer.c
25
4819
/* * (C) Copyright 2002 * Sysgo Real-Time Solutions, GmbH <www.elinos.com> * Marius Groeger <mgroeger@sysgo.de> * * (C) Copyright 2002 * David Mueller, ELSOFT AG, <d.mueller@elsoft.ch> * * (C) Copyright 2003 * Texas Instruments, <www.ti.com> * Kshitij Gupta <Kshitij@ti.com> * * (C) Copyright 2004 * ARM Ltd. * Philippe Robin, <philippe.robin@arm.com> * * See file CREDITS for list of people who contributed to this * project. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA */ #include <common.h> #include <div64.h> #ifdef CONFIG_ARCH_CINTEGRATOR #define DIV_CLOCK_INIT 1 #define TIMER_LOAD_VAL 0xFFFFFFFFL #else #define DIV_CLOCK_INIT 256 #define TIMER_LOAD_VAL 0x0000FFFFL #endif /* The Integrator/CP timer1 is clocked at 1MHz * can be divided by 16 or 256 * and can be set up as a 32-bit timer */ /* U-Boot expects a 32 bit timer, running at CONFIG_SYS_HZ */ /* Keep total timer count to avoid losing decrements < div_timer */ static unsigned long long total_count = 0; static unsigned long long lastdec; /* Timer reading at last call */ /* Divisor applied to timer clock */ static unsigned long long div_clock = DIV_CLOCK_INIT; static unsigned long long div_timer = 1; /* Divisor to convert timer reading * change to U-Boot ticks */ /* CONFIG_SYS_HZ = CONFIG_SYS_HZ_CLOCK/(div_clock * div_timer) */ static ulong timestamp; /* U-Boot ticks since startup */ #define READ_TIMER (*(volatile ulong *)(CONFIG_SYS_TIMERBASE+4)) /* all function return values in U-Boot ticks i.e. 
(1/CONFIG_SYS_HZ) sec * - unless otherwise stated */ /* starts up a counter * - the Integrator/CP timer can be set up to issue an interrupt */ int timer_init (void) { /* Load timer with initial value */ *(volatile ulong *)(CONFIG_SYS_TIMERBASE + 0) = TIMER_LOAD_VAL; #ifdef CONFIG_ARCH_CINTEGRATOR /* Set timer to be * enabled 1 * periodic 1 * no interrupts 0 * X 0 * divider 1 00 == less rounding error * 32 bit 1 * wrapping 0 */ *(volatile ulong *)(CONFIG_SYS_TIMERBASE + 8) = 0x000000C2; #else /* Set timer to be * enabled 1 * free-running 0 * XX 00 * divider 256 10 * XX 00 */ *(volatile ulong *)(CONFIG_SYS_TIMERBASE + 8) = 0x00000088; #endif /* init the timestamp */ total_count = 0ULL; reset_timer_masked(); div_timer = CONFIG_SYS_HZ_CLOCK; do_div(div_timer, CONFIG_SYS_HZ); do_div(div_timer, div_clock); return (0); } /* * timer without interrupts */ void reset_timer (void) { reset_timer_masked (); } ulong get_timer (ulong base_ticks) { return get_timer_masked () - base_ticks; } void set_timer (ulong ticks) { timestamp = ticks; total_count = ticks * div_timer; } /* delay for usec microseconds */ void udelay (unsigned long usec) { ulong tmo, tmp; /* Convert to U-Boot ticks */ tmo = usec * CONFIG_SYS_HZ; tmo /= (1000000L); tmp = get_timer_masked(); /* get current timestamp */ tmo += tmp; /* form target timestamp */ while (get_timer_masked () < tmo) {/* loop till event */ /*NOP*/; } } void reset_timer_masked (void) { /* capture current decrementer value */ lastdec = READ_TIMER; /* start "advancing" time stamp from 0 */ timestamp = 0L; } /* converts the timer reading to U-Boot ticks */ /* the timestamp is the number of ticks since reset */ ulong get_timer_masked (void) { /* get current count */ unsigned long long now = READ_TIMER; if(now > lastdec) { /* Must have wrapped */ total_count += lastdec + TIMER_LOAD_VAL + 1 - now; } else { total_count += lastdec - now; } lastdec = now; /* Reuse "now" */ now = total_count; do_div(now, div_timer); timestamp = now; return timestamp; } /* waits specified delay value and resets timestamp */ void udelay_masked (unsigned long usec) { udelay(usec); } /* * This function is derived from PowerPC code (read timebase as long long). * On ARM it just returns the timer value. */ unsigned long long get_ticks(void) { return get_timer(0); } /* * Return the timebase clock frequency * i.e. how often the timer decrements */ ulong get_tbclk (void) { unsigned long long tmp = CONFIG_SYS_HZ_CLOCK; do_div(tmp, div_clock); return tmp; }
gpl-2.0
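The wrap handling in get_timer_masked() is the subtle part: the hardware counts down, so a reading larger than the previous one means the counter passed zero and was reloaded. A standalone sketch of that bookkeeping (host C; the sample values are invented, TIMER_LOAD_VAL matches the 16-bit case above):

#include <stdio.h>

#define TIMER_LOAD_VAL 0x0000FFFFL	/* 16-bit reload value */

static unsigned long long elapsed(unsigned long long lastdec,
				  unsigned long long now)
{
	if (now > lastdec)	/* wrapped through zero and reloaded */
		return lastdec + TIMER_LOAD_VAL + 1 - now;
	return lastdec - now;	/* plain decrement */
}

int main(void)
{
	printf("%llu\n", elapsed(100, 40));	/* 60: no wrap */
	printf("%llu\n", elapsed(40, 0xFFF0));	/* 56: wrapped once */
	return 0;
}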
embecosm/avr32-gcc
gcc/testsuite/gcc.dg/vect/vect-strided-u8-i8-gap7.c
25
1865
/* { dg-require-effective-target vect_int } */ #include <stdarg.h> #include "tree-vect.h" #define N 16 typedef struct { unsigned char a; unsigned char b; unsigned char c; unsigned char d; unsigned char e; unsigned char f; unsigned char g; unsigned char h; } s; __attribute__ ((noinline)) int main1 (s *arr) { int i; s *ptr = arr; s res[N]; unsigned char u, t, s, x, y, z, w; for (i = 0; i < N; i++) { u = ptr->b - ptr->a; t = ptr->d - ptr->c; res[i].c = u + t; x = ptr->b + ptr->d; res[i].a = ptr->a + x; res[i].d = u + t; s = ptr->h - ptr->a; res[i].b = s + t; res[i].f = ptr->f + ptr->h; res[i].e = ptr->b + ptr->e; res[i].h = ptr->d; res[i].g = u + t; ptr++; } /* check results: */ for (i = 0; i < N; i++) { if (res[i].c != arr[i].b - arr[i].a + arr[i].d - arr[i].c || res[i].a != arr[i].a + arr[i].b + arr[i].d || res[i].d != arr[i].b - arr[i].a + arr[i].d - arr[i].c || res[i].b != arr[i].h - arr[i].a + arr[i].d - arr[i].c || res[i].f != arr[i].f + arr[i].h || res[i].e != arr[i].b + arr[i].e || res[i].h != arr[i].d || res[i].g != arr[i].b - arr[i].a + arr[i].d - arr[i].c) abort(); } } int main (void) { int i; s arr[N]; check_vect (); for (i = 0; i < N; i++) { arr[i].a = i; arr[i].b = i * 2; arr[i].c = 17; arr[i].d = i+34; arr[i].e = i * 3 + 5; arr[i].f = i * 5; arr[i].g = i - 3; arr[i].h = 67; if (arr[i].a == 178) abort(); } main1 (arr); return 0; } /* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target { vect_interleave && vect_extract_even_odd } } } } */ /* { dg-final { cleanup-tree-dump "vect" } } */
gpl-2.0
lssjbrolli/android_kernel_samsung_klimtlte
drivers/media/media-device.c
281
9718
/* * Media device * * Copyright (C) 2010 Nokia Corporation * * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com> * Sakari Ailus <sakari.ailus@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/types.h> #include <linux/ioctl.h> #include <linux/media.h> #include <linux/export.h> #include <media/media-device.h> #include <media/media-devnode.h> #include <media/media-entity.h> /* ----------------------------------------------------------------------------- * Userspace API */ static int media_device_open(struct file *filp) { return 0; } static int media_device_close(struct file *filp) { return 0; } static int media_device_get_info(struct media_device *dev, struct media_device_info __user *__info) { struct media_device_info info; memset(&info, 0, sizeof(info)); strlcpy(info.driver, dev->dev->driver->name, sizeof(info.driver)); strlcpy(info.model, dev->model, sizeof(info.model)); strlcpy(info.serial, dev->serial, sizeof(info.serial)); strlcpy(info.bus_info, dev->bus_info, sizeof(info.bus_info)); info.media_version = MEDIA_API_VERSION; info.hw_revision = dev->hw_revision; info.driver_version = dev->driver_version; return copy_to_user(__info, &info, sizeof(*__info)) ? -EFAULT : 0; } static struct media_entity *find_entity(struct media_device *mdev, u32 id) { struct media_entity *entity; int next = id & MEDIA_ENT_ID_FLAG_NEXT; id &= ~MEDIA_ENT_ID_FLAG_NEXT; spin_lock(&mdev->lock); media_device_for_each_entity(entity, mdev) { if ((entity->id == id && !next) || (entity->id > id && next)) { spin_unlock(&mdev->lock); return entity; } } spin_unlock(&mdev->lock); return NULL; } static long media_device_enum_entities(struct media_device *mdev, struct media_entity_desc __user *uent) { struct media_entity *ent; struct media_entity_desc u_ent; memset(&u_ent, 0, sizeof(u_ent)); if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id))) return -EFAULT; ent = find_entity(mdev, u_ent.id); if (ent == NULL) return -EINVAL; u_ent.id = ent->id; u_ent.name[0] = '\0'; if (ent->name) strlcpy(u_ent.name, ent->name, sizeof(u_ent.name)); u_ent.type = ent->type; u_ent.revision = ent->revision; u_ent.flags = ent->flags; u_ent.group_id = ent->group_id; u_ent.pads = ent->num_pads; u_ent.links = ent->num_links - ent->num_backlinks; memcpy(&u_ent.raw, &ent->info, sizeof(ent->info)); if (copy_to_user(uent, &u_ent, sizeof(u_ent))) return -EFAULT; return 0; } static void media_device_kpad_to_upad(const struct media_pad *kpad, struct media_pad_desc *upad) { upad->entity = kpad->entity->id; upad->index = kpad->index; upad->flags = kpad->flags; } static long media_device_enum_links(struct media_device *mdev, struct media_links_enum __user *ulinks) { struct media_entity *entity; struct media_links_enum links; if (copy_from_user(&links, ulinks, sizeof(links))) return -EFAULT; entity = find_entity(mdev, links.entity); if (entity == NULL) return -EINVAL; if (links.pads) { unsigned int p; for (p = 0; p < entity->num_pads; p++) { struct
media_pad_desc pad; media_device_kpad_to_upad(&entity->pads[p], &pad); if (copy_to_user(&links.pads[p], &pad, sizeof(pad))) return -EFAULT; } } if (links.links) { struct media_link_desc __user *ulink; unsigned int l; for (l = 0, ulink = links.links; l < entity->num_links; l++) { struct media_link_desc link; /* Ignore backlinks. */ if (entity->links[l].source->entity != entity) continue; media_device_kpad_to_upad(entity->links[l].source, &link.source); media_device_kpad_to_upad(entity->links[l].sink, &link.sink); link.flags = entity->links[l].flags; if (copy_to_user(ulink, &link, sizeof(*ulink))) return -EFAULT; ulink++; } } if (copy_to_user(ulinks, &links, sizeof(*ulinks))) return -EFAULT; return 0; } static long media_device_setup_link(struct media_device *mdev, struct media_link_desc __user *_ulink) { struct media_link *link = NULL; struct media_link_desc ulink; struct media_entity *source; struct media_entity *sink; int ret; if (copy_from_user(&ulink, _ulink, sizeof(ulink))) return -EFAULT; /* Find the source and sink entities and link. */ source = find_entity(mdev, ulink.source.entity); sink = find_entity(mdev, ulink.sink.entity); if (source == NULL || sink == NULL) return -EINVAL; if (ulink.source.index >= source->num_pads || ulink.sink.index >= sink->num_pads) return -EINVAL; link = media_entity_find_link(&source->pads[ulink.source.index], &sink->pads[ulink.sink.index]); if (link == NULL) return -EINVAL; /* Setup the link on both entities. */ ret = __media_entity_setup_link(link, ulink.flags); if (copy_to_user(_ulink, &ulink, sizeof(ulink))) return -EFAULT; return ret; } static long media_device_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct media_devnode *devnode = media_devnode_data(filp); struct media_device *dev = to_media_device(devnode); long ret; switch (cmd) { case MEDIA_IOC_DEVICE_INFO: ret = media_device_get_info(dev, (struct media_device_info __user *)arg); break; case MEDIA_IOC_ENUM_ENTITIES: ret = media_device_enum_entities(dev, (struct media_entity_desc __user *)arg); break; case MEDIA_IOC_ENUM_LINKS: mutex_lock(&dev->graph_mutex); ret = media_device_enum_links(dev, (struct media_links_enum __user *)arg); mutex_unlock(&dev->graph_mutex); break; case MEDIA_IOC_SETUP_LINK: mutex_lock(&dev->graph_mutex); ret = media_device_setup_link(dev, (struct media_link_desc __user *)arg); mutex_unlock(&dev->graph_mutex); break; default: ret = -ENOIOCTLCMD; } return ret; } static const struct media_file_operations media_device_fops = { .owner = THIS_MODULE, .open = media_device_open, .ioctl = media_device_ioctl, .release = media_device_close, }; /* ----------------------------------------------------------------------------- * sysfs */ static ssize_t show_model(struct device *cd, struct device_attribute *attr, char *buf) { struct media_device *mdev = to_media_device(to_media_devnode(cd)); return sprintf(buf, "%.*s\n", (int)sizeof(mdev->model), mdev->model); } static DEVICE_ATTR(model, S_IRUGO, show_model, NULL); /* ----------------------------------------------------------------------------- * Registration/unregistration */ static void media_device_release(struct media_devnode *mdev) { } /** * media_device_register - register a media device * @mdev: The media device * * The caller is responsible for initializing the media device before * registration. 
The following fields must be set: * * - dev must point to the parent device * - model must be filled with the device model name */ int __must_check media_device_register(struct media_device *mdev) { int ret; if (WARN_ON(mdev->dev == NULL || mdev->model[0] == 0)) return -EINVAL; mdev->entity_id = 1; INIT_LIST_HEAD(&mdev->entities); spin_lock_init(&mdev->lock); mutex_init(&mdev->graph_mutex); /* Register the device node. */ mdev->devnode.fops = &media_device_fops; mdev->devnode.parent = mdev->dev; mdev->devnode.release = media_device_release; ret = media_devnode_register(&mdev->devnode); if (ret < 0) return ret; ret = device_create_file(&mdev->devnode.dev, &dev_attr_model); if (ret < 0) { media_devnode_unregister(&mdev->devnode); return ret; } return 0; } EXPORT_SYMBOL_GPL(media_device_register); /** * media_device_unregister - unregister a media device * @mdev: The media device * */ void media_device_unregister(struct media_device *mdev) { struct media_entity *entity; struct media_entity *next; list_for_each_entry_safe(entity, next, &mdev->entities, list) media_device_unregister_entity(entity); device_remove_file(&mdev->devnode.dev, &dev_attr_model); media_devnode_unregister(&mdev->devnode); } EXPORT_SYMBOL_GPL(media_device_unregister); /** * media_device_register_entity - Register an entity with a media device * @mdev: The media device * @entity: The entity */ int __must_check media_device_register_entity(struct media_device *mdev, struct media_entity *entity) { /* Warn if we apparently re-register an entity */ WARN_ON(entity->parent != NULL); entity->parent = mdev; spin_lock(&mdev->lock); if (entity->id == 0) entity->id = mdev->entity_id++; else mdev->entity_id = max(entity->id + 1, mdev->entity_id); list_add_tail(&entity->list, &mdev->entities); spin_unlock(&mdev->lock); return 0; } EXPORT_SYMBOL_GPL(media_device_register_entity); /** * media_device_unregister_entity - Unregister an entity * @entity: The entity * * If the entity has never been registered this function will return * immediately. */ void media_device_unregister_entity(struct media_entity *entity) { struct media_device *mdev = entity->parent; if (mdev == NULL) return; spin_lock(&mdev->lock); list_del(&entity->list); spin_unlock(&mdev->lock); entity->parent = NULL; } EXPORT_SYMBOL_GPL(media_device_unregister_entity);
gpl-2.0
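From userspace, the MEDIA_ENT_ID_FLAG_NEXT convention implemented by find_entity() gives a simple enumeration loop. A sketch (the /dev/media0 node path is an assumption):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/media.h>

int main(void)
{
	struct media_entity_desc desc;
	int fd = open("/dev/media0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&desc, 0, sizeof(desc));
	desc.id = MEDIA_ENT_ID_FLAG_NEXT;		/* "first entity" */
	while (ioctl(fd, MEDIA_IOC_ENUM_ENTITIES, &desc) == 0) {
		printf("entity %u: %s (%u pads, %u links)\n",
		       desc.id, desc.name, desc.pads, desc.links);
		desc.id |= MEDIA_ENT_ID_FLAG_NEXT;	/* ask for the next one */
	}
	close(fd);
	return 0;
}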
AndyLavr/Aspire-SW5-012_Kernel_4.8
drivers/rapidio/switches/idt_gen3.c
281
10066
/* * IDT RXS Gen.3 Serial RapidIO switch family support * * Copyright 2016 Integrated Device Technology, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/stat.h> #include <linux/module.h> #include <linux/rio.h> #include <linux/rio_drv.h> #include <linux/rio_ids.h> #include <linux/delay.h> #include <asm/page.h> #include "../rio.h" #define RIO_EM_PW_STAT 0x40020 #define RIO_PW_CTL 0x40204 #define RIO_PW_CTL_PW_TMR 0xffffff00 #define RIO_PW_ROUTE 0x40208 #define RIO_EM_DEV_INT_EN 0x40030 #define RIO_PLM_SPx_IMP_SPEC_CTL(x) (0x10100 + (x)*0x100) #define RIO_PLM_SPx_IMP_SPEC_CTL_SOFT_RST 0x02000000 #define RIO_PLM_SPx_PW_EN(x) (0x10118 + (x)*0x100) #define RIO_PLM_SPx_PW_EN_OK2U 0x40000000 #define RIO_PLM_SPx_PW_EN_LINIT 0x10000000 #define RIO_BC_L2_Gn_ENTRYx_CSR(n, x) (0x31000 + (n)*0x400 + (x)*0x4) #define RIO_SPx_L2_Gn_ENTRYy_CSR(x, n, y) \ (0x51000 + (x)*0x2000 + (n)*0x400 + (y)*0x4) static int idtg3_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table, u16 route_destid, u8 route_port) { u32 rval; u32 entry = route_port; int err = 0; pr_debug("RIO: %s t=0x%x did_%x to p_%x\n", __func__, table, route_destid, entry); if (route_destid > 0xFF) return -EINVAL; if (route_port == RIO_INVALID_ROUTE) entry = RIO_RT_ENTRY_DROP_PKT; if (table == RIO_GLOBAL_TABLE) { /* Use broadcast register to update all per-port tables */ err = rio_mport_write_config_32(mport, destid, hopcount, RIO_BC_L2_Gn_ENTRYx_CSR(0, route_destid), entry); return err; } /* * Verify that specified port/table number is valid */ err = rio_mport_read_config_32(mport, destid, hopcount, RIO_SWP_INFO_CAR, &rval); if (err) return err; if (table >= RIO_GET_TOTAL_PORTS(rval)) return -EINVAL; err = rio_mport_write_config_32(mport, destid, hopcount, RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, route_destid), entry); return err; } static int idtg3_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table, u16 route_destid, u8 *route_port) { u32 rval; int err; if (route_destid > 0xFF) return -EINVAL; err = rio_mport_read_config_32(mport, destid, hopcount, RIO_SWP_INFO_CAR, &rval); if (err) return err; /* * This switch device does not have the dedicated global routing table. * It is substituted by reading routing table of the ingress port of * maintenance read requests. 
*/ if (table == RIO_GLOBAL_TABLE) table = RIO_GET_PORT_NUM(rval); else if (table >= RIO_GET_TOTAL_PORTS(rval)) return -EINVAL; err = rio_mport_read_config_32(mport, destid, hopcount, RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, route_destid), &rval); if (err) return err; if (rval == RIO_RT_ENTRY_DROP_PKT) *route_port = RIO_INVALID_ROUTE; else *route_port = (u8)rval; return 0; } static int idtg3_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table) { u32 i; u32 rval; int err; if (table == RIO_GLOBAL_TABLE) { for (i = 0; i <= 0xff; i++) { err = rio_mport_write_config_32(mport, destid, hopcount, RIO_BC_L2_Gn_ENTRYx_CSR(0, i), RIO_RT_ENTRY_DROP_PKT); if (err) break; } return err; } err = rio_mport_read_config_32(mport, destid, hopcount, RIO_SWP_INFO_CAR, &rval); if (err) return err; if (table >= RIO_GET_TOTAL_PORTS(rval)) return -EINVAL; for (i = 0; i <= 0xff; i++) { err = rio_mport_write_config_32(mport, destid, hopcount, RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, i), RIO_RT_ENTRY_DROP_PKT); if (err) break; } return err; } /* * This routine performs device-specific initialization only. * All standard EM configuration should be performed at upper level. */ static int idtg3_em_init(struct rio_dev *rdev) { int i, tmp; u32 rval; pr_debug("RIO: %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount); /* Disable assertion of interrupt signal */ rio_write_config_32(rdev, RIO_EM_DEV_INT_EN, 0); /* Disable port-write event notifications during initialization */ rio_write_config_32(rdev, rdev->em_efptr + RIO_EM_PW_TX_CTRL, RIO_EM_PW_TX_CTRL_PW_DIS); /* Configure Port-Write notifications for hot-swap events */ tmp = RIO_GET_TOTAL_PORTS(rdev->swpinfo); for (i = 0; i < tmp; i++) { rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, i), &rval); if (rval & RIO_PORT_N_ERR_STS_PORT_UA) continue; /* Clear events signaled before enabling notification */ rio_write_config_32(rdev, rdev->em_efptr + RIO_EM_PN_ERR_DETECT(i), 0); /* Enable event notifications */ rio_write_config_32(rdev, rdev->em_efptr + RIO_EM_PN_ERRRATE_EN(i), RIO_EM_PN_ERRRATE_EN_OK2U | RIO_EM_PN_ERRRATE_EN_U2OK); /* Enable port-write generation on events */ rio_write_config_32(rdev, RIO_PLM_SPx_PW_EN(i), RIO_PLM_SPx_PW_EN_OK2U | RIO_PLM_SPx_PW_EN_LINIT); } /* Set Port-Write destination port */ tmp = RIO_GET_PORT_NUM(rdev->swpinfo); rio_write_config_32(rdev, RIO_PW_ROUTE, 1 << tmp); /* Enable sending port-write event notifications */ rio_write_config_32(rdev, rdev->em_efptr + RIO_EM_PW_TX_CTRL, 0); /* set TVAL = ~50us */ rio_write_config_32(rdev, rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); return 0; } /* * idtg3_em_handler - device-specific error handler * * If the link is down (PORT_UNINIT) does nothing - this is considered * as link partner removal from the port. * * If the link is up (PORT_OK) - situation is handled as *new* device insertion. * In this case ERR_STOP bits are cleared by issuing soft reset command to the * reporting port. Inbound and outbound ackIDs are cleared by the reset as well. * This way the port is synchronized with freshly inserted device (assuming it * was reset/powered-up on insertion). * * TODO: This is not sufficient in a situation when a link between two devices * was down and up again (e.g. cable disconnect). For that situation full ackID * realignment process has to be implemented. 
*/ static int idtg3_em_handler(struct rio_dev *rdev, u8 pnum) { u32 err_status; u32 rval; rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum), &err_status); /* Do nothing for device/link removal */ if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) return 0; /* When link is OK we have a device insertion. * Request port soft reset to clear errors if they present. * Inbound and outbound ackIDs will be 0 after reset. */ if (err_status & (RIO_PORT_N_ERR_STS_OUT_ES | RIO_PORT_N_ERR_STS_INP_ES)) { rio_read_config_32(rdev, RIO_PLM_SPx_IMP_SPEC_CTL(pnum), &rval); rio_write_config_32(rdev, RIO_PLM_SPx_IMP_SPEC_CTL(pnum), rval | RIO_PLM_SPx_IMP_SPEC_CTL_SOFT_RST); udelay(10); rio_write_config_32(rdev, RIO_PLM_SPx_IMP_SPEC_CTL(pnum), rval); msleep(500); } return 0; } static struct rio_switch_ops idtg3_switch_ops = { .owner = THIS_MODULE, .add_entry = idtg3_route_add_entry, .get_entry = idtg3_route_get_entry, .clr_table = idtg3_route_clr_table, .em_init = idtg3_em_init, .em_handle = idtg3_em_handler, }; static int idtg3_probe(struct rio_dev *rdev, const struct rio_device_id *id) { pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); spin_lock(&rdev->rswitch->lock); if (rdev->rswitch->ops) { spin_unlock(&rdev->rswitch->lock); return -EINVAL; } rdev->rswitch->ops = &idtg3_switch_ops; if (rdev->do_enum) { /* Disable hierarchical routing support: Existing fabric * enumeration/discovery process (see rio-scan.c) uses 8-bit * flat destination ID routing only. */ rio_write_config_32(rdev, 0x5000 + RIO_BC_RT_CTL_CSR, 0); } spin_unlock(&rdev->rswitch->lock); return 0; } static void idtg3_remove(struct rio_dev *rdev) { pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); spin_lock(&rdev->rswitch->lock); if (rdev->rswitch->ops == &idtg3_switch_ops) rdev->rswitch->ops = NULL; spin_unlock(&rdev->rswitch->lock); } /* * Gen3 switches repeat sending PW messages until a corresponding event flag * is cleared. Use shutdown notification to disable generation of port-write * messages if their destination node is shut down. 
*/ static void idtg3_shutdown(struct rio_dev *rdev) { int i; u32 rval; u16 destid; /* Currently the enumerator node acts also as PW handler */ if (!rdev->do_enum) return; pr_debug("RIO: %s(%s)\n", __func__, rio_name(rdev)); rio_read_config_32(rdev, RIO_PW_ROUTE, &rval); i = RIO_GET_PORT_NUM(rdev->swpinfo); /* Check port-write destination port */ if (!((1 << i) & rval)) return; /* Disable sending port-write event notifications if PW destID * matches to one of the enumerator node */ rio_read_config_32(rdev, rdev->em_efptr + RIO_EM_PW_TGT_DEVID, &rval); if (rval & RIO_EM_PW_TGT_DEVID_DEV16) destid = rval >> 16; else destid = ((rval & RIO_EM_PW_TGT_DEVID_D8) >> 16); if (rdev->net->hport->host_deviceid == destid) { rio_write_config_32(rdev, rdev->em_efptr + RIO_EM_PW_TX_CTRL, 0); pr_debug("RIO: %s(%s) PW transmission disabled\n", __func__, rio_name(rdev)); } } static struct rio_device_id idtg3_id_table[] = { {RIO_DEVICE(RIO_DID_IDTRXS1632, RIO_VID_IDT)}, {RIO_DEVICE(RIO_DID_IDTRXS2448, RIO_VID_IDT)}, { 0, } /* terminate list */ }; static struct rio_driver idtg3_driver = { .name = "idt_gen3", .id_table = idtg3_id_table, .probe = idtg3_probe, .remove = idtg3_remove, .shutdown = idtg3_shutdown, }; static int __init idtg3_init(void) { return rio_register_driver(&idtg3_driver); } static void __exit idtg3_exit(void) { pr_debug("RIO: %s\n", __func__); rio_unregister_driver(&idtg3_driver); pr_debug("RIO: %s done\n", __func__); } device_initcall(idtg3_init); module_exit(idtg3_exit); MODULE_DESCRIPTION("IDT RXS Gen.3 Serial RapidIO switch family driver"); MODULE_AUTHOR("Integrated Device Technology, Inc."); MODULE_LICENSE("GPL");
gpl-2.0
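The per-port routing macros encode a flat offset scheme, so it is easy to check by hand where idtg3_route_add_entry() lands for a given port and destination ID. A host-side sketch with invented numbers:

#include <stdio.h>

/* same layout as the driver macro above */
#define RIO_SPx_L2_Gn_ENTRYy_CSR(x, n, y) \
		(0x51000 + (x)*0x2000 + (n)*0x400 + (y)*0x4)

int main(void)
{
	/* port 2, group 0, destid 0x31: 0x51000 + 0x4000 + 0xc4 */
	printf("0x%x\n", RIO_SPx_L2_Gn_ENTRYy_CSR(2, 0, 0x31));	/* 0x550c4 */
	return 0;
}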
Luavis/SOS
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
537
68557
/* * Copyright (C) 2003 - 2009 NetXen, Inc. * Copyright (C) 2009 - QLogic Corporation. * All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. * * The full GNU General Public License is included in this distribution * in the file called "COPYING". * */ #include <linux/slab.h> #include "netxen_nic.h" #include "netxen_nic_hw.h" #include <net/ip.h> #define MASK(n) ((1ULL<<(n))-1) #define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff)) #define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff)) #define MS_WIN(addr) (addr & 0x0ffc0000) #define GET_MEM_OFFS_2M(addr) (addr & MASK(18)) #define CRB_BLK(off) ((off >> 20) & 0x3f) #define CRB_SUBBLK(off) ((off >> 16) & 0xf) #define CRB_WINDOW_2M (0x130060) #define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000)) #define CRB_INDIRECT_2M (0x1e0000UL) static void netxen_nic_io_write_128M(struct netxen_adapter *adapter, void __iomem *addr, u32 data); static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter, void __iomem *addr); #ifndef readq static inline u64 readq(void __iomem *addr) { return readl(addr) | (((u64) readl(addr + 4)) << 32LL); } #endif #ifndef writeq static inline void writeq(u64 val, void __iomem *addr) { writel(((u32) (val)), (addr)); writel(((u32) (val >> 32)), (addr + 4)); } #endif #define PCI_OFFSET_FIRST_RANGE(adapter, off) \ ((adapter)->ahw.pci_base0 + (off)) #define PCI_OFFSET_SECOND_RANGE(adapter, off) \ ((adapter)->ahw.pci_base1 + (off) - SECOND_PAGE_GROUP_START) #define PCI_OFFSET_THIRD_RANGE(adapter, off) \ ((adapter)->ahw.pci_base2 + (off) - THIRD_PAGE_GROUP_START) static void __iomem *pci_base_offset(struct netxen_adapter *adapter, unsigned long off) { if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END)) return PCI_OFFSET_FIRST_RANGE(adapter, off); if (ADDR_IN_RANGE(off, SECOND_PAGE_GROUP_START, SECOND_PAGE_GROUP_END)) return PCI_OFFSET_SECOND_RANGE(adapter, off); if (ADDR_IN_RANGE(off, THIRD_PAGE_GROUP_START, THIRD_PAGE_GROUP_END)) return PCI_OFFSET_THIRD_RANGE(adapter, off); return NULL; } static crb_128M_2M_block_map_t crb_128M_2M_map[64] __cacheline_aligned_in_smp = { {{{0, 0, 0, 0} } }, /* 0: PCI */ {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */ {1, 0x0110000, 0x0120000, 0x130000}, {1, 0x0120000, 0x0122000, 0x124000}, {1, 0x0130000, 0x0132000, 0x126000}, {1, 0x0140000, 0x0142000, 0x128000}, {1, 0x0150000, 0x0152000, 0x12a000}, {1, 0x0160000, 0x0170000, 0x110000}, {1, 0x0170000, 0x0172000, 0x12e000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x01e0000, 0x01e0800, 0x122000}, {0, 0x0000000, 0x0000000, 0x000000} } }, {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */ {{{0, 0, 0, 0} } }, /* 3: */ {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */ {{{1, 0x0500000, 
0x0510000, 0x140000} } },/* 5: SRE */ {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */ {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */ {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */ {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x08f0000, 0x08f2000, 0x172000} } }, {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/ {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x09f0000, 0x09f2000, 0x176000} } }, {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/ {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x0af0000, 0x0af2000, 0x17a000} } }, {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/ {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x0bf0000, 0x0bf2000, 0x17e000} } }, {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */ {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */ {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */ {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */ {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */ {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */ {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */ {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */ {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */ {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */ {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */ {{{0, 0, 0, 0} } }, /* 23: */ {{{0, 0, 0, 0} } }, /* 24: */ {{{0, 0, 0, 0} } }, /* 25: */ {{{0, 0, 0, 0} } }, /* 26: */ {{{0, 0, 0, 0} } }, /* 27: */ {{{0, 0, 0, 0} } }, /* 28: */ {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */ {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */ {{{1, 0x1f00000, 0x1f10000, 0x150000} } 
},/* 31: EPG */ {{{0} } }, /* 32: PCI */ {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */ {1, 0x2110000, 0x2120000, 0x130000}, {1, 0x2120000, 0x2122000, 0x124000}, {1, 0x2130000, 0x2132000, 0x126000}, {1, 0x2140000, 0x2142000, 0x128000}, {1, 0x2150000, 0x2152000, 0x12a000}, {1, 0x2160000, 0x2170000, 0x110000}, {1, 0x2170000, 0x2172000, 0x12e000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000} } }, {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */ {{{0} } }, /* 35: */ {{{0} } }, /* 36: */ {{{0} } }, /* 37: */ {{{0} } }, /* 38: */ {{{0} } }, /* 39: */ {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */ {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */ {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */ {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */ {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */ {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */ {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */ {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */ {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */ {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */ {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */ {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */ {{{0} } }, /* 52: */ {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */ {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */ {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */ {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */ {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */ {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */ {{{0} } }, /* 59: I2C0 */ {{{0} } }, /* 60: I2C1 */ {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */ {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */ {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */ }; /* * top 12 bits of crb internal address (hub, agent) */ static unsigned crb_hub_agt[64] = { 0, NETXEN_HW_CRB_HUB_AGT_ADR_PS, NETXEN_HW_CRB_HUB_AGT_ADR_MN, NETXEN_HW_CRB_HUB_AGT_ADR_MS, 0, NETXEN_HW_CRB_HUB_AGT_ADR_SRE, NETXEN_HW_CRB_HUB_AGT_ADR_NIU, NETXEN_HW_CRB_HUB_AGT_ADR_QMN, NETXEN_HW_CRB_HUB_AGT_ADR_SQN0, NETXEN_HW_CRB_HUB_AGT_ADR_SQN1, NETXEN_HW_CRB_HUB_AGT_ADR_SQN2, NETXEN_HW_CRB_HUB_AGT_ADR_SQN3, NETXEN_HW_CRB_HUB_AGT_ADR_I2Q, NETXEN_HW_CRB_HUB_AGT_ADR_TIMR, NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB, NETXEN_HW_CRB_HUB_AGT_ADR_PGN4, NETXEN_HW_CRB_HUB_AGT_ADR_XDMA, NETXEN_HW_CRB_HUB_AGT_ADR_PGN0, NETXEN_HW_CRB_HUB_AGT_ADR_PGN1, NETXEN_HW_CRB_HUB_AGT_ADR_PGN2, NETXEN_HW_CRB_HUB_AGT_ADR_PGN3, NETXEN_HW_CRB_HUB_AGT_ADR_PGND, NETXEN_HW_CRB_HUB_AGT_ADR_PGNI, NETXEN_HW_CRB_HUB_AGT_ADR_PGS0, NETXEN_HW_CRB_HUB_AGT_ADR_PGS1, NETXEN_HW_CRB_HUB_AGT_ADR_PGS2, NETXEN_HW_CRB_HUB_AGT_ADR_PGS3, 0, NETXEN_HW_CRB_HUB_AGT_ADR_PGSI, NETXEN_HW_CRB_HUB_AGT_ADR_SN, 0, NETXEN_HW_CRB_HUB_AGT_ADR_EG, 0, NETXEN_HW_CRB_HUB_AGT_ADR_PS, NETXEN_HW_CRB_HUB_AGT_ADR_CAM, 0, 0, 0, 0, 0, NETXEN_HW_CRB_HUB_AGT_ADR_TIMR, 0, NETXEN_HW_CRB_HUB_AGT_ADR_RPMX1, NETXEN_HW_CRB_HUB_AGT_ADR_RPMX2, NETXEN_HW_CRB_HUB_AGT_ADR_RPMX3, NETXEN_HW_CRB_HUB_AGT_ADR_RPMX4, NETXEN_HW_CRB_HUB_AGT_ADR_RPMX5, NETXEN_HW_CRB_HUB_AGT_ADR_RPMX6, NETXEN_HW_CRB_HUB_AGT_ADR_RPMX7, NETXEN_HW_CRB_HUB_AGT_ADR_XDMA, NETXEN_HW_CRB_HUB_AGT_ADR_I2Q, NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB, 0, 
NETXEN_HW_CRB_HUB_AGT_ADR_RPMX0, NETXEN_HW_CRB_HUB_AGT_ADR_RPMX8, NETXEN_HW_CRB_HUB_AGT_ADR_RPMX9, NETXEN_HW_CRB_HUB_AGT_ADR_OCM0, 0, NETXEN_HW_CRB_HUB_AGT_ADR_SMB, NETXEN_HW_CRB_HUB_AGT_ADR_I2C0, NETXEN_HW_CRB_HUB_AGT_ADR_I2C1, 0, NETXEN_HW_CRB_HUB_AGT_ADR_PGNC, 0, }; /* PCI Windowing for DDR regions. */ #define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */ #define NETXEN_PCIE_SEM_TIMEOUT 10000 static int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu); int netxen_pcie_sem_lock(struct netxen_adapter *adapter, int sem, u32 id_reg) { int done = 0, timeout = 0; while (!done) { done = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_LOCK(sem))); if (done == 1) break; if (++timeout >= NETXEN_PCIE_SEM_TIMEOUT) return -EIO; msleep(1); } if (id_reg) NXWR32(adapter, id_reg, adapter->portnum); return 0; } void netxen_pcie_sem_unlock(struct netxen_adapter *adapter, int sem) { NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem))); } static int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port) { if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_1+(0x10000*port), 0x1447); NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0+(0x10000*port), 0x5); } return 0; } /* Disable an XG interface */ static int netxen_niu_disable_xg_port(struct netxen_adapter *adapter) { __u32 mac_cfg; u32 port = adapter->physical_port; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) return 0; if (port >= NETXEN_NIU_MAX_XG_PORTS) return -EINVAL; mac_cfg = 0; if (NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg)) return -EIO; return 0; } #define NETXEN_UNICAST_ADDR(port, index) \ (NETXEN_UNICAST_ADDR_BASE+(port*32)+(index*8)) #define NETXEN_MCAST_ADDR(port, index) \ (NETXEN_MULTICAST_ADDR_BASE+(port*0x80)+(index*8)) #define MAC_HI(addr) \ ((addr[2] << 16) | (addr[1] << 8) | (addr[0])) #define MAC_LO(addr) \ ((addr[5] << 16) | (addr[4] << 8) | (addr[3])) static int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode) { u32 mac_cfg; u32 cnt = 0; __u32 reg = 0x0200; u32 port = adapter->physical_port; u16 board_type = adapter->ahw.board_type; if (port >= NETXEN_NIU_MAX_XG_PORTS) return -EINVAL; mac_cfg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port)); mac_cfg &= ~0x4; NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg); if ((board_type == NETXEN_BRDTYPE_P2_SB31_10G_IMEZ) || (board_type == NETXEN_BRDTYPE_P2_SB31_10G_HMEZ)) reg = (0x20 << port); NXWR32(adapter, NETXEN_NIU_FRAME_COUNT_SELECT, reg); mdelay(10); while (NXRD32(adapter, NETXEN_NIU_FRAME_COUNT) && ++cnt < 20) mdelay(10); if (cnt < 20) { reg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port)); if (mode == NETXEN_NIU_PROMISC_MODE) reg = (reg | 0x2000UL); else reg = (reg & ~0x2000UL); if (mode == NETXEN_NIU_ALLMULTI_MODE) reg = (reg | 0x1000UL); else reg = (reg & ~0x1000UL); NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg); } mac_cfg |= 0x4; NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg); return 0; } static int netxen_p2_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr) { u32 mac_hi, mac_lo; u32 reg_hi, reg_lo; u8 phy = adapter->physical_port; if (phy >= NETXEN_NIU_MAX_XG_PORTS) return -EINVAL; mac_lo = ((u32)addr[0] << 16) | ((u32)addr[1] << 24); mac_hi = addr[2] | ((u32)addr[3] << 8) | ((u32)addr[4] << 16) | ((u32)addr[5] << 24); reg_lo = NETXEN_NIU_XGE_STATION_ADDR_0_1 + (0x10000 * phy); reg_hi = NETXEN_NIU_XGE_STATION_ADDR_0_HI + (0x10000 * phy); /* write twice to flush */ if 
(NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi)) return -EIO; if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi)) return -EIO; return 0; } static int netxen_nic_enable_mcast_filter(struct netxen_adapter *adapter) { u32 val = 0; u16 port = adapter->physical_port; u8 *addr = adapter->mac_addr; if (adapter->mc_enabled) return 0; val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG); val |= (1UL << (28+port)); NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val); /* add broadcast addr to filter */ val = 0xffffff; NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val); NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val); /* add station addr to filter */ val = MAC_HI(addr); NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), val); val = MAC_LO(addr); NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, val); adapter->mc_enabled = 1; return 0; } static int netxen_nic_disable_mcast_filter(struct netxen_adapter *adapter) { u32 val = 0; u16 port = adapter->physical_port; u8 *addr = adapter->mac_addr; if (!adapter->mc_enabled) return 0; val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG); val &= ~(1UL << (28+port)); NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val); val = MAC_HI(addr); NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val); val = MAC_LO(addr); NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val); NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), 0); NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, 0); adapter->mc_enabled = 0; return 0; } static int netxen_nic_set_mcast_addr(struct netxen_adapter *adapter, int index, u8 *addr) { u32 hi = 0, lo = 0; u16 port = adapter->physical_port; lo = MAC_LO(addr); hi = MAC_HI(addr); NXWR32(adapter, NETXEN_MCAST_ADDR(port, index), hi); NXWR32(adapter, NETXEN_MCAST_ADDR(port, index)+4, lo); return 0; } static void netxen_p2_nic_set_multi(struct net_device *netdev) { struct netxen_adapter *adapter = netdev_priv(netdev); struct netdev_hw_addr *ha; u8 null_addr[ETH_ALEN]; int i; memset(null_addr, 0, ETH_ALEN); if (netdev->flags & IFF_PROMISC) { adapter->set_promisc(adapter, NETXEN_NIU_PROMISC_MODE); /* Full promiscuous mode */ netxen_nic_disable_mcast_filter(adapter); return; } if (netdev_mc_empty(netdev)) { adapter->set_promisc(adapter, NETXEN_NIU_NON_PROMISC_MODE); netxen_nic_disable_mcast_filter(adapter); return; } adapter->set_promisc(adapter, NETXEN_NIU_ALLMULTI_MODE); if (netdev->flags & IFF_ALLMULTI || netdev_mc_count(netdev) > adapter->max_mc_count) { netxen_nic_disable_mcast_filter(adapter); return; } netxen_nic_enable_mcast_filter(adapter); i = 0; netdev_for_each_mc_addr(ha, netdev) netxen_nic_set_mcast_addr(adapter, i++, ha->addr); /* Clear out remaining addresses */ while (i < adapter->max_mc_count) netxen_nic_set_mcast_addr(adapter, i++, null_addr); } static int netxen_send_cmd_descs(struct netxen_adapter *adapter, struct cmd_desc_type0 *cmd_desc_arr, int nr_desc) { u32 i, producer, consumer; struct netxen_cmd_buffer *pbuf; struct cmd_desc_type0 *cmd_desc; struct nx_host_tx_ring *tx_ring; i = 0; if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) return -EIO; tx_ring = adapter->tx_ring; __netif_tx_lock_bh(tx_ring->txq); producer = tx_ring->producer; consumer = tx_ring->sw_consumer; if (nr_desc >= netxen_tx_avail(tx_ring)) { netif_tx_stop_queue(tx_ring->txq); smp_mb(); if (netxen_tx_avail(tx_ring) > nr_desc) { if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) netif_tx_wake_queue(tx_ring->txq); } else { __netif_tx_unlock_bh(tx_ring->txq); return -EBUSY; } } do { cmd_desc = &cmd_desc_arr[i]; pbuf = &tx_ring->cmd_buf_arr[producer]; pbuf->skb = NULL; 
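/* control descriptors carry no skb or fragments; reset the slot's bookkeeping before copying the descriptor into the ring */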
pbuf->frag_count = 0; memcpy(&tx_ring->desc_head[producer], &cmd_desc_arr[i], sizeof(struct cmd_desc_type0)); producer = get_next_index(producer, tx_ring->num_desc); i++; } while (i != nr_desc); tx_ring->producer = producer; netxen_nic_update_cmd_producer(adapter, tx_ring); __netif_tx_unlock_bh(tx_ring->txq); return 0; } static int nx_p3_sre_macaddr_change(struct netxen_adapter *adapter, u8 *addr, unsigned op) { nx_nic_req_t req; nx_mac_req_t *mac_req; u64 word; memset(&req, 0, sizeof(nx_nic_req_t)); req.qhdr = cpu_to_le64(NX_NIC_REQUEST << 23); word = NX_MAC_EVENT | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); mac_req = (nx_mac_req_t *)&req.words[0]; mac_req->op = op; memcpy(mac_req->mac_addr, addr, ETH_ALEN); return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); } static int nx_p3_nic_add_mac(struct netxen_adapter *adapter, const u8 *addr, struct list_head *del_list) { struct list_head *head; nx_mac_list_t *cur; /* look up if already exists */ list_for_each(head, del_list) { cur = list_entry(head, nx_mac_list_t, list); if (ether_addr_equal(addr, cur->mac_addr)) { list_move_tail(head, &adapter->mac_list); return 0; } } cur = kzalloc(sizeof(nx_mac_list_t), GFP_ATOMIC); if (cur == NULL) return -ENOMEM; memcpy(cur->mac_addr, addr, ETH_ALEN); list_add_tail(&cur->list, &adapter->mac_list); return nx_p3_sre_macaddr_change(adapter, cur->mac_addr, NETXEN_MAC_ADD); } static void netxen_p3_nic_set_multi(struct net_device *netdev) { struct netxen_adapter *adapter = netdev_priv(netdev); struct netdev_hw_addr *ha; static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; u32 mode = VPORT_MISS_MODE_DROP; LIST_HEAD(del_list); struct list_head *head; nx_mac_list_t *cur; if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) return; list_splice_tail_init(&adapter->mac_list, &del_list); nx_p3_nic_add_mac(adapter, adapter->mac_addr, &del_list); nx_p3_nic_add_mac(adapter, bcast_addr, &del_list); if (netdev->flags & IFF_PROMISC) { mode = VPORT_MISS_MODE_ACCEPT_ALL; goto send_fw_cmd; } if ((netdev->flags & IFF_ALLMULTI) || (netdev_mc_count(netdev) > adapter->max_mc_count)) { mode = VPORT_MISS_MODE_ACCEPT_MULTI; goto send_fw_cmd; } if (!netdev_mc_empty(netdev)) { netdev_for_each_mc_addr(ha, netdev) nx_p3_nic_add_mac(adapter, ha->addr, &del_list); } send_fw_cmd: adapter->set_promisc(adapter, mode); head = &del_list; while (!list_empty(head)) { cur = list_entry(head->next, nx_mac_list_t, list); nx_p3_sre_macaddr_change(adapter, cur->mac_addr, NETXEN_MAC_DEL); list_del(&cur->list); kfree(cur); } } static int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode) { nx_nic_req_t req; u64 word; memset(&req, 0, sizeof(nx_nic_req_t)); req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); word = NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); req.words[0] = cpu_to_le64(mode); return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); } void netxen_p3_free_mac_list(struct netxen_adapter *adapter) { nx_mac_list_t *cur; struct list_head *head = &adapter->mac_list; while (!list_empty(head)) { cur = list_entry(head->next, nx_mac_list_t, list); nx_p3_sre_macaddr_change(adapter, cur->mac_addr, NETXEN_MAC_DEL); list_del(&cur->list); kfree(cur); } } static int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr) { /* assuming caller has already copied new addr to netdev */ netxen_p3_nic_set_multi(adapter->netdev); return 0; } #define NETXEN_CONFIG_INTR_COALESCE 3 /* * Send the 
interrupt coalescing parameter set by ethtool to the card. */ int netxen_config_intr_coalesce(struct netxen_adapter *adapter) { nx_nic_req_t req; u64 word[6]; int rv, i; memset(&req, 0, sizeof(nx_nic_req_t)); memset(word, 0, sizeof(word)); req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); word[0] = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word[0]); memcpy(&word[0], &adapter->coal, sizeof(adapter->coal)); for (i = 0; i < 6; i++) req.words[i] = cpu_to_le64(word[i]); rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) { printk(KERN_ERR "ERROR. Could not send " "interrupt coalescing parameters\n"); } return rv; } int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable) { nx_nic_req_t req; u64 word; int rv = 0; if (!test_bit(__NX_FW_ATTACHED, &adapter->state)) return 0; memset(&req, 0, sizeof(nx_nic_req_t)); req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); word = NX_NIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); req.words[0] = cpu_to_le64(enable); rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) { printk(KERN_ERR "ERROR. Could not send " "configure hw lro request\n"); } return rv; } int netxen_config_bridged_mode(struct netxen_adapter *adapter, int enable) { nx_nic_req_t req; u64 word; int rv = 0; if (!!(adapter->flags & NETXEN_NIC_BRIDGE_ENABLED) == enable) return rv; memset(&req, 0, sizeof(nx_nic_req_t)); req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); word = NX_NIC_H2C_OPCODE_CONFIG_BRIDGING | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); req.words[0] = cpu_to_le64(enable); rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) { printk(KERN_ERR "ERROR. Could not send " "configure bridge mode request\n"); } adapter->flags ^= NETXEN_NIC_BRIDGE_ENABLED; return rv; } #define RSS_HASHTYPE_IP_TCP 0x3 int netxen_config_rss(struct netxen_adapter *adapter, int enable) { nx_nic_req_t req; u64 word; int i, rv; static const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL, 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, 0x255b0ec26d5a56daULL }; memset(&req, 0, sizeof(nx_nic_req_t)); req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); word = NX_NIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); /* * RSS request: * bits 3-0: hash_method * 5-4: hash_type_ipv4 * 7-6: hash_type_ipv6 * 8: enable * 9: use indirection table * 47-10: reserved * 63-48: indirection table mask */ word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) | ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) | ((u64)(enable & 0x1) << 8) | ((0x7ULL) << 48); req.words[0] = cpu_to_le64(word); for (i = 0; i < ARRAY_SIZE(key); i++) req.words[i+1] = cpu_to_le64(key[i]); rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) { printk(KERN_ERR "%s: could not configure RSS\n", adapter->netdev->name); } return rv; } int netxen_config_ipaddr(struct netxen_adapter *adapter, __be32 ip, int cmd) { nx_nic_req_t req; u64 word; int rv; memset(&req, 0, sizeof(nx_nic_req_t)); req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); word = NX_NIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); req.words[0] = cpu_to_le64(cmd); memcpy(&req.words[1], &ip, sizeof(u32)); rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) { printk(KERN_ERR "%s: could not notify %s IP 0x%x request\n", adapter->netdev->name, (cmd == NX_IP_UP) ? 
"Add" : "Remove", ip); } return rv; } int netxen_linkevent_request(struct netxen_adapter *adapter, int enable) { nx_nic_req_t req; u64 word; int rv; memset(&req, 0, sizeof(nx_nic_req_t)); req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); word = NX_NIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); req.words[0] = cpu_to_le64(enable | (enable << 8)); rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) { printk(KERN_ERR "%s: could not configure link notification\n", adapter->netdev->name); } return rv; } int netxen_send_lro_cleanup(struct netxen_adapter *adapter) { nx_nic_req_t req; u64 word; int rv; if (!test_bit(__NX_FW_ATTACHED, &adapter->state)) return 0; memset(&req, 0, sizeof(nx_nic_req_t)); req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); word = NX_NIC_H2C_OPCODE_LRO_REQUEST | ((u64)adapter->portnum << 16) | ((u64)NX_NIC_LRO_REQUEST_CLEANUP << 56) ; req.req_hdr = cpu_to_le64(word); rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) { printk(KERN_ERR "%s: could not cleanup lro flows\n", adapter->netdev->name); } return rv; } /* * netxen_nic_change_mtu - Change the Maximum Transfer Unit * @returns 0 on success, negative on failure */ #define MTU_FUDGE_FACTOR 100 int netxen_nic_change_mtu(struct net_device *netdev, int mtu) { struct netxen_adapter *adapter = netdev_priv(netdev); int max_mtu; int rc = 0; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) max_mtu = P3_MAX_MTU; else max_mtu = P2_MAX_MTU; if (mtu > max_mtu) { printk(KERN_ERR "%s: mtu > %d bytes unsupported\n", netdev->name, max_mtu); return -EINVAL; } if (adapter->set_mtu) rc = adapter->set_mtu(adapter, mtu); if (!rc) netdev->mtu = mtu; return rc; } static int netxen_get_flash_block(struct netxen_adapter *adapter, int base, int size, __le32 * buf) { int i, v, addr; __le32 *ptr32; addr = base; ptr32 = buf; for (i = 0; i < size / sizeof(u32); i++) { if (netxen_rom_fast_read(adapter, addr, &v) == -1) return -1; *ptr32 = cpu_to_le32(v); ptr32++; addr += sizeof(u32); } if ((char *)buf + size > (char *)ptr32) { __le32 local; if (netxen_rom_fast_read(adapter, addr, &v) == -1) return -1; local = cpu_to_le32(v); memcpy(ptr32, &local, (char *)buf + size - (char *)ptr32); } return 0; } int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac) { __le32 *pmac = (__le32 *) mac; u32 offset; offset = NX_FW_MAC_ADDR_OFFSET + (adapter->portnum * sizeof(u64)); if (netxen_get_flash_block(adapter, offset, sizeof(u64), pmac) == -1) return -1; if (*mac == ~0ULL) { offset = NX_OLD_MAC_ADDR_OFFSET + (adapter->portnum * sizeof(u64)); if (netxen_get_flash_block(adapter, offset, sizeof(u64), pmac) == -1) return -1; if (*mac == ~0ULL) return -1; } return 0; } int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac) { uint32_t crbaddr, mac_hi, mac_lo; int pci_func = adapter->ahw.pci_func; crbaddr = CRB_MAC_BLOCK_START + (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1)); mac_lo = NXRD32(adapter, crbaddr); mac_hi = NXRD32(adapter, crbaddr+4); if (pci_func & 1) *mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16)); else *mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32)); return 0; } /* * Changes the CRB window to the specified window. 
*/ static void netxen_nic_pci_set_crbwindow_128M(struct netxen_adapter *adapter, u32 window) { void __iomem *offset; int count = 10; u8 func = adapter->ahw.pci_func; if (adapter->ahw.crb_win == window) return; offset = PCI_OFFSET_SECOND_RANGE(adapter, NETXEN_PCIX_PH_REG(PCIE_CRB_WINDOW_REG(func))); writel(window, offset); do { if (window == readl(offset)) break; if (printk_ratelimit()) dev_warn(&adapter->pdev->dev, "failed to set CRB window to %d\n", (window == NETXEN_WINDOW_ONE)); udelay(1); } while (--count > 0); if (count > 0) adapter->ahw.crb_win = window; } /* * Returns < 0 if off is not valid, * 1 if window access is needed. 'off' is set to offset from * CRB space in 128M pci map * 0 if no window access is needed. 'off' is set to 2M addr * In: 'off' is offset from base in 128M pci map */ static int netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter, ulong off, void __iomem **addr) { crb_128M_2M_sub_block_map_t *m; if ((off >= NETXEN_CRB_MAX) || (off < NETXEN_PCI_CRBSPACE)) return -EINVAL; off -= NETXEN_PCI_CRBSPACE; /* * Try direct map */ m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)]; if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) { *addr = adapter->ahw.pci_base0 + m->start_2M + (off - m->start_128M); return 0; } /* * Not in direct map, use crb window */ *addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M + (off & MASK(16)); return 1; } /* * In: 'off' is offset from CRB space in 128M pci map * Out: 'off' is 2M pci map addr * side effect: lock crb window */ static void netxen_nic_pci_set_crbwindow_2M(struct netxen_adapter *adapter, ulong off) { u32 window; void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M; off -= NETXEN_PCI_CRBSPACE; window = CRB_HI(off); writel(window, addr); if (readl(addr) != window) { if (printk_ratelimit()) dev_warn(&adapter->pdev->dev, "failed to set CRB window to %d off 0x%lx\n", window, off); } } static void __iomem * netxen_nic_map_indirect_address_128M(struct netxen_adapter *adapter, ulong win_off, void __iomem **mem_ptr) { ulong off = win_off; void __iomem *addr; resource_size_t mem_base; if (ADDR_IN_WINDOW1(win_off)) off = NETXEN_CRB_NORMAL(win_off); addr = pci_base_offset(adapter, off); if (addr) return addr; if (adapter->ahw.pci_len0 == 0) off -= NETXEN_PCI_CRBSPACE; mem_base = pci_resource_start(adapter->pdev, 0); *mem_ptr = ioremap(mem_base + (off & PAGE_MASK), PAGE_SIZE); if (*mem_ptr) addr = *mem_ptr + (off & (PAGE_SIZE - 1)); return addr; } static int netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter, ulong off, u32 data) { unsigned long flags; void __iomem *addr, *mem_ptr = NULL; addr = netxen_nic_map_indirect_address_128M(adapter, off, &mem_ptr); if (!addr) return -EIO; if (ADDR_IN_WINDOW1(off)) { /* Window 1 */ netxen_nic_io_write_128M(adapter, addr, data); } else { /* Window 0 */ write_lock_irqsave(&adapter->ahw.crb_lock, flags); netxen_nic_pci_set_crbwindow_128M(adapter, 0); writel(data, addr); netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE); write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); } if (mem_ptr) iounmap(mem_ptr); return 0; } static u32 netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter, ulong off) { unsigned long flags; void __iomem *addr, *mem_ptr = NULL; u32 data; addr = netxen_nic_map_indirect_address_128M(adapter, off, &mem_ptr); if (!addr) return -EIO; if (ADDR_IN_WINDOW1(off)) { /* Window 1 */ data = netxen_nic_io_read_128M(adapter, addr); } else { /* Window 0 */ write_lock_irqsave(&adapter->ahw.crb_lock, flags); 
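/* window 0 access: switch windows under the CRB lock, read, then restore window 1 before unlocking */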
netxen_nic_pci_set_crbwindow_128M(adapter, 0); data = readl(addr); netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE); write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); } if (mem_ptr) iounmap(mem_ptr); return data; } static int netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter, ulong off, u32 data) { unsigned long flags; int rv; void __iomem *addr = NULL; rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr); if (rv == 0) { writel(data, addr); return 0; } if (rv > 0) { /* indirect access */ write_lock_irqsave(&adapter->ahw.crb_lock, flags); crb_win_lock(adapter); netxen_nic_pci_set_crbwindow_2M(adapter, off); writel(data, addr); crb_win_unlock(adapter); write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); return 0; } dev_err(&adapter->pdev->dev, "%s: invalid offset: 0x%016lx\n", __func__, off); dump_stack(); return -EIO; } static u32 netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter, ulong off) { unsigned long flags; int rv; u32 data; void __iomem *addr = NULL; rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr); if (rv == 0) return readl(addr); if (rv > 0) { /* indirect access */ write_lock_irqsave(&adapter->ahw.crb_lock, flags); crb_win_lock(adapter); netxen_nic_pci_set_crbwindow_2M(adapter, off); data = readl(addr); crb_win_unlock(adapter); write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); return data; } dev_err(&adapter->pdev->dev, "%s: invalid offset: 0x%016lx\n", __func__, off); dump_stack(); return -1; } /* window 1 registers only */ static void netxen_nic_io_write_128M(struct netxen_adapter *adapter, void __iomem *addr, u32 data) { read_lock(&adapter->ahw.crb_lock); writel(data, addr); read_unlock(&adapter->ahw.crb_lock); } static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter, void __iomem *addr) { u32 val; read_lock(&adapter->ahw.crb_lock); val = readl(addr); read_unlock(&adapter->ahw.crb_lock); return val; } static void netxen_nic_io_write_2M(struct netxen_adapter *adapter, void __iomem *addr, u32 data) { writel(data, addr); } static u32 netxen_nic_io_read_2M(struct netxen_adapter *adapter, void __iomem *addr) { return readl(addr); } void __iomem * netxen_get_ioaddr(struct netxen_adapter *adapter, u32 offset) { void __iomem *addr = NULL; if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { if ((offset < NETXEN_CRB_PCIX_HOST2) && (offset > NETXEN_CRB_PCIX_HOST)) addr = PCI_OFFSET_SECOND_RANGE(adapter, offset); else addr = NETXEN_CRB_NORMALIZE(adapter, offset); } else { WARN_ON(netxen_nic_pci_get_crb_addr_2M(adapter, offset, &addr)); } return addr; } static int netxen_nic_pci_set_window_128M(struct netxen_adapter *adapter, u64 addr, u32 *start) { if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) { *start = (addr - NETXEN_ADDR_OCM0 + NETXEN_PCI_OCM0); return 0; } else if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) { *start = (addr - NETXEN_ADDR_OCM1 + NETXEN_PCI_OCM1); return 0; } return -EIO; } static int netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter, u64 addr, u32 *start) { u32 window; window = OCM_WIN(addr); writel(window, adapter->ahw.ocm_win_crb); /* read back to flush */ readl(adapter->ahw.ocm_win_crb); adapter->ahw.ocm_win = window; *start = NETXEN_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr); return 0; } static int netxen_nic_pci_mem_access_direct(struct netxen_adapter *adapter, u64 off, u64 *data, int op) { void __iomem *addr, *mem_ptr = NULL; resource_size_t mem_base; int ret; u32 start; spin_lock(&adapter->ahw.mem_lock); ret = adapter->pci_set_window(adapter, off, &start); if 
(ret != 0) goto unlock; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { addr = adapter->ahw.pci_base0 + start; } else { addr = pci_base_offset(adapter, start); if (addr) goto noremap; mem_base = pci_resource_start(adapter->pdev, 0) + (start & PAGE_MASK); mem_ptr = ioremap(mem_base, PAGE_SIZE); if (mem_ptr == NULL) { ret = -EIO; goto unlock; } addr = mem_ptr + (start & (PAGE_SIZE-1)); } noremap: if (op == 0) /* read */ *data = readq(addr); else /* write */ writeq(*data, addr); unlock: spin_unlock(&adapter->ahw.mem_lock); if (mem_ptr) iounmap(mem_ptr); return ret; } void netxen_pci_camqm_read_2M(struct netxen_adapter *adapter, u64 off, u64 *data) { void __iomem *addr = adapter->ahw.pci_base0 + NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM); spin_lock(&adapter->ahw.mem_lock); *data = readq(addr); spin_unlock(&adapter->ahw.mem_lock); } void netxen_pci_camqm_write_2M(struct netxen_adapter *adapter, u64 off, u64 data) { void __iomem *addr = adapter->ahw.pci_base0 + NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM); spin_lock(&adapter->ahw.mem_lock); writeq(data, addr); spin_unlock(&adapter->ahw.mem_lock); } #define MAX_CTL_CHECK 1000 static int netxen_nic_pci_mem_write_128M(struct netxen_adapter *adapter, u64 off, u64 data) { int j, ret; u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo; void __iomem *mem_crb; /* Only 64-bit aligned access */ if (off & 7) return -EIO; /* P2 has different SIU and MIU test agent base addr */ if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, NETXEN_ADDR_QDR_NET_MAX_P2)) { mem_crb = pci_base_offset(adapter, NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE); addr_hi = SIU_TEST_AGT_ADDR_HI; data_lo = SIU_TEST_AGT_WRDATA_LO; data_hi = SIU_TEST_AGT_WRDATA_HI; off_lo = off & SIU_TEST_AGT_ADDR_MASK; off_hi = SIU_TEST_AGT_UPPER_ADDR(off); goto correct; } if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { mem_crb = pci_base_offset(adapter, NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); addr_hi = MIU_TEST_AGT_ADDR_HI; data_lo = MIU_TEST_AGT_WRDATA_LO; data_hi = MIU_TEST_AGT_WRDATA_HI; off_lo = off & MIU_TEST_AGT_ADDR_MASK; off_hi = 0; goto correct; } if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) || ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) { if (adapter->ahw.pci_len0 != 0) { return netxen_nic_pci_mem_access_direct(adapter, off, &data, 1); } } return -EIO; correct: spin_lock(&adapter->ahw.mem_lock); netxen_nic_pci_set_crbwindow_128M(adapter, 0); writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO)); writel(off_hi, (mem_crb + addr_hi)); writel(data & 0xffffffff, (mem_crb + data_lo)); writel((data >> 32) & 0xffffffff, (mem_crb + data_hi)); writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL)); writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL)); for (j = 0; j < MAX_CTL_CHECK; j++) { temp = readl((mem_crb + TEST_AGT_CTRL)); if ((temp & TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { if (printk_ratelimit()) dev_err(&adapter->pdev->dev, "failed to write through agent\n"); ret = -EIO; } else ret = 0; netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE); spin_unlock(&adapter->ahw.mem_lock); return ret; } static int netxen_nic_pci_mem_read_128M(struct netxen_adapter *adapter, u64 off, u64 *data) { int j, ret; u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo; u64 val; void __iomem *mem_crb; /* Only 64-bit aligned access */ if (off & 7) return -EIO; /* P2 has different SIU and MIU test agent base addr */ if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, NETXEN_ADDR_QDR_NET_MAX_P2)) { mem_crb = 
pci_base_offset(adapter, NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE); addr_hi = SIU_TEST_AGT_ADDR_HI; data_lo = SIU_TEST_AGT_RDDATA_LO; data_hi = SIU_TEST_AGT_RDDATA_HI; off_lo = off & SIU_TEST_AGT_ADDR_MASK; off_hi = SIU_TEST_AGT_UPPER_ADDR(off); goto correct; } if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { mem_crb = pci_base_offset(adapter, NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); addr_hi = MIU_TEST_AGT_ADDR_HI; data_lo = MIU_TEST_AGT_RDDATA_LO; data_hi = MIU_TEST_AGT_RDDATA_HI; off_lo = off & MIU_TEST_AGT_ADDR_MASK; off_hi = 0; goto correct; } if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) || ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) { if (adapter->ahw.pci_len0 != 0) { return netxen_nic_pci_mem_access_direct(adapter, off, data, 0); } } return -EIO; correct: spin_lock(&adapter->ahw.mem_lock); netxen_nic_pci_set_crbwindow_128M(adapter, 0); writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO)); writel(off_hi, (mem_crb + addr_hi)); writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); writel((TA_CTL_START|TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL)); for (j = 0; j < MAX_CTL_CHECK; j++) { temp = readl(mem_crb + TEST_AGT_CTRL); if ((temp & TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { if (printk_ratelimit()) dev_err(&adapter->pdev->dev, "failed to read through agent\n"); ret = -EIO; } else { temp = readl(mem_crb + data_hi); val = ((u64)temp << 32); val |= readl(mem_crb + data_lo); *data = val; ret = 0; } netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE); spin_unlock(&adapter->ahw.mem_lock); return ret; } static int netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter, u64 off, u64 data) { int j, ret; u32 temp, off8; void __iomem *mem_crb; /* Only 64-bit aligned access */ if (off & 7) return -EIO; /* P3 onward, test agent base for MIU and SIU is same */ if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, NETXEN_ADDR_QDR_NET_MAX_P3)) { mem_crb = netxen_get_ioaddr(adapter, NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE); goto correct; } if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { mem_crb = netxen_get_ioaddr(adapter, NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); goto correct; } if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) return netxen_nic_pci_mem_access_direct(adapter, off, &data, 1); return -EIO; correct: off8 = off & 0xfffffff8; spin_lock(&adapter->ahw.mem_lock); writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); writel(data & 0xffffffff, mem_crb + MIU_TEST_AGT_WRDATA_LO); writel((data >> 32) & 0xffffffff, mem_crb + MIU_TEST_AGT_WRDATA_HI); writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL)); writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL)); for (j = 0; j < MAX_CTL_CHECK; j++) { temp = readl(mem_crb + TEST_AGT_CTRL); if ((temp & TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { if (printk_ratelimit()) dev_err(&adapter->pdev->dev, "failed to write through agent\n"); ret = -EIO; } else ret = 0; spin_unlock(&adapter->ahw.mem_lock); return ret; } static int netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter, u64 off, u64 *data) { int j, ret; u32 temp, off8; u64 val; void __iomem *mem_crb; /* Only 64-bit aligned access */ if (off & 7) return -EIO; /* P3 onward, test agent base for MIU and SIU is same */ if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, NETXEN_ADDR_QDR_NET_MAX_P3)) { mem_crb = netxen_get_ioaddr(adapter, NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE); goto correct; } if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, 
NETXEN_ADDR_DDR_NET_MAX)) { mem_crb = netxen_get_ioaddr(adapter, NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); goto correct; } if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) { return netxen_nic_pci_mem_access_direct(adapter, off, data, 0); } return -EIO; correct: off8 = off & 0xfffffff8; spin_lock(&adapter->ahw.mem_lock); writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL)); for (j = 0; j < MAX_CTL_CHECK; j++) { temp = readl(mem_crb + TEST_AGT_CTRL); if ((temp & TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { if (printk_ratelimit()) dev_err(&adapter->pdev->dev, "failed to read through agent\n"); ret = -EIO; } else { val = (u64)(readl(mem_crb + MIU_TEST_AGT_RDDATA_HI)) << 32; val |= readl(mem_crb + MIU_TEST_AGT_RDDATA_LO); *data = val; ret = 0; } spin_unlock(&adapter->ahw.mem_lock); return ret; } void netxen_setup_hwops(struct netxen_adapter *adapter) { adapter->init_port = netxen_niu_xg_init_port; adapter->stop_port = netxen_niu_disable_xg_port; if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { adapter->crb_read = netxen_nic_hw_read_wx_128M, adapter->crb_write = netxen_nic_hw_write_wx_128M, adapter->pci_set_window = netxen_nic_pci_set_window_128M, adapter->pci_mem_read = netxen_nic_pci_mem_read_128M, adapter->pci_mem_write = netxen_nic_pci_mem_write_128M, adapter->io_read = netxen_nic_io_read_128M, adapter->io_write = netxen_nic_io_write_128M, adapter->macaddr_set = netxen_p2_nic_set_mac_addr; adapter->set_multi = netxen_p2_nic_set_multi; adapter->set_mtu = netxen_nic_set_mtu_xgb; adapter->set_promisc = netxen_p2_nic_set_promisc; } else { adapter->crb_read = netxen_nic_hw_read_wx_2M, adapter->crb_write = netxen_nic_hw_write_wx_2M, adapter->pci_set_window = netxen_nic_pci_set_window_2M, adapter->pci_mem_read = netxen_nic_pci_mem_read_2M, adapter->pci_mem_write = netxen_nic_pci_mem_write_2M, adapter->io_read = netxen_nic_io_read_2M, adapter->io_write = netxen_nic_io_write_2M, adapter->set_mtu = nx_fw_cmd_set_mtu; adapter->set_promisc = netxen_p3_nic_set_promisc; adapter->macaddr_set = netxen_p3_nic_set_mac_addr; adapter->set_multi = netxen_p3_nic_set_multi; adapter->phy_read = nx_fw_cmd_query_phy; adapter->phy_write = nx_fw_cmd_set_phy; } } int netxen_nic_get_board_info(struct netxen_adapter *adapter) { int offset, board_type, magic; struct pci_dev *pdev = adapter->pdev; offset = NX_FW_MAGIC_OFFSET; if (netxen_rom_fast_read(adapter, offset, &magic)) return -EIO; if (magic != NETXEN_BDINFO_MAGIC) { dev_err(&pdev->dev, "invalid board config, magic=%08x\n", magic); return -EIO; } offset = NX_BRDTYPE_OFFSET; if (netxen_rom_fast_read(adapter, offset, &board_type)) return -EIO; if (board_type == NETXEN_BRDTYPE_P3_4_GB_MM) { u32 gpio = NXRD32(adapter, NETXEN_ROMUSB_GLB_PAD_GPIO_I); if ((gpio & 0x8000) == 0) board_type = NETXEN_BRDTYPE_P3_10G_TP; } adapter->ahw.board_type = board_type; switch (board_type) { case NETXEN_BRDTYPE_P2_SB35_4G: adapter->ahw.port_type = NETXEN_NIC_GBE; break; case NETXEN_BRDTYPE_P2_SB31_10G: case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: case NETXEN_BRDTYPE_P2_SB31_10G_CX4: case NETXEN_BRDTYPE_P3_HMEZ: case NETXEN_BRDTYPE_P3_XG_LOM: case NETXEN_BRDTYPE_P3_10G_CX4: case NETXEN_BRDTYPE_P3_10G_CX4_LP: case NETXEN_BRDTYPE_P3_IMEZ: case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: case NETXEN_BRDTYPE_P3_10G_SFP_CT: case NETXEN_BRDTYPE_P3_10G_SFP_QT: case NETXEN_BRDTYPE_P3_10G_XFP: case 
NETXEN_BRDTYPE_P3_10000_BASE_T: adapter->ahw.port_type = NETXEN_NIC_XGBE; break; case NETXEN_BRDTYPE_P1_BD: case NETXEN_BRDTYPE_P1_SB: case NETXEN_BRDTYPE_P1_SMAX: case NETXEN_BRDTYPE_P1_SOCK: case NETXEN_BRDTYPE_P3_REF_QG: case NETXEN_BRDTYPE_P3_4_GB: case NETXEN_BRDTYPE_P3_4_GB_MM: adapter->ahw.port_type = NETXEN_NIC_GBE; break; case NETXEN_BRDTYPE_P3_10G_TP: adapter->ahw.port_type = (adapter->portnum < 2) ? NETXEN_NIC_XGBE : NETXEN_NIC_GBE; break; default: dev_err(&pdev->dev, "unknown board type %x\n", board_type); adapter->ahw.port_type = NETXEN_NIC_XGBE; break; } return 0; } /* NIU access sections */ static int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu) { new_mtu += MTU_FUDGE_FACTOR; if (adapter->physical_port == 0) NXWR32(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, new_mtu); else NXWR32(adapter, NETXEN_NIU_XG1_MAX_FRAME_SIZE, new_mtu); return 0; } void netxen_nic_set_link_parameters(struct netxen_adapter *adapter) { __u32 status; __u32 autoneg; __u32 port_mode; if (!netif_carrier_ok(adapter->netdev)) { adapter->link_speed = 0; adapter->link_duplex = -1; adapter->link_autoneg = AUTONEG_ENABLE; return; } if (adapter->ahw.port_type == NETXEN_NIC_GBE) { port_mode = NXRD32(adapter, NETXEN_PORT_MODE_ADDR); if (port_mode == NETXEN_PORT_MODE_802_3_AP) { adapter->link_speed = SPEED_1000; adapter->link_duplex = DUPLEX_FULL; adapter->link_autoneg = AUTONEG_DISABLE; return; } if (adapter->phy_read && adapter->phy_read(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, &status) == 0) { if (netxen_get_phy_link(status)) { switch (netxen_get_phy_speed(status)) { case 0: adapter->link_speed = SPEED_10; break; case 1: adapter->link_speed = SPEED_100; break; case 2: adapter->link_speed = SPEED_1000; break; default: adapter->link_speed = 0; break; } switch (netxen_get_phy_duplex(status)) { case 0: adapter->link_duplex = DUPLEX_HALF; break; case 1: adapter->link_duplex = DUPLEX_FULL; break; default: adapter->link_duplex = -1; break; } if (adapter->phy_read && adapter->phy_read(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG, &autoneg) != 0) adapter->link_autoneg = autoneg; } else goto link_down; } else { link_down: adapter->link_speed = 0; adapter->link_duplex = -1; } } } int netxen_nic_wol_supported(struct netxen_adapter *adapter) { u32 wol_cfg; if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) return 0; wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV); if (wol_cfg & (1UL << adapter->portnum)) { wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG); if (wol_cfg & (1 << adapter->portnum)) return 1; } return 0; } static u32 netxen_md_cntrl(struct netxen_adapter *adapter, struct netxen_minidump_template_hdr *template_hdr, struct netxen_minidump_entry_crb *crtEntry) { int loop_cnt, i, rv = 0, timeout_flag; u32 op_count, stride; u32 opcode, read_value, addr; unsigned long timeout, timeout_jiffies; addr = crtEntry->addr; op_count = crtEntry->op_count; stride = crtEntry->addr_stride; for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) { for (i = 0; i < sizeof(crtEntry->opcode) * 8; i++) { opcode = (crtEntry->opcode & (0x1 << i)); if (opcode) { switch (opcode) { case NX_DUMP_WCRB: NX_WR_DUMP_REG(addr, adapter->ahw.pci_base0, crtEntry->value_1); break; case NX_DUMP_RWCRB: NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0, &read_value); NX_WR_DUMP_REG(addr, adapter->ahw.pci_base0, read_value); break; case NX_DUMP_ANDCRB: NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0, &read_value); read_value &= crtEntry->value_2; NX_WR_DUMP_REG(addr, adapter->ahw.pci_base0, read_value); break; case NX_DUMP_ORCRB: 
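/* read-modify-write: OR value_3 into the register */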
NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0, &read_value); read_value |= crtEntry->value_3; NX_WR_DUMP_REG(addr, adapter->ahw.pci_base0, read_value); break; case NX_DUMP_POLLCRB: timeout = crtEntry->poll_timeout; NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0, &read_value); timeout_jiffies = msecs_to_jiffies(timeout) + jiffies; for (timeout_flag = 0; !timeout_flag && ((read_value & crtEntry->value_2) != crtEntry->value_1);) { if (time_after(jiffies, timeout_jiffies)) timeout_flag = 1; NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0, &read_value); } if (timeout_flag) { dev_err(&adapter->pdev->dev, "%s : " "Timeout in poll_crb control operation.\n" , __func__); return -1; } break; case NX_DUMP_RD_SAVE: /* Decide which address to use */ if (crtEntry->state_index_a) addr = template_hdr->saved_state_array [crtEntry->state_index_a]; NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0, &read_value); template_hdr->saved_state_array [crtEntry->state_index_v] = read_value; break; case NX_DUMP_WRT_SAVED: /* Decide which value to use */ if (crtEntry->state_index_v) read_value = template_hdr->saved_state_array [crtEntry->state_index_v]; else read_value = crtEntry->value_1; /* Decide which address to use */ if (crtEntry->state_index_a) addr = template_hdr->saved_state_array [crtEntry->state_index_a]; NX_WR_DUMP_REG(addr, adapter->ahw.pci_base0, read_value); break; case NX_DUMP_MOD_SAVE_ST: read_value = template_hdr->saved_state_array [crtEntry->state_index_v]; read_value <<= crtEntry->shl; read_value >>= crtEntry->shr; if (crtEntry->value_2) read_value &= crtEntry->value_2; read_value |= crtEntry->value_3; read_value += crtEntry->value_1; /* Write value back to state area.*/ template_hdr->saved_state_array [crtEntry->state_index_v] = read_value; break; default: rv = 1; break; } } } addr = addr + stride; } return rv; } /* Read memory or MN */ static u32 netxen_md_rdmem(struct netxen_adapter *adapter, struct netxen_minidump_entry_rdmem *memEntry, u64 *data_buff) { u64 addr, value = 0; int i = 0, loop_cnt; addr = (u64)memEntry->read_addr; loop_cnt = memEntry->read_data_size; /* This is size in bytes */ loop_cnt /= sizeof(value); for (i = 0; i < loop_cnt; i++) { if (netxen_nic_pci_mem_read_2M(adapter, addr, &value)) goto out; *data_buff++ = value; addr += sizeof(value); } out: return i * sizeof(value); } /* Read CRB operation */ static u32 netxen_md_rd_crb(struct netxen_adapter *adapter, struct netxen_minidump_entry_crb *crbEntry, u32 *data_buff) { int loop_cnt; u32 op_count, addr, stride, value; addr = crbEntry->addr; op_count = crbEntry->op_count; stride = crbEntry->addr_stride; for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) { NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0, &value); *data_buff++ = addr; *data_buff++ = value; addr = addr + stride; } return loop_cnt * (2 * sizeof(u32)); } /* Read ROM */ static u32 netxen_md_rdrom(struct netxen_adapter *adapter, struct netxen_minidump_entry_rdrom *romEntry, __le32 *data_buff) { int i, count = 0; u32 size, lck_val; u32 val; u32 fl_addr, waddr, raddr; fl_addr = romEntry->read_addr; size = romEntry->read_data_size/4; lock_try: lck_val = readl((void __iomem *)(adapter->ahw.pci_base0 + NX_FLASH_SEM2_LK)); if (!lck_val && count < MAX_CTL_CHECK) { msleep(20); count++; goto lock_try; } writel(adapter->ahw.pci_func, (void __iomem *)(adapter->ahw.pci_base0 + NX_FLASH_LOCK_ID)); for (i = 0; i < size; i++) { waddr = fl_addr & 0xFFFF0000; NX_WR_DUMP_REG(FLASH_ROM_WINDOW, adapter->ahw.pci_base0, waddr); raddr = FLASH_ROM_DATA + (fl_addr & 0x0000FFFF); NX_RD_DUMP_REG(raddr, 
adapter->ahw.pci_base0, &val); *data_buff++ = cpu_to_le32(val); fl_addr += sizeof(val); } readl((void __iomem *)(adapter->ahw.pci_base0 + NX_FLASH_SEM2_ULK)); return romEntry->read_data_size; } /* Handle L2 Cache */ static u32 netxen_md_L2Cache(struct netxen_adapter *adapter, struct netxen_minidump_entry_cache *cacheEntry, u32 *data_buff) { int loop_cnt, i, k, timeout_flag = 0; u32 addr, read_addr, read_value, cntrl_addr, tag_reg_addr; u32 tag_value, read_cnt; u8 cntl_value_w, cntl_value_r; unsigned long timeout, timeout_jiffies; loop_cnt = cacheEntry->op_count; read_addr = cacheEntry->read_addr; cntrl_addr = cacheEntry->control_addr; cntl_value_w = (u32) cacheEntry->write_value; tag_reg_addr = cacheEntry->tag_reg_addr; tag_value = cacheEntry->init_tag_value; read_cnt = cacheEntry->read_addr_cnt; for (i = 0; i < loop_cnt; i++) { NX_WR_DUMP_REG(tag_reg_addr, adapter->ahw.pci_base0, tag_value); if (cntl_value_w) NX_WR_DUMP_REG(cntrl_addr, adapter->ahw.pci_base0, (u32)cntl_value_w); if (cacheEntry->poll_mask) { timeout = cacheEntry->poll_wait; NX_RD_DUMP_REG(cntrl_addr, adapter->ahw.pci_base0, &cntl_value_r); timeout_jiffies = msecs_to_jiffies(timeout) + jiffies; for (timeout_flag = 0; !timeout_flag && ((cntl_value_r & cacheEntry->poll_mask) != 0);) { if (time_after(jiffies, timeout_jiffies)) timeout_flag = 1; NX_RD_DUMP_REG(cntrl_addr, adapter->ahw.pci_base0, &cntl_value_r); } if (timeout_flag) { dev_err(&adapter->pdev->dev, "Timeout in processing L2 Tag poll.\n"); return -1; } } addr = read_addr; for (k = 0; k < read_cnt; k++) { NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0, &read_value); *data_buff++ = read_value; addr += cacheEntry->read_addr_stride; } tag_value += cacheEntry->tag_value_stride; } return read_cnt * loop_cnt * sizeof(read_value); } /* Handle L1 Cache */ static u32 netxen_md_L1Cache(struct netxen_adapter *adapter, struct netxen_minidump_entry_cache *cacheEntry, u32 *data_buff) { int i, k, loop_cnt; u32 addr, read_addr, read_value, cntrl_addr, tag_reg_addr; u32 tag_value, read_cnt; u8 cntl_value_w; loop_cnt = cacheEntry->op_count; read_addr = cacheEntry->read_addr; cntrl_addr = cacheEntry->control_addr; cntl_value_w = (u32) cacheEntry->write_value; tag_reg_addr = cacheEntry->tag_reg_addr; tag_value = cacheEntry->init_tag_value; read_cnt = cacheEntry->read_addr_cnt; for (i = 0; i < loop_cnt; i++) { NX_WR_DUMP_REG(tag_reg_addr, adapter->ahw.pci_base0, tag_value); NX_WR_DUMP_REG(cntrl_addr, adapter->ahw.pci_base0, (u32) cntl_value_w); addr = read_addr; for (k = 0; k < read_cnt; k++) { NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0, &read_value); *data_buff++ = read_value; addr += cacheEntry->read_addr_stride; } tag_value += cacheEntry->tag_value_stride; } return read_cnt * loop_cnt * sizeof(read_value); } /* Reading OCM memory */ static u32 netxen_md_rdocm(struct netxen_adapter *adapter, struct netxen_minidump_entry_rdocm *ocmEntry, u32 *data_buff) { int i, loop_cnt; u32 value; void __iomem *addr; addr = (ocmEntry->read_addr + adapter->ahw.pci_base0); loop_cnt = ocmEntry->op_count; for (i = 0; i < loop_cnt; i++) { value = readl(addr); *data_buff++ = value; addr += ocmEntry->read_addr_stride; } return i * sizeof(u32); } /* Read MUX data */ static u32 netxen_md_rdmux(struct netxen_adapter *adapter, struct netxen_minidump_entry_mux *muxEntry, u32 *data_buff) { int loop_cnt = 0; u32 read_addr, read_value, select_addr, sel_value; read_addr = muxEntry->read_addr; sel_value = muxEntry->select_value; select_addr = muxEntry->select_addr; for (loop_cnt = 0; loop_cnt < muxEntry->op_count; 
loop_cnt++) { NX_WR_DUMP_REG(select_addr, adapter->ahw.pci_base0, sel_value); NX_RD_DUMP_REG(read_addr, adapter->ahw.pci_base0, &read_value); *data_buff++ = sel_value; *data_buff++ = read_value; sel_value += muxEntry->select_value_stride; } return loop_cnt * (2 * sizeof(u32)); } /* Handling Queue State Reads */ static u32 netxen_md_rdqueue(struct netxen_adapter *adapter, struct netxen_minidump_entry_queue *queueEntry, u32 *data_buff) { int loop_cnt, k; u32 queue_id, read_addr, read_value, read_stride, select_addr, read_cnt; read_cnt = queueEntry->read_addr_cnt; read_stride = queueEntry->read_addr_stride; select_addr = queueEntry->select_addr; for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count; loop_cnt++) { NX_WR_DUMP_REG(select_addr, adapter->ahw.pci_base0, queue_id); read_addr = queueEntry->read_addr; for (k = 0; k < read_cnt; k++) { NX_RD_DUMP_REG(read_addr, adapter->ahw.pci_base0, &read_value); *data_buff++ = read_value; read_addr += read_stride; } queue_id += queueEntry->queue_id_stride; } return loop_cnt * (read_cnt * sizeof(read_value)); } /* * We catch an error where the driver does not read * as much data as we expect from the entry. */ static int netxen_md_entry_err_chk(struct netxen_adapter *adapter, struct netxen_minidump_entry *entry, int esize) { if (esize < 0) { entry->hdr.driver_flags |= NX_DUMP_SKIP; return esize; } if (esize != entry->hdr.entry_capture_size) { entry->hdr.entry_capture_size = esize; entry->hdr.driver_flags |= NX_DUMP_SIZE_ERR; dev_info(&adapter->pdev->dev, "Invalidate dump, Type:%d\tMask:%d\tSize:%d\tCap_size:%d\n", entry->hdr.entry_type, entry->hdr.entry_capture_mask, esize, entry->hdr.entry_capture_size); dev_info(&adapter->pdev->dev, "Aborting further dump capture\n"); } return 0; } static int netxen_parse_md_template(struct netxen_adapter *adapter) { int num_of_entries, buff_level, e_cnt, esize; int end_cnt = 0, rv = 0, sane_start = 0, sane_end = 0; char *dbuff; void *template_buff = adapter->mdump.md_template; char *dump_buff = adapter->mdump.md_capture_buff; int capture_mask = adapter->mdump.md_capture_mask; struct netxen_minidump_template_hdr *template_hdr; struct netxen_minidump_entry *entry; if ((capture_mask & 0x3) != 0x3) { dev_err(&adapter->pdev->dev, "Capture mask %02x below minimum needed " "for valid firmware dump\n", capture_mask); return -EINVAL; } template_hdr = (struct netxen_minidump_template_hdr *) template_buff; num_of_entries = template_hdr->num_of_entries; entry = (struct netxen_minidump_entry *) ((char *) template_buff + template_hdr->first_entry_offset); memcpy(dump_buff, template_buff, adapter->mdump.md_template_size); dump_buff = dump_buff + adapter->mdump.md_template_size; if (template_hdr->entry_type == TLHDR) sane_start = 1; for (e_cnt = 0, buff_level = 0; e_cnt < num_of_entries; e_cnt++) { if (!(entry->hdr.entry_capture_mask & capture_mask)) { entry->hdr.driver_flags |= NX_DUMP_SKIP; entry = (struct netxen_minidump_entry *) ((char *) entry + entry->hdr.entry_size); continue; } switch (entry->hdr.entry_type) { case RDNOP: entry->hdr.driver_flags |= NX_DUMP_SKIP; break; case RDEND: entry->hdr.driver_flags |= NX_DUMP_SKIP; if (!sane_end) end_cnt = e_cnt; sane_end += 1; break; case CNTRL: rv = netxen_md_cntrl(adapter, template_hdr, (void *)entry); if (rv) entry->hdr.driver_flags |= NX_DUMP_SKIP; break; case RDCRB: dbuff = dump_buff + buff_level; esize = netxen_md_rd_crb(adapter, (void *) entry, (void *) dbuff); rv = netxen_md_entry_err_chk (adapter, entry, esize); if (rv < 0) break; buff_level += esize; break; case RDMN:
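/* RDMN deliberately falls through: it is captured with the same rdmem handler as RDMEM */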
case RDMEM: dbuff = dump_buff + buff_level; esize = netxen_md_rdmem(adapter, (void *) entry, (void *) dbuff); rv = netxen_md_entry_err_chk (adapter, entry, esize); if (rv < 0) break; buff_level += esize; break; case BOARD: case RDROM: dbuff = dump_buff + buff_level; esize = netxen_md_rdrom(adapter, (void *) entry, (void *) dbuff); rv = netxen_md_entry_err_chk (adapter, entry, esize); if (rv < 0) break; buff_level += esize; break; case L2ITG: case L2DTG: case L2DAT: case L2INS: dbuff = dump_buff + buff_level; esize = netxen_md_L2Cache(adapter, (void *) entry, (void *) dbuff); rv = netxen_md_entry_err_chk (adapter, entry, esize); if (rv < 0) break; buff_level += esize; break; case L1DAT: case L1INS: dbuff = dump_buff + buff_level; esize = netxen_md_L1Cache(adapter, (void *) entry, (void *) dbuff); rv = netxen_md_entry_err_chk (adapter, entry, esize); if (rv < 0) break; buff_level += esize; break; case RDOCM: dbuff = dump_buff + buff_level; esize = netxen_md_rdocm(adapter, (void *) entry, (void *) dbuff); rv = netxen_md_entry_err_chk (adapter, entry, esize); if (rv < 0) break; buff_level += esize; break; case RDMUX: dbuff = dump_buff + buff_level; esize = netxen_md_rdmux(adapter, (void *) entry, (void *) dbuff); rv = netxen_md_entry_err_chk (adapter, entry, esize); if (rv < 0) break; buff_level += esize; break; case QUEUE: dbuff = dump_buff + buff_level; esize = netxen_md_rdqueue(adapter, (void *) entry, (void *) dbuff); rv = netxen_md_entry_err_chk (adapter, entry, esize); if (rv < 0) break; buff_level += esize; break; default: entry->hdr.driver_flags |= NX_DUMP_SKIP; break; } /* Next entry in the template */ entry = (struct netxen_minidump_entry *) ((char *) entry + entry->hdr.entry_size); } if (!sane_start || sane_end > 1) { dev_err(&adapter->pdev->dev, "Firmware minidump template configuration error.\n"); } return 0; } static int netxen_collect_minidump(struct netxen_adapter *adapter) { int ret = 0; struct netxen_minidump_template_hdr *hdr; struct timespec val; hdr = (struct netxen_minidump_template_hdr *) adapter->mdump.md_template; hdr->driver_capture_mask = adapter->mdump.md_capture_mask; jiffies_to_timespec(jiffies, &val); hdr->driver_timestamp = (u32) val.tv_sec; hdr->driver_info_word2 = adapter->fw_version; hdr->driver_info_word3 = NXRD32(adapter, CRB_DRIVER_VERSION); ret = netxen_parse_md_template(adapter); if (ret) return ret; return ret; } void netxen_dump_fw(struct netxen_adapter *adapter) { struct netxen_minidump_template_hdr *hdr; int i, k, data_size = 0; u32 capture_mask; hdr = (struct netxen_minidump_template_hdr *) adapter->mdump.md_template; capture_mask = adapter->mdump.md_capture_mask; for (i = 0x2, k = 1; (i & NX_DUMP_MASK_MAX); i <<= 1, k++) { if (i & capture_mask) data_size += hdr->capture_size_array[k]; } if (!data_size) { dev_err(&adapter->pdev->dev, "Invalid cap sizes for capture_mask=0x%x\n", adapter->mdump.md_capture_mask); return; } adapter->mdump.md_capture_size = data_size; adapter->mdump.md_dump_size = adapter->mdump.md_template_size + adapter->mdump.md_capture_size; if (!adapter->mdump.md_capture_buff) { adapter->mdump.md_capture_buff = vzalloc(adapter->mdump.md_dump_size); if (!adapter->mdump.md_capture_buff) return; if (netxen_collect_minidump(adapter)) { adapter->mdump.has_valid_dump = 0; adapter->mdump.md_dump_size = 0; vfree(adapter->mdump.md_capture_buff); adapter->mdump.md_capture_buff = NULL; dev_err(&adapter->pdev->dev, "Error in collecting firmware minidump.\n"); } else { adapter->mdump.md_timestamp = jiffies; adapter->mdump.has_valid_dump = 1; 
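/* mark the dump ready; it must be retrieved before another capture is allowed */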
adapter->fw_mdump_rdy = 1; dev_info(&adapter->pdev->dev, "%s Successfully " "collected fw dump.\n", adapter->netdev->name); } } else { dev_info(&adapter->pdev->dev, "Cannot overwrite previously collected " "firmware minidump.\n"); adapter->fw_mdump_rdy = 1; return; } }
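/*
 * Illustrative sketch (not part of the driver): how the capture-mask
 * sizing loop in netxen_dump_fw() above adds up per-level capture sizes.
 * Each set bit of the mask, starting at bit 1 (0x2), selects one entry
 * of the template's capture_size_array. Standalone user-space C; the
 * mask width and sizes below are made-up assumptions for the demo.
 */
#if 0	/* example only -- never compiled into the driver */
#include <stdio.h>
#include <stdint.h>

#define DEMO_DUMP_MASK_MAX 0xff	/* assumption: 8 capture levels */

int main(void)
{
	uint32_t capture_size_array[9] = { 0, 4096, 8192, 0, 16384, 0, 0, 0, 0 };
	uint32_t capture_mask = 0x12;	/* levels 1 and 4 selected */
	uint32_t data_size = 0;
	int i, k;

	/* same walk as netxen_dump_fw(): bit k of the mask -> array[k] */
	for (i = 0x2, k = 1; (i & DEMO_DUMP_MASK_MAX); i <<= 1, k++) {
		if (i & capture_mask)
			data_size += capture_size_array[k];
	}
	/* prints: capture mask 0x12 needs 20480 bytes */
	printf("capture mask 0x%x needs %u bytes\n", capture_mask, data_size);
	return 0;
}
#endif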
gpl-2.0
ccyrowski/cm-kernel
drivers/i2c/busses/scx200_i2c.c
793
3464
/* linux/drivers/i2c/busses/scx200_i2c.c Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> National Semiconductor SCx200 I2C bus on GPIO pins Based on i2c-velleman.c Copyright (C) 1995-96, 2000 Simon G. Vogl This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> #include <asm/io.h> #include <linux/scx200_gpio.h> #define NAME "scx200_i2c" MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>"); MODULE_DESCRIPTION("NatSemi SCx200 I2C Driver"); MODULE_LICENSE("GPL"); static int scl = CONFIG_SCx200_I2C_SCL; static int sda = CONFIG_SCx200_I2C_SDA; module_param(scl, int, 0); MODULE_PARM_DESC(scl, "GPIO line for SCL"); module_param(sda, int, 0); MODULE_PARM_DESC(sda, "GPIO line for SDA"); static void scx200_i2c_setscl(void *data, int state) { scx200_gpio_set(scl, state); } static void scx200_i2c_setsda(void *data, int state) { scx200_gpio_set(sda, state); } static int scx200_i2c_getscl(void *data) { return scx200_gpio_get(scl); } static int scx200_i2c_getsda(void *data) { return scx200_gpio_get(sda); } /* ------------------------------------------------------------------------ * Encapsulate the above functions in the correct operations structure. * This is only done when more than one hardware adapter is supported. */ static struct i2c_algo_bit_data scx200_i2c_data = { .setsda = scx200_i2c_setsda, .setscl = scx200_i2c_setscl, .getsda = scx200_i2c_getsda, .getscl = scx200_i2c_getscl, .udelay = 10, .timeout = HZ, }; static struct i2c_adapter scx200_i2c_ops = { .owner = THIS_MODULE, .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, .algo_data = &scx200_i2c_data, .name = "NatSemi SCx200 I2C", }; static int scx200_i2c_init(void) { pr_debug(NAME ": NatSemi SCx200 I2C Driver\n"); if (!scx200_gpio_present()) { printk(KERN_ERR NAME ": no SCx200 gpio pins available\n"); return -ENODEV; } pr_debug(NAME ": SCL=GPIO%02u, SDA=GPIO%02u\n", scl, sda); if (scl == -1 || sda == -1 || scl == sda) { printk(KERN_ERR NAME ": scl and sda must be specified\n"); return -EINVAL; } /* Configure GPIOs as open collector outputs */ scx200_gpio_configure(scl, ~2, 5); scx200_gpio_configure(sda, ~2, 5); if (i2c_bit_add_bus(&scx200_i2c_ops) < 0) { printk(KERN_ERR NAME ": adapter %s registration failed\n", scx200_i2c_ops.name); return -ENODEV; } return 0; } static void scx200_i2c_cleanup(void) { i2c_del_adapter(&scx200_i2c_ops); } module_init(scx200_i2c_init); module_exit(scx200_i2c_cleanup); /* Local variables: compile-command: "make -k -C ../.. SUBDIRS=drivers/i2c modules" c-basic-offset: 8 End: */
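/*
 * Illustrative sketch (not part of the driver): what the i2c-algo-bit
 * layer does with the four callbacks registered above. A START condition
 * is SDA falling while SCL is high; each data bit is presented on SDA and
 * clocked by toggling SCL. Standalone user-space C with stub GPIO state
 * standing in for scx200_gpio_set()/scx200_gpio_get().
 */
#if 0	/* example only */
#include <stdio.h>

static int scl_state = 1, sda_state = 1;	/* lines idle high */

static void setscl(void *data, int state) { (void)data; scl_state = state; }
static void setsda(void *data, int state) { (void)data; sda_state = state; }

static void i2c_start(void)
{
	setsda(NULL, 1);
	setscl(NULL, 1);
	setsda(NULL, 0);	/* SDA falls while SCL is high: START */
	setscl(NULL, 0);
}

static void i2c_write_bit(int bit)
{
	setsda(NULL, bit);	/* present the bit */
	setscl(NULL, 1);	/* clock it out */
	setscl(NULL, 0);
}

int main(void)
{
	int i;

	i2c_start();
	for (i = 7; i >= 0; i--)	/* shift out one byte, MSB first */
		i2c_write_bit((0xA5 >> i) & 1);
	printf("START + 0xA5 shifted out (SCL=%d SDA=%d)\n",
	       scl_state, sda_state);
	return 0;
}
#endif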
gpl-2.0
tsh-xx/folio100
drivers/edac/e7xxx_edac.c
793
15751
/* * Intel e7xxx Memory Controller kernel module * (C) 2003 Linux Networx (http://lnxi.com) * This file may be distributed under the terms of the * GNU General Public License. * * See "enum e7xxx_chips" below for supported chipsets * * Written by Thayne Harbaugh * Based on work by Dan Hollis <goemon at anime dot net> and others. * http://www.anime.net/~goemon/linux-ecc/ * * Contributors: * Eric Biederman (Linux Networx) * Tom Zimmerman (Linux Networx) * Jim Garlick (Lawrence Livermore National Labs) * Dave Peterson (Lawrence Livermore National Labs) * That One Guy (Some other place) * Wang Zhenyu (intel.com) * * $Id: edac_e7xxx.c,v 1.5.2.9 2005/10/05 00:43:44 dsp_llnl Exp $ * */ #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/slab.h> #include <linux/edac.h> #include "edac_core.h" #define E7XXX_REVISION " Ver: 2.0.2 " __DATE__ #define EDAC_MOD_STR "e7xxx_edac" #define e7xxx_printk(level, fmt, arg...) \ edac_printk(level, "e7xxx", fmt, ##arg) #define e7xxx_mc_printk(mci, level, fmt, arg...) \ edac_mc_chipset_printk(mci, level, "e7xxx", fmt, ##arg) #ifndef PCI_DEVICE_ID_INTEL_7205_0 #define PCI_DEVICE_ID_INTEL_7205_0 0x255d #endif /* PCI_DEVICE_ID_INTEL_7205_0 */ #ifndef PCI_DEVICE_ID_INTEL_7205_1_ERR #define PCI_DEVICE_ID_INTEL_7205_1_ERR 0x2551 #endif /* PCI_DEVICE_ID_INTEL_7205_1_ERR */ #ifndef PCI_DEVICE_ID_INTEL_7500_0 #define PCI_DEVICE_ID_INTEL_7500_0 0x2540 #endif /* PCI_DEVICE_ID_INTEL_7500_0 */ #ifndef PCI_DEVICE_ID_INTEL_7500_1_ERR #define PCI_DEVICE_ID_INTEL_7500_1_ERR 0x2541 #endif /* PCI_DEVICE_ID_INTEL_7500_1_ERR */ #ifndef PCI_DEVICE_ID_INTEL_7501_0 #define PCI_DEVICE_ID_INTEL_7501_0 0x254c #endif /* PCI_DEVICE_ID_INTEL_7501_0 */ #ifndef PCI_DEVICE_ID_INTEL_7501_1_ERR #define PCI_DEVICE_ID_INTEL_7501_1_ERR 0x2541 #endif /* PCI_DEVICE_ID_INTEL_7501_1_ERR */ #ifndef PCI_DEVICE_ID_INTEL_7505_0 #define PCI_DEVICE_ID_INTEL_7505_0 0x2550 #endif /* PCI_DEVICE_ID_INTEL_7505_0 */ #ifndef PCI_DEVICE_ID_INTEL_7505_1_ERR #define PCI_DEVICE_ID_INTEL_7505_1_ERR 0x2551 #endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */ #define E7XXX_NR_CSROWS 8 /* number of csrows */ #define E7XXX_NR_DIMMS 8 /* FIXME - is this correct? 
*/ /* E7XXX register addresses - device 0 function 0 */ #define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */ #define E7XXX_DRA 0x70 /* DRAM row attribute register (8b) */ /* * 31 Device width row 7 0=x8 1=x4 * 27 Device width row 6 * 23 Device width row 5 * 19 Device width row 4 * 15 Device width row 3 * 11 Device width row 2 * 7 Device width row 1 * 3 Device width row 0 */ #define E7XXX_DRC 0x7C /* DRAM controller mode reg (32b) */ /* * 22 Number channels 0=1,1=2 * 19:18 DRB Granularity 32/64MB */ #define E7XXX_TOLM 0xC4 /* DRAM top of low memory reg (16b) */ #define E7XXX_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */ #define E7XXX_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */ /* E7XXX register addresses - device 0 function 1 */ #define E7XXX_DRAM_FERR 0x80 /* DRAM first error register (8b) */ #define E7XXX_DRAM_NERR 0x82 /* DRAM next error register (8b) */ #define E7XXX_DRAM_CELOG_ADD 0xA0 /* DRAM first correctable memory */ /* error address register (32b) */ /* * 31:28 Reserved * 27:6 CE address (4k block 33:12) * 5:0 Reserved */ #define E7XXX_DRAM_UELOG_ADD 0xB0 /* DRAM first uncorrectable memory */ /* error address register (32b) */ /* * 31:28 Reserved * 27:6 CE address (4k block 33:12) * 5:0 Reserved */ #define E7XXX_DRAM_CELOG_SYNDROME 0xD0 /* DRAM first correctable memory */ /* error syndrome register (16b) */ enum e7xxx_chips { E7500 = 0, E7501, E7505, E7205, }; struct e7xxx_pvt { struct pci_dev *bridge_ck; u32 tolm; u32 remapbase; u32 remaplimit; const struct e7xxx_dev_info *dev_info; }; struct e7xxx_dev_info { u16 err_dev; const char *ctl_name; }; struct e7xxx_error_info { u8 dram_ferr; u8 dram_nerr; u32 dram_celog_add; u16 dram_celog_syndrome; u32 dram_uelog_add; }; static struct edac_pci_ctl_info *e7xxx_pci; static const struct e7xxx_dev_info e7xxx_devs[] = { [E7500] = { .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR, .ctl_name = "E7500"}, [E7501] = { .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR, .ctl_name = "E7501"}, [E7505] = { .err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR, .ctl_name = "E7505"}, [E7205] = { .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR, .ctl_name = "E7205"}, }; /* FIXME - is this valid for both SECDED and S4ECD4ED? 
*/ static inline int e7xxx_find_channel(u16 syndrome) { debugf3("%s()\n", __func__); if ((syndrome & 0xff00) == 0) return 0; if ((syndrome & 0x00ff) == 0) return 1; if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0) return 0; return 1; } static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, unsigned long page) { u32 remap; struct e7xxx_pvt *pvt = (struct e7xxx_pvt *)mci->pvt_info; debugf3("%s()\n", __func__); if ((page < pvt->tolm) || ((page >= 0x100000) && (page < pvt->remapbase))) return page; remap = (page - pvt->tolm) + pvt->remapbase; if (remap < pvt->remaplimit) return remap; e7xxx_printk(KERN_ERR, "Invalid page %lx - out of range\n", page); return pvt->tolm - 1; } static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info) { u32 error_1b, page; u16 syndrome; int row; int channel; debugf3("%s()\n", __func__); /* read the error address */ error_1b = info->dram_celog_add; /* FIXME - should use PAGE_SHIFT */ page = error_1b >> 6; /* convert the address to 4k page */ /* read the syndrome */ syndrome = info->dram_celog_syndrome; /* FIXME - check for -1 */ row = edac_mc_find_csrow_by_page(mci, page); /* convert syndrome to channel */ channel = e7xxx_find_channel(syndrome); edac_mc_handle_ce(mci, page, 0, syndrome, row, channel, "e7xxx CE"); } static void process_ce_no_info(struct mem_ctl_info *mci) { debugf3("%s()\n", __func__); edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow"); } static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info) { u32 error_2b, block_page; int row; debugf3("%s()\n", __func__); /* read the error address */ error_2b = info->dram_uelog_add; /* FIXME - should use PAGE_SHIFT */ block_page = error_2b >> 6; /* convert to 4k address */ row = edac_mc_find_csrow_by_page(mci, block_page); edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE"); } static void process_ue_no_info(struct mem_ctl_info *mci) { debugf3("%s()\n", __func__); edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow"); } static void e7xxx_get_error_info(struct mem_ctl_info *mci, struct e7xxx_error_info *info) { struct e7xxx_pvt *pvt; pvt = (struct e7xxx_pvt *)mci->pvt_info; pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR, &info->dram_ferr); pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR, &info->dram_nerr); if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) { pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD, &info->dram_celog_add); pci_read_config_word(pvt->bridge_ck, E7XXX_DRAM_CELOG_SYNDROME, &info->dram_celog_syndrome); } if ((info->dram_ferr & 2) || (info->dram_nerr & 2)) pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_UELOG_ADD, &info->dram_uelog_add); if (info->dram_ferr & 3) pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03); if (info->dram_nerr & 3) pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03); } static int e7xxx_process_error_info(struct mem_ctl_info *mci, struct e7xxx_error_info *info, int handle_errors) { int error_found; error_found = 0; /* decode and report errors */ if (info->dram_ferr & 1) { /* check first error correctable */ error_found = 1; if (handle_errors) process_ce(mci, info); } if (info->dram_ferr & 2) { /* check first error uncorrectable */ error_found = 1; if (handle_errors) process_ue(mci, info); } if (info->dram_nerr & 1) { /* check next error correctable */ error_found = 1; if (handle_errors) { if (info->dram_ferr & 1) process_ce_no_info(mci); else process_ce(mci, info); } } if (info->dram_nerr & 2) { /* check next error uncorrectable */ error_found 
= 1; if (handle_errors) { if (info->dram_ferr & 2) process_ue_no_info(mci); else process_ue(mci, info); } } return error_found; } static void e7xxx_check(struct mem_ctl_info *mci) { struct e7xxx_error_info info; debugf3("%s()\n", __func__); e7xxx_get_error_info(mci, &info); e7xxx_process_error_info(mci, &info, 1); } /* Return 1 if dual channel mode is active. Else return 0. */ static inline int dual_channel_active(u32 drc, int dev_idx) { return (dev_idx == E7501) ? ((drc >> 22) & 0x1) : 1; } /* Return DRB granularity (0=32mb, 1=64mb). */ static inline int drb_granularity(u32 drc, int dev_idx) { /* only e7501 can be single channel */ return (dev_idx == E7501) ? ((drc >> 18) & 0x3) : 1; } static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, int dev_idx, u32 drc) { unsigned long last_cumul_size; int index; u8 value; u32 dra, cumul_size; int drc_chan, drc_drbg, drc_ddim, mem_dev; struct csrow_info *csrow; pci_read_config_dword(pdev, E7XXX_DRA, &dra); drc_chan = dual_channel_active(drc, dev_idx); drc_drbg = drb_granularity(drc, dev_idx); drc_ddim = (drc >> 20) & 0x3; last_cumul_size = 0; /* The dram row boundary (DRB) reg values are boundary address * for each DRAM row with a granularity of 32 or 64MB (single/dual * channel operation). DRB regs are cumulative; therefore DRB7 will * contain the total memory contained in all eight rows. */ for (index = 0; index < mci->nr_csrows; index++) { /* mem_dev 0=x8, 1=x4 */ mem_dev = (dra >> (index * 4 + 3)) & 0x1; csrow = &mci->csrows[index]; pci_read_config_byte(pdev, E7XXX_DRB + index, &value); /* convert a 64 or 32 MiB DRB to a page size. */ cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, cumul_size); if (cumul_size == last_cumul_size) continue; /* not populated */ csrow->first_page = last_cumul_size; csrow->last_page = cumul_size - 1; csrow->nr_pages = cumul_size - last_cumul_size; last_cumul_size = cumul_size; csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ csrow->mtype = MEM_RDDR; /* only one type supported */ csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; /* * if single channel or x8 devices then SECDED * if dual channel and x4 then S4ECD4ED */ if (drc_ddim) { if (drc_chan && mem_dev) { csrow->edac_mode = EDAC_S4ECD4ED; mci->edac_cap |= EDAC_FLAG_S4ECD4ED; } else { csrow->edac_mode = EDAC_SECDED; mci->edac_cap |= EDAC_FLAG_SECDED; } } else csrow->edac_mode = EDAC_NONE; } } static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) { u16 pci_data; struct mem_ctl_info *mci = NULL; struct e7xxx_pvt *pvt = NULL; u32 drc; int drc_chan; struct e7xxx_error_info discard; debugf0("%s(): mci\n", __func__); pci_read_config_dword(pdev, E7XXX_DRC, &drc); drc_chan = dual_channel_active(drc, dev_idx); mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1, 0); if (mci == NULL) return -ENOMEM; debugf3("%s(): init mci\n", __func__); mci->mtype_cap = MEM_FLAG_RDDR; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED; /* FIXME - what if different memory types are in different csrows? 
*/ mci->mod_name = EDAC_MOD_STR; mci->mod_ver = E7XXX_REVISION; mci->dev = &pdev->dev; debugf3("%s(): init pvt\n", __func__); pvt = (struct e7xxx_pvt *)mci->pvt_info; pvt->dev_info = &e7xxx_devs[dev_idx]; pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, pvt->dev_info->err_dev, pvt->bridge_ck); if (!pvt->bridge_ck) { e7xxx_printk(KERN_ERR, "error reporting device not found:" "vendor %x device 0x%x (broken BIOS?)\n", PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev); goto fail0; } debugf3("%s(): more mci init\n", __func__); mci->ctl_name = pvt->dev_info->ctl_name; mci->dev_name = pci_name(pdev); mci->edac_check = e7xxx_check; mci->ctl_page_to_phys = ctl_page_to_phys; e7xxx_init_csrows(mci, pdev, dev_idx, drc); mci->edac_cap |= EDAC_FLAG_NONE; debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); /* load the top of low memory, remap base, and remap limit vars */ pci_read_config_word(pdev, E7XXX_TOLM, &pci_data); pvt->tolm = ((u32) pci_data) << 4; pci_read_config_word(pdev, E7XXX_REMAPBASE, &pci_data); pvt->remapbase = ((u32) pci_data) << 14; pci_read_config_word(pdev, E7XXX_REMAPLIMIT, &pci_data); pvt->remaplimit = ((u32) pci_data) << 14; e7xxx_printk(KERN_INFO, "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm, pvt->remapbase, pvt->remaplimit); /* clear any pending errors, or initial state bits */ e7xxx_get_error_info(mci, &discard); /* Here we assume that we will never see multiple instances of this * type of memory controller. The ID is therefore hardcoded to 0. */ if (edac_mc_add_mc(mci)) { debugf3("%s(): failed edac_mc_add_mc()\n", __func__); goto fail1; } /* allocating generic PCI control info */ e7xxx_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); if (!e7xxx_pci) { printk(KERN_WARNING "%s(): Unable to create PCI control\n", __func__); printk(KERN_WARNING "%s(): PCI error report via EDAC not setup\n", __func__); } /* get this far and it's successful */ debugf3("%s(): success\n", __func__); return 0; fail1: pci_dev_put(pvt->bridge_ck); fail0: edac_mc_free(mci); return -ENODEV; } /* returns count (>= 0), or negative on error */ static int __devinit e7xxx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { debugf0("%s()\n", __func__); /* wake up and enable device */ return pci_enable_device(pdev) ? -EIO : e7xxx_probe1(pdev, ent->driver_data); } static void __devexit e7xxx_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; struct e7xxx_pvt *pvt; debugf0("%s()\n", __func__); if (e7xxx_pci) edac_pci_release_generic_ctl(e7xxx_pci); if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) return; pvt = (struct e7xxx_pvt *)mci->pvt_info; pci_dev_put(pvt->bridge_ck); edac_mc_free(mci); } static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = { { PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, E7205}, { PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, E7500}, { PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, E7501}, { PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, E7505}, { 0, } /* 0 terminated list. 
*/ }; MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl); static struct pci_driver e7xxx_driver = { .name = EDAC_MOD_STR, .probe = e7xxx_init_one, .remove = __devexit_p(e7xxx_remove_one), .id_table = e7xxx_pci_tbl, }; static int __init e7xxx_init(void) { /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); return pci_register_driver(&e7xxx_driver); } static void __exit e7xxx_exit(void) { pci_unregister_driver(&e7xxx_driver); } module_init(e7xxx_init); module_exit(e7xxx_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n" "Based on work by Dan Hollis et al"); MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers"); module_param(edac_op_state, int, 0444); MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
gpl-2.0
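The csrow setup in e7xxx_init_csrows() hinges on one piece of arithmetic: each DRB register holds a cumulative row boundary in 32 or 64 MiB units, so a row's page span is the difference between consecutive boundaries, shifted into 4 KiB pages via 25 + drc_drbg - PAGE_SHIFT. A standalone sketch of that decoding, using hypothetical DRB values rather than real hardware readings:

/* Worked sketch (hypothetical values): decoding cumulative DRB
 * boundary registers into per-row page ranges, as
 * e7xxx_init_csrows() does. Granularity is 32 MiB (drbg = 0) or
 * 64 MiB (drbg = 1); with 4 KiB pages the shift is 25 + drbg - 12. */
#include <stdio.h>

int main(void)
{
	unsigned char drb[8] = { 8, 16, 16, 24, 24, 24, 24, 24 }; /* hypothetical */
	int drbg = 1;			/* 64 MiB granularity */
	int shift = 25 + drbg - 12;	/* PAGE_SHIFT == 12 */
	unsigned long last = 0, cumul;
	int i;

	for (i = 0; i < 8; i++) {
		cumul = (unsigned long)drb[i] << shift;
		if (cumul == last)
			continue;	/* boundary unchanged: row not populated */
		printf("row %d: pages %lu..%lu (%lu pages)\n",
		       i, last, cumul - 1, cumul - last);
		last = cumul;
	}
	return 0;
}

With these values only rows 0, 1, and 3 are populated; rows whose boundary equals the previous one are skipped, which is exactly the "not populated" test in the driver.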
teamacid/android_kernel_teamacid
drivers/staging/rt2860/sta/auth.c
1049
16550
/* ************************************************************************* * Ralink Tech Inc. * 5F., No.36, Taiyuan St., Jhubei City, * Hsinchu County 302, * Taiwan, R.O.C. * * (c) Copyright 2002-2007, Ralink Technology, Inc. * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the * * Free Software Foundation, Inc., * * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * * ************************************************************************* Module Name: auth.c Abstract: Revision History: Who When What -------- ---------- ---------------------------------------------- John 2004-9-3 porting from RT2500 */ #include "../rt_config.h" /* ========================================================================== Description: authenticate state machine init, including state transition and timer init Parameters: Sm - pointer to the auth state machine Note: The state machine looks like this AUTH_REQ_IDLE AUTH_WAIT_SEQ2 AUTH_WAIT_SEQ4 MT2_MLME_AUTH_REQ mlme_auth_req_action invalid_state_when_auth invalid_state_when_auth MT2_PEER_AUTH_EVEN drop peer_auth_even_at_seq2_action peer_auth_even_at_seq4_action MT2_AUTH_TIMEOUT Drop auth_timeout_action auth_timeout_action IRQL = PASSIVE_LEVEL ========================================================================== */ void AuthStateMachineInit(struct rt_rtmp_adapter *pAd, struct rt_state_machine *Sm, OUT STATE_MACHINE_FUNC Trans[]) { StateMachineInit(Sm, Trans, MAX_AUTH_STATE, MAX_AUTH_MSG, (STATE_MACHINE_FUNC) Drop, AUTH_REQ_IDLE, AUTH_MACHINE_BASE); /* the first column */ StateMachineSetAction(Sm, AUTH_REQ_IDLE, MT2_MLME_AUTH_REQ, (STATE_MACHINE_FUNC) MlmeAuthReqAction); /* the second column */ StateMachineSetAction(Sm, AUTH_WAIT_SEQ2, MT2_MLME_AUTH_REQ, (STATE_MACHINE_FUNC) InvalidStateWhenAuth); StateMachineSetAction(Sm, AUTH_WAIT_SEQ2, MT2_PEER_AUTH_EVEN, (STATE_MACHINE_FUNC) PeerAuthRspAtSeq2Action); StateMachineSetAction(Sm, AUTH_WAIT_SEQ2, MT2_AUTH_TIMEOUT, (STATE_MACHINE_FUNC) AuthTimeoutAction); /* the third column */ StateMachineSetAction(Sm, AUTH_WAIT_SEQ4, MT2_MLME_AUTH_REQ, (STATE_MACHINE_FUNC) InvalidStateWhenAuth); StateMachineSetAction(Sm, AUTH_WAIT_SEQ4, MT2_PEER_AUTH_EVEN, (STATE_MACHINE_FUNC) PeerAuthRspAtSeq4Action); StateMachineSetAction(Sm, AUTH_WAIT_SEQ4, MT2_AUTH_TIMEOUT, (STATE_MACHINE_FUNC) AuthTimeoutAction); RTMPInitTimer(pAd, &pAd->MlmeAux.AuthTimer, GET_TIMER_FUNCTION(AuthTimeout), pAd, FALSE); } /* ========================================================================== Description: function to be executed at timer thread when auth timer expires IRQL = DISPATCH_LEVEL ========================================================================== */ void AuthTimeout(void *SystemSpecific1, void *FunctionContext, void *SystemSpecific2, void *SystemSpecific3) { struct rt_rtmp_adapter *pAd = (struct rt_rtmp_adapter *)FunctionContext; DBGPRINT(RT_DEBUG_TRACE, ("AUTH - AuthTimeout\n")); /* Do nothing if the driver is starting halt state. 
*/ /* This might happen when timer already been fired before cancel timer with mlmehalt */ if (RTMP_TEST_FLAG (pAd, fRTMP_ADAPTER_HALT_IN_PROGRESS | fRTMP_ADAPTER_NIC_NOT_EXIST)) return; /* send a de-auth to reset AP's state machine (Patch AP-Dir635) */ if (pAd->Mlme.AuthMachine.CurrState == AUTH_WAIT_SEQ2) Cls2errAction(pAd, pAd->MlmeAux.Bssid); MlmeEnqueue(pAd, AUTH_STATE_MACHINE, MT2_AUTH_TIMEOUT, 0, NULL); RTMP_MLME_HANDLER(pAd); } /* ========================================================================== Description: IRQL = DISPATCH_LEVEL ========================================================================== */ void MlmeAuthReqAction(struct rt_rtmp_adapter *pAd, struct rt_mlme_queue_elem *Elem) { if (AUTH_ReqSend (pAd, Elem, &pAd->MlmeAux.AuthTimer, "AUTH", 1, NULL, 0)) pAd->Mlme.AuthMachine.CurrState = AUTH_WAIT_SEQ2; else { u16 Status; pAd->Mlme.AuthMachine.CurrState = AUTH_REQ_IDLE; Status = MLME_INVALID_FORMAT; MlmeEnqueue(pAd, MLME_CNTL_STATE_MACHINE, MT2_AUTH_CONF, 2, &Status); } } /* ========================================================================== Description: IRQL = DISPATCH_LEVEL ========================================================================== */ void PeerAuthRspAtSeq2Action(struct rt_rtmp_adapter *pAd, struct rt_mlme_queue_elem *Elem) { u8 Addr2[MAC_ADDR_LEN]; u16 Seq, Status, RemoteStatus, Alg; u8 ChlgText[CIPHER_TEXT_LEN]; u8 CyperChlgText[CIPHER_TEXT_LEN + 8 + 8]; u8 Element[2]; struct rt_header_802_11 AuthHdr; BOOLEAN TimerCancelled; u8 *pOutBuffer = NULL; int NStatus; unsigned long FrameLen = 0; u16 Status2; if (PeerAuthSanity (pAd, Elem->Msg, Elem->MsgLen, Addr2, &Alg, &Seq, &Status, (char *)ChlgText)) { if (MAC_ADDR_EQUAL(pAd->MlmeAux.Bssid, Addr2) && Seq == 2) { DBGPRINT(RT_DEBUG_TRACE, ("AUTH - Receive AUTH_RSP seq#2 to me (Alg=%d, Status=%d)\n", Alg, Status)); RTMPCancelTimer(&pAd->MlmeAux.AuthTimer, &TimerCancelled); if (Status == MLME_SUCCESS) { /* Authentication Mode "LEAP" has allow for CCX 1.X */ if (pAd->MlmeAux.Alg == Ndis802_11AuthModeOpen) { pAd->Mlme.AuthMachine.CurrState = AUTH_REQ_IDLE; MlmeEnqueue(pAd, MLME_CNTL_STATE_MACHINE, MT2_AUTH_CONF, 2, &Status); } else { /* 2. shared key, need to be challenged */ Seq++; RemoteStatus = MLME_SUCCESS; /* Get an unused nonpaged memory */ NStatus = MlmeAllocateMemory(pAd, &pOutBuffer); if (NStatus != NDIS_STATUS_SUCCESS) { DBGPRINT(RT_DEBUG_TRACE, ("AUTH - PeerAuthRspAtSeq2Action() allocate memory fail\n")); pAd->Mlme.AuthMachine. CurrState = AUTH_REQ_IDLE; Status2 = MLME_FAIL_NO_RESOURCE; MlmeEnqueue(pAd, MLME_CNTL_STATE_MACHINE, MT2_AUTH_CONF, 2, &Status2); return; } DBGPRINT(RT_DEBUG_TRACE, ("AUTH - Send AUTH request seq#3...\n")); MgtMacHeaderInit(pAd, &AuthHdr, SUBTYPE_AUTH, 0, Addr2, pAd->MlmeAux.Bssid); AuthHdr.FC.Wep = 1; /* Encrypt challenge text & auth information */ RTMPInitWepEngine(pAd, pAd-> SharedKey[BSS0][pAd-> StaCfg. DefaultKeyId]. Key, pAd->StaCfg. DefaultKeyId, pAd-> SharedKey[BSS0][pAd-> StaCfg. DefaultKeyId]. 
KeyLen, CyperChlgText); Alg = cpu2le16(*(u16 *) & Alg); Seq = cpu2le16(*(u16 *) & Seq); RemoteStatus = cpu2le16(*(u16 *) & RemoteStatus); RTMPEncryptData(pAd, (u8 *)& Alg, CyperChlgText + 4, 2); RTMPEncryptData(pAd, (u8 *)& Seq, CyperChlgText + 6, 2); RTMPEncryptData(pAd, (u8 *)& RemoteStatus, CyperChlgText + 8, 2); Element[0] = 16; Element[1] = 128; RTMPEncryptData(pAd, Element, CyperChlgText + 10, 2); RTMPEncryptData(pAd, ChlgText, CyperChlgText + 12, 128); RTMPSetICV(pAd, CyperChlgText + 140); MakeOutgoingFrame(pOutBuffer, &FrameLen, sizeof(struct rt_header_802_11), &AuthHdr, CIPHER_TEXT_LEN + 16, CyperChlgText, END_OF_ARGS); MiniportMMRequest(pAd, 0, pOutBuffer, FrameLen); MlmeFreeMemory(pAd, pOutBuffer); RTMPSetTimer(&pAd->MlmeAux.AuthTimer, AUTH_TIMEOUT); pAd->Mlme.AuthMachine.CurrState = AUTH_WAIT_SEQ4; } } else { pAd->StaCfg.AuthFailReason = Status; COPY_MAC_ADDR(pAd->StaCfg.AuthFailSta, Addr2); pAd->Mlme.AuthMachine.CurrState = AUTH_REQ_IDLE; MlmeEnqueue(pAd, MLME_CNTL_STATE_MACHINE, MT2_AUTH_CONF, 2, &Status); } } } else { DBGPRINT(RT_DEBUG_TRACE, ("AUTH - PeerAuthSanity() sanity check fail\n")); } } /* ========================================================================== Description: IRQL = DISPATCH_LEVEL ========================================================================== */ void PeerAuthRspAtSeq4Action(struct rt_rtmp_adapter *pAd, struct rt_mlme_queue_elem *Elem) { u8 Addr2[MAC_ADDR_LEN]; u16 Alg, Seq, Status; char ChlgText[CIPHER_TEXT_LEN]; BOOLEAN TimerCancelled; if (PeerAuthSanity (pAd, Elem->Msg, Elem->MsgLen, Addr2, &Alg, &Seq, &Status, ChlgText)) { if (MAC_ADDR_EQUAL(pAd->MlmeAux.Bssid, Addr2) && Seq == 4) { DBGPRINT(RT_DEBUG_TRACE, ("AUTH - Receive AUTH_RSP seq#4 to me\n")); RTMPCancelTimer(&pAd->MlmeAux.AuthTimer, &TimerCancelled); if (Status != MLME_SUCCESS) { pAd->StaCfg.AuthFailReason = Status; COPY_MAC_ADDR(pAd->StaCfg.AuthFailSta, Addr2); } pAd->Mlme.AuthMachine.CurrState = AUTH_REQ_IDLE; MlmeEnqueue(pAd, MLME_CNTL_STATE_MACHINE, MT2_AUTH_CONF, 2, &Status); } } else { DBGPRINT(RT_DEBUG_TRACE, ("AUTH - PeerAuthRspAtSeq4Action() sanity check fail\n")); } } /* ========================================================================== Description: IRQL = DISPATCH_LEVEL ========================================================================== */ void MlmeDeauthReqAction(struct rt_rtmp_adapter *pAd, struct rt_mlme_queue_elem *Elem) { struct rt_mlme_deauth_req *pInfo; struct rt_header_802_11 DeauthHdr; u8 *pOutBuffer = NULL; int NStatus; unsigned long FrameLen = 0; u16 Status; pInfo = (struct rt_mlme_deauth_req *)Elem->Msg; NStatus = MlmeAllocateMemory(pAd, &pOutBuffer); /*Get an unused nonpaged memory */ if (NStatus != NDIS_STATUS_SUCCESS) { DBGPRINT(RT_DEBUG_TRACE, ("AUTH - MlmeDeauthReqAction() allocate memory fail\n")); pAd->Mlme.AuthMachine.CurrState = AUTH_REQ_IDLE; Status = MLME_FAIL_NO_RESOURCE; MlmeEnqueue(pAd, MLME_CNTL_STATE_MACHINE, MT2_DEAUTH_CONF, 2, &Status); return; } DBGPRINT(RT_DEBUG_TRACE, ("AUTH - Send DE-AUTH request (Reason=%d)...\n", pInfo->Reason)); MgtMacHeaderInit(pAd, &DeauthHdr, SUBTYPE_DEAUTH, 0, pInfo->Addr, pAd->MlmeAux.Bssid); MakeOutgoingFrame(pOutBuffer, &FrameLen, sizeof(struct rt_header_802_11), &DeauthHdr, 2, &pInfo->Reason, END_OF_ARGS); MiniportMMRequest(pAd, 0, pOutBuffer, FrameLen); MlmeFreeMemory(pAd, pOutBuffer); pAd->StaCfg.DeauthReason = pInfo->Reason; COPY_MAC_ADDR(pAd->StaCfg.DeauthSta, pInfo->Addr); pAd->Mlme.AuthMachine.CurrState = AUTH_REQ_IDLE; Status = MLME_SUCCESS; MlmeEnqueue(pAd, MLME_CNTL_STATE_MACHINE, 
MT2_DEAUTH_CONF, 2, &Status); /* send wireless event - for deauthentication */ if (pAd->CommonCfg.bWirelessEvent) RTMPSendWirelessEvent(pAd, IW_DEAUTH_EVENT_FLAG, pAd->MacTab.Content[BSSID_WCID].Addr, BSS0, 0); } /* ========================================================================== Description: IRQL = DISPATCH_LEVEL ========================================================================== */ void AuthTimeoutAction(struct rt_rtmp_adapter *pAd, struct rt_mlme_queue_elem *Elem) { u16 Status; DBGPRINT(RT_DEBUG_TRACE, ("AUTH - AuthTimeoutAction\n")); pAd->Mlme.AuthMachine.CurrState = AUTH_REQ_IDLE; Status = MLME_REJ_TIMEOUT; MlmeEnqueue(pAd, MLME_CNTL_STATE_MACHINE, MT2_AUTH_CONF, 2, &Status); } /* ========================================================================== Description: IRQL = DISPATCH_LEVEL ========================================================================== */ void InvalidStateWhenAuth(struct rt_rtmp_adapter *pAd, struct rt_mlme_queue_elem *Elem) { u16 Status; DBGPRINT(RT_DEBUG_TRACE, ("AUTH - InvalidStateWhenAuth (state=%ld), reset AUTH state machine\n", pAd->Mlme.AuthMachine.CurrState)); pAd->Mlme.AuthMachine.CurrState = AUTH_REQ_IDLE; Status = MLME_STATE_MACHINE_REJECT; MlmeEnqueue(pAd, MLME_CNTL_STATE_MACHINE, MT2_AUTH_CONF, 2, &Status); } /* ========================================================================== Description: Some STA/AP Note: This action should never trigger AUTH state transition, therefore we separate it from the AUTH state machine and make it a standalone service IRQL = DISPATCH_LEVEL ========================================================================== */ void Cls2errAction(struct rt_rtmp_adapter *pAd, u8 *pAddr) { struct rt_header_802_11 DeauthHdr; u8 *pOutBuffer = NULL; int NStatus; unsigned long FrameLen = 0; u16 Reason = REASON_CLS2ERR; NStatus = MlmeAllocateMemory(pAd, &pOutBuffer); /*Get an unused nonpaged memory */ if (NStatus != NDIS_STATUS_SUCCESS) return; DBGPRINT(RT_DEBUG_TRACE, ("AUTH - Class 2 error, Send DEAUTH frame...\n")); MgtMacHeaderInit(pAd, &DeauthHdr, SUBTYPE_DEAUTH, 0, pAddr, pAd->MlmeAux.Bssid); MakeOutgoingFrame(pOutBuffer, &FrameLen, sizeof(struct rt_header_802_11), &DeauthHdr, 2, &Reason, END_OF_ARGS); MiniportMMRequest(pAd, 0, pOutBuffer, FrameLen); MlmeFreeMemory(pAd, pOutBuffer); pAd->StaCfg.DeauthReason = Reason; COPY_MAC_ADDR(pAd->StaCfg.DeauthSta, pAddr); } BOOLEAN AUTH_ReqSend(struct rt_rtmp_adapter *pAd, struct rt_mlme_queue_elem *pElem, struct rt_ralink_timer *pAuthTimer, char *pSMName, u16 SeqNo, u8 *pNewElement, unsigned long ElementLen) { u16 Alg, Seq, Status; u8 Addr[6]; unsigned long Timeout; struct rt_header_802_11 AuthHdr; BOOLEAN TimerCancelled; int NStatus; u8 *pOutBuffer = NULL; unsigned long FrameLen = 0, tmp = 0; /* Block all authentication requests during WPA block period */ if (pAd->StaCfg.bBlockAssoc == TRUE) { DBGPRINT(RT_DEBUG_TRACE, ("%s - Block Auth request during WPA block period!\n", pSMName)); pAd->Mlme.AuthMachine.CurrState = AUTH_REQ_IDLE; Status = MLME_STATE_MACHINE_REJECT; MlmeEnqueue(pAd, MLME_CNTL_STATE_MACHINE, MT2_AUTH_CONF, 2, &Status); } else if (MlmeAuthReqSanity (pAd, pElem->Msg, pElem->MsgLen, Addr, &Timeout, &Alg)) { /* reset timer */ RTMPCancelTimer(pAuthTimer, &TimerCancelled); COPY_MAC_ADDR(pAd->MlmeAux.Bssid, Addr); pAd->MlmeAux.Alg = Alg; Seq = SeqNo; Status = MLME_SUCCESS; NStatus = MlmeAllocateMemory(pAd, &pOutBuffer); /*Get an unused nonpaged memory */ if (NStatus != NDIS_STATUS_SUCCESS) { DBGPRINT(RT_DEBUG_TRACE, ("%s - MlmeAuthReqAction(Alg:%d) 

allocate memory failed\n", pSMName, Alg)); pAd->Mlme.AuthMachine.CurrState = AUTH_REQ_IDLE; Status = MLME_FAIL_NO_RESOURCE; MlmeEnqueue(pAd, MLME_CNTL_STATE_MACHINE, MT2_AUTH_CONF, 2, &Status); return FALSE; } DBGPRINT(RT_DEBUG_TRACE, ("%s - Send AUTH request seq#1 (Alg=%d)...\n", pSMName, Alg)); MgtMacHeaderInit(pAd, &AuthHdr, SUBTYPE_AUTH, 0, Addr, pAd->MlmeAux.Bssid); MakeOutgoingFrame(pOutBuffer, &FrameLen, sizeof(struct rt_header_802_11), &AuthHdr, 2, &Alg, 2, &Seq, 2, &Status, END_OF_ARGS); if (pNewElement && ElementLen) { MakeOutgoingFrame(pOutBuffer + FrameLen, &tmp, ElementLen, pNewElement, END_OF_ARGS); FrameLen += tmp; } MiniportMMRequest(pAd, 0, pOutBuffer, FrameLen); MlmeFreeMemory(pAd, pOutBuffer); RTMPSetTimer(pAuthTimer, Timeout); return TRUE; } else { DBGPRINT_ERR(("%s - MlmeAuthReqAction() sanity check failed\n", pSMName)); return FALSE; } return TRUE; }
gpl-2.0
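AuthStateMachineInit() above fills a (state, message) dispatch table; every cell that is not explicitly set keeps the Drop default, which is what makes unexpected events in AUTH_REQ_IDLE harmless. A self-contained sketch of the same table-driven pattern, using hypothetical states, messages, and handlers rather than the Ralink types:

/* Standalone sketch (hypothetical names) of the table-driven state
 * machine behind StateMachineInit()/StateMachineSetAction(): one
 * handler per (state, message) cell, with a default "drop" action. */
#include <stdio.h>

enum { ST_IDLE, ST_WAIT_SEQ2, ST_WAIT_SEQ4, N_STATES };
enum { MSG_AUTH_REQ, MSG_PEER_EVEN, MSG_TIMEOUT, N_MSGS };

typedef void (*action_fn)(int *state);

static void drop(int *state)      { printf("drop (state %d)\n", *state); }
static void send_seq1(int *state) { *state = ST_WAIT_SEQ2; }
static void timeout(int *state)   { *state = ST_IDLE; }

static action_fn table[N_STATES][N_MSGS];

static void sm_set(int st, int msg, action_fn fn) { table[st][msg] = fn; }

static void sm_dispatch(int *state, int msg)
{
	table[*state][msg](state);	/* every cell is valid by construction */
}

int main(void)
{
	int s, m, state = ST_IDLE;

	for (s = 0; s < N_STATES; s++)		/* pre-fill with the default, */
		for (m = 0; m < N_MSGS; m++)	/* as StateMachineInit() does */
			sm_set(s, m, drop);

	sm_set(ST_IDLE, MSG_AUTH_REQ, send_seq1);
	sm_set(ST_WAIT_SEQ2, MSG_TIMEOUT, timeout);

	sm_dispatch(&state, MSG_AUTH_REQ);	/* -> ST_WAIT_SEQ2 */
	sm_dispatch(&state, MSG_TIMEOUT);	/* -> ST_IDLE */
	return 0;
}

Pre-filling every cell with the default action means dispatch never has to test for a missing handler, the same trade-off StateMachineInit() makes by seeding the whole table with Drop.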
aksalj/kernel_rpi
drivers/firewire/core-transaction.c
1305
36226
/* * Core IEEE1394 transaction logic * * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/bug.h> #include <linux/completion.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/firewire.h> #include <linux/firewire-constants.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/idr.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/rculist.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/types.h> #include <linux/workqueue.h> #include <asm/byteorder.h> #include "core.h" #define HEADER_PRI(pri) ((pri) << 0) #define HEADER_TCODE(tcode) ((tcode) << 4) #define HEADER_RETRY(retry) ((retry) << 8) #define HEADER_TLABEL(tlabel) ((tlabel) << 10) #define HEADER_DESTINATION(destination) ((destination) << 16) #define HEADER_SOURCE(source) ((source) << 16) #define HEADER_RCODE(rcode) ((rcode) << 12) #define HEADER_OFFSET_HIGH(offset_high) ((offset_high) << 0) #define HEADER_DATA_LENGTH(length) ((length) << 16) #define HEADER_EXTENDED_TCODE(tcode) ((tcode) << 0) #define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f) #define HEADER_GET_TLABEL(q) (((q) >> 10) & 0x3f) #define HEADER_GET_RCODE(q) (((q) >> 12) & 0x0f) #define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff) #define HEADER_GET_SOURCE(q) (((q) >> 16) & 0xffff) #define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff) #define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff) #define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff) #define HEADER_DESTINATION_IS_BROADCAST(q) \ (((q) & HEADER_DESTINATION(0x3f)) == HEADER_DESTINATION(0x3f)) #define PHY_PACKET_CONFIG 0x0 #define PHY_PACKET_LINK_ON 0x1 #define PHY_PACKET_SELF_ID 0x2 #define PHY_CONFIG_GAP_COUNT(gap_count) (((gap_count) << 16) | (1 << 22)) #define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23)) #define PHY_IDENTIFIER(id) ((id) << 30) /* returns 0 if the split timeout handler is already running */ static int try_cancel_split_timeout(struct fw_transaction *t) { if (t->is_split_transaction) return del_timer(&t->split_timeout_timer); else return 1; } static int close_transaction(struct fw_transaction *transaction, struct fw_card *card, int rcode) { struct fw_transaction *t; unsigned long flags; spin_lock_irqsave(&card->lock, flags); list_for_each_entry(t, &card->transaction_list, link) { if (t == transaction) { if (!try_cancel_split_timeout(t)) { spin_unlock_irqrestore(&card->lock, flags); goto timed_out; } list_del_init(&t->link); card->tlabel_mask &= ~(1ULL << t->tlabel); break; } } spin_unlock_irqrestore(&card->lock, flags); if (&t->link != &card->transaction_list) { t->callback(card, rcode, NULL, 0, t->callback_data); return 0; } timed_out: return -ENOENT; } /* * Only valid for 
transactions that are potentially pending (ie have * been sent). */ int fw_cancel_transaction(struct fw_card *card, struct fw_transaction *transaction) { /* * Cancel the packet transmission if it's still queued. That * will call the packet transmission callback which cancels * the transaction. */ if (card->driver->cancel_packet(card, &transaction->packet) == 0) return 0; /* * If the request packet has already been sent, we need to see * if the transaction is still pending and remove it in that case. */ return close_transaction(transaction, card, RCODE_CANCELLED); } EXPORT_SYMBOL(fw_cancel_transaction); static void split_transaction_timeout_callback(unsigned long data) { struct fw_transaction *t = (struct fw_transaction *)data; struct fw_card *card = t->card; unsigned long flags; spin_lock_irqsave(&card->lock, flags); if (list_empty(&t->link)) { spin_unlock_irqrestore(&card->lock, flags); return; } list_del(&t->link); card->tlabel_mask &= ~(1ULL << t->tlabel); spin_unlock_irqrestore(&card->lock, flags); t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data); } static void start_split_transaction_timeout(struct fw_transaction *t, struct fw_card *card) { unsigned long flags; spin_lock_irqsave(&card->lock, flags); if (list_empty(&t->link) || WARN_ON(t->is_split_transaction)) { spin_unlock_irqrestore(&card->lock, flags); return; } t->is_split_transaction = true; mod_timer(&t->split_timeout_timer, jiffies + card->split_timeout_jiffies); spin_unlock_irqrestore(&card->lock, flags); } static void transmit_complete_callback(struct fw_packet *packet, struct fw_card *card, int status) { struct fw_transaction *t = container_of(packet, struct fw_transaction, packet); switch (status) { case ACK_COMPLETE: close_transaction(t, card, RCODE_COMPLETE); break; case ACK_PENDING: start_split_transaction_timeout(t, card); break; case ACK_BUSY_X: case ACK_BUSY_A: case ACK_BUSY_B: close_transaction(t, card, RCODE_BUSY); break; case ACK_DATA_ERROR: close_transaction(t, card, RCODE_DATA_ERROR); break; case ACK_TYPE_ERROR: close_transaction(t, card, RCODE_TYPE_ERROR); break; default: /* * In this case the ack is really a juju specific * rcode, so just forward that to the callback. 
*/ close_transaction(t, card, status); break; } } static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, int destination_id, int source_id, int generation, int speed, unsigned long long offset, void *payload, size_t length) { int ext_tcode; if (tcode == TCODE_STREAM_DATA) { packet->header[0] = HEADER_DATA_LENGTH(length) | destination_id | HEADER_TCODE(TCODE_STREAM_DATA); packet->header_length = 4; packet->payload = payload; packet->payload_length = length; goto common; } if (tcode > 0x10) { ext_tcode = tcode & ~0x10; tcode = TCODE_LOCK_REQUEST; } else ext_tcode = 0; packet->header[0] = HEADER_RETRY(RETRY_X) | HEADER_TLABEL(tlabel) | HEADER_TCODE(tcode) | HEADER_DESTINATION(destination_id); packet->header[1] = HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id); packet->header[2] = offset; switch (tcode) { case TCODE_WRITE_QUADLET_REQUEST: packet->header[3] = *(u32 *)payload; packet->header_length = 16; packet->payload_length = 0; break; case TCODE_LOCK_REQUEST: case TCODE_WRITE_BLOCK_REQUEST: packet->header[3] = HEADER_DATA_LENGTH(length) | HEADER_EXTENDED_TCODE(ext_tcode); packet->header_length = 16; packet->payload = payload; packet->payload_length = length; break; case TCODE_READ_QUADLET_REQUEST: packet->header_length = 12; packet->payload_length = 0; break; case TCODE_READ_BLOCK_REQUEST: packet->header[3] = HEADER_DATA_LENGTH(length) | HEADER_EXTENDED_TCODE(ext_tcode); packet->header_length = 16; packet->payload_length = 0; break; default: WARN(1, "wrong tcode %d\n", tcode); } common: packet->speed = speed; packet->generation = generation; packet->ack = 0; packet->payload_mapped = false; } static int allocate_tlabel(struct fw_card *card) { int tlabel; tlabel = card->current_tlabel; while (card->tlabel_mask & (1ULL << tlabel)) { tlabel = (tlabel + 1) & 0x3f; if (tlabel == card->current_tlabel) return -EBUSY; } card->current_tlabel = (tlabel + 1) & 0x3f; card->tlabel_mask |= 1ULL << tlabel; return tlabel; } /** * fw_send_request() - submit a request packet for transmission * @card: interface to send the request at * @t: transaction instance to which the request belongs * @tcode: transaction code * @destination_id: destination node ID, consisting of bus_ID and phy_ID * @generation: bus generation in which request and response are valid * @speed: transmission speed * @offset: 48bit wide offset into destination's address space * @payload: data payload for the request subaction * @length: length of the payload, in bytes * @callback: function to be called when the transaction is completed * @callback_data: data to be passed to the transaction completion callback * * Submit a request packet into the asynchronous request transmission queue. * Can be called from atomic context. If you prefer a blocking API, use * fw_run_transaction() in a context that can sleep. * * In case of lock requests, specify one of the firewire-core specific %TCODE_ * constants instead of %TCODE_LOCK_REQUEST in @tcode. * * Make sure that the value in @destination_id is not older than the one in * @generation. Otherwise the request is in danger to be sent to a wrong node. * * In case of asynchronous stream packets i.e. %TCODE_STREAM_DATA, the caller * needs to synthesize @destination_id with fw_stream_packet_destination_id(). * It will contain tag, channel, and sy data instead of a node ID then. * * The payload buffer at @data is going to be DMA-mapped except in case of * @length <= 8 or of local (loopback) requests. 
Hence make sure that the * buffer complies with the restrictions of the streaming DMA mapping API. * @payload must not be freed before the @callback is called. * * In case of request types without payload, @data is NULL and @length is 0. * * After the transaction is completed successfully or unsuccessfully, the * @callback will be called. Among its parameters is the response code which * is either one of the rcodes per IEEE 1394 or, in case of internal errors, * the firewire-core specific %RCODE_SEND_ERROR. The other firewire-core * specific rcodes (%RCODE_CANCELLED, %RCODE_BUSY, %RCODE_GENERATION, * %RCODE_NO_ACK) denote transaction timeout, busy responder, stale request * generation, or missing ACK respectively. * * Note some timing corner cases: fw_send_request() may complete much earlier * than when the request packet actually hits the wire. On the other hand, * transaction completion and hence execution of @callback may happen even * before fw_send_request() returns. */ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, int destination_id, int generation, int speed, unsigned long long offset, void *payload, size_t length, fw_transaction_callback_t callback, void *callback_data) { unsigned long flags; int tlabel; /* * Allocate tlabel from the bitmap and put the transaction on * the list while holding the card spinlock. */ spin_lock_irqsave(&card->lock, flags); tlabel = allocate_tlabel(card); if (tlabel < 0) { spin_unlock_irqrestore(&card->lock, flags); callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data); return; } t->node_id = destination_id; t->tlabel = tlabel; t->card = card; t->is_split_transaction = false; setup_timer(&t->split_timeout_timer, split_transaction_timeout_callback, (unsigned long)t); t->callback = callback; t->callback_data = callback_data; fw_fill_request(&t->packet, tcode, t->tlabel, destination_id, card->node_id, generation, speed, offset, payload, length); t->packet.callback = transmit_complete_callback; list_add_tail(&t->link, &card->transaction_list); spin_unlock_irqrestore(&card->lock, flags); card->driver->send_request(card, &t->packet); } EXPORT_SYMBOL(fw_send_request); struct transaction_callback_data { struct completion done; void *payload; int rcode; }; static void transaction_callback(struct fw_card *card, int rcode, void *payload, size_t length, void *data) { struct transaction_callback_data *d = data; if (rcode == RCODE_COMPLETE) memcpy(d->payload, payload, length); d->rcode = rcode; complete(&d->done); } /** * fw_run_transaction() - send request and sleep until transaction is completed * * Returns the RCODE. See fw_send_request() for parameter documentation. * Unlike fw_send_request(), @data points to the payload of the request or/and * to the payload of the response. DMA mapping restrictions apply to outbound * request payloads of >= 8 bytes but not to inbound response payloads. 
*/ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, int generation, int speed, unsigned long long offset, void *payload, size_t length) { struct transaction_callback_data d; struct fw_transaction t; init_timer_on_stack(&t.split_timeout_timer); init_completion(&d.done); d.payload = payload; fw_send_request(card, &t, tcode, destination_id, generation, speed, offset, payload, length, transaction_callback, &d); wait_for_completion(&d.done); destroy_timer_on_stack(&t.split_timeout_timer); return d.rcode; } EXPORT_SYMBOL(fw_run_transaction); static DEFINE_MUTEX(phy_config_mutex); static DECLARE_COMPLETION(phy_config_done); static void transmit_phy_packet_callback(struct fw_packet *packet, struct fw_card *card, int status) { complete(&phy_config_done); } static struct fw_packet phy_config_packet = { .header_length = 12, .header[0] = TCODE_LINK_INTERNAL << 4, .payload_length = 0, .speed = SCODE_100, .callback = transmit_phy_packet_callback, }; void fw_send_phy_config(struct fw_card *card, int node_id, int generation, int gap_count) { long timeout = DIV_ROUND_UP(HZ, 10); u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG); if (node_id != FW_PHY_CONFIG_NO_NODE_ID) data |= PHY_CONFIG_ROOT_ID(node_id); if (gap_count == FW_PHY_CONFIG_CURRENT_GAP_COUNT) { gap_count = card->driver->read_phy_reg(card, 1); if (gap_count < 0) return; gap_count &= 63; if (gap_count == 63) return; } data |= PHY_CONFIG_GAP_COUNT(gap_count); mutex_lock(&phy_config_mutex); phy_config_packet.header[1] = data; phy_config_packet.header[2] = ~data; phy_config_packet.generation = generation; reinit_completion(&phy_config_done); card->driver->send_request(card, &phy_config_packet); wait_for_completion_timeout(&phy_config_done, timeout); mutex_unlock(&phy_config_mutex); } static struct fw_address_handler *lookup_overlapping_address_handler( struct list_head *list, unsigned long long offset, size_t length) { struct fw_address_handler *handler; list_for_each_entry_rcu(handler, list, link) { if (handler->offset < offset + length && offset < handler->offset + handler->length) return handler; } return NULL; } static bool is_enclosing_handler(struct fw_address_handler *handler, unsigned long long offset, size_t length) { return handler->offset <= offset && offset + length <= handler->offset + handler->length; } static struct fw_address_handler *lookup_enclosing_address_handler( struct list_head *list, unsigned long long offset, size_t length) { struct fw_address_handler *handler; list_for_each_entry_rcu(handler, list, link) { if (is_enclosing_handler(handler, offset, length)) return handler; } return NULL; } static DEFINE_SPINLOCK(address_handler_list_lock); static LIST_HEAD(address_handler_list); const struct fw_address_region fw_high_memory_region = { .start = FW_MAX_PHYSICAL_RANGE, .end = 0xffffe0000000ULL, }; EXPORT_SYMBOL(fw_high_memory_region); static const struct fw_address_region low_memory_region = { .start = 0x000000000000ULL, .end = FW_MAX_PHYSICAL_RANGE, }; #if 0 const struct fw_address_region fw_private_region = { .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, }; const struct fw_address_region fw_csr_region = { .start = CSR_REGISTER_BASE, .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM_END, }; const struct fw_address_region fw_unit_space_region = { .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, }; #endif /* 0 */ static bool is_in_fcp_region(u64 offset, size_t length) { return offset >= (CSR_REGISTER_BASE | CSR_FCP_COMMAND) && offset + length <= (CSR_REGISTER_BASE | CSR_FCP_END); } /** * 
fw_core_add_address_handler() - register for incoming requests * @handler: callback * @region: region in the IEEE 1212 node space address range * * region->start, ->end, and handler->length have to be quadlet-aligned. * * When a request is received that falls within the specified address range, * the specified callback is invoked. The parameters passed to the callback * give the details of the particular request. * * To be called in process context. * Return value: 0 on success, non-zero otherwise. * * The start offset of the handler's address region is determined by * fw_core_add_address_handler() and is returned in handler->offset. * * Address allocations are exclusive, except for the FCP registers. */ int fw_core_add_address_handler(struct fw_address_handler *handler, const struct fw_address_region *region) { struct fw_address_handler *other; int ret = -EBUSY; if (region->start & 0xffff000000000003ULL || region->start >= region->end || region->end > 0x0001000000000000ULL || handler->length & 3 || handler->length == 0) return -EINVAL; spin_lock(&address_handler_list_lock); handler->offset = region->start; while (handler->offset + handler->length <= region->end) { if (is_in_fcp_region(handler->offset, handler->length)) other = NULL; else other = lookup_overlapping_address_handler (&address_handler_list, handler->offset, handler->length); if (other != NULL) { handler->offset += other->length; } else { list_add_tail_rcu(&handler->link, &address_handler_list); ret = 0; break; } } spin_unlock(&address_handler_list_lock); return ret; } EXPORT_SYMBOL(fw_core_add_address_handler); /** * fw_core_remove_address_handler() - unregister an address handler * * To be called in process context. * * When fw_core_remove_address_handler() returns, @handler->callback() is * guaranteed to not run on any CPU anymore. 
*/ void fw_core_remove_address_handler(struct fw_address_handler *handler) { spin_lock(&address_handler_list_lock); list_del_rcu(&handler->link); spin_unlock(&address_handler_list_lock); synchronize_rcu(); } EXPORT_SYMBOL(fw_core_remove_address_handler); struct fw_request { struct fw_packet response; u32 request_header[4]; int ack; u32 length; u32 data[0]; }; static void free_response_callback(struct fw_packet *packet, struct fw_card *card, int status) { struct fw_request *request; request = container_of(packet, struct fw_request, response); kfree(request); } int fw_get_response_length(struct fw_request *r) { int tcode, ext_tcode, data_length; tcode = HEADER_GET_TCODE(r->request_header[0]); switch (tcode) { case TCODE_WRITE_QUADLET_REQUEST: case TCODE_WRITE_BLOCK_REQUEST: return 0; case TCODE_READ_QUADLET_REQUEST: return 4; case TCODE_READ_BLOCK_REQUEST: data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]); return data_length; case TCODE_LOCK_REQUEST: ext_tcode = HEADER_GET_EXTENDED_TCODE(r->request_header[3]); data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]); switch (ext_tcode) { case EXTCODE_FETCH_ADD: case EXTCODE_LITTLE_ADD: return data_length; default: return data_length / 2; } default: WARN(1, "wrong tcode %d\n", tcode); return 0; } } void fw_fill_response(struct fw_packet *response, u32 *request_header, int rcode, void *payload, size_t length) { int tcode, tlabel, extended_tcode, source, destination; tcode = HEADER_GET_TCODE(request_header[0]); tlabel = HEADER_GET_TLABEL(request_header[0]); source = HEADER_GET_DESTINATION(request_header[0]); destination = HEADER_GET_SOURCE(request_header[1]); extended_tcode = HEADER_GET_EXTENDED_TCODE(request_header[3]); response->header[0] = HEADER_RETRY(RETRY_1) | HEADER_TLABEL(tlabel) | HEADER_DESTINATION(destination); response->header[1] = HEADER_SOURCE(source) | HEADER_RCODE(rcode); response->header[2] = 0; switch (tcode) { case TCODE_WRITE_QUADLET_REQUEST: case TCODE_WRITE_BLOCK_REQUEST: response->header[0] |= HEADER_TCODE(TCODE_WRITE_RESPONSE); response->header_length = 12; response->payload_length = 0; break; case TCODE_READ_QUADLET_REQUEST: response->header[0] |= HEADER_TCODE(TCODE_READ_QUADLET_RESPONSE); if (payload != NULL) response->header[3] = *(u32 *)payload; else response->header[3] = 0; response->header_length = 16; response->payload_length = 0; break; case TCODE_READ_BLOCK_REQUEST: case TCODE_LOCK_REQUEST: response->header[0] |= HEADER_TCODE(tcode + 2); response->header[3] = HEADER_DATA_LENGTH(length) | HEADER_EXTENDED_TCODE(extended_tcode); response->header_length = 16; response->payload = payload; response->payload_length = length; break; default: WARN(1, "wrong tcode %d\n", tcode); } response->payload_mapped = false; } EXPORT_SYMBOL(fw_fill_response); static u32 compute_split_timeout_timestamp(struct fw_card *card, u32 request_timestamp) { unsigned int cycles; u32 timestamp; cycles = card->split_timeout_cycles; cycles += request_timestamp & 0x1fff; timestamp = request_timestamp & ~0x1fff; timestamp += (cycles / 8000) << 13; timestamp |= cycles % 8000; return timestamp; } static struct fw_request *allocate_request(struct fw_card *card, struct fw_packet *p) { struct fw_request *request; u32 *data, length; int request_tcode; request_tcode = HEADER_GET_TCODE(p->header[0]); switch (request_tcode) { case TCODE_WRITE_QUADLET_REQUEST: data = &p->header[3]; length = 4; break; case TCODE_WRITE_BLOCK_REQUEST: case TCODE_LOCK_REQUEST: data = p->payload; length = HEADER_GET_DATA_LENGTH(p->header[3]); break; case 
TCODE_READ_QUADLET_REQUEST: data = NULL; length = 4; break; case TCODE_READ_BLOCK_REQUEST: data = NULL; length = HEADER_GET_DATA_LENGTH(p->header[3]); break; default: fw_notice(card, "ERROR - corrupt request received - %08x %08x %08x\n", p->header[0], p->header[1], p->header[2]); return NULL; } request = kmalloc(sizeof(*request) + length, GFP_ATOMIC); if (request == NULL) return NULL; request->response.speed = p->speed; request->response.timestamp = compute_split_timeout_timestamp(card, p->timestamp); request->response.generation = p->generation; request->response.ack = 0; request->response.callback = free_response_callback; request->ack = p->ack; request->length = length; if (data) memcpy(request->data, data, length); memcpy(request->request_header, p->header, sizeof(p->header)); return request; } void fw_send_response(struct fw_card *card, struct fw_request *request, int rcode) { if (WARN_ONCE(!request, "invalid for FCP address handlers")) return; /* unified transaction or broadcast transaction: don't respond */ if (request->ack != ACK_PENDING || HEADER_DESTINATION_IS_BROADCAST(request->request_header[0])) { kfree(request); return; } if (rcode == RCODE_COMPLETE) fw_fill_response(&request->response, request->request_header, rcode, request->data, fw_get_response_length(request)); else fw_fill_response(&request->response, request->request_header, rcode, NULL, 0); card->driver->send_response(card, &request->response); } EXPORT_SYMBOL(fw_send_response); /** * fw_get_request_speed() - returns speed at which the @request was received */ int fw_get_request_speed(struct fw_request *request) { return request->response.speed; } EXPORT_SYMBOL(fw_get_request_speed); static void handle_exclusive_region_request(struct fw_card *card, struct fw_packet *p, struct fw_request *request, unsigned long long offset) { struct fw_address_handler *handler; int tcode, destination, source; destination = HEADER_GET_DESTINATION(p->header[0]); source = HEADER_GET_SOURCE(p->header[1]); tcode = HEADER_GET_TCODE(p->header[0]); if (tcode == TCODE_LOCK_REQUEST) tcode = 0x10 + HEADER_GET_EXTENDED_TCODE(p->header[3]); rcu_read_lock(); handler = lookup_enclosing_address_handler(&address_handler_list, offset, request->length); if (handler) handler->address_callback(card, request, tcode, destination, source, p->generation, offset, request->data, request->length, handler->callback_data); rcu_read_unlock(); if (!handler) fw_send_response(card, request, RCODE_ADDRESS_ERROR); } static void handle_fcp_region_request(struct fw_card *card, struct fw_packet *p, struct fw_request *request, unsigned long long offset) { struct fw_address_handler *handler; int tcode, destination, source; if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) && offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) || request->length > 0x200) { fw_send_response(card, request, RCODE_ADDRESS_ERROR); return; } tcode = HEADER_GET_TCODE(p->header[0]); destination = HEADER_GET_DESTINATION(p->header[0]); source = HEADER_GET_SOURCE(p->header[1]); if (tcode != TCODE_WRITE_QUADLET_REQUEST && tcode != TCODE_WRITE_BLOCK_REQUEST) { fw_send_response(card, request, RCODE_TYPE_ERROR); return; } rcu_read_lock(); list_for_each_entry_rcu(handler, &address_handler_list, link) { if (is_enclosing_handler(handler, offset, request->length)) handler->address_callback(card, NULL, tcode, destination, source, p->generation, offset, request->data, request->length, handler->callback_data); } rcu_read_unlock(); fw_send_response(card, request, RCODE_COMPLETE); } void 
fw_core_handle_request(struct fw_card *card, struct fw_packet *p) { struct fw_request *request; unsigned long long offset; if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE) return; if (TCODE_IS_LINK_INTERNAL(HEADER_GET_TCODE(p->header[0]))) { fw_cdev_handle_phy_packet(card, p); return; } request = allocate_request(card, p); if (request == NULL) { /* FIXME: send statically allocated busy packet. */ return; } offset = ((u64)HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) | p->header[2]; if (!is_in_fcp_region(offset, request->length)) handle_exclusive_region_request(card, p, request, offset); else handle_fcp_region_request(card, p, request, offset); } EXPORT_SYMBOL(fw_core_handle_request); void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) { struct fw_transaction *t; unsigned long flags; u32 *data; size_t data_length; int tcode, tlabel, source, rcode; tcode = HEADER_GET_TCODE(p->header[0]); tlabel = HEADER_GET_TLABEL(p->header[0]); source = HEADER_GET_SOURCE(p->header[1]); rcode = HEADER_GET_RCODE(p->header[1]); spin_lock_irqsave(&card->lock, flags); list_for_each_entry(t, &card->transaction_list, link) { if (t->node_id == source && t->tlabel == tlabel) { if (!try_cancel_split_timeout(t)) { spin_unlock_irqrestore(&card->lock, flags); goto timed_out; } list_del_init(&t->link); card->tlabel_mask &= ~(1ULL << t->tlabel); break; } } spin_unlock_irqrestore(&card->lock, flags); if (&t->link == &card->transaction_list) { timed_out: fw_notice(card, "unsolicited response (source %x, tlabel %x)\n", source, tlabel); return; } /* * FIXME: sanity check packet, is length correct, does tcodes * and addresses match. */ switch (tcode) { case TCODE_READ_QUADLET_RESPONSE: data = (u32 *) &p->header[3]; data_length = 4; break; case TCODE_WRITE_RESPONSE: data = NULL; data_length = 0; break; case TCODE_READ_BLOCK_RESPONSE: case TCODE_LOCK_RESPONSE: data = p->payload; data_length = HEADER_GET_DATA_LENGTH(p->header[3]); break; default: /* Should never happen, this is just to shut up gcc. */ data = NULL; data_length = 0; break; } /* * The response handler may be executed while the request handler * is still pending. Cancel the request handler. 
*/ card->driver->cancel_packet(card, &t->packet); t->callback(card, rcode, data, data_length, t->callback_data); } EXPORT_SYMBOL(fw_core_handle_response); /** * fw_rcode_string - convert a firewire result code to an error description * @rcode: the result code */ const char *fw_rcode_string(int rcode) { static const char *const names[] = { [RCODE_COMPLETE] = "no error", [RCODE_CONFLICT_ERROR] = "conflict error", [RCODE_DATA_ERROR] = "data error", [RCODE_TYPE_ERROR] = "type error", [RCODE_ADDRESS_ERROR] = "address error", [RCODE_SEND_ERROR] = "send error", [RCODE_CANCELLED] = "timeout", [RCODE_BUSY] = "busy", [RCODE_GENERATION] = "bus reset", [RCODE_NO_ACK] = "no ack", }; if ((unsigned int)rcode < ARRAY_SIZE(names) && names[rcode]) return names[rcode]; else return "unknown"; } EXPORT_SYMBOL(fw_rcode_string); static const struct fw_address_region topology_map_region = { .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP, .end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, }; static void handle_topology_map(struct fw_card *card, struct fw_request *request, int tcode, int destination, int source, int generation, unsigned long long offset, void *payload, size_t length, void *callback_data) { int start; if (!TCODE_IS_READ_REQUEST(tcode)) { fw_send_response(card, request, RCODE_TYPE_ERROR); return; } if ((offset & 3) > 0 || (length & 3) > 0) { fw_send_response(card, request, RCODE_ADDRESS_ERROR); return; } start = (offset - topology_map_region.start) / 4; memcpy(payload, &card->topology_map[start], length); fw_send_response(card, request, RCODE_COMPLETE); } static struct fw_address_handler topology_map = { .length = 0x400, .address_callback = handle_topology_map, }; static const struct fw_address_region registers_region = { .start = CSR_REGISTER_BASE, .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, }; static void update_split_timeout(struct fw_card *card) { unsigned int cycles; cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19); /* minimum per IEEE 1394, maximum which doesn't overflow OHCI */ cycles = clamp(cycles, 800u, 3u * 8000u); card->split_timeout_cycles = cycles; card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000); } static void handle_registers(struct fw_card *card, struct fw_request *request, int tcode, int destination, int source, int generation, unsigned long long offset, void *payload, size_t length, void *callback_data) { int reg = offset & ~CSR_REGISTER_BASE; __be32 *data = payload; int rcode = RCODE_COMPLETE; unsigned long flags; switch (reg) { case CSR_PRIORITY_BUDGET: if (!card->priority_budget_implemented) { rcode = RCODE_ADDRESS_ERROR; break; } /* else fall through */ case CSR_NODE_IDS: /* * per IEEE 1394-2008 8.3.22.3, not IEEE 1394.1-2004 3.2.8 * and 9.6, but interoperable with IEEE 1394.1-2004 bridges */ /* fall through */ case CSR_STATE_CLEAR: case CSR_STATE_SET: case CSR_CYCLE_TIME: case CSR_BUS_TIME: case CSR_BUSY_TIMEOUT: if (tcode == TCODE_READ_QUADLET_REQUEST) *data = cpu_to_be32(card->driver->read_csr(card, reg)); else if (tcode == TCODE_WRITE_QUADLET_REQUEST) card->driver->write_csr(card, reg, be32_to_cpu(*data)); else rcode = RCODE_TYPE_ERROR; break; case CSR_RESET_START: if (tcode == TCODE_WRITE_QUADLET_REQUEST) card->driver->write_csr(card, CSR_STATE_CLEAR, CSR_STATE_BIT_ABDICATE); else rcode = RCODE_TYPE_ERROR; break; case CSR_SPLIT_TIMEOUT_HI: if (tcode == TCODE_READ_QUADLET_REQUEST) { *data = cpu_to_be32(card->split_timeout_hi); } else if (tcode == TCODE_WRITE_QUADLET_REQUEST) { spin_lock_irqsave(&card->lock, flags); card->split_timeout_hi = 
be32_to_cpu(*data) & 7; update_split_timeout(card); spin_unlock_irqrestore(&card->lock, flags); } else { rcode = RCODE_TYPE_ERROR; } break; case CSR_SPLIT_TIMEOUT_LO: if (tcode == TCODE_READ_QUADLET_REQUEST) { *data = cpu_to_be32(card->split_timeout_lo); } else if (tcode == TCODE_WRITE_QUADLET_REQUEST) { spin_lock_irqsave(&card->lock, flags); card->split_timeout_lo = be32_to_cpu(*data) & 0xfff80000; update_split_timeout(card); spin_unlock_irqrestore(&card->lock, flags); } else { rcode = RCODE_TYPE_ERROR; } break; case CSR_MAINT_UTILITY: if (tcode == TCODE_READ_QUADLET_REQUEST) *data = card->maint_utility_register; else if (tcode == TCODE_WRITE_QUADLET_REQUEST) card->maint_utility_register = *data; else rcode = RCODE_TYPE_ERROR; break; case CSR_BROADCAST_CHANNEL: if (tcode == TCODE_READ_QUADLET_REQUEST) *data = cpu_to_be32(card->broadcast_channel); else if (tcode == TCODE_WRITE_QUADLET_REQUEST) card->broadcast_channel = (be32_to_cpu(*data) & BROADCAST_CHANNEL_VALID) | BROADCAST_CHANNEL_INITIAL; else rcode = RCODE_TYPE_ERROR; break; case CSR_BUS_MANAGER_ID: case CSR_BANDWIDTH_AVAILABLE: case CSR_CHANNELS_AVAILABLE_HI: case CSR_CHANNELS_AVAILABLE_LO: /* * FIXME: these are handled by the OHCI hardware and * the stack never sees these requests. If we add * support for a new type of controller that doesn't * handle this in hardware, we need to deal with these * transactions. */ BUG(); break; default: rcode = RCODE_ADDRESS_ERROR; break; } fw_send_response(card, request, rcode); } static struct fw_address_handler registers = { .length = 0x400, .address_callback = handle_registers, }; static void handle_low_memory(struct fw_card *card, struct fw_request *request, int tcode, int destination, int source, int generation, unsigned long long offset, void *payload, size_t length, void *callback_data) { /* * This catches requests not handled by the physical DMA unit, * i.e., wrong transaction types or unauthorized source nodes.
*/ fw_send_response(card, request, RCODE_TYPE_ERROR); } static struct fw_address_handler low_memory = { .length = FW_MAX_PHYSICAL_RANGE, .address_callback = handle_low_memory, }; MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); MODULE_DESCRIPTION("Core IEEE1394 transaction logic"); MODULE_LICENSE("GPL"); static const u32 vendor_textual_descriptor[] = { /* textual descriptor leaf () */ 0x00060000, 0x00000000, 0x00000000, 0x4c696e75, /* L i n u */ 0x78204669, /* x F i */ 0x72657769, /* r e w i */ 0x72650000, /* r e */ }; static const u32 model_textual_descriptor[] = { /* model descriptor leaf () */ 0x00030000, 0x00000000, 0x00000000, 0x4a756a75, /* J u j u */ }; static struct fw_descriptor vendor_id_descriptor = { .length = ARRAY_SIZE(vendor_textual_descriptor), .immediate = 0x03001f11, .key = 0x81000000, .data = vendor_textual_descriptor, }; static struct fw_descriptor model_id_descriptor = { .length = ARRAY_SIZE(model_textual_descriptor), .immediate = 0x17023901, .key = 0x81000000, .data = model_textual_descriptor, }; static int __init fw_core_init(void) { int ret; fw_workqueue = alloc_workqueue("firewire", WQ_MEM_RECLAIM, 0); if (!fw_workqueue) return -ENOMEM; ret = bus_register(&fw_bus_type); if (ret < 0) { destroy_workqueue(fw_workqueue); return ret; } fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops); if (fw_cdev_major < 0) { bus_unregister(&fw_bus_type); destroy_workqueue(fw_workqueue); return fw_cdev_major; } fw_core_add_address_handler(&topology_map, &topology_map_region); fw_core_add_address_handler(&registers, &registers_region); fw_core_add_address_handler(&low_memory, &low_memory_region); fw_core_add_descriptor(&vendor_id_descriptor); fw_core_add_descriptor(&model_id_descriptor); return 0; } static void __exit fw_core_cleanup(void) { unregister_chrdev(fw_cdev_major, "firewire"); bus_unregister(&fw_bus_type); destroy_workqueue(fw_workqueue); idr_destroy(&fw_device_idr); } module_init(fw_core_init); module_exit(fw_core_cleanup);
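The CSR split-timeout registers handled above store whole seconds in the low three bits of SPLIT_TIMEOUT_HI and 1/8000-second cycles in bits 31..19 of SPLIT_TIMEOUT_LO; update_split_timeout() folds both into a cycle count clamped between the IEEE 1394 minimum and an OHCI-safe maximum. A minimal standalone sketch of that arithmetic follows; HZ = 100 and the sample register values are assumptions for illustration, not part of the driver.

#include <stdio.h>

#define HZ 100	/* assumed tick rate for the example */

static void split_timeout(unsigned int hi, unsigned int lo)
{
	/* seconds * 8000 cycles/s plus the fractional cycle count */
	unsigned int cycles = (hi & 7) * 8000 + ((lo & 0xfff80000u) >> 19);

	/* minimum per IEEE 1394, maximum which doesn't overflow OHCI */
	if (cycles < 800)
		cycles = 800;
	if (cycles > 3 * 8000)
		cycles = 3 * 8000;

	/* jiffies = DIV_ROUND_UP(cycles * HZ, 8000) */
	printf("hi=%u lo=0x%08x -> %5u cycles, %3u jiffies\n",
	       hi, lo, cycles, (cycles * HZ + 7999) / 8000);
}

int main(void)
{
	split_timeout(0, 0x80000000u);	/* 0.512 s -> 4096 cycles */
	split_timeout(0, 0x00000000u);	/* clamped up to 800 cycles */
	split_timeout(7, 0x00000000u);	/* clamped down to 24000 cycles */
	return 0;
}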
gpl-2.0
KaijiHakaroku/kernel_acer_picasso
kernel/futex_compat.c
2329
4561
/* * linux/kernel/futex_compat.c * * Futex compatibility routines. * * Copyright 2006, Red Hat, Inc., Ingo Molnar */ #include <linux/linkage.h> #include <linux/compat.h> #include <linux/nsproxy.h> #include <linux/futex.h> #include <linux/ptrace.h> #include <linux/syscalls.h> #include <asm/uaccess.h> /* * Fetch a robust-list pointer. Bit 0 signals PI futexes: */ static inline int fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, compat_uptr_t __user *head, unsigned int *pi) { if (get_user(*uentry, head)) return -EFAULT; *entry = compat_ptr((*uentry) & ~1); *pi = (unsigned int)(*uentry) & 1; return 0; } static void __user *futex_uaddr(struct robust_list __user *entry, compat_long_t futex_offset) { compat_uptr_t base = ptr_to_compat(entry); void __user *uaddr = compat_ptr(base + futex_offset); return uaddr; } /* * Walk curr->robust_list (very carefully, it's a userspace list!) * and mark any locks found there dead, and notify any waiters. * * We silently return on any sign of a list-walking problem. */ void compat_exit_robust_list(struct task_struct *curr) { struct compat_robust_list_head __user *head = curr->compat_robust_list; struct robust_list __user *entry, *next_entry, *pending; unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; unsigned int uninitialized_var(next_pi); compat_uptr_t uentry, next_uentry, upending; compat_long_t futex_offset; int rc; if (!futex_cmpxchg_enabled) return; /* * Fetch the list head (which was registered earlier, via * sys_set_robust_list()): */ if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) return; /* * Fetch the relative futex offset: */ if (get_user(futex_offset, &head->futex_offset)) return; /* * Fetch any possibly pending lock-add first, and handle it * if it exists: */ if (fetch_robust_entry(&upending, &pending, &head->list_op_pending, &pip)) return; next_entry = NULL; /* avoid warning with gcc */ while (entry != (struct robust_list __user *) &head->list) { /* * Fetch the next entry in the list before calling * handle_futex_death: */ rc = fetch_robust_entry(&next_uentry, &next_entry, (compat_uptr_t __user *)&entry->next, &next_pi); /* * A pending lock might already be on the list, so * don't process it twice: */ if (entry != pending) { void __user *uaddr = futex_uaddr(entry, futex_offset); if (handle_futex_death(uaddr, curr, pi)) return; } if (rc) return; uentry = next_uentry; entry = next_entry; pi = next_pi; /* * Avoid excessively long or circular lists: */ if (!--limit) break; cond_resched(); } if (pending) { void __user *uaddr = futex_uaddr(pending, futex_offset); handle_futex_death(uaddr, curr, pip); } } COMPAT_SYSCALL_DEFINE2(set_robust_list, struct compat_robust_list_head __user *, head, compat_size_t, len) { if (!futex_cmpxchg_enabled) return -ENOSYS; if (unlikely(len != sizeof(*head))) return -EINVAL; current->compat_robust_list = head; return 0; } COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid, compat_uptr_t __user *, head_ptr, compat_size_t __user *, len_ptr) { struct compat_robust_list_head __user *head; unsigned long ret; struct task_struct *p; if (!futex_cmpxchg_enabled) return -ENOSYS; rcu_read_lock(); ret = -ESRCH; if (!pid) p = current; else { p = find_task_by_vpid(pid); if (!p) goto err_unlock; } ret = -EPERM; if (!ptrace_may_access(p, PTRACE_MODE_READ)) goto err_unlock; head = p->compat_robust_list; rcu_read_unlock(); if (put_user(sizeof(*head), len_ptr)) return -EFAULT; return put_user(ptr_to_compat(head), head_ptr); err_unlock: rcu_read_unlock(); return ret; } COMPAT_SYSCALL_DEFINE6(futex, u32
__user *, uaddr, int, op, u32, val, struct compat_timespec __user *, utime, u32 __user *, uaddr2, u32, val3) { struct timespec ts; ktime_t t, *tp = NULL; int val2 = 0; int cmd = op & FUTEX_CMD_MASK; if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || cmd == FUTEX_WAIT_BITSET || cmd == FUTEX_WAIT_REQUEUE_PI)) { if (get_compat_timespec(&ts, utime)) return -EFAULT; if (!timespec_valid(&ts)) return -EINVAL; t = timespec_to_ktime(ts); if (cmd == FUTEX_WAIT) t = ktime_add_safe(ktime_get(), t); tp = &t; } if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE || cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP) val2 = (int) (unsigned long) utime; return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); }
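fetch_robust_entry() above decodes a tagged user pointer: bit 0 of each robust-list word marks a PI futex and the remaining bits are the entry address. Below is a small standalone sketch of the same encode/decode, assuming plain native pointers rather than the kernel's compat_uptr_t layer.

#include <assert.h>
#include <stdint.h>

struct robust_list { struct robust_list *next; };

/* bit 0 carries the PI flag, the rest is the entry address */
static uintptr_t encode_entry(struct robust_list *entry, unsigned int pi)
{
	return (uintptr_t)entry | (pi & 1);
}

static struct robust_list *decode_entry(uintptr_t uentry, unsigned int *pi)
{
	*pi = (unsigned int)uentry & 1;
	return (struct robust_list *)(uentry & ~(uintptr_t)1);
}

int main(void)
{
	struct robust_list node = { 0 };
	unsigned int pi;
	uintptr_t u = encode_entry(&node, 1);

	assert(decode_entry(u, &pi) == &node);
	assert(pi == 1);
	return 0;
}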
gpl-2.0
sos987/kernel-v9plus
sound/isa/cs423x/cs4231.c
4633
6021
/* * Generic driver for CS4231 chips * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Originally the CS4232/CS4232A driver, modified for use on CS4231 by * Tugrul Galatali <galatalt@stuy.edu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/err.h> #include <linux/isa.h> #include <linux/time.h> #include <linux/wait.h> #include <linux/moduleparam.h> #include <sound/core.h> #include <sound/wss.h> #include <sound/mpu401.h> #include <sound/initval.h> #define CRD_NAME "Generic CS4231" #define DEV_NAME "cs4231" MODULE_DESCRIPTION(CRD_NAME); MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Crystal Semiconductors,CS4231}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE; /* Enable this card */ static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static long mpu_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* 5,7,9,11,12,15 */ static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* 9,11,12,15 */ static int dma1[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* 0,1,3,5,6,7 */ static int dma2[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* 0,1,3,5,6,7 */ module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for " CRD_NAME " soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for " CRD_NAME " soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable " CRD_NAME " soundcard."); module_param_array(port, long, NULL, 0444); MODULE_PARM_DESC(port, "Port # for " CRD_NAME " driver."); module_param_array(mpu_port, long, NULL, 0444); MODULE_PARM_DESC(mpu_port, "MPU-401 port # for " CRD_NAME " driver."); module_param_array(irq, int, NULL, 0444); MODULE_PARM_DESC(irq, "IRQ # for " CRD_NAME " driver."); module_param_array(mpu_irq, int, NULL, 0444); MODULE_PARM_DESC(mpu_irq, "MPU-401 IRQ # for " CRD_NAME " driver."); module_param_array(dma1, int, NULL, 0444); MODULE_PARM_DESC(dma1, "DMA1 # for " CRD_NAME " driver."); module_param_array(dma2, int, NULL, 0444); MODULE_PARM_DESC(dma2, "DMA2 # for " CRD_NAME " driver."); static int __devinit snd_cs4231_match(struct device *dev, unsigned int n) { if (!enable[n]) return 0; if (port[n] == SNDRV_AUTO_PORT) { dev_err(dev, "please specify port\n"); return 0; } if (irq[n] == SNDRV_AUTO_IRQ) { dev_err(dev, "please specify irq\n"); return 0; } if (dma1[n] == SNDRV_AUTO_DMA) { dev_err(dev, "please specify dma1\n"); return 0; } return 1; } static int __devinit snd_cs4231_probe(struct device *dev, unsigned int n) { struct snd_card *card; struct snd_wss *chip; struct snd_pcm *pcm; int error; error = snd_card_create(index[n], id[n], THIS_MODULE, 0, 
&card); if (error < 0) return error; error = snd_wss_create(card, port[n], -1, irq[n], dma1[n], dma2[n], WSS_HW_DETECT, 0, &chip); if (error < 0) goto out; card->private_data = chip; error = snd_wss_pcm(chip, 0, &pcm); if (error < 0) goto out; strcpy(card->driver, "CS4231"); strcpy(card->shortname, pcm->name); sprintf(card->longname, "%s at 0x%lx, irq %d, dma %d", pcm->name, chip->port, irq[n], dma1[n]); if (dma2[n] >= 0) sprintf(card->longname + strlen(card->longname), "&%d", dma2[n]); error = snd_wss_mixer(chip); if (error < 0) goto out; error = snd_wss_timer(chip, 0, NULL); if (error < 0) goto out; if (mpu_port[n] > 0 && mpu_port[n] != SNDRV_AUTO_PORT) { if (mpu_irq[n] == SNDRV_AUTO_IRQ) mpu_irq[n] = -1; if (snd_mpu401_uart_new(card, 0, MPU401_HW_CS4232, mpu_port[n], 0, mpu_irq[n], mpu_irq[n] >= 0 ? IRQF_DISABLED : 0, NULL) < 0) dev_warn(dev, "MPU401 not detected\n"); } snd_card_set_dev(card, dev); error = snd_card_register(card); if (error < 0) goto out; dev_set_drvdata(dev, card); return 0; out: snd_card_free(card); return error; } static int __devexit snd_cs4231_remove(struct device *dev, unsigned int n) { snd_card_free(dev_get_drvdata(dev)); dev_set_drvdata(dev, NULL); return 0; } #ifdef CONFIG_PM static int snd_cs4231_suspend(struct device *dev, unsigned int n, pm_message_t state) { struct snd_card *card = dev_get_drvdata(dev); struct snd_wss *chip = card->private_data; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); chip->suspend(chip); return 0; } static int snd_cs4231_resume(struct device *dev, unsigned int n) { struct snd_card *card = dev_get_drvdata(dev); struct snd_wss *chip = card->private_data; chip->resume(chip); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } #endif static struct isa_driver snd_cs4231_driver = { .match = snd_cs4231_match, .probe = snd_cs4231_probe, .remove = __devexit_p(snd_cs4231_remove), #ifdef CONFIG_PM .suspend = snd_cs4231_suspend, .resume = snd_cs4231_resume, #endif .driver = { .name = DEV_NAME } }; static int __init alsa_card_cs4231_init(void) { return isa_register_driver(&snd_cs4231_driver, SNDRV_CARDS); } static void __exit alsa_card_cs4231_exit(void) { isa_unregister_driver(&snd_cs4231_driver); } module_init(alsa_card_cs4231_init); module_exit(alsa_card_cs4231_exit);
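snd_cs4231_probe() above assembles the card's longname from the probed resources, appending the second DMA channel with a '&' only when one was configured. A standalone sketch of that formatting, with made-up resource values and snprintf in place of the driver's sprintf:

#include <stdio.h>

int main(void)
{
	char longname[80];
	const char *name = "CS4231";	/* pcm->name in the driver */
	unsigned long port = 0x530;	/* made-up resources */
	int irq = 5, dma1 = 1, dma2 = 0;
	int len;

	len = snprintf(longname, sizeof(longname),
		       "%s at 0x%lx, irq %d, dma %d", name, port, irq, dma1);
	if (dma2 >= 0)	/* the second DMA channel is optional */
		snprintf(longname + len, sizeof(longname) - len, "&%d", dma2);

	printf("%s\n", longname);	/* CS4231 at 0x530, irq 5, dma 1&0 */
	return 0;
}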
gpl-2.0
AdiPat/android_kernel_tegra_n1
drivers/net/tokenring/skisa.c
4889
9833
/* * skisa.c: A network driver for SK-NET TMS380-based ISA token ring cards. * * Based on tmspci written 1999 by Adam Fritzler * * Written 2000 by Jochen Friedrich * Dedicated to my girlfriend Steffi Bopp * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * This driver module supports the following cards: * - SysKonnect TR4/16(+) ISA (SK-4190) * * Maintainer(s): * AF Adam Fritzler * JF Jochen Friedrich jochen@scram.de * * Modification History: * 14-Jan-01 JF Created * 28-Oct-02 JF Fixed probe of card for static compilation. * Fixed module init to not make hotplug go wild. * 09-Nov-02 JF Fixed early bail out on out of memory * situations if multiple cards are found. * Cleaned up some unnecessary console SPAM. * 09-Dec-02 JF Fixed module reference counting. * 02-Jan-03 JF Renamed to skisa.c * */ static const char version[] = "skisa.c: v1.03 09/12/2002 by Jochen Friedrich\n"; #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/trdevice.h> #include <linux/platform_device.h> #include <asm/system.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/pci.h> #include <asm/dma.h> #include "tms380tr.h" #define SK_ISA_IO_EXTENT 32 /* A zero-terminated list of I/O addresses to be probed. */ static unsigned int portlist[] __initdata = { 0x0A20, 0x1A20, 0x0B20, 0x1B20, 0x0980, 0x1980, 0x0900, 0x1900,// SK 0 }; /* A zero-terminated list of IRQs to be probed. * Used again after initial probe for sktr_chipset_init, called from sktr_open. */ static const unsigned short irqlist[] = { 3, 5, 9, 10, 11, 12, 15, 0 }; /* A zero-terminated list of DMAs to be probed. 
*/ static int dmalist[] __initdata = { 5, 6, 7, 0 }; static char isa_cardname[] = "SK NET TR 4/16 ISA\0"; static u64 dma_mask = ISA_MAX_ADDRESS; static int sk_isa_open(struct net_device *dev); static void sk_isa_read_eeprom(struct net_device *dev); static unsigned short sk_isa_setnselout_pins(struct net_device *dev); static unsigned short sk_isa_sifreadb(struct net_device *dev, unsigned short reg) { return inb(dev->base_addr + reg); } static unsigned short sk_isa_sifreadw(struct net_device *dev, unsigned short reg) { return inw(dev->base_addr + reg); } static void sk_isa_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg) { outb(val, dev->base_addr + reg); } static void sk_isa_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg) { outw(val, dev->base_addr + reg); } static int __init sk_isa_probe1(struct net_device *dev, int ioaddr) { unsigned char old, chk1, chk2; if (!request_region(ioaddr, SK_ISA_IO_EXTENT, isa_cardname)) return -ENODEV; old = inb(ioaddr + SIFADR); /* Get the old SIFADR value */ chk1 = 0; /* Begin with check value 0 */ do { /* Write new SIFADR value */ outb(chk1, ioaddr + SIFADR); /* Read, invert and write */ chk2 = inb(ioaddr + SIFADD); chk2 ^= 0x0FE; outb(chk2, ioaddr + SIFADR); /* Read, invert and compare */ chk2 = inb(ioaddr + SIFADD); chk2 ^= 0x0FE; if(chk1 != chk2) { release_region(ioaddr, SK_ISA_IO_EXTENT); return -ENODEV; } chk1 -= 2; } while(chk1 != 0); /* Repeat 128 times (all byte values) */ /* Restore the SIFADR value */ outb(old, ioaddr + SIFADR); dev->base_addr = ioaddr; return 0; } static struct net_device_ops sk_isa_netdev_ops __read_mostly; static int __init setup_card(struct net_device *dev, struct device *pdev) { struct net_local *tp; static int versionprinted; const unsigned *port; int j, err = 0; if (!dev) return -ENOMEM; if (dev->base_addr) /* probe specific location */ err = sk_isa_probe1(dev, dev->base_addr); else { for (port = portlist; *port; port++) { err = sk_isa_probe1(dev, *port); if (!err) break; } } if (err) goto out5; /* At this point we have found a valid card. 
*/ if (versionprinted++ == 0) printk(KERN_DEBUG "%s", version); err = -EIO; pdev->dma_mask = &dma_mask; if (tmsdev_init(dev, pdev)) goto out4; dev->base_addr &= ~3; sk_isa_read_eeprom(dev); printk(KERN_DEBUG "skisa.c: Ring Station Address: %pM\n", dev->dev_addr); tp = netdev_priv(dev); tp->setnselout = sk_isa_setnselout_pins; tp->sifreadb = sk_isa_sifreadb; tp->sifreadw = sk_isa_sifreadw; tp->sifwriteb = sk_isa_sifwriteb; tp->sifwritew = sk_isa_sifwritew; memcpy(tp->ProductID, isa_cardname, PROD_ID_SIZE + 1); tp->tmspriv = NULL; dev->netdev_ops = &sk_isa_netdev_ops; if (dev->irq == 0) { for(j = 0; irqlist[j] != 0; j++) { dev->irq = irqlist[j]; if (!request_irq(dev->irq, tms380tr_interrupt, 0, isa_cardname, dev)) break; } if(irqlist[j] == 0) { printk(KERN_INFO "skisa.c: AutoSelect no IRQ available\n"); goto out3; } } else { for(j = 0; irqlist[j] != 0; j++) if (irqlist[j] == dev->irq) break; if (irqlist[j] == 0) { printk(KERN_INFO "skisa.c: Illegal IRQ %d specified\n", dev->irq); goto out3; } if (request_irq(dev->irq, tms380tr_interrupt, 0, isa_cardname, dev)) { printk(KERN_INFO "skisa.c: Selected IRQ %d not available\n", dev->irq); goto out3; } } if (dev->dma == 0) { for(j = 0; dmalist[j] != 0; j++) { dev->dma = dmalist[j]; if (!request_dma(dev->dma, isa_cardname)) break; } if(dmalist[j] == 0) { printk(KERN_INFO "skisa.c: AutoSelect no DMA available\n"); goto out2; } } else { for(j = 0; dmalist[j] != 0; j++) if (dmalist[j] == dev->dma) break; if (dmalist[j] == 0) { printk(KERN_INFO "skisa.c: Illegal DMA %d specified\n", dev->dma); goto out2; } if (request_dma(dev->dma, isa_cardname)) { printk(KERN_INFO "skisa.c: Selected DMA %d not available\n", dev->dma); goto out2; } } err = register_netdev(dev); if (err) goto out; printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n", dev->name, dev->base_addr, dev->irq, dev->dma); return 0; out: free_dma(dev->dma); out2: free_irq(dev->irq, dev); out3: tmsdev_term(dev); out4: release_region(dev->base_addr, SK_ISA_IO_EXTENT); out5: return err; } /* * Reads MAC address from adapter RAM, which should've read it from * the onboard ROM. * * Calling this on a board that does not support it can be a very * dangerous thing. The Madge board, for instance, will lock your * machine hard when this is called. Luckily, it's supported in a * separate driver.
--ASF */ static void sk_isa_read_eeprom(struct net_device *dev) { int i; /* Address: 0000:0000 */ sk_isa_sifwritew(dev, 0, SIFADX); sk_isa_sifwritew(dev, 0, SIFADR); /* Read six byte MAC address data */ dev->addr_len = 6; for(i = 0; i < 6; i++) dev->dev_addr[i] = sk_isa_sifreadw(dev, SIFINC) >> 8; } static unsigned short sk_isa_setnselout_pins(struct net_device *dev) { return 0; } static int sk_isa_open(struct net_device *dev) { struct net_local *tp = netdev_priv(dev); unsigned short val = 0; unsigned short oldval; int i; val = 0; for(i = 0; irqlist[i] != 0; i++) { if(irqlist[i] == dev->irq) break; } val |= CYCLE_TIME << 2; val |= i << 4; i = dev->dma - 5; val |= i; if(tp->DataRate == SPEED_4) val |= LINE_SPEED_BIT; else val &= ~LINE_SPEED_BIT; oldval = sk_isa_sifreadb(dev, POSREG); /* Leave cycle bits alone */ oldval |= 0xf3; val &= oldval; sk_isa_sifwriteb(dev, val, POSREG); return tms380tr_open(dev); } #define ISATR_MAX_ADAPTERS 3 static int io[ISATR_MAX_ADAPTERS]; static int irq[ISATR_MAX_ADAPTERS]; static int dma[ISATR_MAX_ADAPTERS]; MODULE_LICENSE("GPL"); module_param_array(io, int, NULL, 0); module_param_array(irq, int, NULL, 0); module_param_array(dma, int, NULL, 0); static struct platform_device *sk_isa_dev[ISATR_MAX_ADAPTERS]; static struct platform_driver sk_isa_driver = { .driver = { .name = "skisa", }, }; static int __init sk_isa_init(void) { struct net_device *dev; struct platform_device *pdev; int i, num = 0, err = 0; sk_isa_netdev_ops = tms380tr_netdev_ops; sk_isa_netdev_ops.ndo_open = sk_isa_open; sk_isa_netdev_ops.ndo_stop = tms380tr_close; err = platform_driver_register(&sk_isa_driver); if (err) return err; for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) { dev = alloc_trdev(sizeof(struct net_local)); if (!dev) continue; dev->base_addr = io[i]; dev->irq = irq[i]; dev->dma = dma[i]; pdev = platform_device_register_simple("skisa", i, NULL, 0); if (IS_ERR(pdev)) { free_netdev(dev); continue; } err = setup_card(dev, &pdev->dev); if (!err) { sk_isa_dev[i] = pdev; platform_set_drvdata(sk_isa_dev[i], dev); ++num; } else { platform_device_unregister(pdev); free_netdev(dev); } } printk(KERN_NOTICE "skisa.c: %d cards found.\n", num); /* Probe for cards. */ if (num == 0) { printk(KERN_NOTICE "skisa.c: No cards found.\n"); platform_driver_unregister(&sk_isa_driver); return -ENODEV; } return 0; } static void __exit sk_isa_cleanup(void) { struct net_device *dev; int i; for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) { struct platform_device *pdev = sk_isa_dev[i]; if (!pdev) continue; dev = platform_get_drvdata(pdev); unregister_netdev(dev); release_region(dev->base_addr, SK_ISA_IO_EXTENT); free_irq(dev->irq, dev); free_dma(dev->dma); tmsdev_term(dev); free_netdev(dev); platform_set_drvdata(pdev, NULL); platform_device_unregister(pdev); } platform_driver_unregister(&sk_isa_driver); } module_init(sk_isa_init); module_exit(sk_isa_cleanup);
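sk_isa_probe1() above detects a card by writing every even byte value to SIFADR, reading it back through SIFADD, writing back the inverted value, and checking that the inversion round-trips. Below is a standalone simulation of that loop against a fake 8-bit register; on real hardware the accesses would go through inb/outb.

#include <stdio.h>

static unsigned char sif_reg;	/* stands in for the card's SIFADR/SIFADD */

static void sif_write(unsigned char v) { sif_reg = v; }
static unsigned char sif_read(void) { return sif_reg; }

int main(void)
{
	unsigned char chk1 = 0, chk2;

	do {
		sif_write(chk1);		/* write test value */
		chk2 = sif_read() ^ 0xFE;	/* read and invert */
		sif_write(chk2);		/* write it back */
		if ((sif_read() ^ 0xFE) != chk1) {
			puts("no card at this address");
			return 1;
		}
		chk1 -= 2;	/* 128 iterations cover all even values */
	} while (chk1 != 0);

	puts("register behaves like a SK-NET SIF");
	return 0;
}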
gpl-2.0
vwmofo/android_kernel_htc_liberty-villec2
sound/pci/bt87x.c
4889
30413
/* * bt87x.c - Brooktree Bt878/Bt879 driver for ALSA * * Copyright (c) Clemens Ladisch <clemens@ladisch.de> * * based on btaudio.c by Gerd Knorr <kraxel@bytesex.org> * * * This driver is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/bitops.h> #include <asm/io.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/control.h> #include <sound/initval.h> MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); MODULE_DESCRIPTION("Brooktree Bt87x audio driver"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Brooktree,Bt878}," "{Brooktree,Bt879}}"); static int index[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = -2}; /* Exclude the first card */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */ static int digital_rate[SNDRV_CARDS]; /* digital input rate */ static bool load_all; /* allow to load the non-whitelisted cards */ module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for Bt87x soundcard"); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for Bt87x soundcard"); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable Bt87x soundcard"); module_param_array(digital_rate, int, NULL, 0444); MODULE_PARM_DESC(digital_rate, "Digital input rate for Bt87x soundcard"); module_param(load_all, bool, 0444); MODULE_PARM_DESC(load_all, "Allow to load the non-whitelisted cards"); /* register offsets */ #define REG_INT_STAT 0x100 /* interrupt status */ #define REG_INT_MASK 0x104 /* interrupt mask */ #define REG_GPIO_DMA_CTL 0x10c /* audio control */ #define REG_PACKET_LEN 0x110 /* audio packet lengths */ #define REG_RISC_STRT_ADD 0x114 /* RISC program start address */ #define REG_RISC_COUNT 0x120 /* RISC program counter */ /* interrupt bits */ #define INT_OFLOW (1 << 3) /* audio A/D overflow */ #define INT_RISCI (1 << 11) /* RISC instruction IRQ bit set */ #define INT_FBUS (1 << 12) /* FIFO overrun due to bus access latency */ #define INT_FTRGT (1 << 13) /* FIFO overrun due to target latency */ #define INT_FDSR (1 << 14) /* FIFO data stream resynchronization */ #define INT_PPERR (1 << 15) /* PCI parity error */ #define INT_RIPERR (1 << 16) /* RISC instruction parity error */ #define INT_PABORT (1 << 17) /* PCI master or target abort */ #define INT_OCERR (1 << 18) /* invalid opcode */ #define INT_SCERR (1 << 19) /* sync counter overflow */ #define INT_RISC_EN (1 << 27) /* DMA controller running */ #define INT_RISCS_SHIFT 28 /* RISC status bits */ /* audio control bits */ #define CTL_FIFO_ENABLE (1 << 0) /* enable audio data FIFO */ #define CTL_RISC_ENABLE (1 << 1) /* enable audio DMA controller */ #define CTL_PKTP_4 (0 << 2) /* 
packet mode FIFO trigger point - 4 DWORDs */ #define CTL_PKTP_8 (1 << 2) /* 8 DWORDs */ #define CTL_PKTP_16 (2 << 2) /* 16 DWORDs */ #define CTL_ACAP_EN (1 << 4) /* enable audio capture */ #define CTL_DA_APP (1 << 5) /* GPIO input */ #define CTL_DA_IOM_AFE (0 << 6) /* audio A/D input */ #define CTL_DA_IOM_DA (1 << 6) /* digital audio input */ #define CTL_DA_SDR_SHIFT 8 /* DDF first stage decimation rate */ #define CTL_DA_SDR_MASK (0xf<< 8) #define CTL_DA_LMT (1 << 12) /* limit audio data values */ #define CTL_DA_ES2 (1 << 13) /* enable DDF stage 2 */ #define CTL_DA_SBR (1 << 14) /* samples rounded to 8 bits */ #define CTL_DA_DPM (1 << 15) /* data packet mode */ #define CTL_DA_LRD_SHIFT 16 /* ALRCK delay */ #define CTL_DA_MLB (1 << 21) /* MSB/LSB format */ #define CTL_DA_LRI (1 << 22) /* left/right indication */ #define CTL_DA_SCE (1 << 23) /* sample clock edge */ #define CTL_A_SEL_STV (0 << 24) /* TV tuner audio input */ #define CTL_A_SEL_SFM (1 << 24) /* FM audio input */ #define CTL_A_SEL_SML (2 << 24) /* mic/line audio input */ #define CTL_A_SEL_SMXC (3 << 24) /* MUX bypass */ #define CTL_A_SEL_SHIFT 24 #define CTL_A_SEL_MASK (3 << 24) #define CTL_A_PWRDN (1 << 26) /* analog audio power-down */ #define CTL_A_G2X (1 << 27) /* audio gain boost */ #define CTL_A_GAIN_SHIFT 28 /* audio input gain */ #define CTL_A_GAIN_MASK (0xf<<28) /* RISC instruction opcodes */ #define RISC_WRITE (0x1 << 28) /* write FIFO data to memory at address */ #define RISC_WRITEC (0x5 << 28) /* write FIFO data to memory at current address */ #define RISC_SKIP (0x2 << 28) /* skip FIFO data */ #define RISC_JUMP (0x7 << 28) /* jump to address */ #define RISC_SYNC (0x8 << 28) /* synchronize with FIFO */ /* RISC instruction bits */ #define RISC_BYTES_ENABLE (0xf << 12) /* byte enable bits */ #define RISC_RESYNC ( 1 << 15) /* disable FDSR errors */ #define RISC_SET_STATUS_SHIFT 16 /* set status bits */ #define RISC_RESET_STATUS_SHIFT 20 /* clear status bits */ #define RISC_IRQ ( 1 << 24) /* interrupt */ #define RISC_EOL ( 1 << 26) /* end of line */ #define RISC_SOL ( 1 << 27) /* start of line */ /* SYNC status bits values */ #define RISC_SYNC_FM1 0x6 #define RISC_SYNC_VRO 0xc #define ANALOG_CLOCK 1792000 #ifdef CONFIG_SND_BT87X_OVERCLOCK #define CLOCK_DIV_MIN 1 #else #define CLOCK_DIV_MIN 4 #endif #define CLOCK_DIV_MAX 15 #define ERROR_INTERRUPTS (INT_FBUS | INT_FTRGT | INT_PPERR | \ INT_RIPERR | INT_PABORT | INT_OCERR) #define MY_INTERRUPTS (INT_RISCI | ERROR_INTERRUPTS) /* SYNC, one WRITE per line, one extra WRITE per page boundary, SYNC, JUMP */ #define MAX_RISC_SIZE ((1 + 255 + (PAGE_ALIGN(255 * 4092) / PAGE_SIZE - 1) + 1 + 1) * 8) /* Cards with configuration information */ enum snd_bt87x_boardid { SND_BT87X_BOARD_UNKNOWN, SND_BT87X_BOARD_GENERIC, /* both an & dig interfaces, 32kHz */ SND_BT87X_BOARD_ANALOG, /* board with no external A/D */ SND_BT87X_BOARD_OSPREY2x0, SND_BT87X_BOARD_OSPREY440, SND_BT87X_BOARD_AVPHONE98, }; /* Card configuration */ struct snd_bt87x_board { int dig_rate; /* Digital input sampling rate */ u32 digital_fmt; /* Register settings for digital input */ unsigned no_analog:1; /* No analog input */ unsigned no_digital:1; /* No digital input */ }; static __devinitdata struct snd_bt87x_board snd_bt87x_boards[] = { [SND_BT87X_BOARD_UNKNOWN] = { .dig_rate = 32000, /* just a guess */ }, [SND_BT87X_BOARD_GENERIC] = { .dig_rate = 32000, }, [SND_BT87X_BOARD_ANALOG] = { .no_digital = 1, }, [SND_BT87X_BOARD_OSPREY2x0] = { .dig_rate = 44100, .digital_fmt = CTL_DA_LRI | (1 << CTL_DA_LRD_SHIFT), }, 
[SND_BT87X_BOARD_OSPREY440] = { .dig_rate = 32000, .digital_fmt = CTL_DA_LRI | (1 << CTL_DA_LRD_SHIFT), .no_analog = 1, }, [SND_BT87X_BOARD_AVPHONE98] = { .dig_rate = 48000, }, }; struct snd_bt87x { struct snd_card *card; struct pci_dev *pci; struct snd_bt87x_board board; void __iomem *mmio; int irq; spinlock_t reg_lock; unsigned long opened; struct snd_pcm_substream *substream; struct snd_dma_buffer dma_risc; unsigned int line_bytes; unsigned int lines; u32 reg_control; u32 interrupt_mask; int current_line; int pci_parity_errors; }; enum { DEVICE_DIGITAL, DEVICE_ANALOG }; static inline u32 snd_bt87x_readl(struct snd_bt87x *chip, u32 reg) { return readl(chip->mmio + reg); } static inline void snd_bt87x_writel(struct snd_bt87x *chip, u32 reg, u32 value) { writel(value, chip->mmio + reg); } static int snd_bt87x_create_risc(struct snd_bt87x *chip, struct snd_pcm_substream *substream, unsigned int periods, unsigned int period_bytes) { unsigned int i, offset; u32 *risc; if (chip->dma_risc.area == NULL) { if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), PAGE_ALIGN(MAX_RISC_SIZE), &chip->dma_risc) < 0) return -ENOMEM; } risc = (u32 *)chip->dma_risc.area; offset = 0; *risc++ = cpu_to_le32(RISC_SYNC | RISC_SYNC_FM1); *risc++ = cpu_to_le32(0); for (i = 0; i < periods; ++i) { u32 rest; rest = period_bytes; do { u32 cmd, len; unsigned int addr; len = PAGE_SIZE - (offset % PAGE_SIZE); if (len > rest) len = rest; cmd = RISC_WRITE | len; if (rest == period_bytes) { u32 block = i * 16 / periods; cmd |= RISC_SOL; cmd |= block << RISC_SET_STATUS_SHIFT; cmd |= (~block & 0xf) << RISC_RESET_STATUS_SHIFT; } if (len == rest) cmd |= RISC_EOL | RISC_IRQ; *risc++ = cpu_to_le32(cmd); addr = snd_pcm_sgbuf_get_addr(substream, offset); *risc++ = cpu_to_le32(addr); offset += len; rest -= len; } while (rest > 0); } *risc++ = cpu_to_le32(RISC_SYNC | RISC_SYNC_VRO); *risc++ = cpu_to_le32(0); *risc++ = cpu_to_le32(RISC_JUMP); *risc++ = cpu_to_le32(chip->dma_risc.addr); chip->line_bytes = period_bytes; chip->lines = periods; return 0; } static void snd_bt87x_free_risc(struct snd_bt87x *chip) { if (chip->dma_risc.area) { snd_dma_free_pages(&chip->dma_risc); chip->dma_risc.area = NULL; } } static void snd_bt87x_pci_error(struct snd_bt87x *chip, unsigned int status) { u16 pci_status; pci_read_config_word(chip->pci, PCI_STATUS, &pci_status); pci_status &= PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT | PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY; pci_write_config_word(chip->pci, PCI_STATUS, pci_status); if (pci_status != PCI_STATUS_DETECTED_PARITY) snd_printk(KERN_ERR "Aieee - PCI error! 
status %#08x, PCI status %#04x\n", status & ERROR_INTERRUPTS, pci_status); else { snd_printk(KERN_ERR "Aieee - PCI parity error detected!\n"); /* error 'handling' similar to aic7xxx_pci.c: */ chip->pci_parity_errors++; if (chip->pci_parity_errors > 20) { snd_printk(KERN_ERR "Too many PCI parity errors observed.\n"); snd_printk(KERN_ERR "Some device on this bus is generating bad parity.\n"); snd_printk(KERN_ERR "This is an error *observed by*, not *generated by*, this card.\n"); snd_printk(KERN_ERR "PCI parity error checking has been disabled.\n"); chip->interrupt_mask &= ~(INT_PPERR | INT_RIPERR); snd_bt87x_writel(chip, REG_INT_MASK, chip->interrupt_mask); } } } static irqreturn_t snd_bt87x_interrupt(int irq, void *dev_id) { struct snd_bt87x *chip = dev_id; unsigned int status, irq_status; status = snd_bt87x_readl(chip, REG_INT_STAT); irq_status = status & chip->interrupt_mask; if (!irq_status) return IRQ_NONE; snd_bt87x_writel(chip, REG_INT_STAT, irq_status); if (irq_status & ERROR_INTERRUPTS) { if (irq_status & (INT_FBUS | INT_FTRGT)) snd_printk(KERN_WARNING "FIFO overrun, status %#08x\n", status); if (irq_status & INT_OCERR) snd_printk(KERN_ERR "internal RISC error, status %#08x\n", status); if (irq_status & (INT_PPERR | INT_RIPERR | INT_PABORT)) snd_bt87x_pci_error(chip, irq_status); } if ((irq_status & INT_RISCI) && (chip->reg_control & CTL_ACAP_EN)) { int current_block, irq_block; /* assume that exactly one line has been recorded */ chip->current_line = (chip->current_line + 1) % chip->lines; /* but check if some interrupts have been skipped */ current_block = chip->current_line * 16 / chip->lines; irq_block = status >> INT_RISCS_SHIFT; if (current_block != irq_block) chip->current_line = (irq_block * chip->lines + 15) / 16; snd_pcm_period_elapsed(chip->substream); } return IRQ_HANDLED; } static struct snd_pcm_hardware snd_bt87x_digital_hw = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_BATCH, .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = 0, /* set at runtime */ .channels_min = 2, .channels_max = 2, .buffer_bytes_max = 255 * 4092, .period_bytes_min = 32, .period_bytes_max = 4092, .periods_min = 2, .periods_max = 255, }; static struct snd_pcm_hardware snd_bt87x_analog_hw = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_BATCH, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8, .rates = SNDRV_PCM_RATE_KNOT, .rate_min = ANALOG_CLOCK / CLOCK_DIV_MAX, .rate_max = ANALOG_CLOCK / CLOCK_DIV_MIN, .channels_min = 1, .channels_max = 1, .buffer_bytes_max = 255 * 4092, .period_bytes_min = 32, .period_bytes_max = 4092, .periods_min = 2, .periods_max = 255, }; static int snd_bt87x_set_digital_hw(struct snd_bt87x *chip, struct snd_pcm_runtime *runtime) { chip->reg_control |= CTL_DA_IOM_DA | CTL_A_PWRDN; runtime->hw = snd_bt87x_digital_hw; runtime->hw.rates = snd_pcm_rate_to_rate_bit(chip->board.dig_rate); runtime->hw.rate_min = chip->board.dig_rate; runtime->hw.rate_max = chip->board.dig_rate; return 0; } static int snd_bt87x_set_analog_hw(struct snd_bt87x *chip, struct snd_pcm_runtime *runtime) { static struct snd_ratnum analog_clock = { .num = ANALOG_CLOCK, .den_min = CLOCK_DIV_MIN, .den_max = CLOCK_DIV_MAX, .den_step = 1 }; static struct snd_pcm_hw_constraint_ratnums constraint_rates = { .nrats = 1, .rats = &analog_clock }; chip->reg_control &= ~(CTL_DA_IOM_DA | CTL_A_PWRDN); runtime->hw = snd_bt87x_analog_hw; 
return snd_pcm_hw_constraint_ratnums(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &constraint_rates); } static int snd_bt87x_pcm_open(struct snd_pcm_substream *substream) { struct snd_bt87x *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; int err; if (test_and_set_bit(0, &chip->opened)) return -EBUSY; if (substream->pcm->device == DEVICE_DIGITAL) err = snd_bt87x_set_digital_hw(chip, runtime); else err = snd_bt87x_set_analog_hw(chip, runtime); if (err < 0) goto _error; err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (err < 0) goto _error; chip->substream = substream; return 0; _error: clear_bit(0, &chip->opened); smp_mb__after_clear_bit(); return err; } static int snd_bt87x_close(struct snd_pcm_substream *substream) { struct snd_bt87x *chip = snd_pcm_substream_chip(substream); spin_lock_irq(&chip->reg_lock); chip->reg_control |= CTL_A_PWRDN; snd_bt87x_writel(chip, REG_GPIO_DMA_CTL, chip->reg_control); spin_unlock_irq(&chip->reg_lock); chip->substream = NULL; clear_bit(0, &chip->opened); smp_mb__after_clear_bit(); return 0; } static int snd_bt87x_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct snd_bt87x *chip = snd_pcm_substream_chip(substream); int err; err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); if (err < 0) return err; return snd_bt87x_create_risc(chip, substream, params_periods(hw_params), params_period_bytes(hw_params)); } static int snd_bt87x_hw_free(struct snd_pcm_substream *substream) { struct snd_bt87x *chip = snd_pcm_substream_chip(substream); snd_bt87x_free_risc(chip); snd_pcm_lib_free_pages(substream); return 0; } static int snd_bt87x_prepare(struct snd_pcm_substream *substream) { struct snd_bt87x *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; int decimation; spin_lock_irq(&chip->reg_lock); chip->reg_control &= ~(CTL_DA_SDR_MASK | CTL_DA_SBR); decimation = (ANALOG_CLOCK + runtime->rate / 4) / runtime->rate; chip->reg_control |= decimation << CTL_DA_SDR_SHIFT; if (runtime->format == SNDRV_PCM_FORMAT_S8) chip->reg_control |= CTL_DA_SBR; snd_bt87x_writel(chip, REG_GPIO_DMA_CTL, chip->reg_control); spin_unlock_irq(&chip->reg_lock); return 0; } static int snd_bt87x_start(struct snd_bt87x *chip) { spin_lock(&chip->reg_lock); chip->current_line = 0; chip->reg_control |= CTL_FIFO_ENABLE | CTL_RISC_ENABLE | CTL_ACAP_EN; snd_bt87x_writel(chip, REG_RISC_STRT_ADD, chip->dma_risc.addr); snd_bt87x_writel(chip, REG_PACKET_LEN, chip->line_bytes | (chip->lines << 16)); snd_bt87x_writel(chip, REG_INT_MASK, chip->interrupt_mask); snd_bt87x_writel(chip, REG_GPIO_DMA_CTL, chip->reg_control); spin_unlock(&chip->reg_lock); return 0; } static int snd_bt87x_stop(struct snd_bt87x *chip) { spin_lock(&chip->reg_lock); chip->reg_control &= ~(CTL_FIFO_ENABLE | CTL_RISC_ENABLE | CTL_ACAP_EN); snd_bt87x_writel(chip, REG_GPIO_DMA_CTL, chip->reg_control); snd_bt87x_writel(chip, REG_INT_MASK, 0); snd_bt87x_writel(chip, REG_INT_STAT, MY_INTERRUPTS); spin_unlock(&chip->reg_lock); return 0; } static int snd_bt87x_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_bt87x *chip = snd_pcm_substream_chip(substream); switch (cmd) { case SNDRV_PCM_TRIGGER_START: return snd_bt87x_start(chip); case SNDRV_PCM_TRIGGER_STOP: return snd_bt87x_stop(chip); default: return -EINVAL; } } static snd_pcm_uframes_t snd_bt87x_pointer(struct snd_pcm_substream *substream) { struct snd_bt87x *chip = snd_pcm_substream_chip(substream); 
struct snd_pcm_runtime *runtime = substream->runtime; return (snd_pcm_uframes_t)bytes_to_frames(runtime, chip->current_line * chip->line_bytes); } static struct snd_pcm_ops snd_bt87x_pcm_ops = { .open = snd_bt87x_pcm_open, .close = snd_bt87x_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_bt87x_hw_params, .hw_free = snd_bt87x_hw_free, .prepare = snd_bt87x_prepare, .trigger = snd_bt87x_trigger, .pointer = snd_bt87x_pointer, .page = snd_pcm_sgbuf_ops_page, }; static int snd_bt87x_capture_volume_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *info) { info->type = SNDRV_CTL_ELEM_TYPE_INTEGER; info->count = 1; info->value.integer.min = 0; info->value.integer.max = 15; return 0; } static int snd_bt87x_capture_volume_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct snd_bt87x *chip = snd_kcontrol_chip(kcontrol); value->value.integer.value[0] = (chip->reg_control & CTL_A_GAIN_MASK) >> CTL_A_GAIN_SHIFT; return 0; } static int snd_bt87x_capture_volume_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct snd_bt87x *chip = snd_kcontrol_chip(kcontrol); u32 old_control; int changed; spin_lock_irq(&chip->reg_lock); old_control = chip->reg_control; chip->reg_control = (chip->reg_control & ~CTL_A_GAIN_MASK) | (value->value.integer.value[0] << CTL_A_GAIN_SHIFT); snd_bt87x_writel(chip, REG_GPIO_DMA_CTL, chip->reg_control); changed = old_control != chip->reg_control; spin_unlock_irq(&chip->reg_lock); return changed; } static struct snd_kcontrol_new snd_bt87x_capture_volume = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Capture Volume", .info = snd_bt87x_capture_volume_info, .get = snd_bt87x_capture_volume_get, .put = snd_bt87x_capture_volume_put, }; #define snd_bt87x_capture_boost_info snd_ctl_boolean_mono_info static int snd_bt87x_capture_boost_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct snd_bt87x *chip = snd_kcontrol_chip(kcontrol); value->value.integer.value[0] = !! (chip->reg_control & CTL_A_G2X); return 0; } static int snd_bt87x_capture_boost_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct snd_bt87x *chip = snd_kcontrol_chip(kcontrol); u32 old_control; int changed; spin_lock_irq(&chip->reg_lock); old_control = chip->reg_control; chip->reg_control = (chip->reg_control & ~CTL_A_G2X) | (value->value.integer.value[0] ? 
CTL_A_G2X : 0); snd_bt87x_writel(chip, REG_GPIO_DMA_CTL, chip->reg_control); changed = chip->reg_control != old_control; spin_unlock_irq(&chip->reg_lock); return changed; } static struct snd_kcontrol_new snd_bt87x_capture_boost = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Capture Boost", .info = snd_bt87x_capture_boost_info, .get = snd_bt87x_capture_boost_get, .put = snd_bt87x_capture_boost_put, }; static int snd_bt87x_capture_source_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *info) { static const char *const texts[3] = {"TV Tuner", "FM", "Mic/Line"}; return snd_ctl_enum_info(info, 1, 3, texts); } static int snd_bt87x_capture_source_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct snd_bt87x *chip = snd_kcontrol_chip(kcontrol); value->value.enumerated.item[0] = (chip->reg_control & CTL_A_SEL_MASK) >> CTL_A_SEL_SHIFT; return 0; } static int snd_bt87x_capture_source_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct snd_bt87x *chip = snd_kcontrol_chip(kcontrol); u32 old_control; int changed; spin_lock_irq(&chip->reg_lock); old_control = chip->reg_control; chip->reg_control = (chip->reg_control & ~CTL_A_SEL_MASK) | (value->value.enumerated.item[0] << CTL_A_SEL_SHIFT); snd_bt87x_writel(chip, REG_GPIO_DMA_CTL, chip->reg_control); changed = chip->reg_control != old_control; spin_unlock_irq(&chip->reg_lock); return changed; } static struct snd_kcontrol_new snd_bt87x_capture_source = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Capture Source", .info = snd_bt87x_capture_source_info, .get = snd_bt87x_capture_source_get, .put = snd_bt87x_capture_source_put, }; static int snd_bt87x_free(struct snd_bt87x *chip) { if (chip->mmio) snd_bt87x_stop(chip); if (chip->irq >= 0) free_irq(chip->irq, chip); if (chip->mmio) iounmap(chip->mmio); pci_release_regions(chip->pci); pci_disable_device(chip->pci); kfree(chip); return 0; } static int snd_bt87x_dev_free(struct snd_device *device) { struct snd_bt87x *chip = device->device_data; return snd_bt87x_free(chip); } static int __devinit snd_bt87x_pcm(struct snd_bt87x *chip, int device, char *name) { int err; struct snd_pcm *pcm; err = snd_pcm_new(chip->card, name, device, 0, 1, &pcm); if (err < 0) return err; pcm->private_data = chip; strcpy(pcm->name, name); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_bt87x_pcm_ops); return snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG, snd_dma_pci_data(chip->pci), 128 * 1024, ALIGN(255 * 4092, 1024)); } static int __devinit snd_bt87x_create(struct snd_card *card, struct pci_dev *pci, struct snd_bt87x **rchip) { struct snd_bt87x *chip; int err; static struct snd_device_ops ops = { .dev_free = snd_bt87x_dev_free }; *rchip = NULL; err = pci_enable_device(pci); if (err < 0) return err; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (!chip) { pci_disable_device(pci); return -ENOMEM; } chip->card = card; chip->pci = pci; chip->irq = -1; spin_lock_init(&chip->reg_lock); if ((err = pci_request_regions(pci, "Bt87x audio")) < 0) { kfree(chip); pci_disable_device(pci); return err; } chip->mmio = pci_ioremap_bar(pci, 0); if (!chip->mmio) { snd_printk(KERN_ERR "cannot remap io memory\n"); err = -ENOMEM; goto fail; } chip->reg_control = CTL_A_PWRDN | CTL_DA_ES2 | CTL_PKTP_16 | (15 << CTL_DA_SDR_SHIFT); chip->interrupt_mask = MY_INTERRUPTS; snd_bt87x_writel(chip, REG_GPIO_DMA_CTL, chip->reg_control); snd_bt87x_writel(chip, REG_INT_MASK, 0); snd_bt87x_writel(chip, REG_INT_STAT, MY_INTERRUPTS); err = request_irq(pci->irq, 
snd_bt87x_interrupt, IRQF_SHARED, KBUILD_MODNAME, chip); if (err < 0) { snd_printk(KERN_ERR "cannot grab irq %d\n", pci->irq); goto fail; } chip->irq = pci->irq; pci_set_master(pci); synchronize_irq(chip->irq); err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); if (err < 0) goto fail; snd_card_set_dev(card, &pci->dev); *rchip = chip; return 0; fail: snd_bt87x_free(chip); return err; } #define BT_DEVICE(chip, subvend, subdev, id) \ { .vendor = PCI_VENDOR_ID_BROOKTREE, \ .device = chip, \ .subvendor = subvend, .subdevice = subdev, \ .driver_data = SND_BT87X_BOARD_ ## id } /* driver_data is the card id for that device */ static DEFINE_PCI_DEVICE_TABLE(snd_bt87x_ids) = { /* Hauppauge WinTV series */ BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x0070, 0x13eb, GENERIC), /* Hauppauge WinTV series */ BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_879, 0x0070, 0x13eb, GENERIC), /* Viewcast Osprey 200 */ BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x0070, 0xff01, OSPREY2x0), /* Viewcast Osprey 440 (rate is configurable via gpio) */ BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x0070, 0xff07, OSPREY440), /* ATI TV-Wonder */ BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x1002, 0x0001, GENERIC), /* Leadtek Winfast tv 2000xp delux */ BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x107d, 0x6606, GENERIC), /* Pinnacle PCTV */ BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x11bd, 0x0012, GENERIC), /* Voodoo TV 200 */ BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x121a, 0x3000, GENERIC), /* Askey Computer Corp. MagicTView'99 */ BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x144f, 0x3000, GENERIC), /* AVerMedia Studio No. 103, 203, ...? */ BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x1461, 0x0003, AVPHONE98), /* Prolink PixelView PV-M4900 */ BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x1554, 0x4011, GENERIC), /* Pinnacle Studio PCTV rave */ BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0xbd11, 0x1200, GENERIC), { } }; MODULE_DEVICE_TABLE(pci, snd_bt87x_ids); /* cards known not to have audio * (DVB cards use the audio function to transfer MPEG data) */ static struct { unsigned short subvendor, subdevice; } blacklist[] __devinitdata = { {0x0071, 0x0101}, /* Nebula Electronics DigiTV */ {0x11bd, 0x001c}, /* Pinnacle PCTV Sat */ {0x11bd, 0x0026}, /* Pinnacle PCTV SAT CI */ {0x1461, 0x0761}, /* AVermedia AverTV DVB-T */ {0x1461, 0x0771}, /* AVermedia DVB-T 771 */ {0x1822, 0x0001}, /* Twinhan VisionPlus DVB-T */ {0x18ac, 0xd500}, /* DVICO FusionHDTV 5 Lite */ {0x18ac, 0xdb10}, /* DVICO FusionHDTV DVB-T Lite */ {0x18ac, 0xdb11}, /* Ultraview DVB-T Lite */ {0x270f, 0xfc00}, /* Chaintech Digitop DST-1000 DVB-S */ {0x7063, 0x2000}, /* pcHDTV HD-2000 TV */ }; static struct pci_driver driver; /* return the id of the card, or a negative value if it's blacklisted */ static int __devinit snd_bt87x_detect_card(struct pci_dev *pci) { int i; const struct pci_device_id *supported; supported = pci_match_id(snd_bt87x_ids, pci); if (supported && supported->driver_data > 0) return supported->driver_data; for (i = 0; i < ARRAY_SIZE(blacklist); ++i) if (blacklist[i].subvendor == pci->subsystem_vendor && blacklist[i].subdevice == pci->subsystem_device) { snd_printdd(KERN_INFO "card %#04x-%#04x:%#04x has no audio\n", pci->device, pci->subsystem_vendor, pci->subsystem_device); return -EBUSY; } snd_printk(KERN_INFO "unknown card %#04x-%#04x:%#04x\n", pci->device, pci->subsystem_vendor, pci->subsystem_device); snd_printk(KERN_DEBUG "please mail id, board name, and, " "if it works, the correct digital_rate option to " "<alsa-devel@alsa-project.org>\n"); return SND_BT87X_BOARD_UNKNOWN; } static int 
__devinit snd_bt87x_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct snd_bt87x *chip; int err; enum snd_bt87x_boardid boardid; if (!pci_id->driver_data) { err = snd_bt87x_detect_card(pci); if (err < 0) return -ENODEV; boardid = err; } else boardid = pci_id->driver_data; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { ++dev; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) return err; err = snd_bt87x_create(card, pci, &chip); if (err < 0) goto _error; memcpy(&chip->board, &snd_bt87x_boards[boardid], sizeof(chip->board)); if (!chip->board.no_digital) { if (digital_rate[dev] > 0) chip->board.dig_rate = digital_rate[dev]; chip->reg_control |= chip->board.digital_fmt; err = snd_bt87x_pcm(chip, DEVICE_DIGITAL, "Bt87x Digital"); if (err < 0) goto _error; } if (!chip->board.no_analog) { err = snd_bt87x_pcm(chip, DEVICE_ANALOG, "Bt87x Analog"); if (err < 0) goto _error; err = snd_ctl_add(card, snd_ctl_new1( &snd_bt87x_capture_volume, chip)); if (err < 0) goto _error; err = snd_ctl_add(card, snd_ctl_new1( &snd_bt87x_capture_boost, chip)); if (err < 0) goto _error; err = snd_ctl_add(card, snd_ctl_new1( &snd_bt87x_capture_source, chip)); if (err < 0) goto _error; } snd_printk(KERN_INFO "bt87x%d: Using board %d, %sanalog, %sdigital " "(rate %d Hz)\n", dev, boardid, chip->board.no_analog ? "no " : "", chip->board.no_digital ? "no " : "", chip->board.dig_rate); strcpy(card->driver, "Bt87x"); sprintf(card->shortname, "Brooktree Bt%x", pci->device); sprintf(card->longname, "%s at %#llx, irq %i", card->shortname, (unsigned long long)pci_resource_start(pci, 0), chip->irq); strcpy(card->mixername, "Bt87x"); err = snd_card_register(card); if (err < 0) goto _error; pci_set_drvdata(pci, card); ++dev; return 0; _error: snd_card_free(card); return err; } static void __devexit snd_bt87x_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } /* default entries for all Bt87x cards - it's not exported */ /* driver_data is set to 0 to call detection */ static DEFINE_PCI_DEVICE_TABLE(snd_bt87x_default_ids) = { BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, PCI_ANY_ID, PCI_ANY_ID, UNKNOWN), BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_879, PCI_ANY_ID, PCI_ANY_ID, UNKNOWN), { } }; static struct pci_driver driver = { .name = KBUILD_MODNAME, .id_table = snd_bt87x_ids, .probe = snd_bt87x_probe, .remove = __devexit_p(snd_bt87x_remove), }; static int __init alsa_card_bt87x_init(void) { if (load_all) driver.id_table = snd_bt87x_default_ids; return pci_register_driver(&driver); } static void __exit alsa_card_bt87x_exit(void) { pci_unregister_driver(&driver); } module_init(alsa_card_bt87x_init) module_exit(alsa_card_bt87x_exit)
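On the analog path, snd_bt87x_prepare() above derives a decimation divider from the fixed 1792000 Hz A/D clock, so only rates of the form ANALOG_CLOCK/n with n between CLOCK_DIV_MIN and CLOCK_DIV_MAX are reachable. The sketch below replays that divider arithmetic with illustrative requested rates; the explicit clamping here is an addition for safety, since in the driver the ratnum constraint already keeps the rate in range.

#include <stdio.h>

#define ANALOG_CLOCK	1792000
#define CLOCK_DIV_MIN	4
#define CLOCK_DIV_MAX	15

int main(void)
{
	static const int rates[] = { 119466, 179200, 448000 };
	size_t i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		int rate = rates[i];
		/* same rounding as snd_bt87x_prepare() above */
		int div = (ANALOG_CLOCK + rate / 4) / rate;

		if (div < CLOCK_DIV_MIN)
			div = CLOCK_DIV_MIN;
		if (div > CLOCK_DIV_MAX)
			div = CLOCK_DIV_MAX;
		printf("requested %6d Hz -> divider %2d -> actual %6d Hz\n",
		       rate, div, ANALOG_CLOCK / div);
	}
	return 0;
}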
gpl-2.0
CyanogenMod/android_kernel_lge_ls970
drivers/mtd/maps/h720x-flash.c
5145
2747
/* * Flash memory access on Hynix GMS30C7201/HMS30C7202 based * evaluation boards * * (C) 2002 Jungjun Kim <jungjun.kim@hynix.com> * 2003 Thomas Gleixner <tglx@linutronix.de> */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <mach/hardware.h> #include <asm/io.h> static struct mtd_info *mymtd; static struct map_info h720x_map = { .name = "H720X", .bankwidth = 4, .size = H720X_FLASH_SIZE, .phys = H720X_FLASH_PHYS, }; static struct mtd_partition h720x_partitions[] = { { .name = "ArMon", .size = 0x00080000, .offset = 0, .mask_flags = MTD_WRITEABLE },{ .name = "Env", .size = 0x00040000, .offset = 0x00080000, .mask_flags = MTD_WRITEABLE },{ .name = "Kernel", .size = 0x00180000, .offset = 0x000c0000, .mask_flags = MTD_WRITEABLE },{ .name = "Ramdisk", .size = 0x00400000, .offset = 0x00240000, .mask_flags = MTD_WRITEABLE },{ .name = "jffs2", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND } }; #define NUM_PARTITIONS ARRAY_SIZE(h720x_partitions) /* * Initialize FLASH support */ static int __init h720x_mtd_init(void) { h720x_map.virt = ioremap(h720x_map.phys, h720x_map.size); if (!h720x_map.virt) { printk(KERN_ERR "H720x-MTD: ioremap failed\n"); return -EIO; } simple_map_init(&h720x_map); // Probe for flash bankwidth 4 printk (KERN_INFO "H720x-MTD probing 32bit FLASH\n"); mymtd = do_map_probe("cfi_probe", &h720x_map); if (!mymtd) { printk (KERN_INFO "H720x-MTD probing 16bit FLASH\n"); // Probe for bankwidth 2 h720x_map.bankwidth = 2; mymtd = do_map_probe("cfi_probe", &h720x_map); } if (mymtd) { mymtd->owner = THIS_MODULE; mtd_device_parse_register(mymtd, NULL, NULL, h720x_partitions, NUM_PARTITIONS); return 0; } iounmap((void *)h720x_map.virt); return -ENXIO; } /* * Cleanup */ static void __exit h720x_mtd_cleanup(void) { if (mymtd) { mtd_device_unregister(mymtd); map_destroy(mymtd); } if (h720x_map.virt) { iounmap((void *)h720x_map.virt); h720x_map.virt = 0; } } module_init(h720x_mtd_init); module_exit(h720x_mtd_cleanup); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>"); MODULE_DESCRIPTION("MTD map driver for Hynix evaluation boards");
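The static partition table above is only consistent if each partition starts exactly where the previous one ends, with the jffs2 partition appended at the end (MTDPART_OFS_APPEND). A standalone check of those offsets, using the same numbers as the driver:

#include <stdio.h>

struct part { const char *name; unsigned long size, offset; };

int main(void)
{
	static const struct part parts[] = {
		{ "ArMon",   0x00080000, 0x00000000 },
		{ "Env",     0x00040000, 0x00080000 },
		{ "Kernel",  0x00180000, 0x000c0000 },
		{ "Ramdisk", 0x00400000, 0x00240000 },
	};
	unsigned long next = 0;
	size_t i;

	for (i = 0; i < sizeof(parts) / sizeof(parts[0]); i++) {
		if (parts[i].offset != next)
			printf("gap or overlap before %s\n", parts[i].name);
		next = parts[i].offset + parts[i].size;
	}
	/* the jffs2 partition (MTDPART_OFS_APPEND) starts here */
	printf("jffs2 appends at 0x%06lx\n", next);	/* 0x640000 */
	return 0;
}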
gpl-2.0
cholokei/Samsung_Kernel_SHV-E120K
fs/ocfs2/export.c
7961
6404
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * export.c * * Functions to facilitate NFS exporting * * Copyright (C) 2002, 2005 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/fs.h> #include <linux/types.h> #include <cluster/masklog.h> #include "ocfs2.h" #include "alloc.h" #include "dir.h" #include "dlmglue.h" #include "dcache.h" #include "export.h" #include "inode.h" #include "buffer_head_io.h" #include "suballoc.h" #include "ocfs2_trace.h" struct ocfs2_inode_handle { u64 ih_blkno; u32 ih_generation; }; static struct dentry *ocfs2_get_dentry(struct super_block *sb, struct ocfs2_inode_handle *handle) { struct inode *inode; struct ocfs2_super *osb = OCFS2_SB(sb); u64 blkno = handle->ih_blkno; int status, set; struct dentry *result; trace_ocfs2_get_dentry_begin(sb, handle, (unsigned long long)blkno); if (blkno == 0) { result = ERR_PTR(-ESTALE); goto bail; } inode = ocfs2_ilookup(sb, blkno); /* * If the inode exists in memory, we only need to check its * generation number */ if (inode) goto check_gen; /* * This will synchronize us against ocfs2_delete_inode() on * all nodes */ status = ocfs2_nfs_sync_lock(osb, 1); if (status < 0) { mlog(ML_ERROR, "getting nfs sync lock(EX) failed %d\n", status); goto check_err; } status = ocfs2_test_inode_bit(osb, blkno, &set); trace_ocfs2_get_dentry_test_bit(status, set); if (status < 0) { if (status == -EINVAL) { /* * The blkno NFS gave us doesn't even show up * as an inode, so we return -ESTALE to be * nice */ status = -ESTALE; } else mlog(ML_ERROR, "test inode bit failed %d\n", status); goto unlock_nfs_sync; } /* If the inode allocator bit is clear, this inode must be stale */ if (!set) { status = -ESTALE; goto unlock_nfs_sync; } inode = ocfs2_iget(osb, blkno, 0, 0); unlock_nfs_sync: ocfs2_nfs_sync_unlock(osb, 1); check_err: if (status < 0) { if (status == -ESTALE) { trace_ocfs2_get_dentry_stale((unsigned long long)blkno, handle->ih_generation); } result = ERR_PTR(status); goto bail; } if (IS_ERR(inode)) { mlog_errno(PTR_ERR(inode)); result = (void *)inode; goto bail; } check_gen: if (handle->ih_generation != inode->i_generation) { iput(inode); trace_ocfs2_get_dentry_generation((unsigned long long)blkno, handle->ih_generation, inode->i_generation); result = ERR_PTR(-ESTALE); goto bail; } result = d_obtain_alias(inode); if (IS_ERR(result)) mlog_errno(PTR_ERR(result)); bail: trace_ocfs2_get_dentry_end(result); return result; } static struct dentry *ocfs2_get_parent(struct dentry *child) { int status; u64 blkno; struct dentry *parent; struct inode *dir = child->d_inode; trace_ocfs2_get_parent(child, child->d_name.len, child->d_name.name, (unsigned long long)OCFS2_I(dir)->ip_blkno); status = ocfs2_inode_lock(dir, NULL, 0); if (status < 0) { if (status != -ENOENT) mlog_errno(status); parent = ERR_PTR(status); goto bail; } status =
ocfs2_lookup_ino_from_name(dir, "..", 2, &blkno); if (status < 0) { parent = ERR_PTR(-ENOENT); goto bail_unlock; } parent = d_obtain_alias(ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0, 0)); bail_unlock: ocfs2_inode_unlock(dir, 0); bail: trace_ocfs2_get_parent_end(parent); return parent; } static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len, int connectable) { struct inode *inode = dentry->d_inode; int len = *max_len; int type = 1; u64 blkno; u32 generation; __le32 *fh = (__force __le32 *) fh_in; trace_ocfs2_encode_fh_begin(dentry, dentry->d_name.len, dentry->d_name.name, fh, len, connectable); if (connectable && (len < 6)) { *max_len = 6; type = 255; goto bail; } else if (len < 3) { *max_len = 3; type = 255; goto bail; } blkno = OCFS2_I(inode)->ip_blkno; generation = inode->i_generation; trace_ocfs2_encode_fh_self((unsigned long long)blkno, generation); len = 3; fh[0] = cpu_to_le32((u32)(blkno >> 32)); fh[1] = cpu_to_le32((u32)(blkno & 0xffffffff)); fh[2] = cpu_to_le32(generation); if (connectable && !S_ISDIR(inode->i_mode)) { struct inode *parent; spin_lock(&dentry->d_lock); parent = dentry->d_parent->d_inode; blkno = OCFS2_I(parent)->ip_blkno; generation = parent->i_generation; fh[3] = cpu_to_le32((u32)(blkno >> 32)); fh[4] = cpu_to_le32((u32)(blkno & 0xffffffff)); fh[5] = cpu_to_le32(generation); spin_unlock(&dentry->d_lock); len = 6; type = 2; trace_ocfs2_encode_fh_parent((unsigned long long)blkno, generation); } *max_len = len; bail: trace_ocfs2_encode_fh_type(type); return type; } static struct dentry *ocfs2_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { struct ocfs2_inode_handle handle; if (fh_len < 3 || fh_type > 2) return NULL; handle.ih_blkno = (u64)le32_to_cpu(fid->raw[0]) << 32; handle.ih_blkno |= (u64)le32_to_cpu(fid->raw[1]); handle.ih_generation = le32_to_cpu(fid->raw[2]); return ocfs2_get_dentry(sb, &handle); } static struct dentry *ocfs2_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { struct ocfs2_inode_handle parent; if (fh_type != 2 || fh_len < 6) return NULL; parent.ih_blkno = (u64)le32_to_cpu(fid->raw[3]) << 32; parent.ih_blkno |= (u64)le32_to_cpu(fid->raw[4]); parent.ih_generation = le32_to_cpu(fid->raw[5]); return ocfs2_get_dentry(sb, &parent); } const struct export_operations ocfs2_export_ops = { .encode_fh = ocfs2_encode_fh, .fh_to_dentry = ocfs2_fh_to_dentry, .fh_to_parent = ocfs2_fh_to_parent, .get_parent = ocfs2_get_parent, };
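The handle layout produced by ocfs2_encode_fh() above is worth seeing in isolation: the 64-bit block number is split across two 32-bit words, the generation fills a third, and a parent triple follows for connectable handles. Below is a minimal userspace sketch of just that packing; the helper names and test values are invented, and the kernel's cpu_to_le32()/le32_to_cpu() byte-order handling is deliberately left out.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the raw handle words written by
 * ocfs2_encode_fh(): fh[0]/fh[1] carry the block number, fh[2] the
 * inode generation. */
static void pack_handle(uint32_t fh[3], uint64_t blkno, uint32_t generation)
{
	fh[0] = (uint32_t)(blkno >> 32);        /* high half of block number */
	fh[1] = (uint32_t)(blkno & 0xffffffff); /* low half of block number */
	fh[2] = generation;
}

static uint64_t unpack_blkno(const uint32_t fh[3])
{
	return ((uint64_t)fh[0] << 32) | (uint64_t)fh[1];
}

int main(void)
{
	uint32_t fh[3];
	uint64_t blkno = 0x123456789abcULL; /* made-up block number */

	pack_handle(fh, blkno, 42);
	printf("round-trip blkno: %#llx gen: %u\n",
	       (unsigned long long)unpack_blkno(fh), (unsigned)fh[2]);
	return 0;
}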
gpl-2.0
JackpotClavin/android_kernel_lge_g2
net/dccp/ccids/lib/loss_interval.c
8217
5650
/* * Copyright (c) 2007 The University of Aberdeen, Scotland, UK * Copyright (c) 2005-7 The University of Waikato, Hamilton, New Zealand. * Copyright (c) 2005-7 Ian McDonald <ian.mcdonald@jandi.co.nz> * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <net/sock.h> #include "tfrc.h" static struct kmem_cache *tfrc_lh_slab __read_mostly; /* Loss Interval weights from [RFC 3448, 5.4], scaled by 10 */ static const int tfrc_lh_weights[NINTERVAL] = { 10, 10, 10, 10, 8, 6, 4, 2 }; /* implements LIFO semantics on the array */ static inline u8 LIH_INDEX(const u8 ctr) { return LIH_SIZE - 1 - (ctr % LIH_SIZE); } /* the `counter' index always points at the next entry to be populated */ static inline struct tfrc_loss_interval *tfrc_lh_peek(struct tfrc_loss_hist *lh) { return lh->counter ? lh->ring[LIH_INDEX(lh->counter - 1)] : NULL; } /* given i with 0 <= i <= k, return I_i as per the rfc3448bis notation */ static inline u32 tfrc_lh_get_interval(struct tfrc_loss_hist *lh, const u8 i) { BUG_ON(i >= lh->counter); return lh->ring[LIH_INDEX(lh->counter - i - 1)]->li_length; } /* * On-demand allocation and de-allocation of entries */ static struct tfrc_loss_interval *tfrc_lh_demand_next(struct tfrc_loss_hist *lh) { if (lh->ring[LIH_INDEX(lh->counter)] == NULL) lh->ring[LIH_INDEX(lh->counter)] = kmem_cache_alloc(tfrc_lh_slab, GFP_ATOMIC); return lh->ring[LIH_INDEX(lh->counter)]; } void tfrc_lh_cleanup(struct tfrc_loss_hist *lh) { if (!tfrc_lh_is_initialised(lh)) return; for (lh->counter = 0; lh->counter < LIH_SIZE; lh->counter++) if (lh->ring[LIH_INDEX(lh->counter)] != NULL) { kmem_cache_free(tfrc_lh_slab, lh->ring[LIH_INDEX(lh->counter)]); lh->ring[LIH_INDEX(lh->counter)] = NULL; } } static void tfrc_lh_calc_i_mean(struct tfrc_loss_hist *lh) { u32 i_i, i_tot0 = 0, i_tot1 = 0, w_tot = 0; int i, k = tfrc_lh_length(lh) - 1; /* k is as in rfc3448bis, 5.4 */ if (k <= 0) return; for (i = 0; i <= k; i++) { i_i = tfrc_lh_get_interval(lh, i); if (i < k) { i_tot0 += i_i * tfrc_lh_weights[i]; w_tot += tfrc_lh_weights[i]; } if (i > 0) i_tot1 += i_i * tfrc_lh_weights[i-1]; } lh->i_mean = max(i_tot0, i_tot1) / w_tot; } /** * tfrc_lh_update_i_mean - Update the `open' loss interval I_0 * For recomputing p: returns `true' if p > p_prev <=> 1/p < 1/p_prev */ u8 tfrc_lh_update_i_mean(struct tfrc_loss_hist *lh, struct sk_buff *skb) { struct tfrc_loss_interval *cur = tfrc_lh_peek(lh); u32 old_i_mean = lh->i_mean; s64 len; if (cur == NULL) /* not initialised */ return 0; len = dccp_delta_seqno(cur->li_seqno, DCCP_SKB_CB(skb)->dccpd_seq) + 1; if (len - (s64)cur->li_length <= 0) /* duplicate or reordered */ return 0; if (SUB16(dccp_hdr(skb)->dccph_ccval, cur->li_ccval) > 4) /* * Implements RFC 4342, 10.2: * If a packet S (skb) exists whose seqno comes `after' the one * starting the current loss interval (cur) and if the modulo-16 * distance from C(cur) to C(S) is greater than 4, consider all * subsequent packets as belonging to a new loss interval. This * test is necessary since CCVal may wrap between intervals. 
*/ cur->li_is_closed = 1; if (tfrc_lh_length(lh) == 1) /* due to RFC 3448, 6.3.1 */ return 0; cur->li_length = len; tfrc_lh_calc_i_mean(lh); return lh->i_mean < old_i_mean; } /* Determine if `new_loss' does begin a new loss interval [RFC 4342, 10.2] */ static inline u8 tfrc_lh_is_new_loss(struct tfrc_loss_interval *cur, struct tfrc_rx_hist_entry *new_loss) { return dccp_delta_seqno(cur->li_seqno, new_loss->tfrchrx_seqno) > 0 && (cur->li_is_closed || SUB16(new_loss->tfrchrx_ccval, cur->li_ccval) > 4); } /** * tfrc_lh_interval_add - Insert new record into the Loss Interval database * @lh: Loss Interval database * @rh: Receive history containing a fresh loss event * @calc_first_li: Caller-dependent routine to compute length of first interval * @sk: Used by @calc_first_li in caller-specific way (subtyping) * Updates I_mean and returns 1 if a new interval has in fact been added to @lh. */ int tfrc_lh_interval_add(struct tfrc_loss_hist *lh, struct tfrc_rx_hist *rh, u32 (*calc_first_li)(struct sock *), struct sock *sk) { struct tfrc_loss_interval *cur = tfrc_lh_peek(lh), *new; if (cur != NULL && !tfrc_lh_is_new_loss(cur, tfrc_rx_hist_loss_prev(rh))) return 0; new = tfrc_lh_demand_next(lh); if (unlikely(new == NULL)) { DCCP_CRIT("Cannot allocate/add loss record."); return 0; } new->li_seqno = tfrc_rx_hist_loss_prev(rh)->tfrchrx_seqno; new->li_ccval = tfrc_rx_hist_loss_prev(rh)->tfrchrx_ccval; new->li_is_closed = 0; if (++lh->counter == 1) lh->i_mean = new->li_length = (*calc_first_li)(sk); else { cur->li_length = dccp_delta_seqno(cur->li_seqno, new->li_seqno); new->li_length = dccp_delta_seqno(new->li_seqno, tfrc_rx_hist_last_rcv(rh)->tfrchrx_seqno) + 1; if (lh->counter > (2*LIH_SIZE)) lh->counter -= LIH_SIZE; tfrc_lh_calc_i_mean(lh); } return 1; } int __init tfrc_li_init(void) { tfrc_lh_slab = kmem_cache_create("tfrc_li_hist", sizeof(struct tfrc_loss_interval), 0, SLAB_HWCACHE_ALIGN, NULL); return tfrc_lh_slab == NULL ? -ENOBUFS : 0; } void tfrc_li_exit(void) { if (tfrc_lh_slab != NULL) { kmem_cache_destroy(tfrc_lh_slab); tfrc_lh_slab = NULL; } }
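The weighted mean computed by tfrc_lh_calc_i_mean() above is easier to check with concrete numbers: i_tot0 weights intervals I_0..I_(k-1) with w_0..w_(k-1), i_tot1 shifts the weights by one position so the open interval I_0 drops out, and I_mean is the larger of the two averages. A standalone sketch of just that arithmetic, with invented interval lengths (not DCCP code):

#include <stdio.h>

#define NINTERVAL 8
/* Same weights as tfrc_lh_weights above (RFC 3448, 5.4, scaled by 10) */
static const int w[NINTERVAL] = { 10, 10, 10, 10, 8, 6, 4, 2 };

/* Mirrors tfrc_lh_calc_i_mean() for a plain array I[0..k] of interval
 * lengths, I[0] being the open (most recent) interval. */
static unsigned avg_loss_interval(const unsigned *I, int k)
{
	unsigned i_tot0 = 0, i_tot1 = 0, w_tot = 0;
	int i;

	if (k <= 0)
		return 0;
	for (i = 0; i <= k; i++) {
		if (i < k) {
			i_tot0 += I[i] * w[i];
			w_tot  += w[i];
		}
		if (i > 0)
			i_tot1 += I[i] * w[i - 1];
	}
	return (i_tot0 > i_tot1 ? i_tot0 : i_tot1) / w_tot;
}

int main(void)
{
	/* Made-up data: a short open interval I[0] followed by older ones;
	 * the shifted sum wins, so the short open interval is discounted. */
	unsigned I[] = { 3, 50, 40, 60, 55, 45, 50, 40, 50 };

	printf("i_mean = %u\n", avg_loss_interval(I, 8)); /* prints 49 */
	return 0;
}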
gpl-2.0
goodhanrry/N915S_goodHanrry_kernel
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
8473
9375
/* * (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc. * * Licensed under the terms of the GNU GPL License version 2. */ #if defined(__i386__) || defined(__x86_64__) #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <string.h> #include <limits.h> #include <cpufreq.h> #include "helpers/helpers.h" #include "idle_monitor/cpupower-monitor.h" #define MSR_APERF 0xE8 #define MSR_MPERF 0xE7 #define MSR_TSC 0x10 #define MSR_AMD_HWCR 0xc0010015 enum mperf_id { C0 = 0, Cx, AVG_FREQ, MPERF_CSTATE_COUNT }; static int mperf_get_count_percent(unsigned int self_id, double *percent, unsigned int cpu); static int mperf_get_count_freq(unsigned int id, unsigned long long *count, unsigned int cpu); static struct timespec time_start, time_end; static cstate_t mperf_cstates[MPERF_CSTATE_COUNT] = { { .name = "C0", .desc = N_("Processor Core not idle"), .id = C0, .range = RANGE_THREAD, .get_count_percent = mperf_get_count_percent, }, { .name = "Cx", .desc = N_("Processor Core in an idle state"), .id = Cx, .range = RANGE_THREAD, .get_count_percent = mperf_get_count_percent, }, { .name = "Freq", .desc = N_("Average Frequency (including boost) in MHz"), .id = AVG_FREQ, .range = RANGE_THREAD, .get_count = mperf_get_count_freq, }, }; enum MAX_FREQ_MODE { MAX_FREQ_SYSFS, MAX_FREQ_TSC_REF }; static int max_freq_mode; /* * The max frequency mperf is ticking at (in C0), either retrieved via: * 1) calculated after measurements if we know TSC ticks at mperf/P0 frequency * 2) cpufreq /sys/devices/.../cpu0/cpufreq/cpuinfo_max_freq at init time * 1. Is preferred as it also works without cpufreq subsystem (e.g. on Xen) */ static unsigned long max_frequency; static unsigned long long tsc_at_measure_start; static unsigned long long tsc_at_measure_end; static unsigned long long *mperf_previous_count; static unsigned long long *aperf_previous_count; static unsigned long long *mperf_current_count; static unsigned long long *aperf_current_count; /* valid flag for all CPUs. 
If an MSR read failed it will be zero */ static int *is_valid; static int mperf_get_tsc(unsigned long long *tsc) { int ret; ret = read_msr(0, MSR_TSC, tsc); if (ret) dprint("Reading TSC MSR failed, returning %llu\n", *tsc); return ret; } static int mperf_init_stats(unsigned int cpu) { unsigned long long val; int ret; ret = read_msr(cpu, MSR_APERF, &val); aperf_previous_count[cpu] = val; ret |= read_msr(cpu, MSR_MPERF, &val); mperf_previous_count[cpu] = val; is_valid[cpu] = !ret; return 0; } static int mperf_measure_stats(unsigned int cpu) { unsigned long long val; int ret; ret = read_msr(cpu, MSR_APERF, &val); aperf_current_count[cpu] = val; ret |= read_msr(cpu, MSR_MPERF, &val); mperf_current_count[cpu] = val; is_valid[cpu] = !ret; return 0; } static int mperf_get_count_percent(unsigned int id, double *percent, unsigned int cpu) { unsigned long long aperf_diff, mperf_diff, tsc_diff; unsigned long long timediff; if (!is_valid[cpu]) return -1; if (id != C0 && id != Cx) return -1; mperf_diff = mperf_current_count[cpu] - mperf_previous_count[cpu]; aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu]; if (max_freq_mode == MAX_FREQ_TSC_REF) { tsc_diff = tsc_at_measure_end - tsc_at_measure_start; *percent = 100.0 * mperf_diff / tsc_diff; dprint("%s: TSC Ref - mperf_diff: %llu, tsc_diff: %llu\n", mperf_cstates[id].name, mperf_diff, tsc_diff); } else if (max_freq_mode == MAX_FREQ_SYSFS) { timediff = timespec_diff_us(time_start, time_end); *percent = 100.0 * mperf_diff / timediff; dprint("%s: MAXFREQ - mperf_diff: %llu, time_diff: %llu\n", mperf_cstates[id].name, mperf_diff, timediff); } else return -1; if (id == Cx) *percent = 100.0 - *percent; dprint("%s: mperf_diff: %llu - aperf_diff: %llu - (%u)\n", mperf_cstates[id].name, mperf_diff, aperf_diff, cpu); dprint("%s: %f\n", mperf_cstates[id].name, *percent); return 0; } static int mperf_get_count_freq(unsigned int id, unsigned long long *count, unsigned int cpu) { unsigned long long aperf_diff, mperf_diff, time_diff, tsc_diff; if (id != AVG_FREQ) return 1; if (!is_valid[cpu]) return -1; mperf_diff = mperf_current_count[cpu] - mperf_previous_count[cpu]; aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu]; if (max_freq_mode == MAX_FREQ_TSC_REF) { /* Calculate max_freq from TSC count */ tsc_diff = tsc_at_measure_end - tsc_at_measure_start; time_diff = timespec_diff_us(time_start, time_end); max_frequency = tsc_diff / time_diff; } *count = max_frequency * ((double)aperf_diff / mperf_diff); dprint("%s: Average freq based on %s maximum frequency:\n", mperf_cstates[id].name, (max_freq_mode == MAX_FREQ_TSC_REF) ?
"TSC calculated" : "sysfs read"); dprint("%max_frequency: %lu", max_frequency); dprint("aperf_diff: %llu\n", aperf_diff); dprint("mperf_diff: %llu\n", mperf_diff); dprint("avg freq: %llu\n", *count); return 0; } static int mperf_start(void) { int cpu; unsigned long long dbg; clock_gettime(CLOCK_REALTIME, &time_start); mperf_get_tsc(&tsc_at_measure_start); for (cpu = 0; cpu < cpu_count; cpu++) mperf_init_stats(cpu); mperf_get_tsc(&dbg); dprint("TSC diff: %llu\n", dbg - tsc_at_measure_start); return 0; } static int mperf_stop(void) { unsigned long long dbg; int cpu; for (cpu = 0; cpu < cpu_count; cpu++) mperf_measure_stats(cpu); mperf_get_tsc(&tsc_at_measure_end); clock_gettime(CLOCK_REALTIME, &time_end); mperf_get_tsc(&dbg); dprint("TSC diff: %llu\n", dbg - tsc_at_measure_end); return 0; } /* * Mperf register is defined to tick at P0 (maximum) frequency * * Instead of reading out P0 which can be tricky to read out from HW, * we use TSC counter if it reliably ticks at P0/mperf frequency. * * Still try to fall back to: * /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq * on older Intel HW without invariant TSC feature. * Or on AMD machines where TSC does not tick at P0 (do not exist yet, but * it's still double checked (MSR_AMD_HWCR)). * * On these machines the user would still get useful mperf * stats when acpi-cpufreq driver is loaded. */ static int init_maxfreq_mode(void) { int ret; unsigned long long hwcr; unsigned long min; if (!cpupower_cpu_info.caps & CPUPOWER_CAP_INV_TSC) goto use_sysfs; if (cpupower_cpu_info.vendor == X86_VENDOR_AMD) { /* MSR_AMD_HWCR tells us whether TSC runs at P0/mperf * freq. * A test whether hwcr is accessable/available would be: * (cpupower_cpu_info.family > 0x10 || * cpupower_cpu_info.family == 0x10 && * cpupower_cpu_info.model >= 0x2)) * This should be the case for all aperf/mperf * capable AMD machines and is therefore safe to test here. * Compare with Linus kernel git commit: acf01734b1747b1ec4 */ ret = read_msr(0, MSR_AMD_HWCR, &hwcr); /* * If the MSR read failed, assume a Xen system that did * not explicitly provide access to it and assume TSC works */ if (ret != 0) { dprint("TSC read 0x%x failed - assume TSC working\n", MSR_AMD_HWCR); return 0; } else if (1 & (hwcr >> 24)) { max_freq_mode = MAX_FREQ_TSC_REF; return 0; } else { /* Use sysfs max frequency if available */ } } else if (cpupower_cpu_info.vendor == X86_VENDOR_INTEL) { /* * On Intel we assume mperf (in C0) is ticking at same * rate than TSC */ max_freq_mode = MAX_FREQ_TSC_REF; return 0; } use_sysfs: if (cpufreq_get_hardware_limits(0, &min, &max_frequency)) { dprint("Cannot retrieve max freq from cpufreq kernel " "subsystem\n"); return -1; } max_freq_mode = MAX_FREQ_SYSFS; return 0; } /* * This monitor provides: * * 1) Average frequency a CPU resided in * This always works if the CPU has aperf/mperf capabilities * * 2) C0 and Cx (any sleep state) time a CPU resided in * Works if mperf timer stops ticking in sleep states which * seem to be the case on all current HW. * Both is directly retrieved from HW registers and is independent * from kernel statistics. 
*/ struct cpuidle_monitor mperf_monitor; struct cpuidle_monitor *mperf_register(void) { if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_APERF)) return NULL; if (init_maxfreq_mode()) return NULL; /* Free this at program termination */ is_valid = calloc(cpu_count, sizeof(int)); mperf_previous_count = calloc(cpu_count, sizeof(unsigned long long)); aperf_previous_count = calloc(cpu_count, sizeof(unsigned long long)); mperf_current_count = calloc(cpu_count, sizeof(unsigned long long)); aperf_current_count = calloc(cpu_count, sizeof(unsigned long long)); mperf_monitor.name_len = strlen(mperf_monitor.name); return &mperf_monitor; } void mperf_unregister(void) { free(mperf_previous_count); free(aperf_previous_count); free(mperf_current_count); free(aperf_current_count); free(is_valid); } struct cpuidle_monitor mperf_monitor = { .name = "Mperf", .hw_states_num = MPERF_CSTATE_COUNT, .hw_states = mperf_cstates, .start = mperf_start, .stop = mperf_stop, .do_register = mperf_register, .unregister = mperf_unregister, .needs_root = 1, .overflow_s = 922000000 /* 922337203 seconds TSC overflow at 20GHz */ }; #endif /* #if defined(__i386__) || defined(__x86_64__) */
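To make the two derived quantities above concrete: in MAX_FREQ_TSC_REF mode, C0 residency is mperf_diff/tsc_diff, average frequency is max_freq scaled by aperf_diff/mperf_diff, and max_freq itself is recovered as tsc_diff/time_diff. A self-contained sketch of that arithmetic with invented counter deltas (no MSR access):

#include <stdio.h>

int main(void)
{
	/* Invented counter deltas over a 1-second measurement window */
	unsigned long long tsc_diff   = 2400000000ULL; /* TSC ticks        */
	unsigned long long mperf_diff = 1200000000ULL; /* ticks in C0 only */
	unsigned long long aperf_diff = 1800000000ULL; /* actual cycles    */
	unsigned long long time_diff  = 1000000ULL;    /* microseconds     */

	/* mperf ticks at P0 frequency but only while in C0, so its share
	 * of the (always ticking, invariant) TSC is the C0 residency */
	double c0 = 100.0 * mperf_diff / tsc_diff;

	/* P0 frequency in MHz, recovered from the TSC as in
	 * mperf_get_count_freq() */
	unsigned long max_freq = tsc_diff / time_diff;

	/* aperf counts real (possibly boosted or throttled) cycles in C0 */
	unsigned long long avg = max_freq * ((double)aperf_diff / mperf_diff);

	printf("C0: %.1f%%  Cx: %.1f%%  avg freq: %llu MHz\n",
	       c0, 100.0 - c0, avg); /* C0: 50.0%  Cx: 50.0%  3600 MHz */
	return 0;
}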
gpl-2.0
ftCommunity/ft-TXT
board/knobloch/TXT/board-support/u-boot-2013.10-ti2013.12.01/arch/arm/cpu/armv7/at91/cpu.c
26
1559
/* * (C) Copyright 2010 * Reinhard Meyer, reinhard.meyer@emk-elektronik.de * (C) Copyright 2009 * Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com> * (C) Copyright 2013 * Bo Shen <voice.shen@atmel.com> * * SPDX-License-Identifier: GPL-2.0+ */ #include <common.h> #include <asm/io.h> #include <asm/arch/hardware.h> #include <asm/arch/at91_dbu.h> #include <asm/arch/at91_pmc.h> #include <asm/arch/at91_pit.h> #include <asm/arch/at91_gpbr.h> #include <asm/arch/clk.h> #ifndef CONFIG_SYS_AT91_MAIN_CLOCK #define CONFIG_SYS_AT91_MAIN_CLOCK 0 #endif int arch_cpu_init(void) { return at91_clock_init(CONFIG_SYS_AT91_MAIN_CLOCK); } void arch_preboot_os(void) { ulong cpiv; at91_pit_t *pit = (at91_pit_t *)ATMEL_BASE_PIT; cpiv = AT91_PIT_MR_PIV_MASK(readl(&pit->piir)); /* * Disable PITC * Add 0x1000 to current counter to stop it faster * without waiting for wrapping back to 0 */ writel(cpiv + 0x1000, &pit->mr); } #if defined(CONFIG_DISPLAY_CPUINFO) int print_cpuinfo(void) { char buf[32]; printf("CPU: %s\n", get_cpu_name()); printf("Crystal frequency: %8s MHz\n", strmhz(buf, get_main_clk_rate())); printf("CPU clock : %8s MHz\n", strmhz(buf, get_cpu_clk_rate())); printf("Master clock : %8s MHz\n", strmhz(buf, get_mck_clk_rate())); return 0; } #endif void enable_caches(void) { } unsigned int get_chip_id(void) { return readl(ATMEL_BASE_DBGU + AT91_DBU_CIDR) & ~AT91_DBU_CIDR_MASK; } unsigned int get_extension_chip_id(void) { return readl(ATMEL_BASE_DBGU + AT91_DBU_EXID); }
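The single MR write in arch_preboot_os() above packs two effects into one store: the written value has the PIT enable bit clear, and its PIV period field sits just above the current count, so the counter expires and stops almost immediately instead of running on to the old, much larger period. A userspace sketch of just that value computation, assuming the PIV field occupies the low 20 bits of PIIR as in at91_pit.h (the sample PIIR value is invented):

#include <stdio.h>

#define PIT_PIV_MASK(x)  ((x) & 0xfffff) /* CPIV: low 20 bits of PIIR */
#define PIT_MR_PITEN     (1UL << 24)     /* enable bit in PIT_MR */

int main(void)
{
	unsigned long piir = 0x03c0a123; /* invented sample: PICNT in high bits */
	unsigned long cpiv = PIT_PIV_MASK(piir);
	unsigned long mr   = cpiv + 0x1000;

	/* With PITEN clear the counter only runs until it reaches PIV,
	 * so a PIV just above the current count stops it quickly. */
	printf("cpiv=%#lx  new MR=%#lx  PITEN=%lu\n",
	       cpiv, mr, (mr & PIT_MR_PITEN) ? 1UL : 0UL);
	return 0;
}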
gpl-2.0
shakalaca/ASUS_ZenFone_A400CG
external/valgrind/main/memcheck/tests/varinfo6.c
26
214647
/* Test the variable identification machinery in a non-toy sized program. Also, the croak() call in BZ2_decompress causes Valgrind to try to describe a local variable (i) that has at least a dozen independent live ranges (hence, is really that many independent variables). Hence it tests the machinery's ability to correctly handle a variable which has multiple live ranges and hence multiple non-overlapping areas in which it actually exists. */ /* Relevant compile flags are: -Wall -g -I$prefix/include/valgrind e.g. -Wall -g -I`pwd`/Inst/include/valgrind */ #include <stdio.h> #include <stdlib.h> #include <assert.h> #include "memcheck/memcheck.h" /* Cause memcheck to complain about the address "a" and so to print its best guess as to what "a" actually is. a must be addressable. */ void croak ( void* aV ) { char* a = (char*)aV; char* undefp = malloc(1); char saved = *a; assert(undefp); *a = *undefp; (void) VALGRIND_CHECK_MEM_IS_DEFINED(a, 1); *a = saved; free(undefp); } // This benchmark is basically bzip2 (mashed to be a single file) // compressing and decompressing some data. It tests Valgrind's handling of // realistic and "difficult" (i.e. lots of branches and memory accesses) // integer code. Execution is spread out over quite a few basic blocks; // --profile-flags indicates that reaching the 90th percentile of // dynamic BB counts requires considering the top 51 basic blocks. // This program can be used both as part of the performance test // suite, in which case we want it to run for quite a while, // and as part of the regression (correctness) test suite, in // which case we want it to run quickly and be verbose. // So it does the latter iff given a command line arg. // Licensing: the code within is mostly taken from bzip2, which has a BSD // license. There is a little code from VEX, which is licensed under GPLv2. // And it's all written by Julian Seward. #define BZ_NO_STDIO /*-------------------------------------------------------------*/ /*--- Private header file for the library. ---*/ /*--- bzlib_private.h ---*/ /*-------------------------------------------------------------*/ /*-- This file is a part of bzip2 and/or libbzip2, a program and library for lossless, block-sorting data compression. Copyright (C) 1996-2004 Julian R Seward. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 3. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 4. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Julian Seward, Cambridge, UK. jseward@bzip.org bzip2/libbzip2 version 1.0 of 21 March 2000 This program is based on (at least) the work of: Mike Burrows David Wheeler Peter Fenwick Alistair Moffat Radford Neal Ian H. Witten Robert Sedgewick Jon L. Bentley For more information on these sources, see the manual. --*/ #ifndef _BZLIB_PRIVATE_H #define _BZLIB_PRIVATE_H #include <stdlib.h> #ifndef BZ_NO_STDIO #include <stdio.h> #include <ctype.h> #include <string.h> #endif /*-------------------------------------------------------------*/ /*--- Public header file for the library. ---*/ /*--- bzlib.h ---*/ /*-------------------------------------------------------------*/ /*-- This file is a part of bzip2 and/or libbzip2, a program and library for lossless, block-sorting data compression. Copyright (C) 1996-2004 Julian R Seward. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 3. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 4. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Julian Seward, Cambridge, UK. jseward@bzip.org bzip2/libbzip2 version 1.0 of 21 March 2000 This program is based on (at least) the work of: Mike Burrows David Wheeler Peter Fenwick Alistair Moffat Radford Neal Ian H. Witten Robert Sedgewick Jon L. Bentley For more information on these sources, see the manual. 
--*/ #ifndef _BZLIB_H #define _BZLIB_H #ifdef __cplusplus extern "C" { #endif #define BZ_RUN 0 #define BZ_FLUSH 1 #define BZ_FINISH 2 #define BZ_OK 0 #define BZ_RUN_OK 1 #define BZ_FLUSH_OK 2 #define BZ_FINISH_OK 3 #define BZ_STREAM_END 4 #define BZ_SEQUENCE_ERROR (-1) #define BZ_PARAM_ERROR (-2) #define BZ_MEM_ERROR (-3) #define BZ_DATA_ERROR (-4) #define BZ_DATA_ERROR_MAGIC (-5) #define BZ_IO_ERROR (-6) #define BZ_UNEXPECTED_EOF (-7) #define BZ_OUTBUFF_FULL (-8) #define BZ_CONFIG_ERROR (-9) typedef struct { char *next_in; unsigned int avail_in; unsigned int total_in_lo32; unsigned int total_in_hi32; char *next_out; unsigned int avail_out; unsigned int total_out_lo32; unsigned int total_out_hi32; void *state; void *(*bzalloc)(void *,int,int); void (*bzfree)(void *,void *); void *opaque; } bz_stream; #ifndef BZ_IMPORT #define BZ_EXPORT #endif #ifndef BZ_NO_STDIO /* Need a definition for FILE */ #include <stdio.h> #endif #ifdef _WIN32 # include <windows.h> # ifdef small /* windows.h defines small as char */ # undef small # endif # ifdef BZ_EXPORT # define BZ_API(func) WINAPI func # define BZ_EXTERN extern # else /* import windows dll dynamically */ # define BZ_API(func) (WINAPI * func) # define BZ_EXTERN # endif #else # define BZ_API(func) func # define BZ_EXTERN extern #endif /*-- Core (low-level) library functions --*/ BZ_EXTERN int BZ_API(BZ2_bzCompressInit) ( bz_stream* strm, int blockSize100k, int verbosity, int workFactor ); BZ_EXTERN int BZ_API(BZ2_bzCompress) ( bz_stream* strm, int action ); BZ_EXTERN int BZ_API(BZ2_bzCompressEnd) ( bz_stream* strm ); BZ_EXTERN int BZ_API(BZ2_bzDecompressInit) ( bz_stream *strm, int verbosity, int small ); BZ_EXTERN int BZ_API(BZ2_bzDecompress) ( bz_stream* strm ); BZ_EXTERN int BZ_API(BZ2_bzDecompressEnd) ( bz_stream *strm ); /*-- High(er) level library functions --*/ #ifndef BZ_NO_STDIO #define BZ_MAX_UNUSED 5000 typedef void BZFILE; BZ_EXTERN BZFILE* BZ_API(BZ2_bzReadOpen) ( int* bzerror, FILE* f, int verbosity, int small, void* unused, int nUnused ); BZ_EXTERN void BZ_API(BZ2_bzReadClose) ( int* bzerror, BZFILE* b ); BZ_EXTERN void BZ_API(BZ2_bzReadGetUnused) ( int* bzerror, BZFILE* b, void** unused, int* nUnused ); BZ_EXTERN int BZ_API(BZ2_bzRead) ( int* bzerror, BZFILE* b, void* buf, int len ); BZ_EXTERN BZFILE* BZ_API(BZ2_bzWriteOpen) ( int* bzerror, FILE* f, int blockSize100k, int verbosity, int workFactor ); BZ_EXTERN void BZ_API(BZ2_bzWrite) ( int* bzerror, BZFILE* b, void* buf, int len ); BZ_EXTERN void BZ_API(BZ2_bzWriteClose) ( int* bzerror, BZFILE* b, int abandon, unsigned int* nbytes_in, unsigned int* nbytes_out ); BZ_EXTERN void BZ_API(BZ2_bzWriteClose64) ( int* bzerror, BZFILE* b, int abandon, unsigned int* nbytes_in_lo32, unsigned int* nbytes_in_hi32, unsigned int* nbytes_out_lo32, unsigned int* nbytes_out_hi32 ); #endif /*-- Utility functions --*/ BZ_EXTERN int BZ_API(BZ2_bzBuffToBuffCompress) ( char* dest, unsigned int* destLen, char* source, unsigned int sourceLen, int blockSize100k, int verbosity, int workFactor ); BZ_EXTERN int BZ_API(BZ2_bzBuffToBuffDecompress) ( char* dest, unsigned int* destLen, char* source, unsigned int sourceLen, int small, int verbosity ); /*-- Code contributed by Yoshioka Tsuneo (QWF00133@niftyserve.or.jp/tsuneo-y@is.aist-nara.ac.jp), to support better zlib compatibility. This code is not _officially_ part of libbzip2 (yet); I haven't tested it, documented it, or considered the threading-safeness of it. If this code breaks, please contact both Yoshioka and me.
--*/ BZ_EXTERN const char * BZ_API(BZ2_bzlibVersion) ( void ); #ifndef BZ_NO_STDIO BZ_EXTERN BZFILE * BZ_API(BZ2_bzopen) ( const char *path, const char *mode ); BZ_EXTERN BZFILE * BZ_API(BZ2_bzdopen) ( int fd, const char *mode ); BZ_EXTERN int BZ_API(BZ2_bzread) ( BZFILE* b, void* buf, int len ); BZ_EXTERN int BZ_API(BZ2_bzwrite) ( BZFILE* b, void* buf, int len ); BZ_EXTERN int BZ_API(BZ2_bzflush) ( BZFILE* b ); BZ_EXTERN void BZ_API(BZ2_bzclose) ( BZFILE* b ); BZ_EXTERN const char * BZ_API(BZ2_bzerror) ( BZFILE *b, int *errnum ); #endif #ifdef __cplusplus } #endif #endif /*-------------------------------------------------------------*/ /*--- end bzlib.h ---*/ /*-------------------------------------------------------------*/ /*-- General stuff. --*/ #define BZ_VERSION "1.0.3, 17-Oct-2004" typedef char Char; typedef unsigned char Bool; typedef unsigned char UChar; typedef int Int32; typedef unsigned int UInt32; typedef short Int16; typedef unsigned short UInt16; #define True ((Bool)1) #define False ((Bool)0) #ifndef __GNUC__ #define __inline__ /* */ #endif #ifndef BZ_NO_STDIO extern void BZ2_bz__AssertH__fail ( int errcode ); #define AssertH(cond,errcode) \ { if (!(cond)) BZ2_bz__AssertH__fail ( errcode ); } #if BZ_DEBUG #define AssertD(cond,msg) \ { if (!(cond)) { \ fprintf ( stderr, \ "\n\nlibbzip2(debug build): internal error\n\t%s\n", msg );\ exit(1); \ }} #else #define AssertD(cond,msg) /* */ #endif #define VPrintf0(zf) \ fprintf(stderr,zf) #define VPrintf1(zf,za1) \ fprintf(stderr,zf,za1) #define VPrintf2(zf,za1,za2) \ fprintf(stderr,zf,za1,za2) #define VPrintf3(zf,za1,za2,za3) \ fprintf(stderr,zf,za1,za2,za3) #define VPrintf4(zf,za1,za2,za3,za4) \ fprintf(stderr,zf,za1,za2,za3,za4) #define VPrintf5(zf,za1,za2,za3,za4,za5) \ fprintf(stderr,zf,za1,za2,za3,za4,za5) #else extern void bz_internal_error ( int errcode ); #define AssertH(cond,errcode) \ { if (!(cond)) bz_internal_error ( errcode ); } #define AssertD(cond,msg) /* */ #define VPrintf0(zf) \ vex_printf(zf) #define VPrintf1(zf,za1) \ vex_printf(zf,za1) #define VPrintf2(zf,za1,za2) \ vex_printf(zf,za1,za2) #define VPrintf3(zf,za1,za2,za3) \ vex_printf(zf,za1,za2,za3) #define VPrintf4(zf,za1,za2,za3,za4) \ vex_printf(zf,za1,za2,za3,za4) #define VPrintf5(zf,za1,za2,za3,za4,za5) \ vex_printf(zf,za1,za2,za3,za4,za5) #endif #define BZALLOC(nnn) (strm->bzalloc)(strm->opaque,(nnn),1) #define BZFREE(ppp) (strm->bzfree)(strm->opaque,(ppp)) /*-- Header bytes. --*/ #define BZ_HDR_B 0x42 /* 'B' */ #define BZ_HDR_Z 0x5a /* 'Z' */ #define BZ_HDR_h 0x68 /* 'h' */ #define BZ_HDR_0 0x30 /* '0' */ /*-- Constants for the back end. --*/ #define BZ_MAX_ALPHA_SIZE 258 #define BZ_MAX_CODE_LEN 23 #define BZ_RUNA 0 #define BZ_RUNB 1 #define BZ_N_GROUPS 6 #define BZ_G_SIZE 50 #define BZ_N_ITERS 4 #define BZ_MAX_SELECTORS (2 + (900000 / BZ_G_SIZE)) /*-- Stuff for randomising repetitive blocks. --*/ extern Int32 BZ2_rNums[512]; #define BZ_RAND_DECLS \ Int32 rNToGo; \ Int32 rTPos \ #define BZ_RAND_INIT_MASK \ s->rNToGo = 0; \ s->rTPos = 0 \ #define BZ_RAND_MASK ((s->rNToGo == 1) ? 1 : 0) #define BZ_RAND_UPD_MASK \ if (s->rNToGo == 0) { \ s->rNToGo = BZ2_rNums[s->rTPos]; \ s->rTPos++; \ if (s->rTPos == 512) s->rTPos = 0; \ } \ s->rNToGo--; /*-- Stuff for doing CRCs. 
--*/ extern UInt32 BZ2_crc32Table[256]; #define BZ_INITIALISE_CRC(crcVar) \ { \ crcVar = 0xffffffffL; \ } #define BZ_FINALISE_CRC(crcVar) \ { \ crcVar = ~(crcVar); \ } #define BZ_UPDATE_CRC(crcVar,cha) \ { \ crcVar = (crcVar << 8) ^ \ BZ2_crc32Table[(crcVar >> 24) ^ \ ((UChar)cha)]; \ } /*-- States and modes for compression. --*/ #define BZ_M_IDLE 1 #define BZ_M_RUNNING 2 #define BZ_M_FLUSHING 3 #define BZ_M_FINISHING 4 #define BZ_S_OUTPUT 1 #define BZ_S_INPUT 2 #define BZ_N_RADIX 2 #define BZ_N_QSORT 12 #define BZ_N_SHELL 18 #define BZ_N_OVERSHOOT (BZ_N_RADIX + BZ_N_QSORT + BZ_N_SHELL + 2) /*-- Structure holding all the compression-side stuff. --*/ typedef struct { /* pointer back to the struct bz_stream */ bz_stream* strm; /* mode this stream is in, and whether inputting */ /* or outputting data */ Int32 mode; Int32 state; /* remembers avail_in when flush/finish requested */ UInt32 avail_in_expect; /* for doing the block sorting */ UInt32* arr1; UInt32* arr2; UInt32* ftab; Int32 origPtr; /* aliases for arr1 and arr2 */ UInt32* ptr; UChar* block; UInt16* mtfv; UChar* zbits; /* for deciding when to use the fallback sorting algorithm */ Int32 workFactor; /* run-length-encoding of the input */ UInt32 state_in_ch; Int32 state_in_len; BZ_RAND_DECLS; /* input and output limits and current posns */ Int32 nblock; Int32 nblockMAX; Int32 numZ; Int32 state_out_pos; /* map of bytes used in block */ Int32 nInUse; Bool inUse[256]; UChar unseqToSeq[256]; /* the buffer for bit stream creation */ UInt32 bsBuff; Int32 bsLive; /* block and combined CRCs */ UInt32 blockCRC; UInt32 combinedCRC; /* misc administratium */ Int32 verbosity; Int32 blockNo; Int32 blockSize100k; /* stuff for coding the MTF values */ Int32 nMTF; Int32 mtfFreq [BZ_MAX_ALPHA_SIZE]; UChar selector [BZ_MAX_SELECTORS]; UChar selectorMtf[BZ_MAX_SELECTORS]; UChar len [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE]; Int32 code [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE]; Int32 rfreq [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE]; /* second dimension: only 3 needed; 4 makes index calculations faster */ UInt32 len_pack[BZ_MAX_ALPHA_SIZE][4]; } EState; /*-- externs for compression. --*/ extern void BZ2_blockSort ( EState* ); extern void BZ2_compressBlock ( EState*, Bool ); extern void BZ2_bsInitWrite ( EState* ); extern void BZ2_hbAssignCodes ( Int32*, UChar*, Int32, Int32, Int32 ); extern void BZ2_hbMakeCodeLengths ( UChar*, Int32*, Int32, Int32 ); /*-- states for decompression. --*/ #define BZ_X_IDLE 1 #define BZ_X_OUTPUT 2 #define BZ_X_MAGIC_1 10 #define BZ_X_MAGIC_2 11 #define BZ_X_MAGIC_3 12 #define BZ_X_MAGIC_4 13 #define BZ_X_BLKHDR_1 14 #define BZ_X_BLKHDR_2 15 #define BZ_X_BLKHDR_3 16 #define BZ_X_BLKHDR_4 17 #define BZ_X_BLKHDR_5 18 #define BZ_X_BLKHDR_6 19 #define BZ_X_BCRC_1 20 #define BZ_X_BCRC_2 21 #define BZ_X_BCRC_3 22 #define BZ_X_BCRC_4 23 #define BZ_X_RANDBIT 24 #define BZ_X_ORIGPTR_1 25 #define BZ_X_ORIGPTR_2 26 #define BZ_X_ORIGPTR_3 27 #define BZ_X_MAPPING_1 28 #define BZ_X_MAPPING_2 29 #define BZ_X_SELECTOR_1 30 #define BZ_X_SELECTOR_2 31 #define BZ_X_SELECTOR_3 32 #define BZ_X_CODING_1 33 #define BZ_X_CODING_2 34 #define BZ_X_CODING_3 35 #define BZ_X_MTF_1 36 #define BZ_X_MTF_2 37 #define BZ_X_MTF_3 38 #define BZ_X_MTF_4 39 #define BZ_X_MTF_5 40 #define BZ_X_MTF_6 41 #define BZ_X_ENDHDR_2 42 #define BZ_X_ENDHDR_3 43 #define BZ_X_ENDHDR_4 44 #define BZ_X_ENDHDR_5 45 #define BZ_X_ENDHDR_6 46 #define BZ_X_CCRC_1 47 #define BZ_X_CCRC_2 48 #define BZ_X_CCRC_3 49 #define BZ_X_CCRC_4 50 /*-- Constants for the fast MTF decoder. 
--*/ #define MTFA_SIZE 4096 #define MTFL_SIZE 16 /*-- Structure holding all the decompression-side stuff. --*/ typedef struct { /* pointer back to the struct bz_stream */ bz_stream* strm; /* state indicator for this stream */ Int32 state; /* for doing the final run-length decoding */ UChar state_out_ch; Int32 state_out_len; Bool blockRandomised; BZ_RAND_DECLS; /* the buffer for bit stream reading */ UInt32 bsBuff; Int32 bsLive; /* misc administratium */ Int32 blockSize100k; Bool smallDecompress; Int32 currBlockNo; Int32 verbosity; /* for undoing the Burrows-Wheeler transform */ Int32 origPtr; UInt32 tPos; Int32 k0; Int32 unzftab[256]; Int32 nblock_used; Int32 cftab[257]; Int32 cftabCopy[257]; /* for undoing the Burrows-Wheeler transform (FAST) */ UInt32 *tt; /* for undoing the Burrows-Wheeler transform (SMALL) */ UInt16 *ll16; UChar *ll4; /* stored and calculated CRCs */ UInt32 storedBlockCRC; UInt32 storedCombinedCRC; UInt32 calculatedBlockCRC; UInt32 calculatedCombinedCRC; /* map of bytes used in block */ Int32 nInUse; Bool inUse[256]; Bool inUse16[16]; UChar seqToUnseq[256]; /* for decoding the MTF values */ UChar mtfa [MTFA_SIZE]; Int32 mtfbase[256 / MTFL_SIZE]; UChar selector [BZ_MAX_SELECTORS]; UChar selectorMtf[BZ_MAX_SELECTORS]; UChar len [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE]; Int32 limit [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE]; Int32 base [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE]; Int32 perm [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE]; Int32 minLens[BZ_N_GROUPS]; /* save area for scalars in the main decompress code */ Int32 save_i; Int32 save_j; Int32 save_t; Int32 save_alphaSize; Int32 save_nGroups; Int32 save_nSelectors; Int32 save_EOB; Int32 save_groupNo; Int32 save_groupPos; Int32 save_nextSym; Int32 save_nblockMAX; Int32 save_nblock; Int32 save_es; Int32 save_N; Int32 save_curr; Int32 save_zt; Int32 save_zn; Int32 save_zvec; Int32 save_zj; Int32 save_gSel; Int32 save_gMinlen; Int32* save_gLimit; Int32* save_gBase; Int32* save_gPerm; } DState; /*-- Macros for decompression. --*/ #define BZ_GET_FAST(cccc) \ s->tPos = s->tt[s->tPos]; \ cccc = (UChar)(s->tPos & 0xff); \ s->tPos >>= 8; #define BZ_GET_FAST_C(cccc) \ c_tPos = c_tt[c_tPos]; \ cccc = (UChar)(c_tPos & 0xff); \ c_tPos >>= 8; #define SET_LL4(i,n) \ { if (((i) & 0x1) == 0) \ s->ll4[(i) >> 1] = (s->ll4[(i) >> 1] & 0xf0) | (n); else \ s->ll4[(i) >> 1] = (s->ll4[(i) >> 1] & 0x0f) | ((n) << 4); \ } #define GET_LL4(i) \ ((((UInt32)(s->ll4[(i) >> 1])) >> (((i) << 2) & 0x4)) & 0xF) #define SET_LL(i,n) \ { s->ll16[i] = (UInt16)(n & 0x0000ffff); \ SET_LL4(i, n >> 16); \ } #define GET_LL(i) \ (((UInt32)s->ll16[i]) | (GET_LL4(i) << 16)) #define BZ_GET_SMALL(cccc) \ cccc = BZ2_indexIntoF ( s->tPos, s->cftab ); \ s->tPos = GET_LL(s->tPos); /*-- externs for decompression. --*/ extern Int32 BZ2_indexIntoF ( Int32, Int32* ); extern Int32 BZ2_decompress ( DState* ); extern void BZ2_hbCreateDecodeTables ( Int32*, Int32*, Int32*, UChar*, Int32, Int32, Int32 ); #endif /*-- BZ_NO_STDIO seems to make NULL disappear on some platforms. --*/ #ifdef BZ_NO_STDIO #ifndef NULL #define NULL 0 #endif #endif /*-------------------------------------------------------------*/ /*--- end bzlib_private.h ---*/ /*-------------------------------------------------------------*/ /* Something which has the same size as void* on the host. That is, it is 32 bits on a 32-bit host and 64 bits on a 64-bit host, and so it can safely be coerced to and from a pointer type on the host machine. 
*/ typedef unsigned long HWord; typedef char HChar; typedef signed int Int; typedef unsigned int UInt; typedef signed long long int Long; typedef unsigned long long int ULong; ///////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////// static HWord (*serviceFn)(HWord,HWord) = 0; #if 0 static char* my_strcpy ( char* dest, const char* src ) { char* dest_orig = dest; while (*src) *dest++ = *src++; *dest = 0; return dest_orig; } static void* my_memcpy ( void *dest, const void *src, int sz ) { const char *s = (const char *)src; char *d = (char *)dest; while (sz--) *d++ = *s++; return dest; } static void* my_memmove( void *dst, const void *src, unsigned int len ) { register char *d; register char *s; if ( dst > src ) { d = (char *)dst + len - 1; s = (char *)src + len - 1; while ( len >= 4 ) { *d-- = *s--; *d-- = *s--; *d-- = *s--; *d-- = *s--; len -= 4; } while ( len-- ) { *d-- = *s--; } } else if ( dst < src ) { d = (char *)dst; s = (char *)src; while ( len >= 4 ) { *d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++; len -= 4; } while ( len-- ) { *d++ = *s++; } } return dst; } #endif char* my_strcat ( char* dest, const char* src ) { char* dest_orig = dest; while (*dest) dest++; while (*src) *dest++ = *src++; *dest = 0; return dest_orig; } ///////////////////////////////////////////////////////////////////// static void vex_log_bytes ( char* p, int n ) { int i; for (i = 0; i < n; i++) (*serviceFn)( 1, (int)p[i] ); } /*---------------------------------------------------------*/ /*--- vex_printf ---*/ /*---------------------------------------------------------*/ /* This should be the only <...> include in the entire VEX library. New code for vex_util.c should go above this point. */ #include <stdarg.h> static HChar vex_toupper ( HChar c ) { if (c >= 'a' && c <= 'z') return c + ('A' - 'a'); else return c; } static Int vex_strlen ( const HChar* str ) { Int i = 0; while (str[i] != 0) i++; return i; } Bool vex_streq ( const HChar* s1, const HChar* s2 ) { while (True) { if (*s1 == 0 && *s2 == 0) return True; if (*s1 != *s2) return False; s1++; s2++; } } /* Some flags. */ #define VG_MSG_SIGNED 1 /* The value is signed. */ #define VG_MSG_ZJUSTIFY 2 /* Must justify with '0'. */ #define VG_MSG_LJUSTIFY 4 /* Must justify on the left. */ #define VG_MSG_PAREN 8 /* Parenthesize if present (for %y) */ #define VG_MSG_COMMA 16 /* Add commas to numbers (for %d, %u) */ /* Copy a string into the buffer. */ static UInt myvprintf_str ( void(*send)(HChar), Int flags, Int width, HChar* str, Bool capitalise ) { # define MAYBE_TOUPPER(ch) (capitalise ? vex_toupper(ch) : (ch)) UInt ret = 0; Int i, extra; Int len = vex_strlen(str); if (width == 0) { ret += len; for (i = 0; i < len; i++) send(MAYBE_TOUPPER(str[i])); return ret; } if (len > width) { ret += width; for (i = 0; i < width; i++) send(MAYBE_TOUPPER(str[i])); return ret; } extra = width - len; if (flags & VG_MSG_LJUSTIFY) { ret += extra; for (i = 0; i < extra; i++) send(' '); } ret += len; for (i = 0; i < len; i++) send(MAYBE_TOUPPER(str[i])); if (!(flags & VG_MSG_LJUSTIFY)) { ret += extra; for (i = 0; i < extra; i++) send(' '); } # undef MAYBE_TOUPPER return ret; } /* Write P into the buffer according to these args: * If SIGN is true, p is a signed. * BASE is the base. * If WITH_ZERO is true, '0' must be added. * WIDTH is the width of the field. 
*/ static UInt myvprintf_int64 ( void(*send)(HChar), Int flags, Int base, Int width, ULong pL) { HChar buf[40]; Int ind = 0; Int i, nc = 0; Bool neg = False; HChar *digits = "0123456789ABCDEF"; UInt ret = 0; UInt p = (UInt)pL; if (base < 2 || base > 16) return ret; if ((flags & VG_MSG_SIGNED) && (Int)p < 0) { p = - (Int)p; neg = True; } if (p == 0) buf[ind++] = '0'; else { while (p > 0) { if ((flags & VG_MSG_COMMA) && 10 == base && 0 == (ind-nc) % 3 && 0 != ind) { buf[ind++] = ','; nc++; } buf[ind++] = digits[p % base]; p /= base; } } if (neg) buf[ind++] = '-'; if (width > 0 && !(flags & VG_MSG_LJUSTIFY)) { for(; ind < width; ind++) { //vassert(ind < 39); buf[ind] = ((flags & VG_MSG_ZJUSTIFY) ? '0': ' '); } } /* Reverse copy to buffer. */ ret += ind; for (i = ind -1; i >= 0; i--) { send(buf[i]); } if (width > 0 && (flags & VG_MSG_LJUSTIFY)) { for(; ind < width; ind++) { ret++; send(' '); // Never pad with zeroes on RHS -- changes the value! } } return ret; } /* A simple vprintf(). */ static UInt vprintf_wrk ( void(*send)(HChar), const HChar *format, va_list vargs ) { UInt ret = 0; int i; int flags; int width; Bool is_long; /* We assume that vargs has already been initialised by the caller, using va_start, and that the caller will similarly clean up with va_end. */ for (i = 0; format[i] != 0; i++) { if (format[i] != '%') { send(format[i]); ret++; continue; } i++; /* A '%' has been found. Ignore a trailing %. */ if (format[i] == 0) break; if (format[i] == '%') { /* `%%' is replaced by `%'. */ send('%'); ret++; continue; } flags = 0; is_long = False; width = 0; /* length of the field. */ if (format[i] == '(') { flags |= VG_MSG_PAREN; i++; } /* If ',' follows '%', commas will be inserted. */ if (format[i] == ',') { flags |= VG_MSG_COMMA; i++; } /* If '-' follows '%', justify on the left. */ if (format[i] == '-') { flags |= VG_MSG_LJUSTIFY; i++; } /* If '0' follows '%', pads will be inserted. */ if (format[i] == '0') { flags |= VG_MSG_ZJUSTIFY; i++; } /* Compute the field length. 
*/ while (format[i] >= '0' && format[i] <= '9') { width *= 10; width += format[i++] - '0'; } while (format[i] == 'l') { i++; is_long = True; } switch (format[i]) { case 'd': /* %d */ flags |= VG_MSG_SIGNED; if (is_long) ret += myvprintf_int64(send, flags, 10, width, (ULong)(va_arg (vargs, Long))); else ret += myvprintf_int64(send, flags, 10, width, (ULong)(va_arg (vargs, Int))); break; case 'u': /* %u */ if (is_long) ret += myvprintf_int64(send, flags, 10, width, (ULong)(va_arg (vargs, ULong))); else ret += myvprintf_int64(send, flags, 10, width, (ULong)(va_arg (vargs, UInt))); break; case 'p': /* %p */ ret += 2; send('0'); send('x'); ret += myvprintf_int64(send, flags, 16, width, (ULong)((HWord)va_arg (vargs, void *))); break; case 'x': /* %x */ if (is_long) ret += myvprintf_int64(send, flags, 16, width, (ULong)(va_arg (vargs, ULong))); else ret += myvprintf_int64(send, flags, 16, width, (ULong)(va_arg (vargs, UInt))); break; case 'c': /* %c */ ret++; send((va_arg (vargs, int))); break; case 's': case 'S': { /* %s */ char *str = va_arg (vargs, char *); if (str == (char*) 0) str = "(null)"; ret += myvprintf_str(send, flags, width, str, (format[i]=='S')); break; } # if 0 case 'y': { /* %y - print symbol */ Char buf[100]; Char *cp = buf; Addr a = va_arg(vargs, Addr); if (flags & VG_MSG_PAREN) *cp++ = '('; if (VG_(get_fnname_w_offset)(a, cp, sizeof(buf)-4)) { if (flags & VG_MSG_PAREN) { cp += VG_(strlen)(cp); *cp++ = ')'; *cp = '\0'; } ret += myvprintf_str(send, flags, width, buf, 0); } break; } # endif default: break; } } return ret; } /* A general replacement for printf(). Note that only low-level debugging info should be sent via here. The official route is to to use vg_message(). This interface is deprecated. */ static HChar myprintf_buf[1000]; static Int n_myprintf_buf; static void add_to_myprintf_buf ( HChar c ) { if (c == '\n' || n_myprintf_buf >= 1000-10 /*paranoia*/ ) { (*vex_log_bytes)( myprintf_buf, vex_strlen(myprintf_buf) ); n_myprintf_buf = 0; myprintf_buf[n_myprintf_buf] = 0; } myprintf_buf[n_myprintf_buf++] = c; myprintf_buf[n_myprintf_buf] = 0; } static UInt vex_printf ( const char *format, ... ) { UInt ret; va_list vargs; va_start(vargs,format); n_myprintf_buf = 0; myprintf_buf[n_myprintf_buf] = 0; ret = vprintf_wrk ( add_to_myprintf_buf, format, vargs ); if (n_myprintf_buf > 0) { (*vex_log_bytes)( myprintf_buf, n_myprintf_buf ); } va_end(vargs); return ret; } /*---------------------------------------------------------------*/ /*--- end vex_util.c ---*/ /*---------------------------------------------------------------*/ ///////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////// /*-------------------------------------------------------------*/ /*--- Decompression machinery ---*/ /*--- decompress.c ---*/ /*-------------------------------------------------------------*/ /*-- This file is a part of bzip2 and/or libbzip2, a program and library for lossless, block-sorting data compression. Copyright (C) 1996-2004 Julian R Seward. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. 
The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 3. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 4. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Julian Seward, Cambridge, UK. jseward@bzip.org bzip2/libbzip2 version 1.0 of 21 March 2000 This program is based on (at least) the work of: Mike Burrows David Wheeler Peter Fenwick Alistair Moffat Radford Neal Ian H. Witten Robert Sedgewick Jon L. Bentley For more information on these sources, see the manual. --*/ /*---------------------------------------------------*/ static void makeMaps_d ( DState* s ) { Int32 i; s->nInUse = 0; for (i = 0; i < 256; i++) if (s->inUse[i]) { s->seqToUnseq[s->nInUse] = i; s->nInUse++; } } /*---------------------------------------------------*/ #define RETURN(rrr) \ { retVal = rrr; goto save_state_and_return; }; #define GET_BITS(lll,vvv,nnn) \ case lll: s->state = lll; \ while (True) { \ if (s->bsLive >= nnn) { \ UInt32 v; \ v = (s->bsBuff >> \ (s->bsLive-nnn)) & ((1 << nnn)-1); \ s->bsLive -= nnn; \ vvv = v; \ break; \ } \ if (s->strm->avail_in == 0) RETURN(BZ_OK); \ s->bsBuff \ = (s->bsBuff << 8) | \ ((UInt32) \ (*((UChar*)(s->strm->next_in)))); \ s->bsLive += 8; \ s->strm->next_in++; \ s->strm->avail_in--; \ s->strm->total_in_lo32++; \ if (s->strm->total_in_lo32 == 0) \ s->strm->total_in_hi32++; \ } #define GET_UCHAR(lll,uuu) \ GET_BITS(lll,uuu,8) #define GET_BIT(lll,uuu) \ GET_BITS(lll,uuu,1) /*---------------------------------------------------*/ #define GET_MTF_VAL(label1,label2,lval) \ { \ if (groupPos == 0) { \ groupNo++; \ if (groupNo >= nSelectors) \ RETURN(BZ_DATA_ERROR); \ groupPos = BZ_G_SIZE; \ gSel = s->selector[groupNo]; \ gMinlen = s->minLens[gSel]; \ gLimit = &(s->limit[gSel][0]); \ gPerm = &(s->perm[gSel][0]); \ gBase = &(s->base[gSel][0]); \ } \ groupPos--; \ zn = gMinlen; \ GET_BITS(label1, zvec, zn); \ while (1) { \ if (zn > 20 /* the longest code */) \ RETURN(BZ_DATA_ERROR); \ if (zvec <= gLimit[zn]) break; \ zn++; \ GET_BIT(label2, zj); \ zvec = (zvec << 1) | zj; \ }; \ if (zvec - gBase[zn] < 0 \ || zvec - gBase[zn] >= BZ_MAX_ALPHA_SIZE) \ RETURN(BZ_DATA_ERROR); \ lval = gPerm[zvec - gBase[zn]]; \ } /*---------------------------------------------------*/ Int32 BZ2_indexIntoF ( Int32 indx, Int32 *cftab ) { Int32 nb, na, mid; nb = 0; na = 256; do { mid = (nb + na) >> 1; if (indx >= cftab[mid]) nb = mid; else na = mid; } while (na - nb != 1); return nb; } /*---------------------------------------------------*/ Int32 BZ2_decompress ( DState* s ) { 
UChar uc; Int32 retVal; Int32 minLen, maxLen; bz_stream* strm = s->strm; /* stuff that needs to be saved/restored */ Int32 i; Int32 j; Int32 t; Int32 alphaSize; Int32 nGroups; Int32 nSelectors; Int32 EOB; Int32 groupNo; Int32 groupPos; Int32 nextSym; Int32 nblockMAX; Int32 nblock; Int32 es; Int32 N; Int32 curr; Int32 zt; Int32 zn; Int32 zvec; Int32 zj; Int32 gSel; Int32 gMinlen; Int32* gLimit; Int32* gBase; Int32* gPerm; if (s->state == BZ_X_MAGIC_1) { /*initialise the save area*/ s->save_i = 0; s->save_j = 0; s->save_t = 0; s->save_alphaSize = 0; s->save_nGroups = 0; s->save_nSelectors = 0; s->save_EOB = 0; s->save_groupNo = 0; s->save_groupPos = 0; s->save_nextSym = 0; s->save_nblockMAX = 0; s->save_nblock = 0; s->save_es = 0; s->save_N = 0; s->save_curr = 0; s->save_zt = 0; s->save_zn = 0; s->save_zvec = 0; s->save_zj = 0; s->save_gSel = 0; s->save_gMinlen = 0; s->save_gLimit = NULL; s->save_gBase = NULL; s->save_gPerm = NULL; } /*restore from the save area*/ i = s->save_i; j = s->save_j; t = s->save_t; alphaSize = s->save_alphaSize; nGroups = s->save_nGroups; nSelectors = s->save_nSelectors; EOB = s->save_EOB; groupNo = s->save_groupNo; groupPos = s->save_groupPos; nextSym = s->save_nextSym; nblockMAX = s->save_nblockMAX; nblock = s->save_nblock; es = s->save_es; N = s->save_N; curr = s->save_curr; zt = s->save_zt; zn = s->save_zn; zvec = s->save_zvec; zj = s->save_zj; gSel = s->save_gSel; gMinlen = s->save_gMinlen; gLimit = s->save_gLimit; gBase = s->save_gBase; gPerm = s->save_gPerm; retVal = BZ_OK; switch (s->state) { GET_UCHAR(BZ_X_MAGIC_1, uc); if (uc != BZ_HDR_B) RETURN(BZ_DATA_ERROR_MAGIC); GET_UCHAR(BZ_X_MAGIC_2, uc); if (uc != BZ_HDR_Z) RETURN(BZ_DATA_ERROR_MAGIC); GET_UCHAR(BZ_X_MAGIC_3, uc) if (uc != BZ_HDR_h) RETURN(BZ_DATA_ERROR_MAGIC); GET_BITS(BZ_X_MAGIC_4, s->blockSize100k, 8) if (s->blockSize100k < (BZ_HDR_0 + 1) || s->blockSize100k > (BZ_HDR_0 + 9)) RETURN(BZ_DATA_ERROR_MAGIC); s->blockSize100k -= BZ_HDR_0; if (s->smallDecompress) { s->ll16 = BZALLOC( s->blockSize100k * 100000 * sizeof(UInt16) ); s->ll4 = BZALLOC( ((1 + s->blockSize100k * 100000) >> 1) * sizeof(UChar) ); if (s->ll16 == NULL || s->ll4 == NULL) RETURN(BZ_MEM_ERROR); } else { s->tt = BZALLOC( s->blockSize100k * 100000 * sizeof(Int32) ); if (s->tt == NULL) RETURN(BZ_MEM_ERROR); } GET_UCHAR(BZ_X_BLKHDR_1, uc); if (uc == 0x17) goto endhdr_2; if (uc != 0x31) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_BLKHDR_2, uc); if (uc != 0x41) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_BLKHDR_3, uc); if (uc != 0x59) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_BLKHDR_4, uc); if (uc != 0x26) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_BLKHDR_5, uc); if (uc != 0x53) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_BLKHDR_6, uc); if (uc != 0x59) RETURN(BZ_DATA_ERROR); s->currBlockNo++; if (s->verbosity >= 2) VPrintf1 ( "\n [%d: huff+mtf ", s->currBlockNo ); s->storedBlockCRC = 0; GET_UCHAR(BZ_X_BCRC_1, uc); s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_BCRC_2, uc); s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_BCRC_3, uc); s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_BCRC_4, uc); s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc); GET_BITS(BZ_X_RANDBIT, s->blockRandomised, 1); s->origPtr = 0; GET_UCHAR(BZ_X_ORIGPTR_1, uc); s->origPtr = (s->origPtr << 8) | ((Int32)uc); GET_UCHAR(BZ_X_ORIGPTR_2, uc); s->origPtr = (s->origPtr << 8) | ((Int32)uc); GET_UCHAR(BZ_X_ORIGPTR_3, uc); s->origPtr = (s->origPtr << 8) | ((Int32)uc); if (s->origPtr < 0) 
RETURN(BZ_DATA_ERROR); if (s->origPtr > 10 + 100000*s->blockSize100k) RETURN(BZ_DATA_ERROR); /*--- Receive the mapping table ---*/ for (i = 0; i < 16; i++) { GET_BIT(BZ_X_MAPPING_1, uc); if (uc == 1) s->inUse16[i] = True; else s->inUse16[i] = False; } for (i = 0; i < 256; i++) s->inUse[i] = False; for (i = 0; i < 16; i++) if (s->inUse16[i]) for (j = 0; j < 16; j++) { GET_BIT(BZ_X_MAPPING_2, uc); if (uc == 1) s->inUse[i * 16 + j] = True; } makeMaps_d ( s ); if (s->nInUse == 0) RETURN(BZ_DATA_ERROR); alphaSize = s->nInUse+2; /*--- Now the selectors ---*/ GET_BITS(BZ_X_SELECTOR_1, nGroups, 3); if (nGroups < 2 || nGroups > 6) RETURN(BZ_DATA_ERROR); GET_BITS(BZ_X_SELECTOR_2, nSelectors, 15); if (nSelectors < 1) RETURN(BZ_DATA_ERROR); for (i = 0; i < nSelectors; i++) { j = 0; while (True) { GET_BIT(BZ_X_SELECTOR_3, uc); if (uc == 0) break; croak( 2 + (char*)&i ); j++; if (j >= nGroups) RETURN(BZ_DATA_ERROR); } s->selectorMtf[i] = j; } /*--- Undo the MTF values for the selectors. ---*/ { UChar pos[BZ_N_GROUPS], tmp, v; for (v = 0; v < nGroups; v++) pos[v] = v; for (i = 0; i < nSelectors; i++) { v = s->selectorMtf[i]; tmp = pos[v]; while (v > 0) { pos[v] = pos[v-1]; v--; } pos[0] = tmp; s->selector[i] = tmp; } } /*--- Now the coding tables ---*/ for (t = 0; t < nGroups; t++) { GET_BITS(BZ_X_CODING_1, curr, 5); for (i = 0; i < alphaSize; i++) { while (True) { if (curr < 1 || curr > 20) RETURN(BZ_DATA_ERROR); GET_BIT(BZ_X_CODING_2, uc); if (uc == 0) break; GET_BIT(BZ_X_CODING_3, uc); if (uc == 0) curr++; else curr--; } s->len[t][i] = curr; } } /*--- Create the Huffman decoding tables ---*/ for (t = 0; t < nGroups; t++) { minLen = 32; maxLen = 0; for (i = 0; i < alphaSize; i++) { if (s->len[t][i] > maxLen) maxLen = s->len[t][i]; if (s->len[t][i] < minLen) minLen = s->len[t][i]; } BZ2_hbCreateDecodeTables ( &(s->limit[t][0]), &(s->base[t][0]), &(s->perm[t][0]), &(s->len[t][0]), minLen, maxLen, alphaSize ); s->minLens[t] = minLen; } /*--- Now the MTF values ---*/ EOB = s->nInUse+1; nblockMAX = 100000 * s->blockSize100k; groupNo = -1; groupPos = 0; for (i = 0; i <= 255; i++) s->unzftab[i] = 0; /*-- MTF init --*/ { Int32 ii, jj, kk; kk = MTFA_SIZE-1; for (ii = 256 / MTFL_SIZE - 1; ii >= 0; ii--) { for (jj = MTFL_SIZE-1; jj >= 0; jj--) { s->mtfa[kk] = (UChar)(ii * MTFL_SIZE + jj); kk--; } s->mtfbase[ii] = kk + 1; } } /*-- end MTF init --*/ nblock = 0; GET_MTF_VAL(BZ_X_MTF_1, BZ_X_MTF_2, nextSym); while (True) { if (nextSym == EOB) break; if (nextSym == BZ_RUNA || nextSym == BZ_RUNB) { es = -1; N = 1; do { if (nextSym == BZ_RUNA) es = es + (0+1) * N; else if (nextSym == BZ_RUNB) es = es + (1+1) * N; N = N * 2; GET_MTF_VAL(BZ_X_MTF_3, BZ_X_MTF_4, nextSym); } while (nextSym == BZ_RUNA || nextSym == BZ_RUNB); es++; uc = s->seqToUnseq[ s->mtfa[s->mtfbase[0]] ]; s->unzftab[uc] += es; if (s->smallDecompress) while (es > 0) { if (nblock >= nblockMAX) RETURN(BZ_DATA_ERROR); s->ll16[nblock] = (UInt16)uc; nblock++; es--; } else while (es > 0) { if (nblock >= nblockMAX) RETURN(BZ_DATA_ERROR); s->tt[nblock] = (UInt32)uc; nblock++; es--; }; continue; } else { if (nblock >= nblockMAX) RETURN(BZ_DATA_ERROR); /*-- uc = MTF ( nextSym-1 ) --*/ { Int32 ii, jj, kk, pp, lno, off; UInt32 nn; nn = (UInt32)(nextSym - 1); if (nn < MTFL_SIZE) { /* avoid general-case expense */ pp = s->mtfbase[0]; uc = s->mtfa[pp+nn]; while (nn > 3) { Int32 z = pp+nn; s->mtfa[(z) ] = s->mtfa[(z)-1]; s->mtfa[(z)-1] = s->mtfa[(z)-2]; s->mtfa[(z)-2] = s->mtfa[(z)-3]; s->mtfa[(z)-3] = s->mtfa[(z)-4]; nn -= 4; } while (nn > 0) { s->mtfa[(pp+nn)] = 
s->mtfa[(pp+nn)-1]; nn--; }; s->mtfa[pp] = uc; } else { /* general case */ lno = nn / MTFL_SIZE; off = nn % MTFL_SIZE; pp = s->mtfbase[lno] + off; uc = s->mtfa[pp]; while (pp > s->mtfbase[lno]) { s->mtfa[pp] = s->mtfa[pp-1]; pp--; }; s->mtfbase[lno]++; while (lno > 0) { s->mtfbase[lno]--; s->mtfa[s->mtfbase[lno]] = s->mtfa[s->mtfbase[lno-1] + MTFL_SIZE - 1]; lno--; } s->mtfbase[0]--; s->mtfa[s->mtfbase[0]] = uc; if (s->mtfbase[0] == 0) { kk = MTFA_SIZE-1; for (ii = 256 / MTFL_SIZE-1; ii >= 0; ii--) { for (jj = MTFL_SIZE-1; jj >= 0; jj--) { s->mtfa[kk] = s->mtfa[s->mtfbase[ii] + jj]; kk--; } s->mtfbase[ii] = kk + 1; } } } } /*-- end uc = MTF ( nextSym-1 ) --*/ s->unzftab[s->seqToUnseq[uc]]++; if (s->smallDecompress) s->ll16[nblock] = (UInt16)(s->seqToUnseq[uc]); else s->tt[nblock] = (UInt32)(s->seqToUnseq[uc]); nblock++; GET_MTF_VAL(BZ_X_MTF_5, BZ_X_MTF_6, nextSym); continue; } } /* Now we know what nblock is, we can do a better sanity check on s->origPtr. */ if (s->origPtr < 0 || s->origPtr >= nblock) RETURN(BZ_DATA_ERROR); /*-- Set up cftab to facilitate generation of T^(-1) --*/ s->cftab[0] = 0; for (i = 1; i <= 256; i++) s->cftab[i] = s->unzftab[i-1]; for (i = 1; i <= 256; i++) s->cftab[i] += s->cftab[i-1]; for (i = 0; i <= 256; i++) { if (s->cftab[i] < 0 || s->cftab[i] > nblock) { /* s->cftab[i] can legitimately be == nblock */ RETURN(BZ_DATA_ERROR); } } s->state_out_len = 0; s->state_out_ch = 0; BZ_INITIALISE_CRC ( s->calculatedBlockCRC ); s->state = BZ_X_OUTPUT; if (s->verbosity >= 2) VPrintf0 ( "rt+rld" ); if (s->smallDecompress) { /*-- Make a copy of cftab, used in generation of T --*/ for (i = 0; i <= 256; i++) s->cftabCopy[i] = s->cftab[i]; /*-- compute the T vector --*/ for (i = 0; i < nblock; i++) { uc = (UChar)(s->ll16[i]); SET_LL(i, s->cftabCopy[uc]); s->cftabCopy[uc]++; } /*-- Compute T^(-1) by pointer reversal on T --*/ i = s->origPtr; j = GET_LL(i); do { Int32 tmp = GET_LL(j); SET_LL(j, i); i = j; j = tmp; } while (i != s->origPtr); s->tPos = s->origPtr; s->nblock_used = 0; if (s->blockRandomised) { BZ_RAND_INIT_MASK; BZ_GET_SMALL(s->k0); s->nblock_used++; BZ_RAND_UPD_MASK; s->k0 ^= BZ_RAND_MASK; } else { BZ_GET_SMALL(s->k0); s->nblock_used++; } } else { /*-- compute the T^(-1) vector --*/ for (i = 0; i < nblock; i++) { uc = (UChar)(s->tt[i] & 0xff); s->tt[s->cftab[uc]] |= (i << 8); s->cftab[uc]++; } s->tPos = s->tt[s->origPtr] >> 8; s->nblock_used = 0; if (s->blockRandomised) { BZ_RAND_INIT_MASK; BZ_GET_FAST(s->k0); s->nblock_used++; BZ_RAND_UPD_MASK; s->k0 ^= BZ_RAND_MASK; } else { BZ_GET_FAST(s->k0); s->nblock_used++; } } RETURN(BZ_OK); endhdr_2: GET_UCHAR(BZ_X_ENDHDR_2, uc); if (uc != 0x72) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_ENDHDR_3, uc); if (uc != 0x45) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_ENDHDR_4, uc); if (uc != 0x38) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_ENDHDR_5, uc); if (uc != 0x50) RETURN(BZ_DATA_ERROR); GET_UCHAR(BZ_X_ENDHDR_6, uc); if (uc != 0x90) RETURN(BZ_DATA_ERROR); s->storedCombinedCRC = 0; GET_UCHAR(BZ_X_CCRC_1, uc); s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_CCRC_2, uc); s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_CCRC_3, uc); s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc); GET_UCHAR(BZ_X_CCRC_4, uc); s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc); s->state = BZ_X_IDLE; RETURN(BZ_STREAM_END); default: AssertH ( False, 4001 ); } AssertH ( False, 4002 ); save_state_and_return: s->save_i = i; s->save_j = j; s->save_t = t; 
s->save_alphaSize = alphaSize; s->save_nGroups = nGroups; s->save_nSelectors = nSelectors; s->save_EOB = EOB; s->save_groupNo = groupNo; s->save_groupPos = groupPos; s->save_nextSym = nextSym; s->save_nblockMAX = nblockMAX; s->save_nblock = nblock; s->save_es = es; s->save_N = N; s->save_curr = curr; s->save_zt = zt; s->save_zn = zn; s->save_zvec = zvec; s->save_zj = zj; s->save_gSel = gSel; s->save_gMinlen = gMinlen; s->save_gLimit = gLimit; s->save_gBase = gBase; s->save_gPerm = gPerm; return retVal; } /*-------------------------------------------------------------*/ /*--- end decompress.c ---*/ /*-------------------------------------------------------------*/ /*-------------------------------------------------------------*/ /*--- Block sorting machinery ---*/ /*--- blocksort.c ---*/ /*-------------------------------------------------------------*/ /*-- This file is a part of bzip2 and/or libbzip2, a program and library for lossless, block-sorting data compression. Copyright (C) 1996-2004 Julian R Seward. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 3. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 4. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Julian Seward, Cambridge, UK. jseward@bzip.org bzip2/libbzip2 version 1.0 of 21 March 2000 This program is based on (at least) the work of: Mike Burrows David Wheeler Peter Fenwick Alistair Moffat Radford Neal Ian H. Witten Robert Sedgewick Jon L. Bentley For more information on these sources, see the manual. To get some idea how the block sorting algorithms in this file work, read my paper On the Performance of BWT Sorting Algorithms in Proceedings of the IEEE Data Compression Conference 2000, Snowbird, Utah, USA, 27-30 March 2000. The main sort in this file implements the algorithm called cache in the paper. 
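
   To make the above concrete (a toy illustration, not taken from
   this file): the machinery below sorts all N rotations of the
   block, which is the expensive half of the Burrows-Wheeler
   transform.  On completion ptr[0 .. nblock-1] holds the rotation
   start indices in lexicographic order, and the final column of
   the sorted rotation matrix (block[ptr[i]-1], taken cyclically)
   is what the rest of the compressor transmits.  For the block
   "banana":

      sorted rotation   start index   final column
      abanan            5             n
      anaban            3             n
      ananab            1             b
      banana            0             a
      nabana            4             a
      nanaba            2             n

   so ptr = {5,3,1,0,4,2}, the transmitted column is "nnbaaa",
   and origPtr = 3, the row where ptr[i] == 0, exactly as
   computed at the end of BZ2_blockSort below.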
--*/ /*---------------------------------------------*/ /*--- Fallback O(N log(N)^2) sorting ---*/ /*--- algorithm, for repetitive blocks ---*/ /*---------------------------------------------*/ /*---------------------------------------------*/ static void fallbackSimpleSort ( UInt32* fmap, UInt32* eclass, Int32 lo, Int32 hi ) { Int32 i, j, tmp; UInt32 ec_tmp; if (lo == hi) return; if (hi - lo > 3) { for ( i = hi-4; i >= lo; i-- ) { tmp = fmap[i]; ec_tmp = eclass[tmp]; for ( j = i+4; j <= hi && ec_tmp > eclass[fmap[j]]; j += 4 ) fmap[j-4] = fmap[j]; fmap[j-4] = tmp; } } for ( i = hi-1; i >= lo; i-- ) { tmp = fmap[i]; ec_tmp = eclass[tmp]; for ( j = i+1; j <= hi && ec_tmp > eclass[fmap[j]]; j++ ) fmap[j-1] = fmap[j]; fmap[j-1] = tmp; } } /*---------------------------------------------*/ #define fswap(zz1, zz2) \ { Int32 zztmp = zz1; zz1 = zz2; zz2 = zztmp; } #define fvswap(zzp1, zzp2, zzn) \ { \ Int32 yyp1 = (zzp1); \ Int32 yyp2 = (zzp2); \ Int32 yyn = (zzn); \ while (yyn > 0) { \ fswap(fmap[yyp1], fmap[yyp2]); \ yyp1++; yyp2++; yyn--; \ } \ } #define fmin(a,b) ((a) < (b)) ? (a) : (b) #define fpush(lz,hz) { stackLo[sp] = lz; \ stackHi[sp] = hz; \ sp++; } #define fpop(lz,hz) { sp--; \ lz = stackLo[sp]; \ hz = stackHi[sp]; } #define FALLBACK_QSORT_SMALL_THRESH 10 #define FALLBACK_QSORT_STACK_SIZE 100 static void fallbackQSort3 ( UInt32* fmap, UInt32* eclass, Int32 loSt, Int32 hiSt ) { Int32 unLo, unHi, ltLo, gtHi, n, m; Int32 sp, lo, hi; UInt32 med, r, r3; Int32 stackLo[FALLBACK_QSORT_STACK_SIZE]; Int32 stackHi[FALLBACK_QSORT_STACK_SIZE]; r = 0; sp = 0; fpush ( loSt, hiSt ); while (sp > 0) { AssertH ( sp < FALLBACK_QSORT_STACK_SIZE, 1004 ); fpop ( lo, hi ); if (hi - lo < FALLBACK_QSORT_SMALL_THRESH) { fallbackSimpleSort ( fmap, eclass, lo, hi ); continue; } /* Random partitioning. Median of 3 sometimes fails to avoid bad cases. Median of 9 seems to help but looks rather expensive. This too seems to work but is cheaper. Guidance for the magic constants 7621 and 32768 is taken from Sedgewick's algorithms book, chapter 35. */ r = ((r * 7621) + 1) % 32768; r3 = r % 3; if (r3 == 0) med = eclass[fmap[lo]]; else if (r3 == 1) med = eclass[fmap[(lo+hi)>>1]]; else med = eclass[fmap[hi]]; unLo = ltLo = lo; unHi = gtHi = hi; while (1) { while (1) { if (unLo > unHi) break; n = (Int32)eclass[fmap[unLo]] - (Int32)med; if (n == 0) { fswap(fmap[unLo], fmap[ltLo]); ltLo++; unLo++; continue; }; if (n > 0) break; unLo++; } while (1) { if (unLo > unHi) break; n = (Int32)eclass[fmap[unHi]] - (Int32)med; if (n == 0) { fswap(fmap[unHi], fmap[gtHi]); gtHi--; unHi--; continue; }; if (n < 0) break; unHi--; } if (unLo > unHi) break; fswap(fmap[unLo], fmap[unHi]); unLo++; unHi--; } AssertD ( unHi == unLo-1, "fallbackQSort3(2)" ); if (gtHi < ltLo) continue; n = fmin(ltLo-lo, unLo-ltLo); fvswap(lo, unLo-n, n); m = fmin(hi-gtHi, gtHi-unHi); fvswap(unLo, hi-m+1, m); n = lo + unLo - ltLo - 1; m = hi - (gtHi - unHi) + 1; if (n - lo > hi - m) { fpush ( lo, n ); fpush ( m, hi ); } else { fpush ( m, hi ); fpush ( lo, n ); } } } #undef fmin #undef fpush #undef fpop #undef fswap #undef fvswap #undef FALLBACK_QSORT_SMALL_THRESH #undef FALLBACK_QSORT_STACK_SIZE /*---------------------------------------------*/ /* Pre: nblock > 0 eclass exists for [0 .. nblock-1] ((UChar*)eclass) [0 .. nblock-1] holds block ptr exists for [0 .. nblock-1] Post: ((UChar*)eclass) [0 .. nblock-1] holds block All other areas of eclass destroyed fmap [0 .. nblock-1] holds sorted order bhtab [ 0 .. 
2+(nblock/32) ] destroyed */ #define SET_BH(zz) bhtab[(zz) >> 5] |= (1 << ((zz) & 31)) #define CLEAR_BH(zz) bhtab[(zz) >> 5] &= ~(1 << ((zz) & 31)) #define ISSET_BH(zz) (bhtab[(zz) >> 5] & (1 << ((zz) & 31))) #define WORD_BH(zz) bhtab[(zz) >> 5] #define UNALIGNED_BH(zz) ((zz) & 0x01f) static void fallbackSort ( UInt32* fmap, UInt32* eclass, UInt32* bhtab, Int32 nblock, Int32 verb ) { Int32 ftab[257]; Int32 ftabCopy[256]; Int32 H, i, j, k, l, r, cc, cc1; Int32 nNotDone; Int32 nBhtab; UChar* eclass8 = (UChar*)eclass; /*-- Initial 1-char radix sort to generate initial fmap and initial BH bits. --*/ if (verb >= 4) VPrintf0 ( " bucket sorting ...\n" ); for (i = 0; i < 257; i++) ftab[i] = 0; for (i = 0; i < nblock; i++) ftab[eclass8[i]]++; for (i = 0; i < 256; i++) ftabCopy[i] = ftab[i]; for (i = 1; i < 257; i++) ftab[i] += ftab[i-1]; for (i = 0; i < nblock; i++) { j = eclass8[i]; k = ftab[j] - 1; ftab[j] = k; fmap[k] = i; } nBhtab = 2 + (nblock / 32); for (i = 0; i < nBhtab; i++) bhtab[i] = 0; for (i = 0; i < 256; i++) SET_BH(ftab[i]); /*-- Inductively refine the buckets. Kind-of an "exponential radix sort" (!), inspired by the Manber-Myers suffix array construction algorithm. --*/ /*-- set sentinel bits for block-end detection --*/ for (i = 0; i < 32; i++) { SET_BH(nblock + 2*i); CLEAR_BH(nblock + 2*i + 1); } /*-- the log(N) loop --*/ H = 1; while (1) { if (verb >= 4) VPrintf1 ( " depth %6d has ", H ); j = 0; for (i = 0; i < nblock; i++) { if (ISSET_BH(i)) j = i; k = fmap[i] - H; if (k < 0) k += nblock; eclass[k] = j; } nNotDone = 0; r = -1; while (1) { /*-- find the next non-singleton bucket --*/ k = r + 1; while (ISSET_BH(k) && UNALIGNED_BH(k)) k++; if (ISSET_BH(k)) { while (WORD_BH(k) == 0xffffffff) k += 32; while (ISSET_BH(k)) k++; } l = k - 1; if (l >= nblock) break; while (!ISSET_BH(k) && UNALIGNED_BH(k)) k++; if (!ISSET_BH(k)) { while (WORD_BH(k) == 0x00000000) k += 32; while (!ISSET_BH(k)) k++; } r = k - 1; if (r >= nblock) break; /*-- now [l, r] bracket current bucket --*/ if (r > l) { nNotDone += (r - l + 1); fallbackQSort3 ( fmap, eclass, l, r ); /*-- scan bucket and generate header bits-- */ cc = -1; for (i = l; i <= r; i++) { cc1 = eclass[fmap[i]]; if (cc != cc1) { SET_BH(i); cc = cc1; }; } } } if (verb >= 4) VPrintf1 ( "%6d unresolved strings\n", nNotDone ); H *= 2; if (H > nblock || nNotDone == 0) break; } /*-- Reconstruct the original block in eclass8 [0 .. nblock-1], since the previous phase destroyed it. --*/ if (verb >= 4) VPrintf0 ( " reconstructing block ...\n" ); j = 0; for (i = 0; i < nblock; i++) { while (ftabCopy[j] == 0) j++; ftabCopy[j]--; eclass8[fmap[i]] = (UChar)j; } AssertH ( j < 256, 1005 ); } #undef SET_BH #undef CLEAR_BH #undef ISSET_BH #undef WORD_BH #undef UNALIGNED_BH /*---------------------------------------------*/ /*--- The main, O(N^2 log(N)) sorting ---*/ /*--- algorithm. Faster for "normal" ---*/ /*--- non-repetitive blocks. 
---*/ /*---------------------------------------------*/ /*---------------------------------------------*/ static Bool mainGtU ( UInt32 i1, UInt32 i2, UChar* block, UInt16* quadrant, UInt32 nblock, Int32* budget ) { Int32 k; UChar c1, c2; UInt16 s1, s2; AssertD ( i1 != i2, "mainGtU" ); /* 1 */ c1 = block[i1]; c2 = block[i2]; if (c1 != c2) return (c1 > c2); i1++; i2++; /* 2 */ c1 = block[i1]; c2 = block[i2]; if (c1 != c2) return (c1 > c2); i1++; i2++; /* 3 */ c1 = block[i1]; c2 = block[i2]; if (c1 != c2) return (c1 > c2); i1++; i2++; /* 4 */ c1 = block[i1]; c2 = block[i2]; if (c1 != c2) return (c1 > c2); i1++; i2++; /* 5 */ c1 = block[i1]; c2 = block[i2]; if (c1 != c2) return (c1 > c2); i1++; i2++; /* 6 */ c1 = block[i1]; c2 = block[i2]; if (c1 != c2) return (c1 > c2); i1++; i2++; /* 7 */ c1 = block[i1]; c2 = block[i2]; if (c1 != c2) return (c1 > c2); i1++; i2++; /* 8 */ c1 = block[i1]; c2 = block[i2]; if (c1 != c2) return (c1 > c2); i1++; i2++; /* 9 */ c1 = block[i1]; c2 = block[i2]; if (c1 != c2) return (c1 > c2); i1++; i2++; /* 10 */ c1 = block[i1]; c2 = block[i2]; if (c1 != c2) return (c1 > c2); i1++; i2++; /* 11 */ c1 = block[i1]; c2 = block[i2]; if (c1 != c2) return (c1 > c2); i1++; i2++; /* 12 */ c1 = block[i1]; c2 = block[i2]; if (c1 != c2) return (c1 > c2); i1++; i2++; k = nblock + 8; do { /* 1 */ c1 = block[i1]; c2 = block[i2]; if (c1 != c2) return (c1 > c2); s1 = quadrant[i1]; s2 = quadrant[i2]; if (s1 != s2) return (s1 > s2); i1++; i2++; /* 2 */ c1 = block[i1]; c2 = block[i2]; if (c1 != c2) return (c1 > c2); s1 = quadrant[i1]; s2 = quadrant[i2]; if (s1 != s2) return (s1 > s2); i1++; i2++; /* 3 */ c1 = block[i1]; c2 = block[i2]; if (c1 != c2) return (c1 > c2); s1 = quadrant[i1]; s2 = quadrant[i2]; if (s1 != s2) return (s1 > s2); i1++; i2++; /* 4 */ c1 = block[i1]; c2 = block[i2]; if (c1 != c2) return (c1 > c2); s1 = quadrant[i1]; s2 = quadrant[i2]; if (s1 != s2) return (s1 > s2); i1++; i2++; /* 5 */ c1 = block[i1]; c2 = block[i2]; if (c1 != c2) return (c1 > c2); s1 = quadrant[i1]; s2 = quadrant[i2]; if (s1 != s2) return (s1 > s2); i1++; i2++; /* 6 */ c1 = block[i1]; c2 = block[i2]; if (c1 != c2) return (c1 > c2); s1 = quadrant[i1]; s2 = quadrant[i2]; if (s1 != s2) return (s1 > s2); i1++; i2++; /* 7 */ c1 = block[i1]; c2 = block[i2]; if (c1 != c2) return (c1 > c2); s1 = quadrant[i1]; s2 = quadrant[i2]; if (s1 != s2) return (s1 > s2); i1++; i2++; /* 8 */ c1 = block[i1]; c2 = block[i2]; if (c1 != c2) return (c1 > c2); s1 = quadrant[i1]; s2 = quadrant[i2]; if (s1 != s2) return (s1 > s2); i1++; i2++; if (i1 >= nblock) i1 -= nblock; if (i2 >= nblock) i2 -= nblock; k -= 8; (*budget)--; } while (k >= 0); return False; } /*---------------------------------------------*/ /*-- Knuth's increments seem to work better than Incerpi-Sedgewick here. Possibly because the number of elems to sort is usually small, typically <= 20. 
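
   For reference, the table below follows Knuth's recurrence
   h(1) = 1, h(k+1) = 3*h(k) + 1, so it could equally well be
   generated at startup.  A minimal sketch, assuming only that
   fourteen entries are wanted (illustrative, not part of
   libbzip2):

      Int32 h, n;
      for (h = 1, n = 0; n < 14; n++) { incs[n] = h; h = 3*h + 1; }

   which reproduces 1, 4, 13, 40, ..., 2391484 exactly.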
--*/ static Int32 incs[14] = { 1, 4, 13, 40, 121, 364, 1093, 3280, 9841, 29524, 88573, 265720, 797161, 2391484 }; static void mainSimpleSort ( UInt32* ptr, UChar* block, UInt16* quadrant, Int32 nblock, Int32 lo, Int32 hi, Int32 d, Int32* budget ) { Int32 i, j, h, bigN, hp; UInt32 v; bigN = hi - lo + 1; if (bigN < 2) return; hp = 0; while (incs[hp] < bigN) hp++; hp--; for (; hp >= 0; hp--) { h = incs[hp]; i = lo + h; while (True) { /*-- copy 1 --*/ if (i > hi) break; v = ptr[i]; j = i; while ( mainGtU ( ptr[j-h]+d, v+d, block, quadrant, nblock, budget ) ) { ptr[j] = ptr[j-h]; j = j - h; if (j <= (lo + h - 1)) break; } ptr[j] = v; i++; /*-- copy 2 --*/ if (i > hi) break; v = ptr[i]; j = i; while ( mainGtU ( ptr[j-h]+d, v+d, block, quadrant, nblock, budget ) ) { ptr[j] = ptr[j-h]; j = j - h; if (j <= (lo + h - 1)) break; } ptr[j] = v; i++; /*-- copy 3 --*/ if (i > hi) break; v = ptr[i]; j = i; while ( mainGtU ( ptr[j-h]+d, v+d, block, quadrant, nblock, budget ) ) { ptr[j] = ptr[j-h]; j = j - h; if (j <= (lo + h - 1)) break; } ptr[j] = v; i++; if (*budget < 0) return; } } } /*---------------------------------------------*/ /*-- The following is an implementation of an elegant 3-way quicksort for strings, described in a paper "Fast Algorithms for Sorting and Searching Strings", by Robert Sedgewick and Jon L. Bentley. --*/ #define mswap(zz1, zz2) \ { Int32 zztmp = zz1; zz1 = zz2; zz2 = zztmp; } #define mvswap(zzp1, zzp2, zzn) \ { \ Int32 yyp1 = (zzp1); \ Int32 yyp2 = (zzp2); \ Int32 yyn = (zzn); \ while (yyn > 0) { \ mswap(ptr[yyp1], ptr[yyp2]); \ yyp1++; yyp2++; yyn--; \ } \ } static UChar mmed3 ( UChar a, UChar b, UChar c ) { UChar t; if (a > b) { t = a; a = b; b = t; }; if (b > c) { b = c; if (a > b) b = a; } return b; } #define mmin(a,b) ((a) < (b)) ? 
(a) : (b) #define mpush(lz,hz,dz) { stackLo[sp] = lz; \ stackHi[sp] = hz; \ stackD [sp] = dz; \ sp++; } #define mpop(lz,hz,dz) { sp--; \ lz = stackLo[sp]; \ hz = stackHi[sp]; \ dz = stackD [sp]; } #define mnextsize(az) (nextHi[az]-nextLo[az]) #define mnextswap(az,bz) \ { Int32 tz; \ tz = nextLo[az]; nextLo[az] = nextLo[bz]; nextLo[bz] = tz; \ tz = nextHi[az]; nextHi[az] = nextHi[bz]; nextHi[bz] = tz; \ tz = nextD [az]; nextD [az] = nextD [bz]; nextD [bz] = tz; } #define MAIN_QSORT_SMALL_THRESH 20 #define MAIN_QSORT_DEPTH_THRESH (BZ_N_RADIX + BZ_N_QSORT) #define MAIN_QSORT_STACK_SIZE 100 static void mainQSort3 ( UInt32* ptr, UChar* block, UInt16* quadrant, Int32 nblock, Int32 loSt, Int32 hiSt, Int32 dSt, Int32* budget ) { Int32 unLo, unHi, ltLo, gtHi, n, m, med; Int32 sp, lo, hi, d; Int32 stackLo[MAIN_QSORT_STACK_SIZE]; Int32 stackHi[MAIN_QSORT_STACK_SIZE]; Int32 stackD [MAIN_QSORT_STACK_SIZE]; Int32 nextLo[3]; Int32 nextHi[3]; Int32 nextD [3]; sp = 0; mpush ( loSt, hiSt, dSt ); while (sp > 0) { AssertH ( sp < MAIN_QSORT_STACK_SIZE, 1001 ); mpop ( lo, hi, d ); if (hi - lo < MAIN_QSORT_SMALL_THRESH || d > MAIN_QSORT_DEPTH_THRESH) { mainSimpleSort ( ptr, block, quadrant, nblock, lo, hi, d, budget ); if (*budget < 0) return; continue; } med = (Int32) mmed3 ( block[ptr[ lo ]+d], block[ptr[ hi ]+d], block[ptr[ (lo+hi)>>1 ]+d] ); unLo = ltLo = lo; unHi = gtHi = hi; while (True) { while (True) { if (unLo > unHi) break; n = ((Int32)block[ptr[unLo]+d]) - med; if (n == 0) { mswap(ptr[unLo], ptr[ltLo]); ltLo++; unLo++; continue; }; if (n > 0) break; unLo++; } while (True) { if (unLo > unHi) break; n = ((Int32)block[ptr[unHi]+d]) - med; if (n == 0) { mswap(ptr[unHi], ptr[gtHi]); gtHi--; unHi--; continue; }; if (n < 0) break; unHi--; } if (unLo > unHi) break; mswap(ptr[unLo], ptr[unHi]); unLo++; unHi--; } AssertD ( unHi == unLo-1, "mainQSort3(2)" ); if (gtHi < ltLo) { mpush(lo, hi, d+1 ); continue; } n = mmin(ltLo-lo, unLo-ltLo); mvswap(lo, unLo-n, n); m = mmin(hi-gtHi, gtHi-unHi); mvswap(unLo, hi-m+1, m); n = lo + unLo - ltLo - 1; m = hi - (gtHi - unHi) + 1; nextLo[0] = lo; nextHi[0] = n; nextD[0] = d; nextLo[1] = m; nextHi[1] = hi; nextD[1] = d; nextLo[2] = n+1; nextHi[2] = m-1; nextD[2] = d+1; if (mnextsize(0) < mnextsize(1)) mnextswap(0,1); if (mnextsize(1) < mnextsize(2)) mnextswap(1,2); if (mnextsize(0) < mnextsize(1)) mnextswap(0,1); AssertD (mnextsize(0) >= mnextsize(1), "mainQSort3(8)" ); AssertD (mnextsize(1) >= mnextsize(2), "mainQSort3(9)" ); mpush (nextLo[0], nextHi[0], nextD[0]); mpush (nextLo[1], nextHi[1], nextD[1]); mpush (nextLo[2], nextHi[2], nextD[2]); } } #undef mswap #undef mvswap #undef mpush #undef mpop #undef mmin #undef mnextsize #undef mnextswap #undef MAIN_QSORT_SMALL_THRESH #undef MAIN_QSORT_DEPTH_THRESH #undef MAIN_QSORT_STACK_SIZE /*---------------------------------------------*/ /* Pre: nblock > N_OVERSHOOT block32 exists for [0 .. nblock-1 +N_OVERSHOOT] ((UChar*)block32) [0 .. nblock-1] holds block ptr exists for [0 .. nblock-1] Post: ((UChar*)block32) [0 .. nblock-1] holds block All other areas of block32 destroyed ftab [0 .. 65536 ] destroyed ptr [0 .. 
nblock-1] holds sorted order if (*budget < 0), sorting was abandoned */ #define BIGFREQ(b) (ftab[((b)+1) << 8] - ftab[(b) << 8]) #define SETMASK (1 << 21) #define CLEARMASK (~(SETMASK)) /*static*/ __attribute__((noinline)) void mainSort ( UInt32* ptr, UChar* block, UInt16* quadrant, UInt32* ftab, Int32 nblock, Int32 verb, Int32* budget ) { Int32 i, j, k, ss, sb; Int32 runningOrder[256]; Bool bigDone[256]; Int32 copyStart[256]; Int32 copyEnd [256]; UChar c1; Int32 numQSorted; UInt16 s; if (verb >= 4) VPrintf0 ( " main sort initialise ...\n" ); /*-- set up the 2-byte frequency table --*/ for (i = 65536; i >= 0; i--) ftab[i] = 0; j = block[0] << 8; i = nblock-1; for (; i >= 3; i -= 4) { quadrant[i] = 0; j = (j >> 8) | ( ((UInt16)block[i]) << 8); ftab[j]++; quadrant[i-1] = 0; j = (j >> 8) | ( ((UInt16)block[i-1]) << 8); ftab[j]++; quadrant[i-2] = 0; j = (j >> 8) | ( ((UInt16)block[i-2]) << 8); ftab[j]++; quadrant[i-3] = 0; j = (j >> 8) | ( ((UInt16)block[i-3]) << 8); ftab[j]++; } for (; i >= 0; i--) { quadrant[i] = 0; j = (j >> 8) | ( ((UInt16)block[i]) << 8); ftab[j]++; } /*-- (emphasises close relationship of block & quadrant) --*/ for (i = 0; i < BZ_N_OVERSHOOT; i++) { block [nblock+i] = block[i]; quadrant[nblock+i] = 0; } if (verb >= 4) VPrintf0 ( " bucket sorting ...\n" ); /*-- Complete the initial radix sort --*/ for (i = 1; i <= 65536; i++) ftab[i] += ftab[i-1]; s = block[0] << 8; i = nblock-1; for (; i >= 3; i -= 4) { s = (s >> 8) | (block[i] << 8); j = ftab[s] -1; ftab[s] = j; ptr[j] = i; s = (s >> 8) | (block[i-1] << 8); j = ftab[s] -1; ftab[s] = j; ptr[j] = i-1; s = (s >> 8) | (block[i-2] << 8); j = ftab[s] -1; ftab[s] = j; ptr[j] = i-2; s = (s >> 8) | (block[i-3] << 8); j = ftab[s] -1; ftab[s] = j; ptr[j] = i-3; } for (; i >= 0; i--) { s = (s >> 8) | (block[i] << 8); j = ftab[s] -1; ftab[s] = j; ptr[j] = i; } /*-- Now ftab contains the first loc of every small bucket. Calculate the running order, from smallest to largest big bucket. --*/ for (i = 0; i <= 255; i++) { bigDone [i] = False; runningOrder[i] = i; } { Int32 vv; Int32 h = 1; do h = 3 * h + 1; while (h <= 256); do { h = h / 3; for (i = h; i <= 255; i++) { vv = runningOrder[i]; j = i; while ( BIGFREQ(runningOrder[j-h]) > BIGFREQ(vv) ) { runningOrder[j] = runningOrder[j-h]; j = j - h; if (j <= (h - 1)) goto zero; } zero: runningOrder[j] = vv; } } while (h != 1); } /*-- The main sorting loop. --*/ numQSorted = 0; for (i = 0; i <= 255; i++) { /*-- Process big buckets, starting with the least full. Basically this is a 3-step process in which we call mainQSort3 to sort the small buckets [ss, j], but also make a big effort to avoid the calls if we can. --*/ ss = runningOrder[i]; /*-- Step 1: Complete the big bucket [ss] by quicksorting any unsorted small buckets [ss, j], for j != ss. Hopefully previous pointer-scanning phases have already completed many of the small buckets [ss, j], so we don't have to sort them at all. --*/ for (j = 0; j <= 255; j++) { if (j != ss) { sb = (ss << 8) + j; if ( ! 
(ftab[sb] & SETMASK) ) { Int32 lo = ftab[sb] & CLEARMASK; Int32 hi = (ftab[sb+1] & CLEARMASK) - 1; if (hi > lo) { if (verb >= 4) VPrintf4 ( " qsort [0x%x, 0x%x] " "done %d this %d\n", ss, j, numQSorted, hi - lo + 1 ); mainQSort3 ( ptr, block, quadrant, nblock, lo, hi, BZ_N_RADIX, budget ); numQSorted += (hi - lo + 1); if (*budget < 0) return; } } ftab[sb] |= SETMASK; } } AssertH ( !bigDone[ss], 1006 ); /*-- Step 2: Now scan this big bucket [ss] so as to synthesise the sorted order for small buckets [t, ss] for all t, including, magically, the bucket [ss,ss] too. This will avoid doing Real Work in subsequent Step 1's. --*/ { for (j = 0; j <= 255; j++) { copyStart[j] = ftab[(j << 8) + ss] & CLEARMASK; copyEnd [j] = (ftab[(j << 8) + ss + 1] & CLEARMASK) - 1; } for (j = ftab[ss << 8] & CLEARMASK; j < copyStart[ss]; j++) { k = ptr[j]-1; if (k < 0) k += nblock; c1 = block[k]; croak( 2 + (char*)budget ); /* should identify decl in calling frame */ if (!bigDone[c1]) ptr[ copyStart[c1]++ ] = k; } for (j = (ftab[(ss+1) << 8] & CLEARMASK) - 1; j > copyEnd[ss]; j--) { k = ptr[j]-1; if (k < 0) k += nblock; c1 = block[k]; if (!bigDone[c1]) ptr[ copyEnd[c1]-- ] = k; } } AssertH ( (copyStart[ss]-1 == copyEnd[ss]) || /* Extremely rare case missing in bzip2-1.0.0 and 1.0.1. Necessity for this case is demonstrated by compressing a sequence of approximately 48.5 million of character 251; 1.0.0/1.0.1 will then die here. */ (copyStart[ss] == 0 && copyEnd[ss] == nblock-1), 1007 ) for (j = 0; j <= 255; j++) ftab[(j << 8) + ss] |= SETMASK; /*-- Step 3: The [ss] big bucket is now done. Record this fact, and update the quadrant descriptors. Remember to update quadrants in the overshoot area too, if necessary. The "if (i < 255)" test merely skips this updating for the last bucket processed, since updating for the last bucket is pointless. The quadrant array provides a way to incrementally cache sort orderings, as they appear, so as to make subsequent comparisons in fullGtU() complete faster. For repetitive blocks this makes a big difference (but not big enough to be able to avoid the fallback sorting mechanism, exponential radix sort). The precise meaning is: at all times: for 0 <= i < nblock and 0 <= j <= nblock if block[i] != block[j], then the relative values of quadrant[i] and quadrant[j] are meaningless. else { if quadrant[i] < quadrant[j] then the string starting at i lexicographically precedes the string starting at j else if quadrant[i] > quadrant[j] then the string starting at j lexicographically precedes the string starting at i else the relative ordering of the strings starting at i and j has not yet been determined. } --*/ bigDone[ss] = True; if (i < 255) { Int32 bbStart = ftab[ss << 8] & CLEARMASK; Int32 bbSize = (ftab[(ss+1) << 8] & CLEARMASK) - bbStart; Int32 shifts = 0; while ((bbSize >> shifts) > 65534) shifts++; for (j = bbSize-1; j >= 0; j--) { Int32 a2update = ptr[bbStart + j]; UInt16 qVal = (UInt16)(j >> shifts); quadrant[a2update] = qVal; if (a2update < BZ_N_OVERSHOOT) quadrant[a2update + nblock] = qVal; } AssertH ( ((bbSize-1) >> shifts) <= 65535, 1002 ); } } if (verb >= 4) VPrintf3 ( " %d pointers, %d sorted, %d scanned\n", nblock, numQSorted, nblock - numQSorted ); } #undef BIGFREQ #undef SETMASK #undef CLEARMASK /*---------------------------------------------*/ /* Pre: nblock > 0 arr2 exists for [0 .. nblock-1 +N_OVERSHOOT] ((UChar*)arr2) [0 .. nblock-1] holds block arr1 exists for [0 .. nblock-1] Post: ((UChar*)arr2) [0 .. nblock-1] holds block All other areas of block destroyed ftab [ 0 .. 
65536 ] destroyed arr1 [0 .. nblock-1] holds sorted order */ __attribute__((noinline)) void BZ2_blockSort ( EState* s ) { UInt32* ptr = s->ptr; UChar* block = s->block; UInt32* ftab = s->ftab; Int32 nblock = s->nblock; Int32 verb = s->verbosity; Int32 wfact = s->workFactor; UInt16* quadrant; Int32 budget; Int32 budgetInit; Int32 i; if (nblock < /* 10000 */1000 ) { fallbackSort ( s->arr1, s->arr2, ftab, nblock, verb ); } else { /* Calculate the location for quadrant, remembering to get the alignment right. Assumes that &(block[0]) is at least 2-byte aligned -- this should be ok since block is really the first section of arr2. */ i = nblock+BZ_N_OVERSHOOT; if (i & 1) i++; quadrant = (UInt16*)(&(block[i])); /* (wfact-1) / 3 puts the default-factor-30 transition point at very roughly the same place as with v0.1 and v0.9.0. Not that it particularly matters any more, since the resulting compressed stream is now the same regardless of whether or not we use the main sort or fallback sort. */ if (wfact < 1 ) wfact = 1; if (wfact > 100) wfact = 100; budgetInit = nblock * ((wfact-1) / 3); budget = budgetInit; mainSort ( ptr, block, quadrant, ftab, nblock, verb, &budget ); if (0 && verb >= 3) VPrintf3 ( " %d work, %d block, ratio %5.2f\n", budgetInit - budget, nblock, (float)(budgetInit - budget) / (float)(nblock==0 ? 1 : nblock) ); if (budget < 0) { if (verb >= 2) VPrintf0 ( " too repetitive; using fallback" " sorting algorithm\n" ); fallbackSort ( s->arr1, s->arr2, ftab, nblock, verb ); } } s->origPtr = -1; for (i = 0; i < s->nblock; i++) if (ptr[i] == 0) { s->origPtr = i; break; }; AssertH( s->origPtr != -1, 1003 ); } /*-------------------------------------------------------------*/ /*--- end blocksort.c ---*/ /*-------------------------------------------------------------*/ /*-------------------------------------------------------------*/ /*--- Huffman coding low-level stuff ---*/ /*--- huffman.c ---*/ /*-------------------------------------------------------------*/ /*-- This file is a part of bzip2 and/or libbzip2, a program and library for lossless, block-sorting data compression. Copyright (C) 1996-2004 Julian R Seward. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 3. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 4. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Julian Seward, Cambridge, UK. jseward@bzip.org bzip2/libbzip2 version 1.0 of 21 March 2000 This program is based on (at least) the work of: Mike Burrows David Wheeler Peter Fenwick Alistair Moffat Radford Neal Ian H. Witten Robert Sedgewick Jon L. Bentley For more information on these sources, see the manual. --*/ /*---------------------------------------------------*/ #define WEIGHTOF(zz0) ((zz0) & 0xffffff00) #define DEPTHOF(zz1) ((zz1) & 0x000000ff) #define MYMAX(zz2,zz3) ((zz2) > (zz3) ? (zz2) : (zz3)) #define ADDWEIGHTS(zw1,zw2) \ (WEIGHTOF(zw1)+WEIGHTOF(zw2)) | \ (1 + MYMAX(DEPTHOF(zw1),DEPTHOF(zw2))) #define UPHEAP(z) \ { \ Int32 zz, tmp; \ zz = z; tmp = heap[zz]; \ while (weight[tmp] < weight[heap[zz >> 1]]) { \ heap[zz] = heap[zz >> 1]; \ zz >>= 1; \ } \ heap[zz] = tmp; \ } #define DOWNHEAP(z) \ { \ Int32 zz, yy, tmp; \ zz = z; tmp = heap[zz]; \ while (True) { \ yy = zz << 1; \ if (yy > nHeap) break; \ if (yy < nHeap && \ weight[heap[yy+1]] < weight[heap[yy]]) \ yy++; \ if (weight[tmp] < weight[heap[yy]]) break; \ heap[zz] = heap[yy]; \ zz = yy; \ } \ heap[zz] = tmp; \ } /*---------------------------------------------------*/ void BZ2_hbMakeCodeLengths ( UChar *len, Int32 *freq, Int32 alphaSize, Int32 maxLen ) { /*-- Nodes and heap entries run from 1. Entry 0 for both the heap and nodes is a sentinel. --*/ Int32 nNodes, nHeap, n1, n2, i, j, k; Bool tooLong; Int32 heap [ BZ_MAX_ALPHA_SIZE + 2 ]; Int32 weight [ BZ_MAX_ALPHA_SIZE * 2 ]; Int32 parent [ BZ_MAX_ALPHA_SIZE * 2 ]; for (i = 0; i < alphaSize; i++) weight[i+1] = (freq[i] == 0 ? 1 : freq[i]) << 8; while (True) { nNodes = alphaSize; nHeap = 0; heap[0] = 0; weight[0] = 0; parent[0] = -2; for (i = 1; i <= alphaSize; i++) { parent[i] = -1; nHeap++; heap[nHeap] = i; UPHEAP(nHeap); } AssertH( nHeap < (BZ_MAX_ALPHA_SIZE+2), 2001 ); while (nHeap > 1) { n1 = heap[1]; heap[1] = heap[nHeap]; nHeap--; DOWNHEAP(1); n2 = heap[1]; heap[1] = heap[nHeap]; nHeap--; DOWNHEAP(1); nNodes++; parent[n1] = parent[n2] = nNodes; weight[nNodes] = ADDWEIGHTS(weight[n1], weight[n2]); parent[nNodes] = -1; nHeap++; heap[nHeap] = nNodes; UPHEAP(nHeap); } AssertH( nNodes < (BZ_MAX_ALPHA_SIZE * 2), 2002 ); tooLong = False; for (i = 1; i <= alphaSize; i++) { j = 0; k = i; while (parent[k] >= 0) { k = parent[k]; j++; } len[i-1] = j; if (j > maxLen) tooLong = True; } if (! tooLong) break; /* 17 Oct 04: keep-going condition for the following loop used to be 'i < alphaSize', which missed the last element, theoretically leading to the possibility of the compressor looping. However, this count-scaling step is only needed if one of the generated Huffman code words is longer than maxLen, which up to and including version 1.0.2 was 20 bits, which is extremely unlikely. In version 1.0.3 maxLen was changed to 17 bits, which has minimal effect on compression ratio, but does mean this scaling step is used from time to time, enough to verify that it works. This means that bzip2-1.0.3 and later will only produce Huffman codes with a maximum length of 17 bits. 
However, in order to preserve backwards compatibility with bitstreams produced by versions pre-1.0.3, the decompressor must still handle lengths of up to 20. */ for (i = 1; i <= alphaSize; i++) { j = weight[i] >> 8; j = 1 + (j / 2); weight[i] = j << 8; } } } /*---------------------------------------------------*/ void BZ2_hbAssignCodes ( Int32 *code, UChar *length, Int32 minLen, Int32 maxLen, Int32 alphaSize ) { Int32 n, vec, i; vec = 0; for (n = minLen; n <= maxLen; n++) { for (i = 0; i < alphaSize; i++) if (length[i] == n) { code[i] = vec; vec++; }; vec <<= 1; } } /*---------------------------------------------------*/ void BZ2_hbCreateDecodeTables ( Int32 *limit, Int32 *base, Int32 *perm, UChar *length, Int32 minLen, Int32 maxLen, Int32 alphaSize ) { Int32 pp, i, j, vec; pp = 0; for (i = minLen; i <= maxLen; i++) for (j = 0; j < alphaSize; j++) if (length[j] == i) { perm[pp] = j; pp++; }; for (i = 0; i < BZ_MAX_CODE_LEN; i++) base[i] = 0; for (i = 0; i < alphaSize; i++) base[length[i]+1]++; for (i = 1; i < BZ_MAX_CODE_LEN; i++) base[i] += base[i-1]; for (i = 0; i < BZ_MAX_CODE_LEN; i++) limit[i] = 0; vec = 0; for (i = minLen; i <= maxLen; i++) { vec += (base[i+1] - base[i]); limit[i] = vec-1; vec <<= 1; } for (i = minLen + 1; i <= maxLen; i++) base[i] = ((limit[i-1] + 1) << 1) - base[i]; } /*-------------------------------------------------------------*/ /*--- end huffman.c ---*/ /*-------------------------------------------------------------*/ /*-------------------------------------------------------------*/ /*--- Compression machinery (not incl block sorting) ---*/ /*--- compress.c ---*/ /*-------------------------------------------------------------*/ /*-- This file is a part of bzip2 and/or libbzip2, a program and library for lossless, block-sorting data compression. Copyright (C) 1996-2004 Julian R Seward. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 3. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 4. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Julian Seward, Cambridge, UK. 
jseward@bzip.org bzip2/libbzip2 version 1.0 of 21 March 2000 This program is based on (at least) the work of: Mike Burrows David Wheeler Peter Fenwick Alistair Moffat Radford Neal Ian H. Witten Robert Sedgewick Jon L. Bentley For more information on these sources, see the manual. --*/ /*-- CHANGES ~~~~~~~ 0.9.0 -- original version. 0.9.0a/b -- no changes in this file. 0.9.0c * changed setting of nGroups in sendMTFValues() so as to do a bit better on small files --*/ /*---------------------------------------------------*/ /*--- Bit stream I/O ---*/ /*---------------------------------------------------*/ /*---------------------------------------------------*/ void BZ2_bsInitWrite ( EState* s ) { s->bsLive = 0; s->bsBuff = 0; } /*---------------------------------------------------*/ static void bsFinishWrite ( EState* s ) { while (s->bsLive > 0) { s->zbits[s->numZ] = (UChar)(s->bsBuff >> 24); s->numZ++; s->bsBuff <<= 8; s->bsLive -= 8; } } /*---------------------------------------------------*/ #define bsNEEDW(nz) \ { \ while (s->bsLive >= 8) { \ s->zbits[s->numZ] \ = (UChar)(s->bsBuff >> 24); \ s->numZ++; \ s->bsBuff <<= 8; \ s->bsLive -= 8; \ } \ } /*---------------------------------------------------*/ static void bsW ( EState* s, Int32 n, UInt32 v ) { bsNEEDW ( n ); s->bsBuff |= (v << (32 - s->bsLive - n)); s->bsLive += n; } /*---------------------------------------------------*/ static void bsPutUInt32 ( EState* s, UInt32 u ) { bsW ( s, 8, (u >> 24) & 0xffL ); bsW ( s, 8, (u >> 16) & 0xffL ); bsW ( s, 8, (u >> 8) & 0xffL ); bsW ( s, 8, u & 0xffL ); } /*---------------------------------------------------*/ static void bsPutUChar ( EState* s, UChar c ) { bsW( s, 8, (UInt32)c ); } /*---------------------------------------------------*/ /*--- The back end proper ---*/ /*---------------------------------------------------*/ /*---------------------------------------------------*/ static void makeMaps_e ( EState* s ) { Int32 i; s->nInUse = 0; for (i = 0; i < 256; i++) if (s->inUse[i]) { s->unseqToSeq[i] = s->nInUse; s->nInUse++; } } /*---------------------------------------------------*/ static void generateMTFValues ( EState* s ) { UChar yy[256]; Int32 i, j; Int32 zPend; Int32 wr; Int32 EOB; /* After sorting (eg, here), s->arr1 [ 0 .. s->nblock-1 ] holds sorted order, and ((UChar*)s->arr2) [ 0 .. s->nblock-1 ] holds the original block data. The first thing to do is generate the MTF values, and put them in ((UInt16*)s->arr1) [ 0 .. s->nblock-1 ]. Because there are strictly fewer or equal MTF values than block values, ptr values in this area are overwritten with MTF values only when they are no longer needed. The final compressed bitstream is generated into the area starting at (UChar*) (&((UChar*)s->arr2)[s->nblock]) These storage aliases are set up in bzCompressInit(), except for the last one, which is arranged in compressBlock(). 
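
   As a toy illustration of the move-to-front step itself (not
   taken from this file): with the symbol list initially [0,1,2],
   the input sequence 1 1 0 2 2 is emitted as 1 0 1 2 0, each
   output being the symbol's current list position before it is
   pulled to the front.  bzip2 then recodes runs of zeroes using
   the RUNA/RUNB pair (the zPend logic below) rather than
   emitting the zeroes directly; for example a run of five
   zeroes becomes RUNA RUNB, since run lengths are written in a
   bijective base-2 code.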
*/ UInt32* ptr = s->ptr; UChar* block = s->block; UInt16* mtfv = s->mtfv; makeMaps_e ( s ); EOB = s->nInUse+1; for (i = 0; i <= EOB; i++) s->mtfFreq[i] = 0; wr = 0; zPend = 0; for (i = 0; i < s->nInUse; i++) yy[i] = (UChar) i; for (i = 0; i < s->nblock; i++) { UChar ll_i; AssertD ( wr <= i, "generateMTFValues(1)" ); j = ptr[i]-1; if (j < 0) j += s->nblock; ll_i = s->unseqToSeq[block[j]]; AssertD ( ll_i < s->nInUse, "generateMTFValues(2a)" ); if (yy[0] == ll_i) { zPend++; } else { if (zPend > 0) { zPend--; while (True) { if (zPend & 1) { mtfv[wr] = BZ_RUNB; wr++; s->mtfFreq[BZ_RUNB]++; } else { mtfv[wr] = BZ_RUNA; wr++; s->mtfFreq[BZ_RUNA]++; } if (zPend < 2) break; zPend = (zPend - 2) / 2; }; zPend = 0; } { register UChar rtmp; register UChar* ryy_j; register UChar rll_i; rtmp = yy[1]; yy[1] = yy[0]; ryy_j = &(yy[1]); rll_i = ll_i; while ( rll_i != rtmp ) { register UChar rtmp2; ryy_j++; rtmp2 = rtmp; rtmp = *ryy_j; *ryy_j = rtmp2; }; yy[0] = rtmp; j = ryy_j - &(yy[0]); mtfv[wr] = j+1; wr++; s->mtfFreq[j+1]++; } } } if (zPend > 0) { zPend--; while (True) { if (zPend & 1) { mtfv[wr] = BZ_RUNB; wr++; s->mtfFreq[BZ_RUNB]++; } else { mtfv[wr] = BZ_RUNA; wr++; s->mtfFreq[BZ_RUNA]++; } if (zPend < 2) break; zPend = (zPend - 2) / 2; }; zPend = 0; } mtfv[wr] = EOB; wr++; s->mtfFreq[EOB]++; s->nMTF = wr; } /*---------------------------------------------------*/ #define BZ_LESSER_ICOST 0 #define BZ_GREATER_ICOST 15 static void sendMTFValues ( EState* s ) { Int32 v, t, i, j, gs, ge, totc, bt, bc, iter; Int32 nSelectors, alphaSize, minLen, maxLen, selCtr; Int32 nGroups, nBytes; /*-- UChar len [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE]; is a global since the decoder also needs it. Int32 code[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE]; Int32 rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE]; are also globals only used in this proc. Made global to keep stack frame size small. --*/ UInt16 cost[BZ_N_GROUPS]; Int32 fave[BZ_N_GROUPS]; UInt16* mtfv = s->mtfv; if (s->verbosity >= 3) VPrintf3( " %d in block, %d after MTF & 1-2 coding, " "%d+2 syms in use\n", s->nblock, s->nMTF, s->nInUse ); alphaSize = s->nInUse+2; for (t = 0; t < BZ_N_GROUPS; t++) for (v = 0; v < alphaSize; v++) s->len[t][v] = BZ_GREATER_ICOST; /*--- Decide how many coding tables to use ---*/ AssertH ( s->nMTF > 0, 3001 ); if (s->nMTF < 200) nGroups = 2; else if (s->nMTF < 600) nGroups = 3; else if (s->nMTF < 1200) nGroups = 4; else if (s->nMTF < 2400) nGroups = 5; else nGroups = 6; /*--- Generate an initial set of coding tables ---*/ { Int32 nPart, remF, tFreq, aFreq; nPart = nGroups; remF = s->nMTF; gs = 0; while (nPart > 0) { tFreq = remF / nPart; ge = gs-1; aFreq = 0; while (aFreq < tFreq && ge < alphaSize-1) { ge++; aFreq += s->mtfFreq[ge]; } if (ge > gs && nPart != nGroups && nPart != 1 && ((nGroups-nPart) % 2 == 1)) { aFreq -= s->mtfFreq[ge]; ge--; } if (0 && s->verbosity >= 3) VPrintf5( " initial group %d, [%d .. %d], " "has %d syms (%4.1f%%)\n", nPart, gs, ge, aFreq, (100.0 * (float)aFreq) / (float)(s->nMTF) ); for (v = 0; v < alphaSize; v++) if (v >= gs && v <= ge) s->len[nPart-1][v] = BZ_LESSER_ICOST; else s->len[nPart-1][v] = BZ_GREATER_ICOST; nPart--; gs = ge+1; remF -= aFreq; } } /*--- Iterate up to BZ_N_ITERS times to improve the tables. ---*/ for (iter = 0; iter < BZ_N_ITERS; iter++) { for (t = 0; t < nGroups; t++) fave[t] = 0; for (t = 0; t < nGroups; t++) for (v = 0; v < alphaSize; v++) s->rfreq[t][v] = 0; /*--- Set up an auxiliary length table which is used to fast-track the common case (nGroups == 6). 
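
   Each 32-bit len_pack[v][k] entry holds the code lengths for
   tables 2k and 2k+1 side by side in its two 16-bit halves,
   e.g. len[0][v] == 3 and len[1][v] == 5 pack to
   (5 << 16) | 3 == 0x00050003.  The 50-symbol costing loop
   below can then charge all six tables with three additions per
   symbol instead of six; a group's cost is at most 50 * 20
   bits, so the halves can never carry into each other, and
   they are split apart again afterwards with "& 0xffff" and
   ">> 16".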
---*/ if (nGroups == 6) { for (v = 0; v < alphaSize; v++) { s->len_pack[v][0] = (s->len[1][v] << 16) | s->len[0][v]; s->len_pack[v][1] = (s->len[3][v] << 16) | s->len[2][v]; s->len_pack[v][2] = (s->len[5][v] << 16) | s->len[4][v]; } } nSelectors = 0; totc = 0; gs = 0; while (True) { /*--- Set group start & end marks. --*/ if (gs >= s->nMTF) break; ge = gs + BZ_G_SIZE - 1; if (ge >= s->nMTF) ge = s->nMTF-1; /*-- Calculate the cost of this group as coded by each of the coding tables. --*/ for (t = 0; t < nGroups; t++) cost[t] = 0; if (nGroups == 6 && 50 == ge-gs+1) { /*--- fast track the common case ---*/ register UInt32 cost01, cost23, cost45; register UInt16 icv; cost01 = cost23 = cost45 = 0; # define BZ_ITER(nn) \ icv = mtfv[gs+(nn)]; \ cost01 += s->len_pack[icv][0]; \ cost23 += s->len_pack[icv][1]; \ cost45 += s->len_pack[icv][2]; \ BZ_ITER(0); BZ_ITER(1); BZ_ITER(2); BZ_ITER(3); BZ_ITER(4); BZ_ITER(5); BZ_ITER(6); BZ_ITER(7); BZ_ITER(8); BZ_ITER(9); BZ_ITER(10); BZ_ITER(11); BZ_ITER(12); BZ_ITER(13); BZ_ITER(14); BZ_ITER(15); BZ_ITER(16); BZ_ITER(17); BZ_ITER(18); BZ_ITER(19); BZ_ITER(20); BZ_ITER(21); BZ_ITER(22); BZ_ITER(23); BZ_ITER(24); BZ_ITER(25); BZ_ITER(26); BZ_ITER(27); BZ_ITER(28); BZ_ITER(29); BZ_ITER(30); BZ_ITER(31); BZ_ITER(32); BZ_ITER(33); BZ_ITER(34); BZ_ITER(35); BZ_ITER(36); BZ_ITER(37); BZ_ITER(38); BZ_ITER(39); BZ_ITER(40); BZ_ITER(41); BZ_ITER(42); BZ_ITER(43); BZ_ITER(44); BZ_ITER(45); BZ_ITER(46); BZ_ITER(47); BZ_ITER(48); BZ_ITER(49); # undef BZ_ITER cost[0] = cost01 & 0xffff; cost[1] = cost01 >> 16; cost[2] = cost23 & 0xffff; cost[3] = cost23 >> 16; cost[4] = cost45 & 0xffff; cost[5] = cost45 >> 16; } else { /*--- slow version which correctly handles all situations ---*/ for (i = gs; i <= ge; i++) { UInt16 icv = mtfv[i]; for (t = 0; t < nGroups; t++) cost[t] += s->len[t][icv]; } } /*-- Find the coding table which is best for this group, and record its identity in the selector table. --*/ bc = 999999999; bt = -1; for (t = 0; t < nGroups; t++) if (cost[t] < bc) { bc = cost[t]; bt = t; }; totc += bc; fave[bt]++; s->selector[nSelectors] = bt; nSelectors++; /*-- Increment the symbol frequencies for the selected table. --*/ if (nGroups == 6 && 50 == ge-gs+1) { /*--- fast track the common case ---*/ # define BZ_ITUR(nn) s->rfreq[bt][ mtfv[gs+(nn)] ]++ BZ_ITUR(0); BZ_ITUR(1); BZ_ITUR(2); BZ_ITUR(3); BZ_ITUR(4); BZ_ITUR(5); BZ_ITUR(6); BZ_ITUR(7); BZ_ITUR(8); BZ_ITUR(9); BZ_ITUR(10); BZ_ITUR(11); BZ_ITUR(12); BZ_ITUR(13); BZ_ITUR(14); BZ_ITUR(15); BZ_ITUR(16); BZ_ITUR(17); BZ_ITUR(18); BZ_ITUR(19); BZ_ITUR(20); BZ_ITUR(21); BZ_ITUR(22); BZ_ITUR(23); BZ_ITUR(24); BZ_ITUR(25); BZ_ITUR(26); BZ_ITUR(27); BZ_ITUR(28); BZ_ITUR(29); BZ_ITUR(30); BZ_ITUR(31); BZ_ITUR(32); BZ_ITUR(33); BZ_ITUR(34); BZ_ITUR(35); BZ_ITUR(36); BZ_ITUR(37); BZ_ITUR(38); BZ_ITUR(39); BZ_ITUR(40); BZ_ITUR(41); BZ_ITUR(42); BZ_ITUR(43); BZ_ITUR(44); BZ_ITUR(45); BZ_ITUR(46); BZ_ITUR(47); BZ_ITUR(48); BZ_ITUR(49); # undef BZ_ITUR } else { /*--- slow version which correctly handles all situations ---*/ for (i = gs; i <= ge; i++) s->rfreq[bt][ mtfv[i] ]++; } gs = ge+1; } if (s->verbosity >= 3) { VPrintf2 ( " pass %d: size is %d, grp uses are ", iter+1, totc/8 ); for (t = 0; t < nGroups; t++) VPrintf1 ( "%d ", fave[t] ); VPrintf0 ( "\n" ); } /*-- Recompute the tables based on the accumulated frequencies. --*/ /* maxLen was changed from 20 to 17 in bzip2-1.0.3. See comment in huffman.c for details. 
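
   (The limit is enforced by the retry loop in
   BZ2_hbMakeCodeLengths above: whenever some code word comes
   out longer than maxLen, every leaf frequency f is rescaled
   to 1 + f/2 and the tree is rebuilt, e.g. 1 and 1000 become
   1 and 501, then 1 and 251, flattening the distribution until
   all lengths fit.)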
*/ for (t = 0; t < nGroups; t++) BZ2_hbMakeCodeLengths ( &(s->len[t][0]), &(s->rfreq[t][0]), alphaSize, 17 /*20*/ ); } AssertH( nGroups < 8, 3002 ); AssertH( nSelectors < 32768 && nSelectors <= (2 + (900000 / BZ_G_SIZE)), 3003 ); /*--- Compute MTF values for the selectors. ---*/ { UChar pos[BZ_N_GROUPS], ll_i, tmp2, tmp; for (i = 0; i < nGroups; i++) pos[i] = i; for (i = 0; i < nSelectors; i++) { ll_i = s->selector[i]; j = 0; tmp = pos[j]; while ( ll_i != tmp ) { j++; tmp2 = tmp; tmp = pos[j]; pos[j] = tmp2; }; pos[0] = tmp; s->selectorMtf[i] = j; } }; /*--- Assign actual codes for the tables. --*/ for (t = 0; t < nGroups; t++) { minLen = 32; maxLen = 0; for (i = 0; i < alphaSize; i++) { if (s->len[t][i] > maxLen) maxLen = s->len[t][i]; if (s->len[t][i] < minLen) minLen = s->len[t][i]; } AssertH ( !(maxLen > 17 /*20*/ ), 3004 ); AssertH ( !(minLen < 1), 3005 ); BZ2_hbAssignCodes ( &(s->code[t][0]), &(s->len[t][0]), minLen, maxLen, alphaSize ); } /*--- Transmit the mapping table. ---*/ { Bool inUse16[16]; for (i = 0; i < 16; i++) { inUse16[i] = False; for (j = 0; j < 16; j++) if (s->inUse[i * 16 + j]) inUse16[i] = True; } nBytes = s->numZ; for (i = 0; i < 16; i++) if (inUse16[i]) bsW(s,1,1); else bsW(s,1,0); for (i = 0; i < 16; i++) if (inUse16[i]) for (j = 0; j < 16; j++) { if (s->inUse[i * 16 + j]) bsW(s,1,1); else bsW(s,1,0); } if (s->verbosity >= 3) VPrintf1( " bytes: mapping %d, ", s->numZ-nBytes ); } /*--- Now the selectors. ---*/ nBytes = s->numZ; bsW ( s, 3, nGroups ); bsW ( s, 15, nSelectors ); for (i = 0; i < nSelectors; i++) { for (j = 0; j < s->selectorMtf[i]; j++) bsW(s,1,1); bsW(s,1,0); } if (s->verbosity >= 3) VPrintf1( "selectors %d, ", s->numZ-nBytes ); /*--- Now the coding tables. ---*/ nBytes = s->numZ; for (t = 0; t < nGroups; t++) { Int32 curr = s->len[t][0]; bsW ( s, 5, curr ); for (i = 0; i < alphaSize; i++) { while (curr < s->len[t][i]) { bsW(s,2,2); curr++; /* 10 */ }; while (curr > s->len[t][i]) { bsW(s,2,3); curr--; /* 11 */ }; bsW ( s, 1, 0 ); } } if (s->verbosity >= 3) VPrintf1 ( "code lengths %d, ", s->numZ-nBytes ); /*--- And finally, the block data proper ---*/ nBytes = s->numZ; selCtr = 0; gs = 0; while (True) { if (gs >= s->nMTF) break; ge = gs + BZ_G_SIZE - 1; if (ge >= s->nMTF) ge = s->nMTF-1; AssertH ( s->selector[selCtr] < nGroups, 3006 ); if (nGroups == 6 && 50 == ge-gs+1) { /*--- fast track the common case ---*/ UInt16 mtfv_i; UChar* s_len_sel_selCtr = &(s->len[s->selector[selCtr]][0]); Int32* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]); # define BZ_ITAH(nn) \ mtfv_i = mtfv[gs+(nn)]; \ bsW ( s, \ s_len_sel_selCtr[mtfv_i], \ s_code_sel_selCtr[mtfv_i] ) BZ_ITAH(0); BZ_ITAH(1); BZ_ITAH(2); BZ_ITAH(3); BZ_ITAH(4); BZ_ITAH(5); BZ_ITAH(6); BZ_ITAH(7); BZ_ITAH(8); BZ_ITAH(9); BZ_ITAH(10); BZ_ITAH(11); BZ_ITAH(12); BZ_ITAH(13); BZ_ITAH(14); BZ_ITAH(15); BZ_ITAH(16); BZ_ITAH(17); BZ_ITAH(18); BZ_ITAH(19); BZ_ITAH(20); BZ_ITAH(21); BZ_ITAH(22); BZ_ITAH(23); BZ_ITAH(24); BZ_ITAH(25); BZ_ITAH(26); BZ_ITAH(27); BZ_ITAH(28); BZ_ITAH(29); BZ_ITAH(30); BZ_ITAH(31); BZ_ITAH(32); BZ_ITAH(33); BZ_ITAH(34); BZ_ITAH(35); BZ_ITAH(36); BZ_ITAH(37); BZ_ITAH(38); BZ_ITAH(39); BZ_ITAH(40); BZ_ITAH(41); BZ_ITAH(42); BZ_ITAH(43); BZ_ITAH(44); BZ_ITAH(45); BZ_ITAH(46); BZ_ITAH(47); BZ_ITAH(48); BZ_ITAH(49); # undef BZ_ITAH } else { /*--- slow version which correctly handles all situations ---*/ for (i = gs; i <= ge; i++) { bsW ( s, s->len [s->selector[selCtr]] [mtfv[i]], s->code [s->selector[selCtr]] [mtfv[i]] ); } } gs = ge+1; selCtr++; } AssertH( selCtr == 
nSelectors, 3007 ); if (s->verbosity >= 3) VPrintf1( "codes %d\n", s->numZ-nBytes ); } /*---------------------------------------------------*/ __attribute__((noinline)) void BZ2_compressBlock ( EState* s, Bool is_last_block ) { if (s->nblock > 0) { BZ_FINALISE_CRC ( s->blockCRC ); s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31); s->combinedCRC ^= s->blockCRC; if (s->blockNo > 1) s->numZ = 0; if (s->verbosity >= 2) VPrintf4( " block %d: crc = 0x%08x, " "combined CRC = 0x%08x, size = %d\n", s->blockNo, s->blockCRC, s->combinedCRC, s->nblock ); BZ2_blockSort ( s ); } s->zbits = (UChar*) (&((UChar*)s->arr2)[s->nblock]); /*-- If this is the first block, create the stream header. --*/ if (s->blockNo == 1) { BZ2_bsInitWrite ( s ); bsPutUChar ( s, BZ_HDR_B ); bsPutUChar ( s, BZ_HDR_Z ); bsPutUChar ( s, BZ_HDR_h ); bsPutUChar ( s, (UChar)(BZ_HDR_0 + s->blockSize100k) ); } if (s->nblock > 0) { bsPutUChar ( s, 0x31 ); bsPutUChar ( s, 0x41 ); bsPutUChar ( s, 0x59 ); bsPutUChar ( s, 0x26 ); bsPutUChar ( s, 0x53 ); bsPutUChar ( s, 0x59 ); /*-- Now the block's CRC, so it is in a known place. --*/ bsPutUInt32 ( s, s->blockCRC ); /*-- Now a single bit indicating (non-)randomisation. As of version 0.9.5, we use a better sorting algorithm which makes randomisation unnecessary. So always set the randomised bit to 'no'. Of course, the decoder still needs to be able to handle randomised blocks so as to maintain backwards compatibility with older versions of bzip2. --*/ bsW(s,1,0); bsW ( s, 24, s->origPtr ); generateMTFValues ( s ); sendMTFValues ( s ); } /*-- If this is the last block, add the stream trailer. --*/ if (is_last_block) { bsPutUChar ( s, 0x17 ); bsPutUChar ( s, 0x72 ); bsPutUChar ( s, 0x45 ); bsPutUChar ( s, 0x38 ); bsPutUChar ( s, 0x50 ); bsPutUChar ( s, 0x90 ); bsPutUInt32 ( s, s->combinedCRC ); if (s->verbosity >= 2) VPrintf1( " final combined CRC = 0x%08x\n ", s->combinedCRC ); bsFinishWrite ( s ); } } /*-------------------------------------------------------------*/ /*--- end compress.c ---*/ /*-------------------------------------------------------------*/ /*-------------------------------------------------------------*/ /*--- Table for randomising repetitive blocks ---*/ /*--- randtable.c ---*/ /*-------------------------------------------------------------*/ /*-- This file is a part of bzip2 and/or libbzip2, a program and library for lossless, block-sorting data compression. Copyright (C) 1996-2004 Julian R Seward. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 3. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 4. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Julian Seward, Cambridge, UK. jseward@bzip.org bzip2/libbzip2 version 1.0 of 21 March 2000 This program is based on (at least) the work of: Mike Burrows David Wheeler Peter Fenwick Alistair Moffat Radford Neal Ian H. Witten Robert Sedgewick Jon L. Bentley For more information on these sources, see the manual. --*/ /*---------------------------------------------*/ Int32 BZ2_rNums[512] = { 619, 720, 127, 481, 931, 816, 813, 233, 566, 247, 985, 724, 205, 454, 863, 491, 741, 242, 949, 214, 733, 859, 335, 708, 621, 574, 73, 654, 730, 472, 419, 436, 278, 496, 867, 210, 399, 680, 480, 51, 878, 465, 811, 169, 869, 675, 611, 697, 867, 561, 862, 687, 507, 283, 482, 129, 807, 591, 733, 623, 150, 238, 59, 379, 684, 877, 625, 169, 643, 105, 170, 607, 520, 932, 727, 476, 693, 425, 174, 647, 73, 122, 335, 530, 442, 853, 695, 249, 445, 515, 909, 545, 703, 919, 874, 474, 882, 500, 594, 612, 641, 801, 220, 162, 819, 984, 589, 513, 495, 799, 161, 604, 958, 533, 221, 400, 386, 867, 600, 782, 382, 596, 414, 171, 516, 375, 682, 485, 911, 276, 98, 553, 163, 354, 666, 933, 424, 341, 533, 870, 227, 730, 475, 186, 263, 647, 537, 686, 600, 224, 469, 68, 770, 919, 190, 373, 294, 822, 808, 206, 184, 943, 795, 384, 383, 461, 404, 758, 839, 887, 715, 67, 618, 276, 204, 918, 873, 777, 604, 560, 951, 160, 578, 722, 79, 804, 96, 409, 713, 940, 652, 934, 970, 447, 318, 353, 859, 672, 112, 785, 645, 863, 803, 350, 139, 93, 354, 99, 820, 908, 609, 772, 154, 274, 580, 184, 79, 626, 630, 742, 653, 282, 762, 623, 680, 81, 927, 626, 789, 125, 411, 521, 938, 300, 821, 78, 343, 175, 128, 250, 170, 774, 972, 275, 999, 639, 495, 78, 352, 126, 857, 956, 358, 619, 580, 124, 737, 594, 701, 612, 669, 112, 134, 694, 363, 992, 809, 743, 168, 974, 944, 375, 748, 52, 600, 747, 642, 182, 862, 81, 344, 805, 988, 739, 511, 655, 814, 334, 249, 515, 897, 955, 664, 981, 649, 113, 974, 459, 893, 228, 433, 837, 553, 268, 926, 240, 102, 654, 459, 51, 686, 754, 806, 760, 493, 403, 415, 394, 687, 700, 946, 670, 656, 610, 738, 392, 760, 799, 887, 653, 978, 321, 576, 617, 626, 502, 894, 679, 243, 440, 680, 879, 194, 572, 640, 724, 926, 56, 204, 700, 707, 151, 457, 449, 797, 195, 791, 558, 945, 679, 297, 59, 87, 824, 713, 663, 412, 693, 342, 606, 134, 108, 571, 364, 631, 212, 174, 643, 304, 329, 343, 97, 430, 751, 497, 314, 983, 374, 822, 928, 140, 206, 73, 263, 980, 736, 876, 478, 430, 305, 170, 514, 364, 692, 829, 82, 855, 953, 676, 246, 369, 970, 294, 750, 807, 827, 150, 790, 288, 923, 804, 378, 215, 828, 592, 281, 565, 555, 710, 82, 896, 831, 547, 261, 524, 462, 293, 465, 502, 56, 661, 821, 976, 991, 658, 869, 905, 758, 745, 193, 768, 550, 608, 933, 378, 286, 215, 979, 792, 961, 61, 688, 793, 644, 986, 403, 106, 366, 905, 644, 372, 567, 466, 434, 645, 210, 389, 550, 919, 135, 780, 773, 635, 389, 707, 100, 626, 958, 165, 504, 920, 176, 193, 713, 857, 265, 203, 50, 668, 108, 645, 990, 626, 197, 510, 357, 358, 850, 858, 364, 936, 638 }; /*-------------------------------------------------------------*/ /*--- end randtable.c ---*/ 
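
/*---------------------------------------------*/
/*--
   How the table above is consumed is defined by the BZ_RAND_*
   macros in the private header, which are not reproduced here;
   in outline (a sketch from memory, so treat the details as
   approximate) a countdown is reloaded from successive
   BZ2_rNums[] entries and the data byte is XORed with 1 exactly
   when the countdown stands at 1:

      nToGo = 0; tPos = 0;
      for each byte b:
         if (nToGo == 0) { nToGo = BZ2_rNums[tPos];
                           tPos = (tPos + 1) % 512; }
         nToGo--;
         if (nToGo == 1) b ^= 1;

   Only the decompressor ever takes this path nowadays; as the
   comment in BZ2_compressBlock above notes, the encoder always
   writes the "not randomised" bit.
--*/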
/*-------------------------------------------------------------*/ /*-------------------------------------------------------------*/ /*--- Table for doing CRCs ---*/ /*--- crctable.c ---*/ /*-------------------------------------------------------------*/ /*-- This file is a part of bzip2 and/or libbzip2, a program and library for lossless, block-sorting data compression. Copyright (C) 1996-2004 Julian R Seward. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 3. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 4. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Julian Seward, Cambridge, UK. jseward@bzip.org bzip2/libbzip2 version 1.0 of 21 March 2000 This program is based on (at least) the work of: Mike Burrows David Wheeler Peter Fenwick Alistair Moffat Radford Neal Ian H. Witten Robert Sedgewick Jon L. Bentley For more information on these sources, see the manual. --*/ /*-- I think this is an implementation of the AUTODIN-II, Ethernet & FDDI 32-bit CRC standard. Vaguely derived from code by Rob Warnock, in Section 51 of the comp.compression FAQ. --*/ UInt32 BZ2_crc32Table[256] = { /*-- Ugly, innit? 
--*/ 0x00000000L, 0x04c11db7L, 0x09823b6eL, 0x0d4326d9L, 0x130476dcL, 0x17c56b6bL, 0x1a864db2L, 0x1e475005L, 0x2608edb8L, 0x22c9f00fL, 0x2f8ad6d6L, 0x2b4bcb61L, 0x350c9b64L, 0x31cd86d3L, 0x3c8ea00aL, 0x384fbdbdL, 0x4c11db70L, 0x48d0c6c7L, 0x4593e01eL, 0x4152fda9L, 0x5f15adacL, 0x5bd4b01bL, 0x569796c2L, 0x52568b75L, 0x6a1936c8L, 0x6ed82b7fL, 0x639b0da6L, 0x675a1011L, 0x791d4014L, 0x7ddc5da3L, 0x709f7b7aL, 0x745e66cdL, 0x9823b6e0L, 0x9ce2ab57L, 0x91a18d8eL, 0x95609039L, 0x8b27c03cL, 0x8fe6dd8bL, 0x82a5fb52L, 0x8664e6e5L, 0xbe2b5b58L, 0xbaea46efL, 0xb7a96036L, 0xb3687d81L, 0xad2f2d84L, 0xa9ee3033L, 0xa4ad16eaL, 0xa06c0b5dL, 0xd4326d90L, 0xd0f37027L, 0xddb056feL, 0xd9714b49L, 0xc7361b4cL, 0xc3f706fbL, 0xceb42022L, 0xca753d95L, 0xf23a8028L, 0xf6fb9d9fL, 0xfbb8bb46L, 0xff79a6f1L, 0xe13ef6f4L, 0xe5ffeb43L, 0xe8bccd9aL, 0xec7dd02dL, 0x34867077L, 0x30476dc0L, 0x3d044b19L, 0x39c556aeL, 0x278206abL, 0x23431b1cL, 0x2e003dc5L, 0x2ac12072L, 0x128e9dcfL, 0x164f8078L, 0x1b0ca6a1L, 0x1fcdbb16L, 0x018aeb13L, 0x054bf6a4L, 0x0808d07dL, 0x0cc9cdcaL, 0x7897ab07L, 0x7c56b6b0L, 0x71159069L, 0x75d48ddeL, 0x6b93dddbL, 0x6f52c06cL, 0x6211e6b5L, 0x66d0fb02L, 0x5e9f46bfL, 0x5a5e5b08L, 0x571d7dd1L, 0x53dc6066L, 0x4d9b3063L, 0x495a2dd4L, 0x44190b0dL, 0x40d816baL, 0xaca5c697L, 0xa864db20L, 0xa527fdf9L, 0xa1e6e04eL, 0xbfa1b04bL, 0xbb60adfcL, 0xb6238b25L, 0xb2e29692L, 0x8aad2b2fL, 0x8e6c3698L, 0x832f1041L, 0x87ee0df6L, 0x99a95df3L, 0x9d684044L, 0x902b669dL, 0x94ea7b2aL, 0xe0b41de7L, 0xe4750050L, 0xe9362689L, 0xedf73b3eL, 0xf3b06b3bL, 0xf771768cL, 0xfa325055L, 0xfef34de2L, 0xc6bcf05fL, 0xc27dede8L, 0xcf3ecb31L, 0xcbffd686L, 0xd5b88683L, 0xd1799b34L, 0xdc3abdedL, 0xd8fba05aL, 0x690ce0eeL, 0x6dcdfd59L, 0x608edb80L, 0x644fc637L, 0x7a089632L, 0x7ec98b85L, 0x738aad5cL, 0x774bb0ebL, 0x4f040d56L, 0x4bc510e1L, 0x46863638L, 0x42472b8fL, 0x5c007b8aL, 0x58c1663dL, 0x558240e4L, 0x51435d53L, 0x251d3b9eL, 0x21dc2629L, 0x2c9f00f0L, 0x285e1d47L, 0x36194d42L, 0x32d850f5L, 0x3f9b762cL, 0x3b5a6b9bL, 0x0315d626L, 0x07d4cb91L, 0x0a97ed48L, 0x0e56f0ffL, 0x1011a0faL, 0x14d0bd4dL, 0x19939b94L, 0x1d528623L, 0xf12f560eL, 0xf5ee4bb9L, 0xf8ad6d60L, 0xfc6c70d7L, 0xe22b20d2L, 0xe6ea3d65L, 0xeba91bbcL, 0xef68060bL, 0xd727bbb6L, 0xd3e6a601L, 0xdea580d8L, 0xda649d6fL, 0xc423cd6aL, 0xc0e2d0ddL, 0xcda1f604L, 0xc960ebb3L, 0xbd3e8d7eL, 0xb9ff90c9L, 0xb4bcb610L, 0xb07daba7L, 0xae3afba2L, 0xaafbe615L, 0xa7b8c0ccL, 0xa379dd7bL, 0x9b3660c6L, 0x9ff77d71L, 0x92b45ba8L, 0x9675461fL, 0x8832161aL, 0x8cf30badL, 0x81b02d74L, 0x857130c3L, 0x5d8a9099L, 0x594b8d2eL, 0x5408abf7L, 0x50c9b640L, 0x4e8ee645L, 0x4a4ffbf2L, 0x470cdd2bL, 0x43cdc09cL, 0x7b827d21L, 0x7f436096L, 0x7200464fL, 0x76c15bf8L, 0x68860bfdL, 0x6c47164aL, 0x61043093L, 0x65c52d24L, 0x119b4be9L, 0x155a565eL, 0x18197087L, 0x1cd86d30L, 0x029f3d35L, 0x065e2082L, 0x0b1d065bL, 0x0fdc1becL, 0x3793a651L, 0x3352bbe6L, 0x3e119d3fL, 0x3ad08088L, 0x2497d08dL, 0x2056cd3aL, 0x2d15ebe3L, 0x29d4f654L, 0xc5a92679L, 0xc1683bceL, 0xcc2b1d17L, 0xc8ea00a0L, 0xd6ad50a5L, 0xd26c4d12L, 0xdf2f6bcbL, 0xdbee767cL, 0xe3a1cbc1L, 0xe760d676L, 0xea23f0afL, 0xeee2ed18L, 0xf0a5bd1dL, 0xf464a0aaL, 0xf9278673L, 0xfde69bc4L, 0x89b8fd09L, 0x8d79e0beL, 0x803ac667L, 0x84fbdbd0L, 0x9abc8bd5L, 0x9e7d9662L, 0x933eb0bbL, 0x97ffad0cL, 0xafb010b1L, 0xab710d06L, 0xa6322bdfL, 0xa2f33668L, 0xbcb4666dL, 0xb8757bdaL, 0xb5365d03L, 0xb1f740b4L }; /*-------------------------------------------------------------*/ /*--- end crctable.c ---*/ /*-------------------------------------------------------------*/ 
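/*-- Illustrative sketch (compiled out): BZ2_crc32Table is the
     MSB-first (non-reflected) table for the 32-bit AUTODIN-II
     polynomial 0x04c11db7, and the generator below reproduces it.
     The BZ_UPDATE_CRC macro that consumes the table lives in
     bzlib_private.h (not shown in this file); the byte-at-a-time
     update it is assumed to perform is
        crc = (crc << 8) ^ BZ2_crc32Table[(crc >> 24) ^ byte];  --*/
#if 0
static void gen_crc32_table ( UInt32 tab[256] )
{
   Int32 i, k;
   for (i = 0; i < 256; i++) {
      UInt32 c = ((UInt32)i) << 24;
      for (k = 0; k < 8; k++)
         c = (c & 0x80000000) ? ((c << 1) ^ 0x04c11db7) : (c << 1);
      tab[i] = c;   /* tab[1] == 0x04c11db7, matching the table above */
   }
}
#endif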
/*-------------------------------------------------------------*/ /*--- Library top-level functions. ---*/ /*--- bzlib.c ---*/ /*-------------------------------------------------------------*/ /*-- This file is a part of bzip2 and/or libbzip2, a program and library for lossless, block-sorting data compression. Copyright (C) 1996-2004 Julian R Seward. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 3. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 4. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Julian Seward, Cambridge, UK. jseward@bzip.org bzip2/libbzip2 version 1.0 of 21 March 2000 This program is based on (at least) the work of: Mike Burrows David Wheeler Peter Fenwick Alistair Moffat Radford Neal Ian H. Witten Robert Sedgewick Jon L. Bentley For more information on these sources, see the manual. --*/ /*-- CHANGES ~~~~~~~ 0.9.0 -- original version. 0.9.0a/b -- no changes in this file. 0.9.0c * made zero-length BZ_FLUSH work correctly in bzCompress(). * fixed bzWrite/bzRead to ignore zero-length requests. * fixed bzread to correctly handle read requests after EOF. * wrong parameter order in call to bzDecompressInit in bzBuffToBuffDecompress. Fixed. 
--*/

/*---------------------------------------------------*/
/*--- Compression stuff ---*/
/*---------------------------------------------------*/

/*---------------------------------------------------*/
void BZ2_bz__AssertH__fail ( int errcode ) { vex_printf("BZ2_bz__AssertH__fail(%d) called, exiting\n", errcode); (*serviceFn)(0,0); }

void bz_internal_error ( int errcode ) { /* print the error code, as BZ2_bz__AssertH__fail does */ vex_printf("bz_internal_error(%d) called, exiting\n", errcode); (*serviceFn)(0,0); }

/*---------------------------------------------------*/
static int bz_config_ok ( void ) { if (sizeof(int) != 4) return 0; if (sizeof(short) != 2) return 0; if (sizeof(char) != 1) return 0; return 1; }

/*---------------------------------------------------*/
static void* default_bzalloc ( void* opaque, Int32 items, Int32 size ) { void* v = (void*) (*serviceFn)(2, items * size ); return v; }

static void default_bzfree ( void* opaque, void* addr ) { if (addr != NULL) (*serviceFn)( 3, (HWord)addr ); }

/*---------------------------------------------------*/
static void prepare_new_block ( EState* s ) { Int32 i; s->nblock = 0; s->numZ = 0; s->state_out_pos = 0; BZ_INITIALISE_CRC ( s->blockCRC ); for (i = 0; i < 256; i++) s->inUse[i] = False; s->blockNo++; }

/*---------------------------------------------------*/
static void init_RL ( EState* s ) { s->state_in_ch = 256; s->state_in_len = 0; }

static Bool isempty_RL ( EState* s ) { if (s->state_in_ch < 256 && s->state_in_len > 0) return False; else return True; }

/*---------------------------------------------------*/
int BZ_API(BZ2_bzCompressInit) ( bz_stream* strm, int blockSize100k, int verbosity, int workFactor ) { Int32 n; EState* s; if (!bz_config_ok()) return BZ_CONFIG_ERROR; if (strm == NULL || blockSize100k < 1 || blockSize100k > 9 || workFactor < 0 || workFactor > 250) return BZ_PARAM_ERROR; if (workFactor == 0) workFactor = 30; if (strm->bzalloc == NULL) strm->bzalloc = default_bzalloc; if (strm->bzfree == NULL) strm->bzfree = default_bzfree; s = BZALLOC( sizeof(EState) ); if (s == NULL) return BZ_MEM_ERROR; s->strm = strm; s->arr1 = NULL; s->arr2 = NULL; s->ftab = NULL; n = 100000 * blockSize100k; s->arr1 = BZALLOC( n * sizeof(UInt32) ); s->arr2 = BZALLOC( (n+BZ_N_OVERSHOOT) * sizeof(UInt32) ); s->ftab = BZALLOC( 65537 * sizeof(UInt32) ); if (s->arr1 == NULL || s->arr2 == NULL || s->ftab == NULL) { if (s->arr1 != NULL) BZFREE(s->arr1); if (s->arr2 != NULL) BZFREE(s->arr2); if (s->ftab != NULL) BZFREE(s->ftab); if (s != NULL) BZFREE(s); return BZ_MEM_ERROR; } s->blockNo = 0; s->state = BZ_S_INPUT; s->mode = BZ_M_RUNNING; s->combinedCRC = 0; s->blockSize100k = blockSize100k; s->nblockMAX = 100000 * blockSize100k - 19; s->verbosity = verbosity; s->workFactor = workFactor; s->block = (UChar*)s->arr2; s->mtfv = (UInt16*)s->arr1; s->zbits = NULL; s->ptr = (UInt32*)s->arr1; strm->state = s; strm->total_in_lo32 = 0; strm->total_in_hi32 = 0; strm->total_out_lo32 = 0; strm->total_out_hi32 = 0; init_RL ( s ); prepare_new_block ( s ); return BZ_OK; }

/*---------------------------------------------------*/
static void add_pair_to_block ( EState* s ) { Int32 i; UChar ch = (UChar)(s->state_in_ch); for (i = 0; i < s->state_in_len; i++) { BZ_UPDATE_CRC( s->blockCRC, ch ); } s->inUse[s->state_in_ch] = True; switch (s->state_in_len) { case 1: s->block[s->nblock] = (UChar)ch; s->nblock++; break; case 2: s->block[s->nblock] = (UChar)ch; s->nblock++; s->block[s->nblock] = (UChar)ch; s->nblock++; break; case 3: s->block[s->nblock] = (UChar)ch; s->nblock++; s->block[s->nblock] = (UChar)ch; s->nblock++;
s->block[s->nblock] = (UChar)ch; s->nblock++; break; default: s->inUse[s->state_in_len-4] = True; s->block[s->nblock] = (UChar)ch; s->nblock++; s->block[s->nblock] = (UChar)ch; s->nblock++; s->block[s->nblock] = (UChar)ch; s->nblock++; s->block[s->nblock] = (UChar)ch; s->nblock++; s->block[s->nblock] = ((UChar)(s->state_in_len-4)); s->nblock++; break; } } /*---------------------------------------------------*/ static void flush_RL ( EState* s ) { if (s->state_in_ch < 256) add_pair_to_block ( s ); init_RL ( s ); } /*---------------------------------------------------*/ #define ADD_CHAR_TO_BLOCK(zs,zchh0) \ { \ UInt32 zchh = (UInt32)(zchh0); \ /*-- fast track the common case --*/ \ if (zchh != zs->state_in_ch && \ zs->state_in_len == 1) { \ UChar ch = (UChar)(zs->state_in_ch); \ BZ_UPDATE_CRC( zs->blockCRC, ch ); \ zs->inUse[zs->state_in_ch] = True; \ zs->block[zs->nblock] = (UChar)ch; \ zs->nblock++; \ zs->state_in_ch = zchh; \ } \ else \ /*-- general, uncommon cases --*/ \ if (zchh != zs->state_in_ch || \ zs->state_in_len == 255) { \ if (zs->state_in_ch < 256) \ add_pair_to_block ( zs ); \ zs->state_in_ch = zchh; \ zs->state_in_len = 1; \ } else { \ zs->state_in_len++; \ } \ } /*---------------------------------------------------*/ static Bool copy_input_until_stop ( EState* s ) { Bool progress_in = False; if (s->mode == BZ_M_RUNNING) { /*-- fast track the common case --*/ while (True) { /*-- block full? --*/ if (s->nblock >= s->nblockMAX) break; /*-- no input? --*/ if (s->strm->avail_in == 0) break; progress_in = True; ADD_CHAR_TO_BLOCK ( s, (UInt32)(*((UChar*)(s->strm->next_in))) ); s->strm->next_in++; s->strm->avail_in--; s->strm->total_in_lo32++; if (s->strm->total_in_lo32 == 0) s->strm->total_in_hi32++; } } else { /*-- general, uncommon case --*/ while (True) { /*-- block full? --*/ if (s->nblock >= s->nblockMAX) break; /*-- no input? --*/ if (s->strm->avail_in == 0) break; /*-- flush/finish end? --*/ if (s->avail_in_expect == 0) break; progress_in = True; ADD_CHAR_TO_BLOCK ( s, (UInt32)(*((UChar*)(s->strm->next_in))) ); s->strm->next_in++; s->strm->avail_in--; s->strm->total_in_lo32++; if (s->strm->total_in_lo32 == 0) s->strm->total_in_hi32++; s->avail_in_expect--; } } return progress_in; } /*---------------------------------------------------*/ static Bool copy_output_until_stop ( EState* s ) { Bool progress_out = False; while (True) { /*-- no output space? --*/ if (s->strm->avail_out == 0) break; /*-- block done? 
--*/ if (s->state_out_pos >= s->numZ) break; progress_out = True; *(s->strm->next_out) = s->zbits[s->state_out_pos]; s->state_out_pos++; s->strm->avail_out--; s->strm->next_out++; s->strm->total_out_lo32++; if (s->strm->total_out_lo32 == 0) s->strm->total_out_hi32++; } return progress_out; } /*---------------------------------------------------*/ static Bool handle_compress ( bz_stream* strm ) { Bool progress_in = False; Bool progress_out = False; EState* s = strm->state; while (True) { if (s->state == BZ_S_OUTPUT) { progress_out |= copy_output_until_stop ( s ); if (s->state_out_pos < s->numZ) break; if (s->mode == BZ_M_FINISHING && s->avail_in_expect == 0 && isempty_RL(s)) break; prepare_new_block ( s ); s->state = BZ_S_INPUT; if (s->mode == BZ_M_FLUSHING && s->avail_in_expect == 0 && isempty_RL(s)) break; } if (s->state == BZ_S_INPUT) { progress_in |= copy_input_until_stop ( s ); if (s->mode != BZ_M_RUNNING && s->avail_in_expect == 0) { flush_RL ( s ); BZ2_compressBlock ( s, (Bool)(s->mode == BZ_M_FINISHING) ); s->state = BZ_S_OUTPUT; } else if (s->nblock >= s->nblockMAX) { BZ2_compressBlock ( s, False ); s->state = BZ_S_OUTPUT; } else if (s->strm->avail_in == 0) { break; } } } return progress_in || progress_out; } /*---------------------------------------------------*/ int BZ_API(BZ2_bzCompress) ( bz_stream *strm, int action ) { Bool progress; EState* s; if (strm == NULL) return BZ_PARAM_ERROR; s = strm->state; if (s == NULL) return BZ_PARAM_ERROR; if (s->strm != strm) return BZ_PARAM_ERROR; preswitch: switch (s->mode) { case BZ_M_IDLE: return BZ_SEQUENCE_ERROR; case BZ_M_RUNNING: if (action == BZ_RUN) { progress = handle_compress ( strm ); return progress ? BZ_RUN_OK : BZ_PARAM_ERROR; } else if (action == BZ_FLUSH) { s->avail_in_expect = strm->avail_in; s->mode = BZ_M_FLUSHING; goto preswitch; } else if (action == BZ_FINISH) { s->avail_in_expect = strm->avail_in; s->mode = BZ_M_FINISHING; goto preswitch; } else return BZ_PARAM_ERROR; case BZ_M_FLUSHING: if (action != BZ_FLUSH) return BZ_SEQUENCE_ERROR; if (s->avail_in_expect != s->strm->avail_in) return BZ_SEQUENCE_ERROR; progress = handle_compress ( strm ); if (s->avail_in_expect > 0 || !isempty_RL(s) || s->state_out_pos < s->numZ) return BZ_FLUSH_OK; s->mode = BZ_M_RUNNING; return BZ_RUN_OK; case BZ_M_FINISHING: if (action != BZ_FINISH) return BZ_SEQUENCE_ERROR; if (s->avail_in_expect != s->strm->avail_in) return BZ_SEQUENCE_ERROR; progress = handle_compress ( strm ); if (!progress) return BZ_SEQUENCE_ERROR; if (s->avail_in_expect > 0 || !isempty_RL(s) || s->state_out_pos < s->numZ) return BZ_FINISH_OK; s->mode = BZ_M_IDLE; return BZ_STREAM_END; } return BZ_OK; /*--not reached--*/ } /*---------------------------------------------------*/ int BZ_API(BZ2_bzCompressEnd) ( bz_stream *strm ) { EState* s; if (strm == NULL) return BZ_PARAM_ERROR; s = strm->state; if (s == NULL) return BZ_PARAM_ERROR; if (s->strm != strm) return BZ_PARAM_ERROR; if (s->arr1 != NULL) BZFREE(s->arr1); if (s->arr2 != NULL) BZFREE(s->arr2); if (s->ftab != NULL) BZFREE(s->ftab); BZFREE(strm->state); strm->state = NULL; return BZ_OK; } /*---------------------------------------------------*/ /*--- Decompression stuff ---*/ /*---------------------------------------------------*/ /*---------------------------------------------------*/ int BZ_API(BZ2_bzDecompressInit) ( bz_stream* strm, int verbosity, int small ) { DState* s; if (!bz_config_ok()) return BZ_CONFIG_ERROR; if (strm == NULL) return BZ_PARAM_ERROR; if (small != 0 && small != 1) return BZ_PARAM_ERROR; if 
(verbosity < 0 || verbosity > 4) return BZ_PARAM_ERROR; if (strm->bzalloc == NULL) strm->bzalloc = default_bzalloc; if (strm->bzfree == NULL) strm->bzfree = default_bzfree; s = BZALLOC( sizeof(DState) ); if (s == NULL) return BZ_MEM_ERROR; s->strm = strm; strm->state = s; s->state = BZ_X_MAGIC_1; s->bsLive = 0; s->bsBuff = 0; s->calculatedCombinedCRC = 0; strm->total_in_lo32 = 0; strm->total_in_hi32 = 0; strm->total_out_lo32 = 0; strm->total_out_hi32 = 0; s->smallDecompress = (Bool)small; s->ll4 = NULL; s->ll16 = NULL; s->tt = NULL; s->currBlockNo = 0; s->verbosity = verbosity; return BZ_OK; } /*---------------------------------------------------*/ /* Return True iff data corruption is discovered. Returns False if there is no problem. */ static Bool unRLE_obuf_to_output_FAST ( DState* s ) { UChar k1; if (s->blockRandomised) { while (True) { /* try to finish existing run */ while (True) { if (s->strm->avail_out == 0) return False; if (s->state_out_len == 0) break; *( (UChar*)(s->strm->next_out) ) = s->state_out_ch; BZ_UPDATE_CRC ( s->calculatedBlockCRC, s->state_out_ch ); s->state_out_len--; s->strm->next_out++; s->strm->avail_out--; s->strm->total_out_lo32++; if (s->strm->total_out_lo32 == 0) s->strm->total_out_hi32++; } /* can a new run be started? */ if (s->nblock_used == s->save_nblock+1) return False; /* Only caused by corrupt data stream? */ if (s->nblock_used > s->save_nblock+1) return True; s->state_out_len = 1; s->state_out_ch = s->k0; BZ_GET_FAST(k1); BZ_RAND_UPD_MASK; k1 ^= BZ_RAND_MASK; s->nblock_used++; if (s->nblock_used == s->save_nblock+1) continue; if (k1 != s->k0) { s->k0 = k1; continue; }; s->state_out_len = 2; BZ_GET_FAST(k1); BZ_RAND_UPD_MASK; k1 ^= BZ_RAND_MASK; s->nblock_used++; if (s->nblock_used == s->save_nblock+1) continue; if (k1 != s->k0) { s->k0 = k1; continue; }; s->state_out_len = 3; BZ_GET_FAST(k1); BZ_RAND_UPD_MASK; k1 ^= BZ_RAND_MASK; s->nblock_used++; if (s->nblock_used == s->save_nblock+1) continue; if (k1 != s->k0) { s->k0 = k1; continue; }; BZ_GET_FAST(k1); BZ_RAND_UPD_MASK; k1 ^= BZ_RAND_MASK; s->nblock_used++; s->state_out_len = ((Int32)k1) + 4; BZ_GET_FAST(s->k0); BZ_RAND_UPD_MASK; s->k0 ^= BZ_RAND_MASK; s->nblock_used++; } } else { /* restore */ UInt32 c_calculatedBlockCRC = s->calculatedBlockCRC; UChar c_state_out_ch = s->state_out_ch; Int32 c_state_out_len = s->state_out_len; Int32 c_nblock_used = s->nblock_used; Int32 c_k0 = s->k0; UInt32* c_tt = s->tt; UInt32 c_tPos = s->tPos; char* cs_next_out = s->strm->next_out; unsigned int cs_avail_out = s->strm->avail_out; /* end restore */ UInt32 avail_out_INIT = cs_avail_out; Int32 s_save_nblockPP = s->save_nblock+1; unsigned int total_out_lo32_old; while (True) { /* try to finish existing run */ if (c_state_out_len > 0) { while (True) { if (cs_avail_out == 0) goto return_notr; if (c_state_out_len == 1) break; *( (UChar*)(cs_next_out) ) = c_state_out_ch; BZ_UPDATE_CRC ( c_calculatedBlockCRC, c_state_out_ch ); c_state_out_len--; cs_next_out++; cs_avail_out--; } s_state_out_len_eq_one: { if (cs_avail_out == 0) { c_state_out_len = 1; goto return_notr; }; *( (UChar*)(cs_next_out) ) = c_state_out_ch; BZ_UPDATE_CRC ( c_calculatedBlockCRC, c_state_out_ch ); cs_next_out++; cs_avail_out--; } } /* Only caused by corrupt data stream? */ if (c_nblock_used > s_save_nblockPP) return True; /* can a new run be started? 
*/ if (c_nblock_used == s_save_nblockPP) { c_state_out_len = 0; goto return_notr; }; c_state_out_ch = c_k0; BZ_GET_FAST_C(k1); c_nblock_used++; if (k1 != c_k0) { c_k0 = k1; goto s_state_out_len_eq_one; }; if (c_nblock_used == s_save_nblockPP) goto s_state_out_len_eq_one; c_state_out_len = 2; BZ_GET_FAST_C(k1); c_nblock_used++; if (c_nblock_used == s_save_nblockPP) continue; if (k1 != c_k0) { c_k0 = k1; continue; }; c_state_out_len = 3; BZ_GET_FAST_C(k1); c_nblock_used++; if (c_nblock_used == s_save_nblockPP) continue; if (k1 != c_k0) { c_k0 = k1; continue; }; BZ_GET_FAST_C(k1); c_nblock_used++; c_state_out_len = ((Int32)k1) + 4; BZ_GET_FAST_C(c_k0); c_nblock_used++; } return_notr: total_out_lo32_old = s->strm->total_out_lo32; s->strm->total_out_lo32 += (avail_out_INIT - cs_avail_out); if (s->strm->total_out_lo32 < total_out_lo32_old) s->strm->total_out_hi32++; /* save */ s->calculatedBlockCRC = c_calculatedBlockCRC; s->state_out_ch = c_state_out_ch; s->state_out_len = c_state_out_len; s->nblock_used = c_nblock_used; s->k0 = c_k0; s->tt = c_tt; s->tPos = c_tPos; s->strm->next_out = cs_next_out; s->strm->avail_out = cs_avail_out; /* end save */ } return False; } /*---------------------------------------------------*/ /* Return True iff data corruption is discovered. Returns False if there is no problem. */ static Bool unRLE_obuf_to_output_SMALL ( DState* s ) { UChar k1; if (s->blockRandomised) { while (True) { /* try to finish existing run */ while (True) { if (s->strm->avail_out == 0) return False; if (s->state_out_len == 0) break; *( (UChar*)(s->strm->next_out) ) = s->state_out_ch; BZ_UPDATE_CRC ( s->calculatedBlockCRC, s->state_out_ch ); s->state_out_len--; s->strm->next_out++; s->strm->avail_out--; s->strm->total_out_lo32++; if (s->strm->total_out_lo32 == 0) s->strm->total_out_hi32++; } /* can a new run be started? */ if (s->nblock_used == s->save_nblock+1) return False; /* Only caused by corrupt data stream? */ if (s->nblock_used > s->save_nblock+1) return True; s->state_out_len = 1; s->state_out_ch = s->k0; BZ_GET_SMALL(k1); BZ_RAND_UPD_MASK; k1 ^= BZ_RAND_MASK; s->nblock_used++; if (s->nblock_used == s->save_nblock+1) continue; if (k1 != s->k0) { s->k0 = k1; continue; }; s->state_out_len = 2; BZ_GET_SMALL(k1); BZ_RAND_UPD_MASK; k1 ^= BZ_RAND_MASK; s->nblock_used++; if (s->nblock_used == s->save_nblock+1) continue; if (k1 != s->k0) { s->k0 = k1; continue; }; s->state_out_len = 3; BZ_GET_SMALL(k1); BZ_RAND_UPD_MASK; k1 ^= BZ_RAND_MASK; s->nblock_used++; if (s->nblock_used == s->save_nblock+1) continue; if (k1 != s->k0) { s->k0 = k1; continue; }; BZ_GET_SMALL(k1); BZ_RAND_UPD_MASK; k1 ^= BZ_RAND_MASK; s->nblock_used++; s->state_out_len = ((Int32)k1) + 4; BZ_GET_SMALL(s->k0); BZ_RAND_UPD_MASK; s->k0 ^= BZ_RAND_MASK; s->nblock_used++; } } else { while (True) { /* try to finish existing run */ while (True) { if (s->strm->avail_out == 0) return False; if (s->state_out_len == 0) break; *( (UChar*)(s->strm->next_out) ) = s->state_out_ch; BZ_UPDATE_CRC ( s->calculatedBlockCRC, s->state_out_ch ); s->state_out_len--; s->strm->next_out++; s->strm->avail_out--; s->strm->total_out_lo32++; if (s->strm->total_out_lo32 == 0) s->strm->total_out_hi32++; } /* can a new run be started? */ if (s->nblock_used == s->save_nblock+1) return False; /* Only caused by corrupt data stream? 
*/ if (s->nblock_used > s->save_nblock+1) return True; s->state_out_len = 1; s->state_out_ch = s->k0; BZ_GET_SMALL(k1); s->nblock_used++; if (s->nblock_used == s->save_nblock+1) continue; if (k1 != s->k0) { s->k0 = k1; continue; }; s->state_out_len = 2; BZ_GET_SMALL(k1); s->nblock_used++; if (s->nblock_used == s->save_nblock+1) continue; if (k1 != s->k0) { s->k0 = k1; continue; }; s->state_out_len = 3; BZ_GET_SMALL(k1); s->nblock_used++; if (s->nblock_used == s->save_nblock+1) continue; if (k1 != s->k0) { s->k0 = k1; continue; }; BZ_GET_SMALL(k1); s->nblock_used++; s->state_out_len = ((Int32)k1) + 4; BZ_GET_SMALL(s->k0); s->nblock_used++; } } } /*---------------------------------------------------*/ int BZ_API(BZ2_bzDecompress) ( bz_stream *strm ) { Bool corrupt; DState* s; if (strm == NULL) return BZ_PARAM_ERROR; s = strm->state; if (s == NULL) return BZ_PARAM_ERROR; if (s->strm != strm) return BZ_PARAM_ERROR; while (True) { if (s->state == BZ_X_IDLE) return BZ_SEQUENCE_ERROR; if (s->state == BZ_X_OUTPUT) { if (s->smallDecompress) corrupt = unRLE_obuf_to_output_SMALL ( s ); else corrupt = unRLE_obuf_to_output_FAST ( s ); if (corrupt) return BZ_DATA_ERROR; if (s->nblock_used == s->save_nblock+1 && s->state_out_len == 0) { BZ_FINALISE_CRC ( s->calculatedBlockCRC ); if (s->verbosity >= 3) VPrintf2 ( " {0x%08x, 0x%08x}", s->storedBlockCRC, s->calculatedBlockCRC ); if (s->verbosity >= 2) VPrintf0 ( "]" ); if (s->calculatedBlockCRC != s->storedBlockCRC) return BZ_DATA_ERROR; s->calculatedCombinedCRC = (s->calculatedCombinedCRC << 1) | (s->calculatedCombinedCRC >> 31); s->calculatedCombinedCRC ^= s->calculatedBlockCRC; s->state = BZ_X_BLKHDR_1; } else { return BZ_OK; } } if (s->state >= BZ_X_MAGIC_1) { Int32 r = BZ2_decompress ( s ); if (r == BZ_STREAM_END) { if (s->verbosity >= 3) VPrintf2 ( "\n combined CRCs: stored = 0x%08x, computed = 0x%08x", s->storedCombinedCRC, s->calculatedCombinedCRC ); if (s->calculatedCombinedCRC != s->storedCombinedCRC) return BZ_DATA_ERROR; return r; } if (s->state != BZ_X_OUTPUT) return r; } } AssertH ( 0, 6001 ); return 0; /*NOTREACHED*/ } /*---------------------------------------------------*/ int BZ_API(BZ2_bzDecompressEnd) ( bz_stream *strm ) { DState* s; if (strm == NULL) return BZ_PARAM_ERROR; s = strm->state; if (s == NULL) return BZ_PARAM_ERROR; if (s->strm != strm) return BZ_PARAM_ERROR; if (s->tt != NULL) BZFREE(s->tt); if (s->ll16 != NULL) BZFREE(s->ll16); if (s->ll4 != NULL) BZFREE(s->ll4); BZFREE(strm->state); strm->state = NULL; return BZ_OK; } #ifndef BZ_NO_STDIO /*---------------------------------------------------*/ /*--- File I/O stuff ---*/ /*---------------------------------------------------*/ #define BZ_SETERR(eee) \ { \ if (bzerror != NULL) *bzerror = eee; \ if (bzf != NULL) bzf->lastErr = eee; \ } typedef struct { FILE* handle; Char buf[BZ_MAX_UNUSED]; Int32 bufN; Bool writing; bz_stream strm; Int32 lastErr; Bool initialisedOk; } bzFile; /*---------------------------------------------*/ static Bool myfeof ( FILE* f ) { Int32 c = fgetc ( f ); if (c == EOF) return True; ungetc ( c, f ); return False; } /*---------------------------------------------------*/ BZFILE* BZ_API(BZ2_bzWriteOpen) ( int* bzerror, FILE* f, int blockSize100k, int verbosity, int workFactor ) { Int32 ret; bzFile* bzf = NULL; BZ_SETERR(BZ_OK); if (f == NULL || (blockSize100k < 1 || blockSize100k > 9) || (workFactor < 0 || workFactor > 250) || (verbosity < 0 || verbosity > 4)) { BZ_SETERR(BZ_PARAM_ERROR); return NULL; }; if (ferror(f)) { BZ_SETERR(BZ_IO_ERROR); return NULL; 
}; bzf = malloc ( sizeof(bzFile) ); if (bzf == NULL) { BZ_SETERR(BZ_MEM_ERROR); return NULL; }; BZ_SETERR(BZ_OK); bzf->initialisedOk = False; bzf->bufN = 0; bzf->handle = f; bzf->writing = True; bzf->strm.bzalloc = NULL; bzf->strm.bzfree = NULL; bzf->strm.opaque = NULL; if (workFactor == 0) workFactor = 30; ret = BZ2_bzCompressInit ( &(bzf->strm), blockSize100k, verbosity, workFactor ); if (ret != BZ_OK) { BZ_SETERR(ret); free(bzf); return NULL; }; bzf->strm.avail_in = 0; bzf->initialisedOk = True; return bzf; } /*---------------------------------------------------*/ void BZ_API(BZ2_bzWrite) ( int* bzerror, BZFILE* b, void* buf, int len ) { Int32 n, n2, ret; bzFile* bzf = (bzFile*)b; BZ_SETERR(BZ_OK); if (bzf == NULL || buf == NULL || len < 0) { BZ_SETERR(BZ_PARAM_ERROR); return; }; if (!(bzf->writing)) { BZ_SETERR(BZ_SEQUENCE_ERROR); return; }; if (ferror(bzf->handle)) { BZ_SETERR(BZ_IO_ERROR); return; }; if (len == 0) { BZ_SETERR(BZ_OK); return; }; bzf->strm.avail_in = len; bzf->strm.next_in = buf; while (True) { bzf->strm.avail_out = BZ_MAX_UNUSED; bzf->strm.next_out = bzf->buf; ret = BZ2_bzCompress ( &(bzf->strm), BZ_RUN ); if (ret != BZ_RUN_OK) { BZ_SETERR(ret); return; }; if (bzf->strm.avail_out < BZ_MAX_UNUSED) { n = BZ_MAX_UNUSED - bzf->strm.avail_out; n2 = fwrite ( (void*)(bzf->buf), sizeof(UChar), n, bzf->handle ); if (n != n2 || ferror(bzf->handle)) { BZ_SETERR(BZ_IO_ERROR); return; }; } if (bzf->strm.avail_in == 0) { BZ_SETERR(BZ_OK); return; }; } } /*---------------------------------------------------*/ void BZ_API(BZ2_bzWriteClose) ( int* bzerror, BZFILE* b, int abandon, unsigned int* nbytes_in, unsigned int* nbytes_out ) { BZ2_bzWriteClose64 ( bzerror, b, abandon, nbytes_in, NULL, nbytes_out, NULL ); } void BZ_API(BZ2_bzWriteClose64) ( int* bzerror, BZFILE* b, int abandon, unsigned int* nbytes_in_lo32, unsigned int* nbytes_in_hi32, unsigned int* nbytes_out_lo32, unsigned int* nbytes_out_hi32 ) { Int32 n, n2, ret; bzFile* bzf = (bzFile*)b; if (bzf == NULL) { BZ_SETERR(BZ_OK); return; }; if (!(bzf->writing)) { BZ_SETERR(BZ_SEQUENCE_ERROR); return; }; if (ferror(bzf->handle)) { BZ_SETERR(BZ_IO_ERROR); return; }; if (nbytes_in_lo32 != NULL) *nbytes_in_lo32 = 0; if (nbytes_in_hi32 != NULL) *nbytes_in_hi32 = 0; if (nbytes_out_lo32 != NULL) *nbytes_out_lo32 = 0; if (nbytes_out_hi32 != NULL) *nbytes_out_hi32 = 0; if ((!abandon) && bzf->lastErr == BZ_OK) { while (True) { bzf->strm.avail_out = BZ_MAX_UNUSED; bzf->strm.next_out = bzf->buf; ret = BZ2_bzCompress ( &(bzf->strm), BZ_FINISH ); if (ret != BZ_FINISH_OK && ret != BZ_STREAM_END) { BZ_SETERR(ret); return; }; if (bzf->strm.avail_out < BZ_MAX_UNUSED) { n = BZ_MAX_UNUSED - bzf->strm.avail_out; n2 = fwrite ( (void*)(bzf->buf), sizeof(UChar), n, bzf->handle ); if (n != n2 || ferror(bzf->handle)) { BZ_SETERR(BZ_IO_ERROR); return; }; } if (ret == BZ_STREAM_END) break; } } if ( !abandon && !ferror ( bzf->handle ) ) { fflush ( bzf->handle ); if (ferror(bzf->handle)) { BZ_SETERR(BZ_IO_ERROR); return; }; } if (nbytes_in_lo32 != NULL) *nbytes_in_lo32 = bzf->strm.total_in_lo32; if (nbytes_in_hi32 != NULL) *nbytes_in_hi32 = bzf->strm.total_in_hi32; if (nbytes_out_lo32 != NULL) *nbytes_out_lo32 = bzf->strm.total_out_lo32; if (nbytes_out_hi32 != NULL) *nbytes_out_hi32 = bzf->strm.total_out_hi32; BZ_SETERR(BZ_OK); BZ2_bzCompressEnd ( &(bzf->strm) ); free ( bzf ); } /*---------------------------------------------------*/ BZFILE* BZ_API(BZ2_bzReadOpen) ( int* bzerror, FILE* f, int verbosity, int small, void* unused, int nUnused ) { bzFile* bzf = 
NULL; int ret; BZ_SETERR(BZ_OK); if (f == NULL || (small != 0 && small != 1) || (verbosity < 0 || verbosity > 4) || (unused == NULL && nUnused != 0) || (unused != NULL && (nUnused < 0 || nUnused > BZ_MAX_UNUSED))) { BZ_SETERR(BZ_PARAM_ERROR); return NULL; }; if (ferror(f)) { BZ_SETERR(BZ_IO_ERROR); return NULL; }; bzf = malloc ( sizeof(bzFile) ); if (bzf == NULL) { BZ_SETERR(BZ_MEM_ERROR); return NULL; }; BZ_SETERR(BZ_OK); bzf->initialisedOk = False; bzf->handle = f; bzf->bufN = 0; bzf->writing = False; bzf->strm.bzalloc = NULL; bzf->strm.bzfree = NULL; bzf->strm.opaque = NULL; while (nUnused > 0) { bzf->buf[bzf->bufN] = *((UChar*)(unused)); bzf->bufN++; unused = ((void*)( 1 + ((UChar*)(unused)) )); nUnused--; } ret = BZ2_bzDecompressInit ( &(bzf->strm), verbosity, small ); if (ret != BZ_OK) { BZ_SETERR(ret); free(bzf); return NULL; }; bzf->strm.avail_in = bzf->bufN; bzf->strm.next_in = bzf->buf; bzf->initialisedOk = True; return bzf; } /*---------------------------------------------------*/ void BZ_API(BZ2_bzReadClose) ( int *bzerror, BZFILE *b ) { bzFile* bzf = (bzFile*)b; BZ_SETERR(BZ_OK); if (bzf == NULL) { BZ_SETERR(BZ_OK); return; }; if (bzf->writing) { BZ_SETERR(BZ_SEQUENCE_ERROR); return; }; if (bzf->initialisedOk) (void)BZ2_bzDecompressEnd ( &(bzf->strm) ); free ( bzf ); } /*---------------------------------------------------*/ int BZ_API(BZ2_bzRead) ( int* bzerror, BZFILE* b, void* buf, int len ) { Int32 n, ret; bzFile* bzf = (bzFile*)b; BZ_SETERR(BZ_OK); if (bzf == NULL || buf == NULL || len < 0) { BZ_SETERR(BZ_PARAM_ERROR); return 0; }; if (bzf->writing) { BZ_SETERR(BZ_SEQUENCE_ERROR); return 0; }; if (len == 0) { BZ_SETERR(BZ_OK); return 0; }; bzf->strm.avail_out = len; bzf->strm.next_out = buf; while (True) { if (ferror(bzf->handle)) { BZ_SETERR(BZ_IO_ERROR); return 0; }; if (bzf->strm.avail_in == 0 && !myfeof(bzf->handle)) { n = fread ( bzf->buf, sizeof(UChar), BZ_MAX_UNUSED, bzf->handle ); if (ferror(bzf->handle)) { BZ_SETERR(BZ_IO_ERROR); return 0; }; bzf->bufN = n; bzf->strm.avail_in = bzf->bufN; bzf->strm.next_in = bzf->buf; } ret = BZ2_bzDecompress ( &(bzf->strm) ); if (ret != BZ_OK && ret != BZ_STREAM_END) { BZ_SETERR(ret); return 0; }; if (ret == BZ_OK && myfeof(bzf->handle) && bzf->strm.avail_in == 0 && bzf->strm.avail_out > 0) { BZ_SETERR(BZ_UNEXPECTED_EOF); return 0; }; if (ret == BZ_STREAM_END) { BZ_SETERR(BZ_STREAM_END); return len - bzf->strm.avail_out; }; if (bzf->strm.avail_out == 0) { BZ_SETERR(BZ_OK); return len; }; } return 0; /*not reached*/ } /*---------------------------------------------------*/ void BZ_API(BZ2_bzReadGetUnused) ( int* bzerror, BZFILE* b, void** unused, int* nUnused ) { bzFile* bzf = (bzFile*)b; if (bzf == NULL) { BZ_SETERR(BZ_PARAM_ERROR); return; }; if (bzf->lastErr != BZ_STREAM_END) { BZ_SETERR(BZ_SEQUENCE_ERROR); return; }; if (unused == NULL || nUnused == NULL) { BZ_SETERR(BZ_PARAM_ERROR); return; }; BZ_SETERR(BZ_OK); *nUnused = bzf->strm.avail_in; *unused = bzf->strm.next_in; } #endif /*---------------------------------------------------*/ /*--- Misc convenience stuff ---*/ /*---------------------------------------------------*/ /*---------------------------------------------------*/ int BZ_API(BZ2_bzBuffToBuffCompress) ( char* dest, unsigned int* destLen, char* source, unsigned int sourceLen, int blockSize100k, int verbosity, int workFactor ) { bz_stream strm; int ret; if (dest == NULL || destLen == NULL || source == NULL || blockSize100k < 1 || blockSize100k > 9 || verbosity < 0 || verbosity > 4 || workFactor < 0 || workFactor > 
250) return BZ_PARAM_ERROR; if (workFactor == 0) workFactor = 30; strm.bzalloc = NULL; strm.bzfree = NULL; strm.opaque = NULL; ret = BZ2_bzCompressInit ( &strm, blockSize100k, verbosity, workFactor ); if (ret != BZ_OK) return ret; strm.next_in = source; strm.next_out = dest; strm.avail_in = sourceLen; strm.avail_out = *destLen; ret = BZ2_bzCompress ( &strm, BZ_FINISH ); if (ret == BZ_FINISH_OK) goto output_overflow; if (ret != BZ_STREAM_END) goto errhandler; /* normal termination */ *destLen -= strm.avail_out; BZ2_bzCompressEnd ( &strm ); return BZ_OK; output_overflow: BZ2_bzCompressEnd ( &strm ); return BZ_OUTBUFF_FULL; errhandler: BZ2_bzCompressEnd ( &strm ); return ret; } /*---------------------------------------------------*/ int BZ_API(BZ2_bzBuffToBuffDecompress) ( char* dest, unsigned int* destLen, char* source, unsigned int sourceLen, int small, int verbosity ) { bz_stream strm; int ret; if (dest == NULL || destLen == NULL || source == NULL || (small != 0 && small != 1) || verbosity < 0 || verbosity > 4) return BZ_PARAM_ERROR; strm.bzalloc = NULL; strm.bzfree = NULL; strm.opaque = NULL; ret = BZ2_bzDecompressInit ( &strm, verbosity, small ); if (ret != BZ_OK) return ret; strm.next_in = source; strm.next_out = dest; strm.avail_in = sourceLen; strm.avail_out = *destLen; ret = BZ2_bzDecompress ( &strm ); if (ret == BZ_OK) goto output_overflow_or_eof; if (ret != BZ_STREAM_END) goto errhandler; /* normal termination */ *destLen -= strm.avail_out; BZ2_bzDecompressEnd ( &strm ); return BZ_OK; output_overflow_or_eof: if (strm.avail_out > 0) { BZ2_bzDecompressEnd ( &strm ); return BZ_UNEXPECTED_EOF; } else { BZ2_bzDecompressEnd ( &strm ); return BZ_OUTBUFF_FULL; }; errhandler: BZ2_bzDecompressEnd ( &strm ); return ret; } /*---------------------------------------------------*/ /*-- Code contributed by Yoshioka Tsuneo (QWF00133@niftyserve.or.jp/tsuneo-y@is.aist-nara.ac.jp), to support better zlib compatibility. This code is not _officially_ part of libbzip2 (yet); I haven't tested it, documented it, or considered the threading-safeness of it. If this code breaks, please contact both Yoshioka and me. --*/ /*---------------------------------------------------*/ /*---------------------------------------------------*/ /*-- return version like "0.9.0c". --*/ const char * BZ_API(BZ2_bzlibVersion)(void) { return BZ_VERSION; } #ifndef BZ_NO_STDIO /*---------------------------------------------------*/ #if defined(_WIN32) || defined(OS2) || defined(MSDOS) # include <fcntl.h> # include <io.h> # define SET_BINARY_MODE(file) setmode(fileno(file),O_BINARY) #else # define SET_BINARY_MODE(file) #endif static BZFILE * bzopen_or_bzdopen ( const char *path, /* no use when bzdopen */ int fd, /* no use when bzdopen */ const char *mode, int open_mode) /* bzopen: 0, bzdopen:1 */ { int bzerr; char unused[BZ_MAX_UNUSED]; int blockSize100k = 9; int writing = 0; char mode2[10] = ""; FILE *fp = NULL; BZFILE *bzfp = NULL; int verbosity = 0; int workFactor = 30; int smallMode = 0; int nUnused = 0; if (mode == NULL) return NULL; while (*mode) { switch (*mode) { case 'r': writing = 0; break; case 'w': writing = 1; break; case 's': smallMode = 1; break; default: if (isdigit((int)(*mode))) { blockSize100k = *mode-BZ_HDR_0; } } mode++; } strcat(mode2, writing ? "w" : "r" ); strcat(mode2,"b"); /* binary mode */ if (open_mode==0) { if (path==NULL || strcmp(path,"")==0) { fp = (writing ? 
stdout : stdin); SET_BINARY_MODE(fp); } else { fp = fopen(path,mode2); } } else {
#ifdef BZ_STRICT_ANSI
fp = NULL;
#else
fp = fdopen(fd,mode2);
#endif
} if (fp == NULL) return NULL; if (writing) { /* Guard against total chaos and anarchy -- JRS */ if (blockSize100k < 1) blockSize100k = 1; if (blockSize100k > 9) blockSize100k = 9; bzfp = BZ2_bzWriteOpen(&bzerr,fp,blockSize100k, verbosity,workFactor); } else { bzfp = BZ2_bzReadOpen(&bzerr,fp,verbosity,smallMode, unused,nUnused); } if (bzfp == NULL) { if (fp != stdin && fp != stdout) fclose(fp); return NULL; } return bzfp; }

/*---------------------------------------------------*/
/*-- open file for read or write. ex) bzopen("file","w9") case path="" or NULL => use stdin or stdout. --*/
BZFILE * BZ_API(BZ2_bzopen) ( const char *path, const char *mode ) { return bzopen_or_bzdopen(path,-1,mode,/*bzopen*/0); }

/*---------------------------------------------------*/
BZFILE * BZ_API(BZ2_bzdopen) ( int fd, const char *mode ) { return bzopen_or_bzdopen(NULL,fd,mode,/*bzdopen*/1); }

/*---------------------------------------------------*/
int BZ_API(BZ2_bzread) (BZFILE* b, void* buf, int len ) { int bzerr, nread; if (((bzFile*)b)->lastErr == BZ_STREAM_END) return 0; nread = BZ2_bzRead(&bzerr,b,buf,len); if (bzerr == BZ_OK || bzerr == BZ_STREAM_END) { return nread; } else { return -1; } }

/*---------------------------------------------------*/
int BZ_API(BZ2_bzwrite) (BZFILE* b, void* buf, int len ) { int bzerr; BZ2_bzWrite(&bzerr,b,buf,len); if(bzerr == BZ_OK){ return len; }else{ return -1; } }

/*---------------------------------------------------*/
int BZ_API(BZ2_bzflush) (BZFILE *b) { /* do nothing now... */ return 0; }

/*---------------------------------------------------*/
void BZ_API(BZ2_bzclose) (BZFILE* b) { int bzerr; FILE *fp; if (b==NULL) {return;} fp = ((bzFile *)b)->handle; /* fetch the handle only after the NULL check */ if(((bzFile*)b)->writing){ BZ2_bzWriteClose(&bzerr,b,0,NULL,NULL); if(bzerr != BZ_OK){ BZ2_bzWriteClose(NULL,b,1,NULL,NULL); } }else{ BZ2_bzReadClose(&bzerr,b); } if(fp!=stdin && fp!=stdout){ fclose(fp); } }

/*---------------------------------------------------*/
/*-- return last error code --*/
static char *bzerrorstrings[] = { "OK" ,"SEQUENCE_ERROR" ,"PARAM_ERROR" ,"MEM_ERROR" ,"DATA_ERROR" ,"DATA_ERROR_MAGIC" ,"IO_ERROR" ,"UNEXPECTED_EOF" ,"OUTBUFF_FULL" ,"CONFIG_ERROR" ,"???" /* for future */ ,"???" /* for future */ ,"???" /* for future */ ,"???" /* for future */ ,"???" /* for future */ ,"???" /* for future */ };

const char * BZ_API(BZ2_bzerror) (BZFILE *b, int *errnum) { int err = ((bzFile *)b)->lastErr; if(err>0) err = 0; *errnum = err; return bzerrorstrings[err*-1]; }
#endif

/*-------------------------------------------------------------*/
/*--- end bzlib.c ---*/
/*-------------------------------------------------------------*/

/////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////

/* A test program written to test robustness to decompression of corrupted data. Usage is unzcrash filename and the program will read the specified file, compress it (in memory), and then repeatedly decompress it, each time with a different bit of the compressed data inverted, so as to test all possible one-bit errors. This should not cause any invalid memory accesses. If it does, I want to know about it! p.s. As you can see from the above description, the process is incredibly slow. A file of size eg 5KB will cause it to run for many hours.
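   In outline, the driver loop is assumed to work like this: compress
   the whole input once into zbuf (nZ bytes), then for each bit
   position b in 0 .. nZ*8-1 call flip_bit(b), attempt a full
   in-memory decompression of zbuf expecting a clean error code
   rather than a crash, and call flip_bit(b) again to restore the
   data.  (Outline inferred from the description above; see
   flip_bit() below.)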
*/ //#include <stdio.h> //#include <assert.h> //#include "bzlib.h" #define M_BLOCK 1000000 #define M_BLOCK_OUT (M_BLOCK + 1000000) char inbuf[M_BLOCK]; char outbuf[M_BLOCK_OUT]; char zbuf[M_BLOCK + 600 + (M_BLOCK / 100)]; int nIn; unsigned int nOut; unsigned int nZ; #if 0 static char *bzerrorstrings[] = { "OK" ,"SEQUENCE_ERROR" ,"PARAM_ERROR" ,"MEM_ERROR" ,"DATA_ERROR" ,"DATA_ERROR_MAGIC" ,"IO_ERROR" ,"UNEXPECTED_EOF" ,"OUTBUFF_FULL" ,"???" /* for future */ ,"???" /* for future */ ,"???" /* for future */ ,"???" /* for future */ ,"???" /* for future */ ,"???" /* for future */ }; #endif void flip_bit ( int bit ) { int byteno = bit / 8; int bitno = bit % 8; UChar mask = 1 << bitno; //fprintf ( stderr, "(byte %d bit %d mask %d)", // byteno, bitno, (int)mask ); zbuf[byteno] ^= mask; } void set_inbuf ( void ) { inbuf[0] = 0; my_strcat(inbuf, "At her sixtieth birthday party, Margaret Thatcher "); my_strcat(inbuf, "blew on the cake to light the candles.\n"); my_strcat(inbuf, "This program, bzip2, the associated library libbzip2, and all\n"); my_strcat(inbuf, "documentation, are copyright (C) 1996-2004 Julian R Seward. All\n"); my_strcat(inbuf, "rights reserved.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, "Redistribution and use in source and binary forms, with or without\n"); my_strcat(inbuf, "modification, are permitted provided that the following conditions\n"); my_strcat(inbuf, "are met:\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, "1. Redistributions of source code must retain the above copyright\n"); my_strcat(inbuf, " notice, this list of conditions and the following disclaimer.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, "2. The origin of this software must not be misrepresented; you must\n"); my_strcat(inbuf, " not claim that you wrote the original software. If you use this\n"); my_strcat(inbuf, " software in a product, an acknowledgment in the product\n"); my_strcat(inbuf, " documentation would be appreciated but is not required.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, "3. Altered source versions must be plainly marked as such, and must\n"); my_strcat(inbuf, " not be misrepresented as being the original software.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, "4. The name of the author may not be used to endorse or promote\n"); my_strcat(inbuf, " products derived from this software without specific prior written\n"); my_strcat(inbuf, " permission.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, "THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS\n"); my_strcat(inbuf, "OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n"); my_strcat(inbuf, "WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n"); my_strcat(inbuf, "ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY\n"); my_strcat(inbuf, "DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n"); my_strcat(inbuf, "DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE\n"); my_strcat(inbuf, "GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n"); my_strcat(inbuf, "INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n"); my_strcat(inbuf, "WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n"); my_strcat(inbuf, "NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n"); my_strcat(inbuf, "SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, 
"ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, "ababababababababababababababababababababababababababababababab"); my_strcat(inbuf, " GNU GENERAL PUBLIC LICENSE\n"); my_strcat(inbuf, " Version 2, June 1991\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " Copyright (C) 1989, 1991 Free Software Foundation, Inc.\n"); my_strcat(inbuf, " 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n"); my_strcat(inbuf, " Everyone is permitted to copy and distribute verbatim copies\n"); my_strcat(inbuf, " of this license document, but changing it is not allowed.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " Preamble\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " The licenses for most software are designed to take away your\n"); my_strcat(inbuf, "freedom to share and change it. By contrast, the GNU General Public\n"); my_strcat(inbuf, "License is intended to guarantee your freedom to share and change free\n"); my_strcat(inbuf, "software--to make sure the software is free for all its users. This\n"); my_strcat(inbuf, "General Public License applies to most of the Free Software\n"); my_strcat(inbuf, "Foundation's software and to any other program whose authors commit to\n"); my_strcat(inbuf, "using it. (Some other Free Software Foundation software is covered by\n"); my_strcat(inbuf, "the GNU Library General Public License instead.) You can apply it to\n"); my_strcat(inbuf, "your programs, too.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " When we speak of free software, we are referring to freedom, not\n"); my_strcat(inbuf, "price. Our General Public Licenses are designed to make sure that you\n"); my_strcat(inbuf, "have the freedom to distribute copies of free software (and charge for\n"); my_strcat(inbuf, "this service if you wish), that you receive source code or can get it\n"); my_strcat(inbuf, "if you want it, that you can change the software or use pieces of it\n"); my_strcat(inbuf, "in new free programs; and that you know you can do these things.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " To protect your rights, we need to make restrictions that forbid\n"); my_strcat(inbuf, "anyone to deny you these rights or to ask you to surrender the rights.\n"); my_strcat(inbuf, "These restrictions translate to certain responsibilities for you if you\n"); my_strcat(inbuf, "distribute copies of the software, or if you modify it.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " For example, if you distribute copies of such a program, whether\n"); my_strcat(inbuf, "gratis or for a fee, you must give the recipients all the rights that\n"); my_strcat(inbuf, "you have. You must make sure that they, too, receive or can get the\n"); my_strcat(inbuf, "source code. 
And you must show them these terms so they know their\n"); my_strcat(inbuf, "rights.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " We protect your rights with two steps: (1) copyright the software, and\n"); my_strcat(inbuf, "(2) offer you this license which gives you legal permission to copy,\n"); my_strcat(inbuf, "distribute and/or modify the software.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " Also, for each author's protection and ours, we want to make certain\n"); my_strcat(inbuf, "that everyone understands that there is no warranty for this free\n"); my_strcat(inbuf, "software. If the software is modified by someone else and passed on, we\n"); my_strcat(inbuf, "want its recipients to know that what they have is not the original, so\n"); my_strcat(inbuf, "that any problems introduced by others will not reflect on the original\n"); my_strcat(inbuf, "authors' reputations.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " Finally, any free program is threatened constantly by software\n"); my_strcat(inbuf, "patents. We wish to avoid the danger that redistributors of a free\n"); my_strcat(inbuf, "program will individually obtain patent licenses, in effect making the\n"); my_strcat(inbuf, "program proprietary. To prevent this, we have made it clear that any\n"); my_strcat(inbuf, "patent must be licensed for everyone's free use or not licensed at all.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " The precise terms and conditions for copying, distribution and\n"); my_strcat(inbuf, "modification follow.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " GNU GENERAL PUBLIC LICENSE\n"); my_strcat(inbuf, " TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " 0. This License applies to any program or other work which contains\n"); my_strcat(inbuf, "a notice placed by the copyright holder saying it may be distributed\n"); my_strcat(inbuf, "under the terms of this General Public License. The Program, below,\n"); my_strcat(inbuf, "refers to any such program or work, and a work based on the Program\n"); my_strcat(inbuf, "means either the Program or any derivative work under copyright law:\n"); my_strcat(inbuf, "that is to say, a work containing the Program or a portion of it,\n"); my_strcat(inbuf, "either verbatim or with modifications and/or translated into another\n"); my_strcat(inbuf, "language. (Hereinafter, translation is included without limitation in\n"); my_strcat(inbuf, "the term modification.) Each licensee is addressed as you.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, "Activities other than copying, distribution and modification are not\n"); my_strcat(inbuf, "covered by this License; they are outside its scope. The act of\n"); my_strcat(inbuf, "running the Program is not restricted, and the output from the Program\n"); my_strcat(inbuf, "is covered only if its contents constitute a work based on the\n"); my_strcat(inbuf, "Program (independent of having been made by running the Program).\n"); my_strcat(inbuf, "Whether that is true depends on what the Program does.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " 1. 
You may copy and distribute verbatim copies of the Program's\n"); my_strcat(inbuf, "source code as you receive it, in any medium, provided that you\n"); my_strcat(inbuf, "conspicuously and appropriately publish on each copy an appropriate\n"); my_strcat(inbuf, "copyright notice and disclaimer of warranty; keep intact all the\n"); my_strcat(inbuf, "notices that refer to this License and to the absence of any warranty;\n"); my_strcat(inbuf, "and give any other recipients of the Program a copy of this License\n"); my_strcat(inbuf, "along with the Program.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, "You may charge a fee for the physical act of transferring a copy, and\n"); my_strcat(inbuf, "you may at your option offer warranty protection in exchange for a fee.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " 2. You may modify your copy or copies of the Program or any portion\n"); my_strcat(inbuf, "of it, thus forming a work based on the Program, and copy and\n"); my_strcat(inbuf, "distribute such modifications or work under the terms of Section 1\n"); my_strcat(inbuf, "above, provided that you also meet all of these conditions:\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " a) You must cause the modified files to carry prominent notices\n"); my_strcat(inbuf, " stating that you changed the files and the date of any change.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " b) You must cause any work that you distribute or publish, that in\n"); my_strcat(inbuf, " whole or in part contains or is derived from the Program or any\n"); my_strcat(inbuf, " part thereof, to be licensed as a whole at no charge to all third\n"); my_strcat(inbuf, " parties under the terms of this License.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " c) If the modified program normally reads commands interactively\n"); my_strcat(inbuf, " when run, you must cause it, when started running for such\n"); my_strcat(inbuf, " interactive use in the most ordinary way, to print or display an\n"); my_strcat(inbuf, " announcement including an appropriate copyright notice and a\n"); my_strcat(inbuf, " notice that there is no warranty (or else, saying that you provide\n"); my_strcat(inbuf, " a warranty) and that users may redistribute the program under\n"); my_strcat(inbuf, " these conditions, and telling the user how to view a copy of this\n"); my_strcat(inbuf, " License. (Exception: if the Program itself is interactive but\n"); my_strcat(inbuf, " does not normally print such an announcement, your work based on\n"); my_strcat(inbuf, " the Program is not required to print an announcement.)\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, "These requirements apply to the modified work as a whole. If\n"); my_strcat(inbuf, "identifiable sections of that work are not derived from the Program,\n"); my_strcat(inbuf, "and can be reasonably considered independent and separate works in\n"); my_strcat(inbuf, "themselves, then this License, and its terms, do not apply to those\n"); my_strcat(inbuf, "sections when you distribute them as separate works. 
But when you\n"); my_strcat(inbuf, "distribute the same sections as part of a whole which is a work based\n"); my_strcat(inbuf, "on the Program, the distribution of the whole must be on the terms of\n"); my_strcat(inbuf, "this License, whose permissions for other licensees extend to the\n"); my_strcat(inbuf, "entire whole, and thus to each and every part regardless of who wrote it.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, "Thus, it is not the intent of this section to claim rights or contest\n"); my_strcat(inbuf, "your rights to work written entirely by you; rather, the intent is to\n"); my_strcat(inbuf, "exercise the right to control the distribution of derivative or\n"); my_strcat(inbuf, "collective works based on the Program.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, "In addition, mere aggregation of another work not based on the Program\n"); my_strcat(inbuf, "with the Program (or with a work based on the Program) on a volume of\n"); my_strcat(inbuf, "a storage or distribution medium does not bring the other work under\n"); my_strcat(inbuf, "the scope of this License.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " 3. You may copy and distribute the Program (or a work based on it,\n"); my_strcat(inbuf, "under Section 2) in object code or executable form under the terms of\n"); my_strcat(inbuf, "Sections 1 and 2 above provided that you also do one of the following:\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " a) Accompany it with the complete corresponding machine-readable\n"); my_strcat(inbuf, " source code, which must be distributed under the terms of Sections\n"); my_strcat(inbuf, " 1 and 2 above on a medium customarily used for software interchange; or,\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " b) Accompany it with a written offer, valid for at least three\n"); my_strcat(inbuf, " years, to give any third party, for a charge no more than your\n"); my_strcat(inbuf, " cost of physically performing source distribution, a complete\n"); my_strcat(inbuf, " machine-readable copy of the corresponding source code, to be\n"); my_strcat(inbuf, " distributed under the terms of Sections 1 and 2 above on a medium\n"); my_strcat(inbuf, " customarily used for software interchange; or,\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " c) Accompany it with the information you received as to the offer\n"); my_strcat(inbuf, " to distribute corresponding source code. (This alternative is\n"); my_strcat(inbuf, " allowed only for noncommercial distribution and only if you\n"); my_strcat(inbuf, " received the program in object code or executable form with such\n"); my_strcat(inbuf, " an offer, in accord with Subsection b above.)\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, "The source code for a work means the preferred form of the work for\n"); my_strcat(inbuf, "making modifications to it. For an executable work, complete source\n"); my_strcat(inbuf, "code means all the source code for all modules it contains, plus any\n"); my_strcat(inbuf, "associated interface definition files, plus the scripts used to\n"); my_strcat(inbuf, "control compilation and installation of the executable. 
However, as a\n"); my_strcat(inbuf, "special exception, the source code distributed need not include\n"); my_strcat(inbuf, "anything that is normally distributed (in either source or binary\n"); my_strcat(inbuf, "form) with the major components (compiler, kernel, and so on) of the\n"); my_strcat(inbuf, "operating system on which the executable runs, unless that component\n"); my_strcat(inbuf, "itself accompanies the executable.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, "If distribution of executable or object code is made by offering\n"); my_strcat(inbuf, "access to copy from a designated place, then offering equivalent\n"); my_strcat(inbuf, "access to copy the source code from the same place counts as\n"); my_strcat(inbuf, "distribution of the source code, even though third parties are not\n"); my_strcat(inbuf, "compelled to copy the source along with the object code.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " 4. You may not copy, modify, sublicense, or distribute the Program\n"); my_strcat(inbuf, "except as expressly provided under this License. Any attempt\n"); my_strcat(inbuf, "otherwise to copy, modify, sublicense or distribute the Program is\n"); my_strcat(inbuf, "void, and will automatically terminate your rights under this License.\n"); my_strcat(inbuf, "However, parties who have received copies, or rights, from you under\n"); my_strcat(inbuf, "this License will not have their licenses terminated so long as such\n"); my_strcat(inbuf, "parties remain in full compliance.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " 5. You are not required to accept this License, since you have not\n"); my_strcat(inbuf, "signed it. However, nothing else grants you permission to modify or\n"); my_strcat(inbuf, "distribute the Program or its derivative works. These actions are\n"); my_strcat(inbuf, "prohibited by law if you do not accept this License. Therefore, by\n"); my_strcat(inbuf, "modifying or distributing the Program (or any work based on the\n"); my_strcat(inbuf, "Program), you indicate your acceptance of this License to do so, and\n"); my_strcat(inbuf, "all its terms and conditions for copying, distributing or modifying\n"); my_strcat(inbuf, "the Program or works based on it.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " 6. Each time you redistribute the Program (or any work based on the\n"); my_strcat(inbuf, "Program), the recipient automatically receives a license from the\n"); my_strcat(inbuf, "original licensor to copy, distribute or modify the Program subject to\n"); my_strcat(inbuf, "these terms and conditions. You may not impose any further\n"); my_strcat(inbuf, "restrictions on the recipients' exercise of the rights granted herein.\n"); my_strcat(inbuf, "You are not responsible for enforcing compliance by third parties to\n"); my_strcat(inbuf, "this License.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " 7. If, as a consequence of a court judgment or allegation of patent\n"); my_strcat(inbuf, "infringement or for any other reason (not limited to patent issues),\n"); my_strcat(inbuf, "conditions are imposed on you (whether by court order, agreement or\n"); my_strcat(inbuf, "otherwise) that contradict the conditions of this License, they do not\n"); my_strcat(inbuf, "excuse you from the conditions of this License. If you cannot\n"); my_strcat(inbuf, "distribute so as to satisfy simultaneously your obligations under this\n"); my_strcat(inbuf, "License and any other pertinent obligations, then as a consequence you\n"); my_strcat(inbuf, "may not distribute the Program at all. 
For example, if a patent\n"); my_strcat(inbuf, "license would not permit royalty-free redistribution of the Program by\n"); my_strcat(inbuf, "all those who receive copies directly or indirectly through you, then\n"); my_strcat(inbuf, "the only way you could satisfy both it and this License would be to\n"); my_strcat(inbuf, "refrain entirely from distribution of the Program.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, "If any portion of this section is held invalid or unenforceable under\n"); my_strcat(inbuf, "any particular circumstance, the balance of the section is intended to\n"); my_strcat(inbuf, "apply and the section as a whole is intended to apply in other\n"); my_strcat(inbuf, "circumstances.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, "It is not the purpose of this section to induce you to infringe any\n"); my_strcat(inbuf, "patents or other property right claims or to contest validity of any\n"); my_strcat(inbuf, "such claims; this section has the sole purpose of protecting the\n"); my_strcat(inbuf, "integrity of the free software distribution system, which is\n"); my_strcat(inbuf, "implemented by public license practices. Many people have made\n"); my_strcat(inbuf, "generous contributions to the wide range of software distributed\n"); my_strcat(inbuf, "through that system in reliance on consistent application of that\n"); my_strcat(inbuf, "system; it is up to the author/donor to decide if he or she is willing\n"); my_strcat(inbuf, "to distribute software through any other system and a licensee cannot\n"); my_strcat(inbuf, "impose that choice.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, "This section is intended to make thoroughly clear what is believed to\n"); my_strcat(inbuf, "be a consequence of the rest of this License.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " 8. If the distribution and/or use of the Program is restricted in\n"); my_strcat(inbuf, "certain countries either by patents or by copyrighted interfaces, the\n"); my_strcat(inbuf, "original copyright holder who places the Program under this License\n"); my_strcat(inbuf, "may add an explicit geographical distribution limitation excluding\n"); my_strcat(inbuf, "those countries, so that distribution is permitted only in or among\n"); my_strcat(inbuf, "countries not thus excluded. In such case, this License incorporates\n"); my_strcat(inbuf, "the limitation as if written in the body of this License.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " 9. The Free Software Foundation may publish revised and/or new versions\n"); my_strcat(inbuf, "of the General Public License from time to time. Such new versions will\n"); my_strcat(inbuf, "be similar in spirit to the present version, but may differ in detail to\n"); my_strcat(inbuf, "address new problems or concerns.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, "Each version is given a distinguishing version number. If the Program\n"); my_strcat(inbuf, "specifies a version number of this License which applies to it and any\n"); my_strcat(inbuf, "later version, you have the option of following the terms and conditions\n"); my_strcat(inbuf, "either of that version or of any later version published by the Free\n"); my_strcat(inbuf, "Software Foundation. If the Program does not specify a version number of\n"); my_strcat(inbuf, "this License, you may choose any version ever published by the Free Software\n"); my_strcat(inbuf, "Foundation.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " 10. 
If you wish to incorporate parts of the Program into other free\n"); my_strcat(inbuf, "programs whose distribution conditions are different, write to the author\n"); my_strcat(inbuf, "to ask for permission. For software which is copyrighted by the Free\n"); my_strcat(inbuf, "Software Foundation, write to the Free Software Foundation; we sometimes\n"); my_strcat(inbuf, "make exceptions for this. Our decision will be guided by the two goals\n"); my_strcat(inbuf, "of preserving the free status of all derivatives of our free software and\n"); my_strcat(inbuf, "of promoting the sharing and reuse of software generally.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " NO WARRANTY\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY\n"); my_strcat(inbuf, "FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN\n"); my_strcat(inbuf, "OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES\n"); my_strcat(inbuf, "PROVIDE THE PROGRAM AS IS WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED\n"); my_strcat(inbuf, "OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\n"); my_strcat(inbuf, "MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS\n"); my_strcat(inbuf, "TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE\n"); my_strcat(inbuf, "PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,\n"); my_strcat(inbuf, "REPAIR OR CORRECTION.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\n"); my_strcat(inbuf, "WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR\n"); my_strcat(inbuf, "REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,\n"); my_strcat(inbuf, "INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING\n"); my_strcat(inbuf, "OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED\n"); my_strcat(inbuf, "TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY\n"); my_strcat(inbuf, "YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER\n"); my_strcat(inbuf, "PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE\n"); my_strcat(inbuf, "POSSIBILITY OF SUCH DAMAGES.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " END OF TERMS AND CONDITIONS\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " How to Apply These Terms to Your New Programs\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " If you develop a new program, and you want it to be of the greatest\n"); my_strcat(inbuf, "possible use to the public, the best way to achieve this is to make it\n"); my_strcat(inbuf, "free software which everyone can redistribute and change under these terms.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " To do so, attach the following notices to the program. 
It is safest\n"); my_strcat(inbuf, "to attach them to the start of each source file to most effectively\n"); my_strcat(inbuf, "convey the exclusion of warranty; and each file should have at least\n"); my_strcat(inbuf, "the copyright line and a pointer to where the full notice is found.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " <one line to give the program's name and a brief idea of what it does.>\n"); my_strcat(inbuf, " Copyright (C) <year> <name of author>\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " This program is free software; you can redistribute it and/or modify\n"); my_strcat(inbuf, " it under the terms of the GNU General Public License as published by\n"); my_strcat(inbuf, " the Free Software Foundation; either version 2 of the License, or\n"); my_strcat(inbuf, " (at your option) any later version.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " This program is distributed in the hope that it will be useful,\n"); my_strcat(inbuf, " but WITHOUT ANY WARRANTY; without even the implied warranty of\n"); my_strcat(inbuf, " MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n"); my_strcat(inbuf, " GNU General Public License for more details.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " You should have received a copy of the GNU General Public License\n"); my_strcat(inbuf, " along with this program; if not, write to the Free Software\n"); my_strcat(inbuf, " Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, "Also add information on how to contact you by electronic and paper mail.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, "If the program is interactive, make it output a short notice like this\n"); my_strcat(inbuf, "when it starts in an interactive mode:\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " Gnomovision version 69, Copyright (C) year name of author\n"); my_strcat(inbuf, " Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\n"); my_strcat(inbuf, " This is free software, and you are welcome to redistribute it\n"); my_strcat(inbuf, " under certain conditions; type `show c' for details.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, "The hypothetical commands `show w' and `show c' should show the appropriate\n"); my_strcat(inbuf, "parts of the General Public License. Of course, the commands you use may\n"); my_strcat(inbuf, "be called something other than `show w' and `show c'; they could even be\n"); my_strcat(inbuf, "mouse-clicks or menu items--whatever suits your program.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, "You should also get your employer (if you work as a programmer) or your\n"); my_strcat(inbuf, "school, if any, to sign a copyright disclaimer for the program, if\n"); my_strcat(inbuf, "necessary. Here is a sample; alter the names:\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " Yoyodyne, Inc., hereby disclaims all copyright interest in the program\n"); my_strcat(inbuf, " `Gnomovision' (which makes passes at compilers) written by James Hacker.\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, " <signature of Ty Coon>, 1 April 1989\n"); my_strcat(inbuf, " Ty Coon, President of Vice\n"); my_strcat(inbuf, "\n"); my_strcat(inbuf, "This General Public License does not permit incorporating your program into\n"); my_strcat(inbuf, "proprietary programs. If your program is a subroutine library, you may\n"); my_strcat(inbuf, "consider it more useful to permit linking proprietary applications with the\n"); my_strcat(inbuf, "library. 
If this is what you want to do, use the GNU Library General\n");
   my_strcat(inbuf, "Public License instead of this License.\n");
   my_strcat(inbuf, "\n");
}

#include <stdio.h>
#include <assert.h>

/* For providing services. */
static HWord g_serviceFn ( HWord arg1, HWord arg2 )
{
   switch (arg1) {
      case 0: /* EXIT */
         exit(0);
      case 1: /* PUTC */
         putchar(arg2);
         return 0;
      case 2: /* MALLOC */
         return (HWord)malloc(arg2);
      case 3: /* FREE */
         free((void*)arg2);
         return 0;
      default:
         assert(0);
   }
   return 0;
}

static char *bzerrorstrings[] = {
       "OK"
      ,"SEQUENCE_ERROR"
      ,"PARAM_ERROR"
      ,"MEM_ERROR"
      ,"DATA_ERROR"
      ,"DATA_ERROR_MAGIC"
      ,"IO_ERROR"
      ,"UNEXPECTED_EOF"
      ,"OUTBUFF_FULL"
      ,"CONFIG_ERROR"
      ,"???" /* for future */
      ,"???" /* for future */
      ,"???" /* for future */
      ,"???" /* for future */
      ,"???" /* for future */
      ,"???" /* for future */
};

// If given a cmd line arg, behave as a correctness regtest
// (run fast and be verbose). If not, run for a long time
// which is what is needed for the performance suite.
int main ( int argc, char** argv )
{
   int r;
   int bit;
   int i;
   int regtest;

   assert(argc == 1 || argc == 2);
   regtest = argc==2;
   /* hardwire one particular behaviour */
   regtest = 1;

   serviceFn = g_serviceFn;

   set_inbuf();
   nIn = vex_strlen(inbuf)+1;
   vex_printf( "%d bytes read\n", nIn );

   nZ = M_BLOCK;
   r = BZ2_bzBuffToBuffCompress ( zbuf, &nZ, inbuf, nIn, 9, 3/*verb*/, 30 );

   if (r != BZ_OK) {
      vex_printf("initial compress failed!\n");
      (*serviceFn)(0,0);
   }
   vex_printf( "%d after compression\n", nZ );

   for (bit = 0; bit < nZ*8; bit += (bit < 35 ? 3 : (regtest ? 2377 : 137))) {
      if (bit >= 11920)
         break;
      if (regtest)
         vex_printf( "bit %d ", bit );
      flip_bit ( bit );
      nOut = M_BLOCK_OUT;
      r = BZ2_bzBuffToBuffDecompress ( outbuf, &nOut, zbuf, nZ, 1/*small*/, 0 );
      if (regtest)
         vex_printf( " %d %s ", r, bzerrorstrings[-r] );
      if (r != BZ_OK) {
         if (regtest)
            vex_printf( "\n" );
      } else {
         if (nOut != nIn) {
            vex_printf( "nIn/nOut mismatch %d %d\n", nIn, nOut );
            (*serviceFn)(0,0);
         } else {
            for (i = 0; i < nOut; i++)
               if (inbuf[i] != outbuf[i]) {
                  vex_printf( "mismatch at %d\n", i );
                  (*serviceFn)(0,0);
               }
            if (i == nOut)
               vex_printf( "really ok!\n" );
         }
      }
      flip_bit ( bit );
   }

#if 0
   assert (nOut == nIn);
   for (i = 0; i < nOut; i++) {
      if (inbuf[i] != outbuf[i]) {
         vex_printf( "difference at %d !\n", i );
         return 1;
      }
   }
#endif

   vex_printf( "all ok\n" );
   (*serviceFn)(0,0);
   /*NOTREACHED*/
   return 0;
}
gpl-2.0
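The test above depends on a flip_bit() helper defined earlier in the same file, outside this excerpt. Below is a minimal sketch consistent with how main() uses it; the real definition may differ, and zbuf here is a local stand-in for the test's global compressed buffer:

#include <stdio.h>

/* Hypothetical stand-in for the compressed stream; in the test
 * above, zbuf is a global filled by BZ2_bzBuffToBuffCompress. */
static unsigned char zbuf[16];

/* Toggle one bit: byte index is bit/8, bit position within
 * the byte is bit%8. XOR is its own inverse, so calling this
 * twice with the same argument restores the buffer. */
static void flip_bit(int bit)
{
    zbuf[bit / 8] ^= (unsigned char)(1 << (bit % 8));
}

int main(void)
{
    zbuf[0] = 0x00;
    flip_bit(3);
    printf("after one flip:  0x%02x\n", zbuf[0]); /* 0x08 */
    flip_bit(3);
    printf("after two flips: 0x%02x\n", zbuf[0]); /* 0x00 */
    return 0;
}

This self-inverse property is why the loop in main() can flip a bit, attempt decompression, then flip the same bit again: each iteration corrupts exactly one bit of an otherwise pristine stream.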
fedya/aircam-openwrt
build_dir/linux-gm812x/linux-2.6.28.fa2/net/decnet/sysctl_net_decnet.c
26
10016
/* * DECnet An implementation of the DECnet protocol suite for the LINUX * operating system. DECnet is implemented using the BSD Socket * interface as the means of communication with the user level. * * DECnet sysctl support functions * * Author: Steve Whitehouse <SteveW@ACM.org> * * * Changes: * Steve Whitehouse - C99 changes and default device handling * Steve Whitehouse - Memory buffer settings, like the tcp ones * */ #include <linux/mm.h> #include <linux/sysctl.h> #include <linux/fs.h> #include <linux/netdevice.h> #include <linux/string.h> #include <net/neighbour.h> #include <net/dst.h> #include <net/flow.h> #include <asm/uaccess.h> #include <net/dn.h> #include <net/dn_dev.h> #include <net/dn_route.h> int decnet_debug_level; int decnet_time_wait = 30; int decnet_dn_count = 1; int decnet_di_count = 3; int decnet_dr_count = 3; int decnet_log_martians = 1; int decnet_no_fc_max_cwnd = NSP_MIN_WINDOW; /* Reasonable defaults, I hope, based on tcp's defaults */ int sysctl_decnet_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 }; int sysctl_decnet_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 }; int sysctl_decnet_rmem[3] = { 4 * 1024, 87380, 87380 * 2 }; #ifdef CONFIG_SYSCTL extern int decnet_dst_gc_interval; static int min_decnet_time_wait[] = { 5 }; static int max_decnet_time_wait[] = { 600 }; static int min_state_count[] = { 1 }; static int max_state_count[] = { NSP_MAXRXTSHIFT }; static int min_decnet_dst_gc_interval[] = { 1 }; static int max_decnet_dst_gc_interval[] = { 60 }; static int min_decnet_no_fc_max_cwnd[] = { NSP_MIN_WINDOW }; static int max_decnet_no_fc_max_cwnd[] = { NSP_MAX_WINDOW }; static char node_name[7] = "???"; static struct ctl_table_header *dn_table_header = NULL; /* * ctype.h :-) */ #define ISNUM(x) (((x) >= '0') && ((x) <= '9')) #define ISLOWER(x) (((x) >= 'a') && ((x) <= 'z')) #define ISUPPER(x) (((x) >= 'A') && ((x) <= 'Z')) #define ISALPHA(x) (ISLOWER(x) || ISUPPER(x)) #define INVALID_END_CHAR(x) (ISNUM(x) || ISALPHA(x)) static void strip_it(char *str) { for(;;) { switch(*str) { case ' ': case '\n': case '\r': case ':': *str = 0; case 0: return; } str++; } } /* * Simple routine to parse an ascii DECnet address * into a network order address. 
*/ static int parse_addr(__le16 *addr, char *str) { __u16 area, node; while(*str && !ISNUM(*str)) str++; if (*str == 0) return -1; area = (*str++ - '0'); if (ISNUM(*str)) { area *= 10; area += (*str++ - '0'); } if (*str++ != '.') return -1; if (!ISNUM(*str)) return -1; node = *str++ - '0'; if (ISNUM(*str)) { node *= 10; node += (*str++ - '0'); } if (ISNUM(*str)) { node *= 10; node += (*str++ - '0'); } if (ISNUM(*str)) { node *= 10; node += (*str++ - '0'); } if ((node > 1023) || (area > 63)) return -1; if (INVALID_END_CHAR(*str)) return -1; *addr = dn_htons((area << 10) | node); return 0; } static int dn_node_address_strategy(ctl_table *table, void __user *oldval, size_t __user *oldlenp, void __user *newval, size_t newlen) { size_t len; __le16 addr; if (oldval && oldlenp) { if (get_user(len, oldlenp)) return -EFAULT; if (len) { if (len != sizeof(unsigned short)) return -EINVAL; if (put_user(decnet_address, (__le16 __user *)oldval)) return -EFAULT; } } if (newval && newlen) { if (newlen != sizeof(unsigned short)) return -EINVAL; if (get_user(addr, (__le16 __user *)newval)) return -EFAULT; dn_dev_devices_off(); decnet_address = addr; dn_dev_devices_on(); } return 0; } static int dn_node_address_handler(ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) { char addr[DN_ASCBUF_LEN]; size_t len; __le16 dnaddr; if (!*lenp || (*ppos && !write)) { *lenp = 0; return 0; } if (write) { int len = (*lenp < DN_ASCBUF_LEN) ? *lenp : (DN_ASCBUF_LEN-1); if (copy_from_user(addr, buffer, len)) return -EFAULT; addr[len] = 0; strip_it(addr); if (parse_addr(&dnaddr, addr)) return -EINVAL; dn_dev_devices_off(); decnet_address = dnaddr; dn_dev_devices_on(); *ppos += len; return 0; } dn_addr2asc(dn_ntohs(decnet_address), addr); len = strlen(addr); addr[len++] = '\n'; if (len > *lenp) len = *lenp; if (copy_to_user(buffer, addr, len)) return -EFAULT; *lenp = len; *ppos += len; return 0; } static int dn_def_dev_strategy(ctl_table *table, void __user *oldval, size_t __user *oldlenp, void __user *newval, size_t newlen) { size_t len; struct net_device *dev; char devname[17]; size_t namel; int rv = 0; devname[0] = 0; if (oldval && oldlenp) { if (get_user(len, oldlenp)) return -EFAULT; if (len) { dev = dn_dev_get_default(); if (dev) { strcpy(devname, dev->name); dev_put(dev); } namel = strlen(devname) + 1; if (len > namel) len = namel; if (copy_to_user(oldval, devname, len)) return -EFAULT; if (put_user(len, oldlenp)) return -EFAULT; } } if (newval && newlen) { if (newlen > 16) return -E2BIG; if (copy_from_user(devname, newval, newlen)) return -EFAULT; devname[newlen] = 0; dev = dev_get_by_name(&init_net, devname); if (dev == NULL) return -ENODEV; rv = -ENODEV; if (dev->dn_ptr != NULL) { rv = dn_dev_set_default(dev, 1); if (rv) dev_put(dev); } } return rv; } static int dn_def_dev_handler(ctl_table *table, int write, struct file * filp, void __user *buffer, size_t *lenp, loff_t *ppos) { size_t len; struct net_device *dev; char devname[17]; if (!*lenp || (*ppos && !write)) { *lenp = 0; return 0; } if (write) { if (*lenp > 16) return -E2BIG; if (copy_from_user(devname, buffer, *lenp)) return -EFAULT; devname[*lenp] = 0; strip_it(devname); dev = dev_get_by_name(&init_net, devname); if (dev == NULL) return -ENODEV; if (dev->dn_ptr == NULL) { dev_put(dev); return -ENODEV; } if (dn_dev_set_default(dev, 1)) { dev_put(dev); return -ENODEV; } *ppos += *lenp; return 0; } dev = dn_dev_get_default(); if (dev == NULL) { *lenp = 0; return 0; } strcpy(devname, dev->name); dev_put(dev); len = 
strlen(devname); devname[len++] = '\n'; if (len > *lenp) len = *lenp; if (copy_to_user(buffer, devname, len)) return -EFAULT; *lenp = len; *ppos += len; return 0; } static ctl_table dn_table[] = { { .ctl_name = NET_DECNET_NODE_ADDRESS, .procname = "node_address", .maxlen = 7, .mode = 0644, .proc_handler = dn_node_address_handler, .strategy = dn_node_address_strategy, }, { .ctl_name = NET_DECNET_NODE_NAME, .procname = "node_name", .data = node_name, .maxlen = 7, .mode = 0644, .proc_handler = &proc_dostring, .strategy = &sysctl_string, }, { .ctl_name = NET_DECNET_DEFAULT_DEVICE, .procname = "default_device", .maxlen = 16, .mode = 0644, .proc_handler = dn_def_dev_handler, .strategy = dn_def_dev_strategy, }, { .ctl_name = NET_DECNET_TIME_WAIT, .procname = "time_wait", .data = &decnet_time_wait, .maxlen = sizeof(int), .mode = 0644, .proc_handler = &proc_dointvec_minmax, .strategy = &sysctl_intvec, .extra1 = &min_decnet_time_wait, .extra2 = &max_decnet_time_wait }, { .ctl_name = NET_DECNET_DN_COUNT, .procname = "dn_count", .data = &decnet_dn_count, .maxlen = sizeof(int), .mode = 0644, .proc_handler = &proc_dointvec_minmax, .strategy = &sysctl_intvec, .extra1 = &min_state_count, .extra2 = &max_state_count }, { .ctl_name = NET_DECNET_DI_COUNT, .procname = "di_count", .data = &decnet_di_count, .maxlen = sizeof(int), .mode = 0644, .proc_handler = &proc_dointvec_minmax, .strategy = &sysctl_intvec, .extra1 = &min_state_count, .extra2 = &max_state_count }, { .ctl_name = NET_DECNET_DR_COUNT, .procname = "dr_count", .data = &decnet_dr_count, .maxlen = sizeof(int), .mode = 0644, .proc_handler = &proc_dointvec_minmax, .strategy = &sysctl_intvec, .extra1 = &min_state_count, .extra2 = &max_state_count }, { .ctl_name = NET_DECNET_DST_GC_INTERVAL, .procname = "dst_gc_interval", .data = &decnet_dst_gc_interval, .maxlen = sizeof(int), .mode = 0644, .proc_handler = &proc_dointvec_minmax, .strategy = &sysctl_intvec, .extra1 = &min_decnet_dst_gc_interval, .extra2 = &max_decnet_dst_gc_interval }, { .ctl_name = NET_DECNET_NO_FC_MAX_CWND, .procname = "no_fc_max_cwnd", .data = &decnet_no_fc_max_cwnd, .maxlen = sizeof(int), .mode = 0644, .proc_handler = &proc_dointvec_minmax, .strategy = &sysctl_intvec, .extra1 = &min_decnet_no_fc_max_cwnd, .extra2 = &max_decnet_no_fc_max_cwnd }, { .ctl_name = NET_DECNET_MEM, .procname = "decnet_mem", .data = &sysctl_decnet_mem, .maxlen = sizeof(sysctl_decnet_mem), .mode = 0644, .proc_handler = &proc_dointvec, .strategy = &sysctl_intvec, }, { .ctl_name = NET_DECNET_RMEM, .procname = "decnet_rmem", .data = &sysctl_decnet_rmem, .maxlen = sizeof(sysctl_decnet_rmem), .mode = 0644, .proc_handler = &proc_dointvec, .strategy = &sysctl_intvec, }, { .ctl_name = NET_DECNET_WMEM, .procname = "decnet_wmem", .data = &sysctl_decnet_wmem, .maxlen = sizeof(sysctl_decnet_wmem), .mode = 0644, .proc_handler = &proc_dointvec, .strategy = &sysctl_intvec, }, { .ctl_name = NET_DECNET_DEBUG_LEVEL, .procname = "debug", .data = &decnet_debug_level, .maxlen = sizeof(int), .mode = 0644, .proc_handler = &proc_dointvec, .strategy = &sysctl_intvec, }, {0} }; static struct ctl_path dn_path[] = { { .procname = "net", .ctl_name = CTL_NET, }, { .procname = "decnet", .ctl_name = NET_DECNET, }, { } }; void dn_register_sysctl(void) { dn_table_header = register_sysctl_paths(dn_path, dn_table); } void dn_unregister_sysctl(void) { unregister_sysctl_table(dn_table_header); } #else /* CONFIG_SYSCTL */ void dn_unregister_sysctl(void) { } void dn_register_sysctl(void) { } #endif
gpl-2.0
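parse_addr() above accepts a textual DECnet address of the form area.node, with area at most 63 and node at most 1023, and packs the pair as (area << 10) | node before dn_htons() converts it to wire order. A self-contained sketch of that packing (the helper name is hypothetical, not part of the kernel source):

#include <stdio.h>
#include <stdint.h>

/* Pack a DECnet "area.node" pair the way parse_addr does,
 * prior to the dn_htons() conversion: area occupies the top
 * 6 bits, node the low 10 bits. */
static uint16_t dn_pack(unsigned area, unsigned node)
{
    return (uint16_t)((area << 10) | node);
}

int main(void)
{
    /* "1.2" -> area 1, node 2 -> 0x0402 */
    printf("1.2 packs to 0x%04x\n", dn_pack(1, 2));
    /* the limits parse_addr enforces: area <= 63, node <= 1023 */
    printf("63.1023 packs to 0x%04x\n", dn_pack(63, 1023));
    return 0;
}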
chettyharish/polling-server-deadline
drivers/staging/rtl8723au/os_dep/os_intfs.c
26
28190
/******************************************************************************
 *
 * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 ******************************************************************************/
#define _OS_INTFS_C_

#include <osdep_service.h>
#include <drv_types.h>
#include <xmit_osdep.h>
#include <recv_osdep.h>
#include <hal_intf.h>
#include <rtw_version.h>
#include <ethernet.h>
#include <usb_osintf.h>
#include <linux/version.h>

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek Wireless Lan Driver");
MODULE_AUTHOR("Realtek Semiconductor Corp.");
MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@redhat.com>");
MODULE_VERSION(DRIVERVERSION);
MODULE_FIRMWARE("rtlwifi/rtl8821aefw.bin");

/* module param defaults */
static int rtw_chip_version = 0x00;
static int rtw_rfintfs = HWPI;
static int rtw_debug = 1;
static int rtw_channel = 1;/* ad-hoc support requirement */
static int rtw_wireless_mode = WIRELESS_11BG_24N;
static int rtw_vrtl_carrier_sense = AUTO_VCS;
static int rtw_vcs_type = RTS_CTS;
static int rtw_rts_thresh = 2347;
static int rtw_frag_thresh = 2346;
static int rtw_preamble = PREAMBLE_LONG;/* long, short, auto */
static int rtw_scan_mode = 1;/* active, passive */
static int rtw_adhoc_tx_pwr = 1;
static int rtw_soft_ap;
static int rtw_power_mgnt = 1;
static int rtw_ips_mode = IPS_NORMAL;
static int rtw_smart_ps = 2;

module_param(rtw_ips_mode, int, 0644);
MODULE_PARM_DESC(rtw_ips_mode, "The default IPS mode");

static int rtw_long_retry_lmt = 7;
static int rtw_short_retry_lmt = 7;
static int rtw_busy_thresh = 40;
static int rtw_ack_policy = NORMAL_ACK;
static int rtw_acm_method;/* 0:By SW 1:By HW. */
static int rtw_wmm_enable = 1;/* default is set to enable the wmm. */
static int rtw_uapsd_enable;
int rtw_ht_enable23A = 1; /* 0: disable, bit(0): enable 2.4g, bit(1): enable 5g */
int rtw_cbw40_enable23A = 3;
int rtw_ampdu_enable23A = 1;/* for enable tx_ampdu */
/* 0: disable, bit(0):enable 2.4g, bit(1):enable 5g, default is set to enable
 * 2.4GHZ for IOT issue with Buffalo's AP at 5GHZ */
static int rtw_rx_stbc = 1;
static int rtw_ampdu_amsdu;/* 0: disabled, 1:enabled, 2:auto */
/* Use 2 path Tx to transmit MCS0~7 and legacy mode */
static int rtw_lowrate_two_xmit = 1;
/* int rf_config = RF_1T2R;  1T2R */
static int rtw_rf_config = RF_819X_MAX_TYPE; /* auto */
static int rtw_low_power;
static int rtw_wifi_spec;
static int rtw_channel_plan = RT_CHANNEL_DOMAIN_MAX;

#ifdef CONFIG_8723AU_BT_COEXIST
static int rtw_btcoex_enable = 1;
static int rtw_bt_iso = 2;/* 0:Low, 1:High, 2:From Efuse */
/* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter, 4.Busy, 5.OtherBusy */
static int rtw_bt_sco = 3;
/* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */
static int rtw_bt_ampdu = 1;
#endif

/* 0:Reject AP's Add BA req, 1:Accept AP's Add BA req.
*/ static int rtw_AcceptAddbaReq = true; static int rtw_antdiv_cfg = 2; /* 0:OFF , 1:ON, 2:decide by Efuse config */ static int rtw_antdiv_type; /* 0:decide by efuse */ static int rtw_enusbss;/* 0:disable, 1:enable */ static int rtw_hwpdn_mode = 2;/* 0:disable, 1:enable, 2: by EFUSE config */ static int rtw_hwpwrp_detect; /* HW power ping detect 0:disable , 1:enable */ static int rtw_hw_wps_pbc = 1; static int rtw_80211d; static int rtw_regulatory_id = 0xff;/* Regulatory tab id, 0xff = follow efuse's setting */ module_param(rtw_regulatory_id, int, 0644); static char *ifname = "wlan%d"; module_param(ifname, charp, 0644); MODULE_PARM_DESC(ifname, "The default name to allocate for first interface"); static char *if2name = "wlan%d"; module_param(if2name, charp, 0644); MODULE_PARM_DESC(if2name, "The default name to allocate for second interface"); module_param(rtw_channel_plan, int, 0644); module_param(rtw_chip_version, int, 0644); module_param(rtw_rfintfs, int, 0644); module_param(rtw_channel, int, 0644); module_param(rtw_wmm_enable, int, 0644); module_param(rtw_vrtl_carrier_sense, int, 0644); module_param(rtw_vcs_type, int, 0644); module_param(rtw_busy_thresh, int, 0644); module_param(rtw_ht_enable23A, int, 0644); module_param(rtw_cbw40_enable23A, int, 0644); module_param(rtw_ampdu_enable23A, int, 0644); module_param(rtw_rx_stbc, int, 0644); module_param(rtw_ampdu_amsdu, int, 0644); module_param(rtw_lowrate_two_xmit, int, 0644); module_param(rtw_rf_config, int, 0644); module_param(rtw_power_mgnt, int, 0644); module_param(rtw_smart_ps, int, 0644); module_param(rtw_low_power, int, 0644); module_param(rtw_wifi_spec, int, 0644); module_param(rtw_antdiv_cfg, int, 0644); module_param(rtw_enusbss, int, 0644); module_param(rtw_hwpdn_mode, int, 0644); module_param(rtw_hwpwrp_detect, int, 0644); module_param(rtw_hw_wps_pbc, int, 0644); static uint rtw_max_roaming_times = 2; module_param(rtw_max_roaming_times, uint, 0644); MODULE_PARM_DESC(rtw_max_roaming_times, "The max roaming times to try"); module_param(rtw_80211d, int, 0644); MODULE_PARM_DESC(rtw_80211d, "Enable 802.11d mechanism"); #ifdef CONFIG_8723AU_BT_COEXIST module_param(rtw_btcoex_enable, int, 0644); MODULE_PARM_DESC(rtw_btcoex_enable, "Enable BT co-existence mechanism"); #endif static uint rtw_notch_filter; module_param(rtw_notch_filter, uint, 0644); MODULE_PARM_DESC(rtw_notch_filter, "0:Disable, 1:Enable, 2:Enable only for P2P"); module_param_named(debug, rtw_debug, int, 0444); MODULE_PARM_DESC(debug, "Set debug level (1-9) (default 1)"); static int netdev_close(struct net_device *pnetdev); static uint loadparam(struct rtw_adapter *padapter, struct net_device *pnetdev) { struct registry_priv *registry_par = &padapter->registrypriv; uint status = _SUCCESS; GlobalDebugLevel23A = rtw_debug; registry_par->chip_version = (u8)rtw_chip_version; registry_par->rfintfs = (u8)rtw_rfintfs; memcpy(registry_par->ssid.ssid, "ANY", 3); registry_par->ssid.ssid_len = 3; registry_par->channel = (u8)rtw_channel; registry_par->wireless_mode = (u8)rtw_wireless_mode; registry_par->vrtl_carrier_sense = (u8)rtw_vrtl_carrier_sense; registry_par->vcs_type = (u8)rtw_vcs_type; registry_par->rts_thresh = (u16)rtw_rts_thresh; registry_par->frag_thresh = (u16)rtw_frag_thresh; registry_par->preamble = (u8)rtw_preamble; registry_par->scan_mode = (u8)rtw_scan_mode; registry_par->adhoc_tx_pwr = (u8)rtw_adhoc_tx_pwr; registry_par->soft_ap = (u8)rtw_soft_ap; registry_par->smart_ps = (u8)rtw_smart_ps; registry_par->power_mgnt = (u8)rtw_power_mgnt; registry_par->ips_mode = 
(u8)rtw_ips_mode; registry_par->long_retry_lmt = (u8)rtw_long_retry_lmt; registry_par->short_retry_lmt = (u8)rtw_short_retry_lmt; registry_par->busy_thresh = (u16)rtw_busy_thresh; registry_par->ack_policy = (u8)rtw_ack_policy; registry_par->acm_method = (u8)rtw_acm_method; /* UAPSD */ registry_par->wmm_enable = (u8)rtw_wmm_enable; registry_par->uapsd_enable = (u8)rtw_uapsd_enable; registry_par->ht_enable = (u8)rtw_ht_enable23A; registry_par->cbw40_enable = (u8)rtw_cbw40_enable23A; registry_par->ampdu_enable = (u8)rtw_ampdu_enable23A; registry_par->rx_stbc = (u8)rtw_rx_stbc; registry_par->ampdu_amsdu = (u8)rtw_ampdu_amsdu; registry_par->lowrate_two_xmit = (u8)rtw_lowrate_two_xmit; registry_par->rf_config = (u8)rtw_rf_config; registry_par->low_power = (u8)rtw_low_power; registry_par->wifi_spec = (u8)rtw_wifi_spec; registry_par->channel_plan = (u8)rtw_channel_plan; #ifdef CONFIG_8723AU_BT_COEXIST registry_par->btcoex = (u8)rtw_btcoex_enable; registry_par->bt_iso = (u8)rtw_bt_iso; registry_par->bt_sco = (u8)rtw_bt_sco; registry_par->bt_ampdu = (u8)rtw_bt_ampdu; #endif registry_par->bAcceptAddbaReq = (u8)rtw_AcceptAddbaReq; registry_par->antdiv_cfg = (u8)rtw_antdiv_cfg; registry_par->antdiv_type = (u8)rtw_antdiv_type; /* 0:disable, 1:enable, 2:by EFUSE config */ registry_par->hwpdn_mode = (u8)rtw_hwpdn_mode; /* 0:disable, 1:enable */ registry_par->hwpwrp_detect = (u8)rtw_hwpwrp_detect; registry_par->hw_wps_pbc = (u8)rtw_hw_wps_pbc; registry_par->max_roaming_times = (u8)rtw_max_roaming_times; registry_par->enable80211d = (u8)rtw_80211d; snprintf(registry_par->ifname, 16, "%s", ifname); snprintf(registry_par->if2name, 16, "%s", if2name); registry_par->notch_filter = (u8)rtw_notch_filter; registry_par->regulatory_tid = (u8)rtw_regulatory_id; return status; } static int rtw_net_set_mac_address(struct net_device *pnetdev, void *p) { struct rtw_adapter *padapter = netdev_priv(pnetdev); struct sockaddr *addr = p; if (!padapter->bup) ether_addr_copy(padapter->eeprompriv.mac_addr, addr->sa_data); return 0; } static struct net_device_stats *rtw_net_get_stats(struct net_device *pnetdev) { struct rtw_adapter *padapter = netdev_priv(pnetdev); struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct recv_priv *precvpriv = &padapter->recvpriv; padapter->stats.tx_packets = pxmitpriv->tx_pkts; padapter->stats.rx_packets = precvpriv->rx_pkts; padapter->stats.tx_dropped = pxmitpriv->tx_drop; padapter->stats.rx_dropped = precvpriv->rx_drop; padapter->stats.tx_bytes = pxmitpriv->tx_bytes; padapter->stats.rx_bytes = precvpriv->rx_bytes; return &padapter->stats; } /* * AC to queue mapping * * AC_VO -> queue 0 * AC_VI -> queue 1 * AC_BE -> queue 2 * AC_BK -> queue 3 */ static const u16 rtw_1d_to_queue[8] = { 2, 3, 3, 2, 1, 1, 0, 0 }; /* Given a data frame determine the 802.1p/1d tag to use. */ static unsigned int rtw_classify8021d(struct sk_buff *skb) { unsigned int dscp; /* skb->priority values from 256->263 are magic values to * directly indicate a specific 802.1d priority. This is used * to allow 802.1d priority to be passed directly in from VLAN * tags, etc. 
*/ if (skb->priority >= 256 && skb->priority <= 263) return skb->priority - 256; switch (skb->protocol) { case htons(ETH_P_IP): dscp = ip_hdr(skb)->tos & 0xfc; break; default: return 0; } return dscp >> 5; } static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback) { struct rtw_adapter *padapter = netdev_priv(dev); struct mlme_priv *pmlmepriv = &padapter->mlmepriv; skb->priority = rtw_classify8021d(skb); if (pmlmepriv->acm_mask != 0) skb->priority = qos_acm23a(pmlmepriv->acm_mask, skb->priority); return rtw_1d_to_queue[skb->priority]; } u16 rtw_recv_select_queue23a(struct sk_buff *skb) { struct iphdr *piphdr; unsigned int dscp; u16 eth_type; u32 priority; u8 *pdata = skb->data; memcpy(&eth_type, pdata + (ETH_ALEN << 1), 2); switch (eth_type) { case htons(ETH_P_IP): piphdr = (struct iphdr *)(pdata + ETH_HLEN); dscp = piphdr->tos & 0xfc; priority = dscp >> 5; break; default: priority = 0; } return rtw_1d_to_queue[priority]; } static const struct net_device_ops rtw_netdev_ops = { .ndo_open = netdev_open23a, .ndo_stop = netdev_close, .ndo_start_xmit = rtw_xmit23a_entry23a, .ndo_select_queue = rtw_select_queue, .ndo_set_mac_address = rtw_net_set_mac_address, .ndo_get_stats = rtw_net_get_stats, }; int rtw_init_netdev23a_name23a(struct net_device *pnetdev, const char *ifname) { if (dev_alloc_name(pnetdev, ifname) < 0) { RT_TRACE(_module_os_intfs_c_, _drv_err_, ("dev_alloc_name, fail!\n")); } netif_carrier_off(pnetdev); return 0; } static const struct device_type wlan_type = { .name = "wlan", }; struct net_device *rtw_init_netdev23a(struct rtw_adapter *old_padapter) { struct rtw_adapter *padapter; struct net_device *pnetdev; RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+init_net_dev\n")); pnetdev = alloc_etherdev_mq(sizeof(struct rtw_adapter), 4); if (!pnetdev) return NULL; pnetdev->dev.type = &wlan_type; padapter = netdev_priv(pnetdev); padapter->pnetdev = pnetdev; DBG_8723A("register rtw_netdev_ops to netdev_ops\n"); pnetdev->netdev_ops = &rtw_netdev_ops; pnetdev->watchdog_timeo = HZ*3; /* 3 second timeout */ /* step 2. */ loadparam(padapter, pnetdev); return pnetdev; } u32 rtw_start_drv_threads23a(struct rtw_adapter *padapter) { u32 _status = _SUCCESS; RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_start_drv_threads23a\n")); padapter->cmdThread = kthread_run(rtw_cmd_thread23a, padapter, "RTW_CMD_THREAD"); if (IS_ERR(padapter->cmdThread)) { _status = _FAIL; } else { /* wait for cmd_thread to run */ down(&padapter->cmdpriv.terminate_cmdthread_sema); } rtw_hal_start_thread23a(padapter); return _status; } void rtw_stop_drv_threads23a(struct rtw_adapter *padapter) { RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_stop_drv_threads23a\n")); /* Below is to termindate rtw_cmd_thread23a & event_thread... 
*/ up(&padapter->cmdpriv.cmd_queue_sema); if (padapter->cmdThread) down(&padapter->cmdpriv.terminate_cmdthread_sema); rtw_hal_stop_thread23a(padapter); } static u8 rtw_init_default_value(struct rtw_adapter *padapter) { struct registry_priv *pregistrypriv = &padapter->registrypriv; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct security_priv *psecuritypriv = &padapter->securitypriv; u8 ret = _SUCCESS; /* xmit_priv */ pxmitpriv->vcs_setting = pregistrypriv->vrtl_carrier_sense; pxmitpriv->vcs = pregistrypriv->vcs_type; pxmitpriv->vcs_type = pregistrypriv->vcs_type; /* pxmitpriv->rts_thresh = pregistrypriv->rts_thresh; */ pxmitpriv->frag_len = pregistrypriv->frag_thresh; /* mlme_priv */ pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */ pmlmepriv->scan_mode = SCAN_ACTIVE; /* ht_priv */ pmlmepriv->htpriv.ampdu_enable = false;/* set to disabled */ /* security_priv */ psecuritypriv->binstallGrpkey = _FAIL; /* open system */ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open; psecuritypriv->dot11PrivacyAlgrthm = _NO_PRIVACY_; psecuritypriv->dot11PrivacyKeyIndex = 0; psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_; psecuritypriv->dot118021XGrpKeyid = 1; psecuritypriv->ndisauthtype = Ndis802_11AuthModeOpen; psecuritypriv->ndisencryptstatus = Ndis802_11WEPDisabled; /* registry_priv */ rtw_init_registrypriv_dev_network23a(padapter); rtw_update_registrypriv_dev_network23a(padapter); /* hal_priv */ rtw_hal_def_value_init23a(padapter); /* misc. */ padapter->bReadPortCancel = false; padapter->bWritePortCancel = false; padapter->bRxRSSIDisplay = 0; padapter->bNotifyChannelChange = 0; #ifdef CONFIG_8723AU_P2P padapter->bShowGetP2PState = 1; #endif return ret; } u8 rtw_reset_drv_sw23a(struct rtw_adapter *padapter) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv; u8 ret8 = _SUCCESS; /* hal_priv */ rtw_hal_def_value_init23a(padapter); padapter->bReadPortCancel = false; padapter->bWritePortCancel = false; padapter->bRxRSSIDisplay = 0; pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */ padapter->xmitpriv.tx_pkts = 0; padapter->recvpriv.rx_pkts = 0; pmlmepriv->LinkDetectInfo.bBusyTraffic = false; _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY | _FW_UNDER_LINKING); rtw_hal_sreset_reset23a_value23a(padapter); pwrctrlpriv->pwr_state_check_cnts = 0; /* mlmeextpriv */ padapter->mlmeextpriv.sitesurvey_res.state = SCAN_DISABLE; rtw_set_signal_stat_timer(&padapter->recvpriv); return ret8; } u8 rtw_init_drv_sw23a(struct rtw_adapter *padapter) { u8 ret8 = _SUCCESS; RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_init_drv_sw23a\n")); if ((rtw_init_cmd_priv23a(&padapter->cmdpriv)) == _FAIL) { RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init cmd_priv\n")); ret8 = _FAIL; goto exit; } padapter->cmdpriv.padapter = padapter; if (rtw_init_evt_priv23a(&padapter->evtpriv) == _FAIL) { RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init evt_priv\n")); ret8 = _FAIL; goto exit; } if (rtw_init_mlme_priv23a(padapter) == _FAIL) { RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init mlme_priv\n")); ret8 = _FAIL; goto exit; } #ifdef CONFIG_8723AU_P2P rtw_init_wifidirect_timers23a(padapter); init_wifidirect_info23a(padapter, P2P_ROLE_DISABLE); reset_global_wifidirect_info23a(padapter); rtw_init_cfg80211_wifidirect_info(padapter); #ifdef CONFIG_8723AU_P2P if (rtw_init_wifi_display_info(padapter) == _FAIL) RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init 
init_wifi_display_info\n")); #endif #endif /* CONFIG_8723AU_P2P */ if (init_mlme_ext_priv23a(padapter) == _FAIL) { RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init mlme_ext_priv\n")); ret8 = _FAIL; goto exit; } if (_rtw_init_xmit_priv23a(&padapter->xmitpriv, padapter) == _FAIL) { DBG_8723A("Can't _rtw_init_xmit_priv23a\n"); ret8 = _FAIL; goto exit; } if (_rtw_init_recv_priv23a(&padapter->recvpriv, padapter) == _FAIL) { DBG_8723A("Can't _rtw_init_recv_priv23a\n"); ret8 = _FAIL; goto exit; } if (_rtw_init_sta_priv23a(&padapter->stapriv) == _FAIL) { DBG_8723A("Can't _rtw_init_sta_priv23a\n"); ret8 = _FAIL; goto exit; } padapter->stapriv.padapter = padapter; padapter->setband = GHZ24_50; rtw_init_bcmc_stainfo23a(padapter); rtw_init_pwrctrl_priv23a(padapter); ret8 = rtw_init_default_value(padapter); rtw_hal_dm_init23a(padapter); rtw_hal_sw_led_init23a(padapter); rtw_hal_sreset_init23a(padapter); exit: RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-rtw_init_drv_sw23a\n")); return ret8; } void rtw_cancel_all_timer23a(struct rtw_adapter *padapter) { RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_cancel_all_timer23a\n")); del_timer_sync(&padapter->mlmepriv.assoc_timer); RT_TRACE(_module_os_intfs_c_, _drv_info_, ("rtw_cancel_all_timer23a:cancel association timer complete!\n")); del_timer_sync(&padapter->mlmepriv.scan_to_timer); RT_TRACE(_module_os_intfs_c_, _drv_info_, ("rtw_cancel_all_timer23a:cancel scan_to_timer!\n")); del_timer_sync(&padapter->mlmepriv.dynamic_chk_timer); RT_TRACE(_module_os_intfs_c_, _drv_info_, ("rtw_cancel_all_timer23a:cancel dynamic_chk_timer!\n")); /* cancel sw led timer */ rtw_hal_sw_led_deinit23a(padapter); RT_TRACE(_module_os_intfs_c_, _drv_info_, ("rtw_cancel_all_timer23a:cancel DeInitSwLeds!\n")); del_timer_sync(&padapter->pwrctrlpriv.pwr_state_check_timer); #ifdef CONFIG_8723AU_P2P del_timer_sync(&padapter->cfg80211_wdinfo.remain_on_ch_timer); #endif /* CONFIG_8723AU_P2P */ del_timer_sync(&padapter->mlmepriv.set_scan_deny_timer); rtw_clear_scan_deny(padapter); RT_TRACE(_module_os_intfs_c_, _drv_info_, ("rtw_cancel_all_timer23a:cancel set_scan_deny_timer!\n")); del_timer_sync(&padapter->recvpriv.signal_stat_timer); /* cancel dm timer */ rtw_hal_dm_deinit23a(padapter); } u8 rtw_free_drv_sw23a(struct rtw_adapter *padapter) { #ifdef CONFIG_8723AU_P2P struct wifidirect_info *pwdinfo; #endif RT_TRACE(_module_os_intfs_c_, _drv_info_, ("==>rtw_free_drv_sw23a")); /* we can call rtw_p2p_enable23a here, but: * 1. rtw_p2p_enable23a may have IO operation * 2. 
rtw_p2p_enable23a is bundled with wext interface */ #ifdef CONFIG_8723AU_P2P pwdinfo = &padapter->wdinfo; if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) { del_timer_sync(&pwdinfo->find_phase_timer); del_timer_sync(&pwdinfo->restore_p2p_state_timer); del_timer_sync(&pwdinfo->pre_tx_scan_timer); rtw_p2p_set_state(pwdinfo, P2P_STATE_NONE); } #endif free_mlme_ext_priv23a(&padapter->mlmeextpriv); rtw_free_cmd_priv23a(&padapter->cmdpriv); rtw_free_evt_priv23a(&padapter->evtpriv); rtw_free_mlme_priv23a(&padapter->mlmepriv); _rtw_free_xmit_priv23a(&padapter->xmitpriv); _rtw_free_sta_priv23a(&padapter->stapriv);/* will free bcmc_stainfo here */ _rtw_free_recv_priv23a(&padapter->recvpriv); rtw_free_pwrctrl_priv(padapter); rtw_hal_free_data23a(padapter); RT_TRACE(_module_os_intfs_c_, _drv_info_, ("<== rtw_free_drv_sw23a\n")); /* free the old_pnetdev */ if (padapter->rereg_nd_name_priv.old_pnetdev) { free_netdev(padapter->rereg_nd_name_priv.old_pnetdev); padapter->rereg_nd_name_priv.old_pnetdev = NULL; } /* clear pbuddy_adapter to avoid access wrong pointer. */ if (padapter->pbuddy_adapter != NULL) padapter->pbuddy_adapter->pbuddy_adapter = NULL; RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-rtw_free_drv_sw23a\n")); return _SUCCESS; } static int _rtw_drv_register_netdev(struct rtw_adapter *padapter, char *name) { struct net_device *pnetdev = padapter->pnetdev; int ret = _SUCCESS; /* alloc netdev name */ rtw_init_netdev23a_name23a(pnetdev, name); ether_addr_copy(pnetdev->dev_addr, padapter->eeprompriv.mac_addr); /* Tell the network stack we exist */ if (register_netdev(pnetdev)) { DBG_8723A(FUNC_NDEV_FMT "Failed!\n", FUNC_NDEV_ARG(pnetdev)); ret = _FAIL; goto error_register_netdev; } DBG_8723A("%s, MAC Address (if%d) = " MAC_FMT "\n", __func__, (padapter->iface_id + 1), MAC_ARG(pnetdev->dev_addr)); return ret; error_register_netdev: if (padapter->iface_id > IFACE_ID0) { rtw_free_drv_sw23a(padapter); free_netdev(pnetdev); } return ret; } int rtw_drv_register_netdev(struct rtw_adapter *if1) { struct dvobj_priv *dvobj = if1->dvobj; int i, status = _SUCCESS; if (dvobj->iface_nums < IFACE_ID_MAX) { for (i = 0; i < dvobj->iface_nums; i++) { struct rtw_adapter *padapter = dvobj->padapters[i]; if (padapter) { char *name; if (padapter->iface_id == IFACE_ID0) name = if1->registrypriv.ifname; else if (padapter->iface_id == IFACE_ID1) name = if1->registrypriv.if2name; else name = "wlan%d"; status = _rtw_drv_register_netdev(padapter, name); if (status != _SUCCESS) break; } } } return status; } int netdev_open23a(struct net_device *pnetdev) { struct rtw_adapter *padapter = netdev_priv(pnetdev); struct pwrctrl_priv *pwrctrlpriv; int ret = 0; uint status; RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+871x_drv - dev_open\n")); DBG_8723A("+871x_drv - drv_open, bup =%d\n", padapter->bup); mutex_lock(&adapter_to_dvobj(padapter)->hw_init_mutex); pwrctrlpriv = &padapter->pwrctrlpriv; if (pwrctrlpriv->ps_flag) { padapter->net_closed = false; goto netdev_open23a_normal_process; } if (!padapter->bup) { padapter->bDriverStopped = false; padapter->bSurpriseRemoved = false; padapter->bCardDisableWOHSM = false; status = rtw_hal_init23a(padapter); if (status == _FAIL) { RT_TRACE(_module_os_intfs_c_, _drv_err_, ("rtl871x_hal_init(): Can't init h/w!\n")); goto netdev_open23a_error; } DBG_8723A("MAC Address = "MAC_FMT"\n", MAC_ARG(pnetdev->dev_addr)); status = rtw_start_drv_threads23a(padapter); if (status == _FAIL) { DBG_8723A("Initialize driver software resource Failed!\n"); goto netdev_open23a_error; } if 
(init_hw_mlme_ext23a(padapter) == _FAIL) { DBG_8723A("can't init mlme_ext_priv\n"); goto netdev_open23a_error; } if (padapter->intf_start) padapter->intf_start(padapter); rtw_cfg80211_init_wiphy(padapter); rtw_led_control(padapter, LED_CTL_NO_LINK); padapter->bup = true; } padapter->net_closed = false; mod_timer(&padapter->mlmepriv.dynamic_chk_timer, jiffies + msecs_to_jiffies(2000)); padapter->pwrctrlpriv.bips_processing = false; rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv); /* netif_carrier_on(pnetdev);call this func when rtw23a_joinbss_event_cb return success */ if (!rtw_netif_queue_stopped(pnetdev)) netif_tx_start_all_queues(pnetdev); else netif_tx_wake_all_queues(pnetdev); netdev_open23a_normal_process: RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-871x_drv - dev_open\n")); DBG_8723A("-871x_drv - drv_open, bup =%d\n", padapter->bup); exit: mutex_unlock(&adapter_to_dvobj(padapter)->hw_init_mutex); return ret; netdev_open23a_error: padapter->bup = false; netif_carrier_off(pnetdev); netif_tx_stop_all_queues(pnetdev); RT_TRACE(_module_os_intfs_c_, _drv_err_, ("-871x_drv - dev_open, fail!\n")); DBG_8723A("-871x_drv - drv_open fail, bup =%d\n", padapter->bup); ret = -1; goto exit; } static int ips_netdrv_open(struct rtw_adapter *padapter) { int status = _SUCCESS; padapter->net_closed = false; DBG_8723A("===> %s.........\n", __func__); padapter->bDriverStopped = false; padapter->bSurpriseRemoved = false; padapter->bCardDisableWOHSM = false; status = rtw_hal_init23a(padapter); if (status == _FAIL) { RT_TRACE(_module_os_intfs_c_, _drv_err_, ("ips_netdrv_open(): Can't init h/w!\n")); goto netdev_open23a_error; } if (padapter->intf_start) padapter->intf_start(padapter); rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv); mod_timer(&padapter->mlmepriv.dynamic_chk_timer, jiffies + msecs_to_jiffies(5000)); return _SUCCESS; netdev_open23a_error: /* padapter->bup = false; */ DBG_8723A("-ips_netdrv_open - drv_open failure, bup =%d\n", padapter->bup); return _FAIL; } int rtw_ips_pwr_up23a(struct rtw_adapter *padapter) { int result; unsigned long start_time = jiffies; DBG_8723A("===> rtw_ips_pwr_up23a..............\n"); rtw_reset_drv_sw23a(padapter); result = ips_netdrv_open(padapter); rtw_led_control(padapter, LED_CTL_NO_LINK); DBG_8723A("<=== rtw_ips_pwr_up23a.............. in %dms\n", jiffies_to_msecs(jiffies - start_time)); return result; } void rtw_ips_pwr_down23a(struct rtw_adapter *padapter) { unsigned long start_time = jiffies; DBG_8723A("===> rtw_ips_pwr_down23a...................\n"); padapter->bCardDisableWOHSM = true; padapter->net_closed = true; rtw_led_control(padapter, LED_CTL_POWER_OFF); rtw_ips_dev_unload23a(padapter); padapter->bCardDisableWOHSM = false; DBG_8723A("<=== rtw_ips_pwr_down23a..................... in %dms\n", jiffies_to_msecs(jiffies - start_time)); } void rtw_ips_dev_unload23a(struct rtw_adapter *padapter) { rtw_hal_set_hwreg23a(padapter, HW_VAR_FIFO_CLEARN_UP, NULL); if (padapter->intf_stop) padapter->intf_stop(padapter); /* s5. */ if (!padapter->bSurpriseRemoved) rtw_hal_deinit23a(padapter); } int pm_netdev_open23a(struct net_device *pnetdev, u8 bnormal) { int status; if (bnormal) status = netdev_open23a(pnetdev); else status = (_SUCCESS == ips_netdrv_open(netdev_priv(pnetdev))) ? 
(0) : (-1); return status; } static int netdev_close(struct net_device *pnetdev) { struct rtw_adapter *padapter = netdev_priv(pnetdev); RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+871x_drv - drv_close\n")); if (padapter->pwrctrlpriv.bInternalAutoSuspend) { if (padapter->pwrctrlpriv.rf_pwrstate == rf_off) padapter->pwrctrlpriv.ps_flag = true; } padapter->net_closed = true; if (padapter->pwrctrlpriv.rf_pwrstate == rf_on) { DBG_8723A("(2)871x_drv - drv_close, bup =%d, hw_init_completed =%d\n", padapter->bup, padapter->hw_init_completed); /* s1. */ if (pnetdev) { if (!rtw_netif_queue_stopped(pnetdev)) netif_tx_stop_all_queues(pnetdev); } /* s2. */ LeaveAllPowerSaveMode23a(padapter); rtw_disassoc_cmd23a(padapter, 500, false); /* s2-2. indicate disconnect to os */ rtw_indicate_disconnect23a(padapter); /* s2-3. */ rtw_free_assoc_resources23a(padapter, 1); /* s2-4. */ rtw_free_network_queue23a(padapter, true); /* Close LED */ rtw_led_control(padapter, LED_CTL_POWER_OFF); } #ifdef CONFIG_8723AU_P2P if (wdev_to_priv(padapter->rtw_wdev)->p2p_enabled) wdev_to_priv(padapter->rtw_wdev)->p2p_enabled = false; rtw_p2p_enable23a(padapter, P2P_ROLE_DISABLE); #endif /* CONFIG_8723AU_P2P */ rtw_scan_abort23a(padapter); RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-871x_drv - drv_close\n")); DBG_8723A("-871x_drv - drv_close, bup =%d\n", padapter->bup); return 0; } void rtw_ndev_destructor(struct net_device *ndev) { DBG_8723A(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev)); kfree(ndev->ieee80211_ptr); free_netdev(ndev); }
gpl-2.0
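rtw_classify8021d() above derives an 802.1d priority from the top three bits of an IPv4 TOS byte, and rtw_select_queue() then maps that priority to one of four hardware queues through rtw_1d_to_queue. A user-space sketch of the same two-step mapping; only the lookup table and the DSCP shift come from the driver, the rest is illustrative:

#include <stdio.h>

/* Same table as the driver: 802.1d priority -> hardware queue
 * (AC_VO = queue 0, AC_VI = 1, AC_BE = 2, AC_BK = 3). */
static const unsigned short rtw_1d_to_queue[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

/* Mirror of rtw_classify8021d's IPv4 branch: mask off the two
 * low (ECN) bits of TOS, then keep the top three bits as the
 * 802.1d tag. */
static unsigned int classify(unsigned char tos)
{
    unsigned int dscp = tos & 0xfc;
    return dscp >> 5;
}

int main(void)
{
    /* TOS 0xb8 (DSCP EF, typical VoIP) -> priority 5 -> queue 1 */
    printf("tos 0xb8 -> prio %u -> queue %u\n",
           classify(0xb8), rtw_1d_to_queue[classify(0xb8)]);
    /* TOS 0x00 (best effort) -> priority 0 -> queue 2 */
    printf("tos 0x00 -> prio %u -> queue %u\n",
           classify(0x00), rtw_1d_to_queue[classify(0x00)]);
    return 0;
}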
fengli/gcc
libgfortran/generated/rrspacing_r16.c
26
1865
/* Implementation of the RRSPACING intrinsic
   Copyright 2006, 2007, 2009, 2010 Free Software Foundation, Inc.
   Contributed by Steven G. Kargl <kargl@gcc.gnu.org>

This file is part of the GNU Fortran 95 runtime library (libgfortran).

Libgfortran is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.

Libgfortran is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>.  */

#include "libgfortran.h"

#if defined(GFC_REAL_16_IS_FLOAT128)
#define MATHFUNC(funcname) funcname ## q
#else
#define MATHFUNC(funcname) funcname ## l
#endif

#if defined (HAVE_GFC_REAL_16) && (defined(GFC_REAL_16_IS_FLOAT128) || defined(HAVE_FABSL)) && (defined(GFC_REAL_16_IS_FLOAT128) || defined(HAVE_FREXPL))

extern GFC_REAL_16 rrspacing_r16 (GFC_REAL_16 s, int p);
export_proto(rrspacing_r16);

GFC_REAL_16
rrspacing_r16 (GFC_REAL_16 s, int p)
{
  int e;
  GFC_REAL_16 x;

  x = MATHFUNC(fabs) (s);
  if (x == 0.)
    return 0.;
  MATHFUNC(frexp) (s, &e);

#if (defined(GFC_REAL_16_IS_FLOAT128) || defined(HAVE_LDEXPL))
  return MATHFUNC(ldexp) (x, p - e);
#else
  return MATHFUNC(scalbn) (x, p - e);
#endif
}

#endif
gpl-2.0
jpabferreira/linux-pcsws
drivers/media/usb/usbvision/usbvision-core.c
282
75402
/* * usbvision-core.c - driver for NT100x USB video capture devices * * * Copyright (c) 1999-2005 Joerg Heckenbach <joerg@heckenbach-aw.de> * Dwaine Garden <dwainegarden@rogers.com> * * This module is part of usbvision driver project. * Updates to driver completed by Dwaine P. Garden * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/gfp.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/vmalloc.h> #include <linux/module.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/io.h> #include <linux/videodev2.h> #include <linux/i2c.h> #include <media/saa7115.h> #include <media/v4l2-common.h> #include <media/tuner.h> #include <linux/workqueue.h> #include "usbvision.h" static unsigned int core_debug; module_param(core_debug, int, 0644); MODULE_PARM_DESC(core_debug, "enable debug messages [core]"); static int adjust_compression = 1; /* Set the compression to be adaptive */ module_param(adjust_compression, int, 0444); MODULE_PARM_DESC(adjust_compression, " Set the ADPCM compression for the device. Default: 1 (On)"); /* To help people who get black-and-white output when using S-Video input. * Some cables and input devices are wired differently. */ static int switch_svideo_input; module_param(switch_svideo_input, int, 0444); MODULE_PARM_DESC(switch_svideo_input, " Set the S-Video input. Some cables and input devices are wired differently. Default: 0 (Off)"); static unsigned int adjust_x_offset = -1; module_param(adjust_x_offset, int, 0644); MODULE_PARM_DESC(adjust_x_offset, "adjust X offset display [core]"); static unsigned int adjust_y_offset = -1; module_param(adjust_y_offset, int, 0644); MODULE_PARM_DESC(adjust_y_offset, "adjust Y offset display [core]"); #define ENABLE_HEXDUMP 0 /* Enable if you need it */ #ifdef USBVISION_DEBUG #define PDEBUG(level, fmt, args...) { \ if (core_debug & (level)) \ printk(KERN_INFO KBUILD_MODNAME ":[%s:%d] " fmt, \ __func__, __LINE__ , ## args); \ } #else #define PDEBUG(level, fmt, args...) do {} while (0) #endif #define DBG_HEADER (1 << 0) #define DBG_IRQ (1 << 1) #define DBG_ISOC (1 << 2) #define DBG_PARSE (1 << 3) #define DBG_SCRATCH (1 << 4) #define DBG_FUNC (1 << 5) static const int max_imgwidth = MAX_FRAME_WIDTH; static const int max_imgheight = MAX_FRAME_HEIGHT; static const int min_imgwidth = MIN_FRAME_WIDTH; static const int min_imgheight = MIN_FRAME_HEIGHT; /* The value of 'scratch_buf_size' affects quality of the picture * in many ways. Shorter buffers may cause loss of data when the client * is too slow. Larger buffers are memory-consuming and take longer * to work with. This setting can be adjusted, but the default value * should be OK for most desktop users.
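 * For scale, an illustrative calculation (added; the figures are assumptions
 * based on the PAL geometry programmed later in this file): one 704 x 288 field
 * in YUV 4:2:0 is 704 * 288 * 3 / 2 = 304128 bytes, well over the 128 KiB
 * default below, so the parser must drain the scratch buffer continuously
 * rather than accumulating whole frames.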
*/ #define DEFAULT_SCRATCH_BUF_SIZE (0x20000) /* 128kB memory scratch buffer */ static const int scratch_buf_size = DEFAULT_SCRATCH_BUF_SIZE; /* Function prototypes */ static int usbvision_request_intra(struct usb_usbvision *usbvision); static int usbvision_unrequest_intra(struct usb_usbvision *usbvision); static int usbvision_adjust_compression(struct usb_usbvision *usbvision); static int usbvision_measure_bandwidth(struct usb_usbvision *usbvision); /*******************************/ /* Memory management functions */ /*******************************/ /* * Here we want the physical address of the memory. * This is used when initializing the contents of the area. */ static void *usbvision_rvmalloc(unsigned long size) { void *mem; unsigned long adr; size = PAGE_ALIGN(size); mem = vmalloc_32(size); if (!mem) return NULL; memset(mem, 0, size); /* Clear the ram out, no junk to the user */ adr = (unsigned long) mem; while (size > 0) { SetPageReserved(vmalloc_to_page((void *)adr)); adr += PAGE_SIZE; size -= PAGE_SIZE; } return mem; } static void usbvision_rvfree(void *mem, unsigned long size) { unsigned long adr; if (!mem) return; size = PAGE_ALIGN(size); adr = (unsigned long) mem; while ((long) size > 0) { ClearPageReserved(vmalloc_to_page((void *)adr)); adr += PAGE_SIZE; size -= PAGE_SIZE; } vfree(mem); } #if ENABLE_HEXDUMP static void usbvision_hexdump(const unsigned char *data, int len) { char tmp[80]; int i, k; for (i = k = 0; len > 0; i++, len--) { if (i > 0 && (i % 16 == 0)) { printk("%s\n", tmp); k = 0; } k += sprintf(&tmp[k], "%02x ", data[i]); } if (k > 0) printk(KERN_CONT "%s\n", tmp); } #endif /******************************** * scratch ring buffer handling ********************************/ static int scratch_len(struct usb_usbvision *usbvision) /* This returns the amount of data actually in the buffer */ { int len = usbvision->scratch_write_ptr - usbvision->scratch_read_ptr; if (len < 0) len += scratch_buf_size; PDEBUG(DBG_SCRATCH, "scratch_len() = %d\n", len); return len; } /* This returns the free space left in the buffer */ static int scratch_free(struct usb_usbvision *usbvision) { int free = usbvision->scratch_read_ptr - usbvision->scratch_write_ptr; if (free <= 0) free += scratch_buf_size; if (free) { free -= 1; /* at least one byte in the buffer must be left blank, otherwise there is no way to distinguish between full and empty */ } PDEBUG(DBG_SCRATCH, "return %d\n", free); return free; } /* This puts data into the buffer */ static int scratch_put(struct usb_usbvision *usbvision, unsigned char *data, int len) { int len_part; if (usbvision->scratch_write_ptr + len < scratch_buf_size) { memcpy(usbvision->scratch + usbvision->scratch_write_ptr, data, len); usbvision->scratch_write_ptr += len; } else { len_part = scratch_buf_size - usbvision->scratch_write_ptr; memcpy(usbvision->scratch + usbvision->scratch_write_ptr, data, len_part); if (len == len_part) { usbvision->scratch_write_ptr = 0; /* just set write_ptr to zero */ } else { memcpy(usbvision->scratch, data + len_part, len - len_part); usbvision->scratch_write_ptr = len - len_part; } } PDEBUG(DBG_SCRATCH, "len=%d, new write_ptr=%d\n", len, usbvision->scratch_write_ptr); return len; } /* This marks the write_ptr as the position of a new frame header */ static void scratch_mark_header(struct usb_usbvision *usbvision) { PDEBUG(DBG_SCRATCH, "header at write_ptr=%d\n", usbvision->scratch_headermarker_write_ptr); usbvision->scratch_headermarker[usbvision->scratch_headermarker_write_ptr] = usbvision->scratch_write_ptr;
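/* Added note (not in the original source): the header markers are themselves a
 * small ring of saved write positions; the modular increment below wraps the
 * marker index, so scratch_get_header() later replays the saved positions in
 * FIFO order and can jump the read pointer straight to the next frame header. */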
usbvision->scratch_headermarker_write_ptr += 1; usbvision->scratch_headermarker_write_ptr %= USBVISION_NUM_HEADERMARKER; } /* This gets data from the buffer at the given "ptr" position */ static int scratch_get_extra(struct usb_usbvision *usbvision, unsigned char *data, int *ptr, int len) { int len_part; if (*ptr + len < scratch_buf_size) { memcpy(data, usbvision->scratch + *ptr, len); *ptr += len; } else { len_part = scratch_buf_size - *ptr; memcpy(data, usbvision->scratch + *ptr, len_part); if (len == len_part) { *ptr = 0; /* just set the y_ptr to zero */ } else { memcpy(data + len_part, usbvision->scratch, len - len_part); *ptr = len - len_part; } } PDEBUG(DBG_SCRATCH, "len=%d, new ptr=%d\n", len, *ptr); return len; } /* This sets the scratch extra read pointer */ static void scratch_set_extra_ptr(struct usb_usbvision *usbvision, int *ptr, int len) { *ptr = (usbvision->scratch_read_ptr + len) % scratch_buf_size; PDEBUG(DBG_SCRATCH, "ptr=%d\n", *ptr); } /* This increments the scratch extra read pointer */ static void scratch_inc_extra_ptr(int *ptr, int len) { *ptr = (*ptr + len) % scratch_buf_size; PDEBUG(DBG_SCRATCH, "ptr=%d\n", *ptr); } /* This gets data from the buffer */ static int scratch_get(struct usb_usbvision *usbvision, unsigned char *data, int len) { int len_part; if (usbvision->scratch_read_ptr + len < scratch_buf_size) { memcpy(data, usbvision->scratch + usbvision->scratch_read_ptr, len); usbvision->scratch_read_ptr += len; } else { len_part = scratch_buf_size - usbvision->scratch_read_ptr; memcpy(data, usbvision->scratch + usbvision->scratch_read_ptr, len_part); if (len == len_part) { usbvision->scratch_read_ptr = 0; /* just set the read_ptr to zero */ } else { memcpy(data + len_part, usbvision->scratch, len - len_part); usbvision->scratch_read_ptr = len - len_part; } } PDEBUG(DBG_SCRATCH, "len=%d, new read_ptr=%d\n", len, usbvision->scratch_read_ptr); return len; } /* This sets read pointer to next header and returns it */ static int scratch_get_header(struct usb_usbvision *usbvision, struct usbvision_frame_header *header) { int err_code = 0; PDEBUG(DBG_SCRATCH, "from read_ptr=%d", usbvision->scratch_headermarker_read_ptr); while (usbvision->scratch_headermarker_write_ptr - usbvision->scratch_headermarker_read_ptr != 0) { usbvision->scratch_read_ptr = usbvision->scratch_headermarker[usbvision->scratch_headermarker_read_ptr]; usbvision->scratch_headermarker_read_ptr += 1; usbvision->scratch_headermarker_read_ptr %= USBVISION_NUM_HEADERMARKER; scratch_get(usbvision, (unsigned char *)header, USBVISION_HEADER_LENGTH); if ((header->magic_1 == USBVISION_MAGIC_1) && (header->magic_2 == USBVISION_MAGIC_2) && (header->header_length == USBVISION_HEADER_LENGTH)) { err_code = USBVISION_HEADER_LENGTH; header->frame_width = header->frame_width_lo + (header->frame_width_hi << 8); header->frame_height = header->frame_height_lo + (header->frame_height_hi << 8); break; } } return err_code; } /* This removes len bytes of old data from the buffer */ static void scratch_rm_old(struct usb_usbvision *usbvision, int len) { usbvision->scratch_read_ptr += len; usbvision->scratch_read_ptr %= scratch_buf_size; PDEBUG(DBG_SCRATCH, "read_ptr is now %d\n", usbvision->scratch_read_ptr); } /* This resets the buffer - kills all data in it too */ static void scratch_reset(struct usb_usbvision *usbvision) { PDEBUG(DBG_SCRATCH, "\n"); usbvision->scratch_read_ptr = 0; usbvision->scratch_write_ptr = 0; usbvision->scratch_headermarker_read_ptr = 0; usbvision->scratch_headermarker_write_ptr = 0; 
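/* Added note: clearing the four indices discards all buffered data in O(1);
 * no memset is needed because scratch_len() and scratch_free() derive the
 * fill level purely from the read/write pointers. */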
usbvision->isocstate = isoc_state_no_frame; } int usbvision_scratch_alloc(struct usb_usbvision *usbvision) { usbvision->scratch = vmalloc_32(scratch_buf_size); scratch_reset(usbvision); if (usbvision->scratch == NULL) { dev_err(&usbvision->dev->dev, "%s: unable to allocate %d bytes for scratch\n", __func__, scratch_buf_size); return -ENOMEM; } return 0; } void usbvision_scratch_free(struct usb_usbvision *usbvision) { vfree(usbvision->scratch); usbvision->scratch = NULL; } /* * usbvision_decompress_alloc() * * allocates intermediate buffer for decompression */ int usbvision_decompress_alloc(struct usb_usbvision *usbvision) { int IFB_size = MAX_FRAME_WIDTH * MAX_FRAME_HEIGHT * 3 / 2; usbvision->intra_frame_buffer = vmalloc_32(IFB_size); if (usbvision->intra_frame_buffer == NULL) { dev_err(&usbvision->dev->dev, "%s: unable to allocate %d for compr. frame buffer\n", __func__, IFB_size); return -ENOMEM; } return 0; } /* * usbvision_decompress_free() * * frees intermediate buffer for decompression */ void usbvision_decompress_free(struct usb_usbvision *usbvision) { vfree(usbvision->intra_frame_buffer); usbvision->intra_frame_buffer = NULL; } /************************************************************ * Here comes the data parsing stuff that runs in interrupt context ************************************************************/ /* * usbvision_find_header() * * Locate one of the supported header markers in the scratch buffer. */ static enum parse_state usbvision_find_header(struct usb_usbvision *usbvision) { struct usbvision_frame *frame; int found_header = 0; frame = usbvision->cur_frame; while (scratch_get_header(usbvision, &frame->isoc_header) == USBVISION_HEADER_LENGTH) { /* found header in scratch */ PDEBUG(DBG_HEADER, "found header: 0x%02x%02x %d %d %d %d %#x 0x%02x %u %u", frame->isoc_header.magic_2, frame->isoc_header.magic_1, frame->isoc_header.header_length, frame->isoc_header.frame_num, frame->isoc_header.frame_phase, frame->isoc_header.frame_latency, frame->isoc_header.data_format, frame->isoc_header.format_param, frame->isoc_header.frame_width, frame->isoc_header.frame_height); if (usbvision->request_intra) { if (frame->isoc_header.format_param & 0x80) { found_header = 1; usbvision->last_isoc_frame_num = -1; /* do not check for lost frames this time */ usbvision_unrequest_intra(usbvision); break; } } else { found_header = 1; break; } } if (found_header) { frame->frmwidth = frame->isoc_header.frame_width * usbvision->stretch_width; frame->frmheight = frame->isoc_header.frame_height * usbvision->stretch_height; frame->v4l2_linesize = (frame->frmwidth * frame->v4l2_format.depth) >> 3; } else { /* no header found */ PDEBUG(DBG_HEADER, "skipping scratch data, no header"); scratch_reset(usbvision); return parse_state_end_parse; } /* found header */ if (frame->isoc_header.data_format == ISOC_MODE_COMPRESS) { /* check isoc_header.frame_num for lost frames */ if (usbvision->last_isoc_frame_num >= 0) { if (((usbvision->last_isoc_frame_num + 1) % 32) != frame->isoc_header.frame_num) { /* unexpected frame drop: need to request a new intra frame */ PDEBUG(DBG_HEADER, "Lost frame before %d on USB", frame->isoc_header.frame_num); usbvision_request_intra(usbvision); return parse_state_next_frame; } } usbvision->last_isoc_frame_num = frame->isoc_header.frame_num; } usbvision->header_count++; frame->scanstate = scan_state_lines; frame->curline = 0; return parse_state_continue; } static enum parse_state usbvision_parse_lines_422(struct usb_usbvision *usbvision, long *pcopylen) { volatile struct usbvision_frame
*frame; unsigned char *f; int len; int i; unsigned char yuyv[4] = { 180, 128, 10, 128 }; /* YUV components */ unsigned char rv, gv, bv; /* RGB components */ int clipmask_index, bytes_per_pixel; int stretch_bytes, clipmask_add; frame = usbvision->cur_frame; f = frame->data + (frame->v4l2_linesize * frame->curline); /* Make sure there's enough data for the entire line */ len = (frame->isoc_header.frame_width * 2) + 5; if (scratch_len(usbvision) < len) { PDEBUG(DBG_PARSE, "out of data in line %d, need %u.\n", frame->curline, len); return parse_state_out; } if ((frame->curline + 1) >= frame->frmheight) return parse_state_next_frame; bytes_per_pixel = frame->v4l2_format.bytes_per_pixel; stretch_bytes = (usbvision->stretch_width - 1) * bytes_per_pixel; clipmask_index = frame->curline * MAX_FRAME_WIDTH; clipmask_add = usbvision->stretch_width; for (i = 0; i < frame->frmwidth; i += (2 * usbvision->stretch_width)) { scratch_get(usbvision, &yuyv[0], 4); if (frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) { *f++ = yuyv[0]; /* Y */ *f++ = yuyv[3]; /* U */ } else { YUV_TO_RGB_BY_THE_BOOK(yuyv[0], yuyv[1], yuyv[3], rv, gv, bv); switch (frame->v4l2_format.format) { case V4L2_PIX_FMT_RGB565: *f++ = (0x1F & rv) | (0xE0 & (gv << 5)); *f++ = (0x07 & (gv >> 3)) | (0xF8 & bv); break; case V4L2_PIX_FMT_RGB24: *f++ = rv; *f++ = gv; *f++ = bv; break; case V4L2_PIX_FMT_RGB32: *f++ = rv; *f++ = gv; *f++ = bv; f++; break; case V4L2_PIX_FMT_RGB555: *f++ = (0x1F & rv) | (0xE0 & (gv << 5)); *f++ = (0x03 & (gv >> 3)) | (0x7C & (bv << 2)); break; } } clipmask_index += clipmask_add; f += stretch_bytes; if (frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) { *f++ = yuyv[2]; /* Y */ *f++ = yuyv[1]; /* V */ } else { YUV_TO_RGB_BY_THE_BOOK(yuyv[2], yuyv[1], yuyv[3], rv, gv, bv); switch (frame->v4l2_format.format) { case V4L2_PIX_FMT_RGB565: *f++ = (0x1F & rv) | (0xE0 & (gv << 5)); *f++ = (0x07 & (gv >> 3)) | (0xF8 & bv); break; case V4L2_PIX_FMT_RGB24: *f++ = rv; *f++ = gv; *f++ = bv; break; case V4L2_PIX_FMT_RGB32: *f++ = rv; *f++ = gv; *f++ = bv; f++; break; case V4L2_PIX_FMT_RGB555: *f++ = (0x1F & rv) | (0xE0 & (gv << 5)); *f++ = (0x03 & (gv >> 3)) | (0x7C & (bv << 2)); break; } } clipmask_index += clipmask_add; f += stretch_bytes; } frame->curline += usbvision->stretch_height; *pcopylen += frame->v4l2_linesize * usbvision->stretch_height; if (frame->curline >= frame->frmheight) return parse_state_next_frame; return parse_state_continue; } /* The decompression routine */ static int usbvision_decompress(struct usb_usbvision *usbvision, unsigned char *compressed, unsigned char *decompressed, int *start_pos, int *block_typestart_pos, int len) { int rest_pixel, idx, pos, extra_pos, block_len, block_type_pos, block_type_len; unsigned char block_byte, block_code, block_type, block_type_byte, integrator; integrator = 0; pos = *start_pos; block_type_pos = *block_typestart_pos; extra_pos = pos; block_len = 0; block_byte = 0; block_code = 0; block_type = 0; block_type_byte = 0; block_type_len = 0; rest_pixel = len; for (idx = 0; idx < len; idx++) { if (block_len == 0) { if (block_type_len == 0) { block_type_byte = compressed[block_type_pos]; block_type_pos++; block_type_len = 4; } block_type = (block_type_byte & 0xC0) >> 6; /* statistic: */ usbvision->compr_block_types[block_type]++; pos = extra_pos; if (block_type == 0) { if (rest_pixel >= 24) { idx += 23; rest_pixel -= 24; integrator = decompressed[idx]; } else { idx += rest_pixel - 1; rest_pixel = 0; } } else { block_code = compressed[pos]; pos++; if (rest_pixel >= 24) block_len = 
24; else block_len = rest_pixel; rest_pixel -= block_len; extra_pos = pos + (block_len / 4); } block_type_byte <<= 2; block_type_len -= 1; } if (block_len > 0) { if ((block_len % 4) == 0) { block_byte = compressed[pos]; pos++; } if (block_type == 1) /* inter Block */ integrator = decompressed[idx]; switch (block_byte & 0xC0) { case 0x03 << 6: integrator += compressed[extra_pos]; extra_pos++; break; case 0x02 << 6: integrator += block_code; break; case 0x00: integrator -= block_code; break; } decompressed[idx] = integrator; block_byte <<= 2; block_len -= 1; } } *start_pos = extra_pos; *block_typestart_pos = block_type_pos; return idx; } /* * usbvision_parse_compress() * * Parse compressed frame from the scratch buffer, put * decoded RGB value into the current frame buffer and add the written * number of bytes (RGB) to the *pcopylen. * */ static enum parse_state usbvision_parse_compress(struct usb_usbvision *usbvision, long *pcopylen) { #define USBVISION_STRIP_MAGIC 0x5A #define USBVISION_STRIP_LEN_MAX 400 #define USBVISION_STRIP_HEADER_LEN 3 struct usbvision_frame *frame; unsigned char *f, *u = NULL, *v = NULL; unsigned char strip_data[USBVISION_STRIP_LEN_MAX]; unsigned char strip_header[USBVISION_STRIP_HEADER_LEN]; int idx, idx_end, strip_len, strip_ptr, startblock_pos, block_pos, block_type_pos; int clipmask_index; int image_size; unsigned char rv, gv, bv; static unsigned char *Y, *U, *V; frame = usbvision->cur_frame; image_size = frame->frmwidth * frame->frmheight; if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) || (frame->v4l2_format.format == V4L2_PIX_FMT_YVU420)) { /* this is a planar format */ /* ... v4l2_linesize not used here. */ f = frame->data + (frame->width * frame->curline); } else f = frame->data + (frame->v4l2_linesize * frame->curline); if (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) { /* initialise u and v pointers */ /* get base of u and v planes, add half offset */ u = frame->data + image_size + (frame->frmwidth >> 1) * frame->curline; v = u + (image_size >> 1); } else if (frame->v4l2_format.format == V4L2_PIX_FMT_YVU420) { v = frame->data + image_size + ((frame->curline * (frame->width)) >> 2); u = v + (image_size >> 2); } if (frame->curline == 0) usbvision_adjust_compression(usbvision); if (scratch_len(usbvision) < USBVISION_STRIP_HEADER_LEN) return parse_state_out; /* get strip header without changing the scratch_read_ptr */ scratch_set_extra_ptr(usbvision, &strip_ptr, 0); scratch_get_extra(usbvision, &strip_header[0], &strip_ptr, USBVISION_STRIP_HEADER_LEN); if (strip_header[0] != USBVISION_STRIP_MAGIC) { /* wrong strip magic */ usbvision->strip_magic_errors++; return parse_state_next_frame; } if (frame->curline != (int)strip_header[2]) { /* line number mismatch error */ usbvision->strip_line_number_errors++; } strip_len = 2 * (unsigned int)strip_header[1]; if (strip_len > USBVISION_STRIP_LEN_MAX) { /* strip overrun */ /* I think this never happens */ usbvision_request_intra(usbvision); } if (scratch_len(usbvision) < strip_len) { /* there is not enough data for the strip */ return parse_state_out; } if (usbvision->intra_frame_buffer) { Y = usbvision->intra_frame_buffer + frame->frmwidth * frame->curline; U = usbvision->intra_frame_buffer + image_size + (frame->frmwidth / 2) * (frame->curline / 2); V = usbvision->intra_frame_buffer + image_size / 4 * 5 + (frame->frmwidth / 2) * (frame->curline / 2); } else { return parse_state_next_frame; } clipmask_index = frame->curline * MAX_FRAME_WIDTH; scratch_get(usbvision, strip_data, strip_len); idx_end =
frame->frmwidth; block_type_pos = USBVISION_STRIP_HEADER_LEN; startblock_pos = block_type_pos + (idx_end - 1) / 96 + (idx_end / 2 - 1) / 96 + 2; block_pos = startblock_pos; usbvision->block_pos = block_pos; usbvision_decompress(usbvision, strip_data, Y, &block_pos, &block_type_pos, idx_end); if (strip_len > usbvision->max_strip_len) usbvision->max_strip_len = strip_len; if (frame->curline % 2) usbvision_decompress(usbvision, strip_data, V, &block_pos, &block_type_pos, idx_end / 2); else usbvision_decompress(usbvision, strip_data, U, &block_pos, &block_type_pos, idx_end / 2); if (block_pos > usbvision->comprblock_pos) usbvision->comprblock_pos = block_pos; if (block_pos > strip_len) usbvision->strip_len_errors++; for (idx = 0; idx < idx_end; idx++) { if (frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) { *f++ = Y[idx]; *f++ = idx & 0x01 ? U[idx / 2] : V[idx / 2]; } else if (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) { *f++ = Y[idx]; if (idx & 0x01) *u++ = U[idx >> 1]; else *v++ = V[idx >> 1]; } else if (frame->v4l2_format.format == V4L2_PIX_FMT_YVU420) { *f++ = Y[idx]; if (!((idx & 0x01) | (frame->curline & 0x01))) { /* only need do this for 1 in 4 pixels */ /* intraframe buffer is YUV420 format */ *u++ = U[idx >> 1]; *v++ = V[idx >> 1]; } } else { YUV_TO_RGB_BY_THE_BOOK(Y[idx], U[idx / 2], V[idx / 2], rv, gv, bv); switch (frame->v4l2_format.format) { case V4L2_PIX_FMT_GREY: *f++ = Y[idx]; break; case V4L2_PIX_FMT_RGB555: *f++ = (0x1F & rv) | (0xE0 & (gv << 5)); *f++ = (0x03 & (gv >> 3)) | (0x7C & (bv << 2)); break; case V4L2_PIX_FMT_RGB565: *f++ = (0x1F & rv) | (0xE0 & (gv << 5)); *f++ = (0x07 & (gv >> 3)) | (0xF8 & bv); break; case V4L2_PIX_FMT_RGB24: *f++ = rv; *f++ = gv; *f++ = bv; break; case V4L2_PIX_FMT_RGB32: *f++ = rv; *f++ = gv; *f++ = bv; f++; break; } } clipmask_index++; } /* Deal with non-integer no. of bytes for YUV420P */ if (frame->v4l2_format.format != V4L2_PIX_FMT_YVU420) *pcopylen += frame->v4l2_linesize; else *pcopylen += frame->curline & 0x01 ? frame->v4l2_linesize : frame->v4l2_linesize << 1; frame->curline += 1; if (frame->curline >= frame->frmheight) return parse_state_next_frame; return parse_state_continue; } /* * usbvision_parse_lines_420() * * Parse two lines from the scratch buffer, put * decoded RGB value into the current frame buffer and add the written * number of bytes (RGB) to the *pcopylen. 
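 * Byte-budget example (added for clarity): in 4:2:0 every 2x2 pixel block
 * carries four Y samples plus one U and one V, i.e. 6 bytes per 4 pixels, so
 * two full lines of N pixels need exactly N * 3 bytes; that is the
 * 'pixel_per_line * 3' availability check at the top of this function.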
* */ static enum parse_state usbvision_parse_lines_420(struct usb_usbvision *usbvision, long *pcopylen) { struct usbvision_frame *frame; unsigned char *f_even = NULL, *f_odd = NULL; unsigned int pixel_per_line, block; int pixel, block_split; int y_ptr, u_ptr, v_ptr, y_odd_offset; const int y_block_size = 128; const int uv_block_size = 64; const int sub_block_size = 32; const int y_step[] = { 0, 0, 0, 2 }, y_step_size = 4; const int uv_step[] = { 0, 0, 0, 4 }, uv_step_size = 4; unsigned char y[2], u, v; /* YUV components */ int y_, u_, v_, vb, uvg, ur; int r_, g_, b_; /* RGB components */ unsigned char g; int clipmask_even_index, clipmask_odd_index, bytes_per_pixel; int clipmask_add, stretch_bytes; frame = usbvision->cur_frame; f_even = frame->data + (frame->v4l2_linesize * frame->curline); f_odd = f_even + frame->v4l2_linesize * usbvision->stretch_height; /* Make sure there's enough data for the entire line */ /* In this mode usbvision transfer 3 bytes for every 2 pixels */ /* I need two lines to decode the color */ bytes_per_pixel = frame->v4l2_format.bytes_per_pixel; stretch_bytes = (usbvision->stretch_width - 1) * bytes_per_pixel; clipmask_even_index = frame->curline * MAX_FRAME_WIDTH; clipmask_odd_index = clipmask_even_index + MAX_FRAME_WIDTH; clipmask_add = usbvision->stretch_width; pixel_per_line = frame->isoc_header.frame_width; if (scratch_len(usbvision) < (int)pixel_per_line * 3) { /* printk(KERN_DEBUG "out of data, need %d\n", len); */ return parse_state_out; } if ((frame->curline + 1) >= frame->frmheight) return parse_state_next_frame; block_split = (pixel_per_line%y_block_size) ? 1 : 0; /* are some blocks splitted into different lines? */ y_odd_offset = (pixel_per_line / y_block_size) * (y_block_size + uv_block_size) + block_split * uv_block_size; scratch_set_extra_ptr(usbvision, &y_ptr, y_odd_offset); scratch_set_extra_ptr(usbvision, &u_ptr, y_block_size); scratch_set_extra_ptr(usbvision, &v_ptr, y_odd_offset + (4 - block_split) * sub_block_size); for (block = 0; block < (pixel_per_line / sub_block_size); block++) { for (pixel = 0; pixel < sub_block_size; pixel += 2) { scratch_get(usbvision, &y[0], 2); scratch_get_extra(usbvision, &u, &u_ptr, 1); scratch_get_extra(usbvision, &v, &v_ptr, 1); /* I don't use the YUV_TO_RGB macro for better performance */ v_ = v - 128; u_ = u - 128; vb = 132252 * v_; uvg = -53281 * u_ - 25625 * v_; ur = 104595 * u_; if (frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) { *f_even++ = y[0]; *f_even++ = v; } else { y_ = 76284 * (y[0] - 16); b_ = (y_ + vb) >> 16; g_ = (y_ + uvg) >> 16; r_ = (y_ + ur) >> 16; switch (frame->v4l2_format.format) { case V4L2_PIX_FMT_RGB565: g = LIMIT_RGB(g_); *f_even++ = (0x1F & LIMIT_RGB(r_)) | (0xE0 & (g << 5)); *f_even++ = (0x07 & (g >> 3)) | (0xF8 & LIMIT_RGB(b_)); break; case V4L2_PIX_FMT_RGB24: *f_even++ = LIMIT_RGB(r_); *f_even++ = LIMIT_RGB(g_); *f_even++ = LIMIT_RGB(b_); break; case V4L2_PIX_FMT_RGB32: *f_even++ = LIMIT_RGB(r_); *f_even++ = LIMIT_RGB(g_); *f_even++ = LIMIT_RGB(b_); f_even++; break; case V4L2_PIX_FMT_RGB555: g = LIMIT_RGB(g_); *f_even++ = (0x1F & LIMIT_RGB(r_)) | (0xE0 & (g << 5)); *f_even++ = (0x03 & (g >> 3)) | (0x7C & (LIMIT_RGB(b_) << 2)); break; } } clipmask_even_index += clipmask_add; f_even += stretch_bytes; if (frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) { *f_even++ = y[1]; *f_even++ = u; } else { y_ = 76284 * (y[1] - 16); b_ = (y_ + vb) >> 16; g_ = (y_ + uvg) >> 16; r_ = (y_ + ur) >> 16; switch (frame->v4l2_format.format) { case V4L2_PIX_FMT_RGB565: g = LIMIT_RGB(g_); *f_even++ = (0x1F & 
LIMIT_RGB(r_)) | (0xE0 & (g << 5)); *f_even++ = (0x07 & (g >> 3)) | (0xF8 & LIMIT_RGB(b_)); break; case V4L2_PIX_FMT_RGB24: *f_even++ = LIMIT_RGB(r_); *f_even++ = LIMIT_RGB(g_); *f_even++ = LIMIT_RGB(b_); break; case V4L2_PIX_FMT_RGB32: *f_even++ = LIMIT_RGB(r_); *f_even++ = LIMIT_RGB(g_); *f_even++ = LIMIT_RGB(b_); f_even++; break; case V4L2_PIX_FMT_RGB555: g = LIMIT_RGB(g_); *f_even++ = (0x1F & LIMIT_RGB(r_)) | (0xE0 & (g << 5)); *f_even++ = (0x03 & (g >> 3)) | (0x7C & (LIMIT_RGB(b_) << 2)); break; } } clipmask_even_index += clipmask_add; f_even += stretch_bytes; scratch_get_extra(usbvision, &y[0], &y_ptr, 2); if (frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) { *f_odd++ = y[0]; *f_odd++ = v; } else { y_ = 76284 * (y[0] - 16); b_ = (y_ + vb) >> 16; g_ = (y_ + uvg) >> 16; r_ = (y_ + ur) >> 16; switch (frame->v4l2_format.format) { case V4L2_PIX_FMT_RGB565: g = LIMIT_RGB(g_); *f_odd++ = (0x1F & LIMIT_RGB(r_)) | (0xE0 & (g << 5)); *f_odd++ = (0x07 & (g >> 3)) | (0xF8 & LIMIT_RGB(b_)); break; case V4L2_PIX_FMT_RGB24: *f_odd++ = LIMIT_RGB(r_); *f_odd++ = LIMIT_RGB(g_); *f_odd++ = LIMIT_RGB(b_); break; case V4L2_PIX_FMT_RGB32: *f_odd++ = LIMIT_RGB(r_); *f_odd++ = LIMIT_RGB(g_); *f_odd++ = LIMIT_RGB(b_); f_odd++; break; case V4L2_PIX_FMT_RGB555: g = LIMIT_RGB(g_); *f_odd++ = (0x1F & LIMIT_RGB(r_)) | (0xE0 & (g << 5)); *f_odd++ = (0x03 & (g >> 3)) | (0x7C & (LIMIT_RGB(b_) << 2)); break; } } clipmask_odd_index += clipmask_add; f_odd += stretch_bytes; if (frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) { *f_odd++ = y[1]; *f_odd++ = u; } else { y_ = 76284 * (y[1] - 16); b_ = (y_ + vb) >> 16; g_ = (y_ + uvg) >> 16; r_ = (y_ + ur) >> 16; switch (frame->v4l2_format.format) { case V4L2_PIX_FMT_RGB565: g = LIMIT_RGB(g_); *f_odd++ = (0x1F & LIMIT_RGB(r_)) | (0xE0 & (g << 5)); *f_odd++ = (0x07 & (g >> 3)) | (0xF8 & LIMIT_RGB(b_)); break; case V4L2_PIX_FMT_RGB24: *f_odd++ = LIMIT_RGB(r_); *f_odd++ = LIMIT_RGB(g_); *f_odd++ = LIMIT_RGB(b_); break; case V4L2_PIX_FMT_RGB32: *f_odd++ = LIMIT_RGB(r_); *f_odd++ = LIMIT_RGB(g_); *f_odd++ = LIMIT_RGB(b_); f_odd++; break; case V4L2_PIX_FMT_RGB555: g = LIMIT_RGB(g_); *f_odd++ = (0x1F & LIMIT_RGB(r_)) | (0xE0 & (g << 5)); *f_odd++ = (0x03 & (g >> 3)) | (0x7C & (LIMIT_RGB(b_) << 2)); break; } } clipmask_odd_index += clipmask_add; f_odd += stretch_bytes; } scratch_rm_old(usbvision, y_step[block % y_step_size] * sub_block_size); scratch_inc_extra_ptr(&y_ptr, y_step[(block + 2 * block_split) % y_step_size] * sub_block_size); scratch_inc_extra_ptr(&u_ptr, uv_step[block % uv_step_size] * sub_block_size); scratch_inc_extra_ptr(&v_ptr, uv_step[(block + 2 * block_split) % uv_step_size] * sub_block_size); } scratch_rm_old(usbvision, pixel_per_line * 3 / 2 + block_split * sub_block_size); frame->curline += 2 * usbvision->stretch_height; *pcopylen += frame->v4l2_linesize * 2 * usbvision->stretch_height; if (frame->curline >= frame->frmheight) return parse_state_next_frame; return parse_state_continue; } /* * usbvision_parse_data() * * Generic routine to parse the scratch buffer. It employs either * usbvision_find_header() or usbvision_parse_lines() to do most * of work. 
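 * Return-state summary (added): parse_state_continue loops for the next line
 * or header, parse_state_next_frame completes the current frame and queues it
 * on the outqueue, parse_state_out means the scratch buffer ran dry until the
 * next isoc interrupt, and parse_state_end_parse bails out after the header
 * search has reset the scratch buffer.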
* */ static void usbvision_parse_data(struct usb_usbvision *usbvision) { struct usbvision_frame *frame; enum parse_state newstate; long copylen = 0; unsigned long lock_flags; frame = usbvision->cur_frame; PDEBUG(DBG_PARSE, "parsing len=%d\n", scratch_len(usbvision)); while (1) { newstate = parse_state_out; if (scratch_len(usbvision)) { if (frame->scanstate == scan_state_scanning) { newstate = usbvision_find_header(usbvision); } else if (frame->scanstate == scan_state_lines) { if (usbvision->isoc_mode == ISOC_MODE_YUV420) newstate = usbvision_parse_lines_420(usbvision, &copylen); else if (usbvision->isoc_mode == ISOC_MODE_YUV422) newstate = usbvision_parse_lines_422(usbvision, &copylen); else if (usbvision->isoc_mode == ISOC_MODE_COMPRESS) newstate = usbvision_parse_compress(usbvision, &copylen); } } if (newstate == parse_state_continue) continue; if ((newstate == parse_state_next_frame) || (newstate == parse_state_out)) break; return; /* parse_state_end_parse */ } if (newstate == parse_state_next_frame) { frame->grabstate = frame_state_done; do_gettimeofday(&(frame->timestamp)); frame->sequence = usbvision->frame_num; spin_lock_irqsave(&usbvision->queue_lock, lock_flags); list_move_tail(&(frame->frame), &usbvision->outqueue); usbvision->cur_frame = NULL; spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags); usbvision->frame_num++; /* This will cause the process to request another frame. */ if (waitqueue_active(&usbvision->wait_frame)) { PDEBUG(DBG_PARSE, "Wake up !"); wake_up_interruptible(&usbvision->wait_frame); } } else { frame->grabstate = frame_state_grabbing; } /* Update the frame's uncompressed length. */ frame->scanlength += copylen; } /* * Make all of the blocks of data contiguous */ static int usbvision_compress_isochronous(struct usb_usbvision *usbvision, struct urb *urb) { unsigned char *packet_data; int i, totlen = 0; for (i = 0; i < urb->number_of_packets; i++) { int packet_len = urb->iso_frame_desc[i].actual_length; int packet_stat = urb->iso_frame_desc[i].status; packet_data = urb->transfer_buffer + urb->iso_frame_desc[i].offset; /* Detect and ignore errored packets */ if (packet_stat) { /* packet_stat != 0 ????????????? */ PDEBUG(DBG_ISOC, "data error: [%d] len=%d, status=%X", i, packet_len, packet_stat); usbvision->isoc_err_count++; continue; } /* Detect and ignore empty packets */ if (packet_len < 0) { PDEBUG(DBG_ISOC, "error packet [%d]", i); usbvision->isoc_skip_count++; continue; } else if (packet_len == 0) { /* Frame end ????? */ PDEBUG(DBG_ISOC, "null packet [%d]", i); usbvision->isocstate = isoc_state_no_frame; usbvision->isoc_skip_count++; continue; } else if (packet_len > usbvision->isoc_packet_size) { PDEBUG(DBG_ISOC, "packet[%d] > isoc_packet_size", i); usbvision->isoc_skip_count++; continue; } PDEBUG(DBG_ISOC, "packet ok [%d] len=%d", i, packet_len); if (usbvision->isocstate == isoc_state_no_frame) { /* new frame begins */ usbvision->isocstate = isoc_state_in_frame; scratch_mark_header(usbvision); usbvision_measure_bandwidth(usbvision); PDEBUG(DBG_ISOC, "packet with header"); } /* * If usbvision continues to feed us with data but there is no * consumption (if, for example, V4L client fell asleep) we * may overflow the buffer. We have to move old data over to * free room for new data. This is bad for old data. If we * just drop new data then it's bad for new data... choose * your favorite evil here. */ if (scratch_free(usbvision) < packet_len) { usbvision->scratch_ovf_count++; PDEBUG(DBG_ISOC, "scratch buf overflow! 
scr_len: %d, n: %d", scratch_len(usbvision), packet_len); scratch_rm_old(usbvision, packet_len - scratch_free(usbvision)); } /* Now we know that there is enough room in scratch buffer */ scratch_put(usbvision, packet_data, packet_len); totlen += packet_len; usbvision->isoc_data_count += packet_len; usbvision->isoc_packet_count++; } #if ENABLE_HEXDUMP if (totlen > 0) { static int foo; if (foo < 1) { printk(KERN_DEBUG "+%d.\n", usbvision->scratchlen); usbvision_hexdump(data0, (totlen > 64) ? 64 : totlen); ++foo; } } #endif return totlen; } static void usbvision_isoc_irq(struct urb *urb) { int err_code = 0; int len; struct usb_usbvision *usbvision = urb->context; int i; unsigned long start_time = jiffies; struct usbvision_frame **f; /* We don't want to do anything if we are about to be removed! */ if (!USBVISION_IS_OPERATIONAL(usbvision)) return; /* any urb with wrong status is ignored without acknowledgement */ if (urb->status == -ENOENT) return; f = &usbvision->cur_frame; /* Manage streaming interruption */ if (usbvision->streaming == stream_interrupt) { usbvision->streaming = stream_idle; if ((*f)) { (*f)->grabstate = frame_state_ready; (*f)->scanstate = scan_state_scanning; } PDEBUG(DBG_IRQ, "stream interrupted"); wake_up_interruptible(&usbvision->wait_stream); } /* Copy the data received into our scratch buffer */ len = usbvision_compress_isochronous(usbvision, urb); usbvision->isoc_urb_count++; usbvision->urb_length = len; if (usbvision->streaming == stream_on) { /* If we collected enough data let's parse! */ if (scratch_len(usbvision) > USBVISION_HEADER_LENGTH && !list_empty(&(usbvision->inqueue))) { if (!(*f)) { (*f) = list_entry(usbvision->inqueue.next, struct usbvision_frame, frame); } usbvision_parse_data(usbvision); } else { /* If we don't have a frame we're current working on, complain */ PDEBUG(DBG_IRQ, "received data, but no one needs it"); scratch_reset(usbvision); } } else { PDEBUG(DBG_IRQ, "received data, but no one needs it"); scratch_reset(usbvision); } usbvision->time_in_irq += jiffies - start_time; for (i = 0; i < USBVISION_URB_FRAMES; i++) { urb->iso_frame_desc[i].status = 0; urb->iso_frame_desc[i].actual_length = 0; } urb->status = 0; urb->dev = usbvision->dev; err_code = usb_submit_urb(urb, GFP_ATOMIC); if (err_code) { dev_err(&usbvision->dev->dev, "%s: usb_submit_urb failed: error %d\n", __func__, err_code); } return; } /*************************************/ /* Low level usbvision access functions */ /*************************************/ /* * usbvision_read_reg() * * return < 0 -> Error * >= 0 -> Data */ int usbvision_read_reg(struct usb_usbvision *usbvision, unsigned char reg) { int err_code = 0; unsigned char buffer[1]; if (!USBVISION_IS_OPERATIONAL(usbvision)) return -1; err_code = usb_control_msg(usbvision->dev, usb_rcvctrlpipe(usbvision->dev, 1), USBVISION_OP_CODE, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT, 0, (__u16) reg, buffer, 1, HZ); if (err_code < 0) { dev_err(&usbvision->dev->dev, "%s: failed: error %d\n", __func__, err_code); return err_code; } return buffer[0]; } /* * usbvision_write_reg() * * return 1 -> Reg written * 0 -> usbvision is not yet ready * -1 -> Something went wrong */ int usbvision_write_reg(struct usb_usbvision *usbvision, unsigned char reg, unsigned char value) { int err_code = 0; if (!USBVISION_IS_OPERATIONAL(usbvision)) return 0; err_code = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1), USBVISION_OP_CODE, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT, 0, (__u16) reg, &value, 1, HZ); if (err_code < 
0) { dev_err(&usbvision->dev->dev, "%s: failed: error %d\n", __func__, err_code); } return err_code; } static void usbvision_ctrl_urb_complete(struct urb *urb) { struct usb_usbvision *usbvision = (struct usb_usbvision *)urb->context; PDEBUG(DBG_IRQ, ""); usbvision->ctrl_urb_busy = 0; if (waitqueue_active(&usbvision->ctrl_urb_wq)) wake_up_interruptible(&usbvision->ctrl_urb_wq); } static int usbvision_write_reg_irq(struct usb_usbvision *usbvision, int address, unsigned char *data, int len) { int err_code = 0; PDEBUG(DBG_IRQ, ""); if (len > 8) return -EFAULT; if (usbvision->ctrl_urb_busy) return -EBUSY; usbvision->ctrl_urb_busy = 1; usbvision->ctrl_urb_setup.bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT; usbvision->ctrl_urb_setup.bRequest = USBVISION_OP_CODE; usbvision->ctrl_urb_setup.wValue = 0; usbvision->ctrl_urb_setup.wIndex = cpu_to_le16(address); usbvision->ctrl_urb_setup.wLength = cpu_to_le16(len); usb_fill_control_urb(usbvision->ctrl_urb, usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1), (unsigned char *)&usbvision->ctrl_urb_setup, (void *)usbvision->ctrl_urb_buffer, len, usbvision_ctrl_urb_complete, (void *)usbvision); memcpy(usbvision->ctrl_urb_buffer, data, len); err_code = usb_submit_urb(usbvision->ctrl_urb, GFP_ATOMIC); if (err_code < 0) { /* error in usb_submit_urb() */ usbvision->ctrl_urb_busy = 0; } PDEBUG(DBG_IRQ, "submit %d byte: error %d", len, err_code); return err_code; } static int usbvision_init_compression(struct usb_usbvision *usbvision) { int err_code = 0; usbvision->last_isoc_frame_num = -1; usbvision->isoc_data_count = 0; usbvision->isoc_packet_count = 0; usbvision->isoc_skip_count = 0; usbvision->compr_level = 50; usbvision->last_compr_level = -1; usbvision->isoc_urb_count = 0; usbvision->request_intra = 1; usbvision->isoc_measure_bandwidth_count = 0; return err_code; } /* this function measures the used bandwidth since last call * return: 0 : no error * sets used_bandwidth to 1-100 : 1-100% of full bandwidth resp. 
to isoc_packet_size */ static int usbvision_measure_bandwidth(struct usb_usbvision *usbvision) { int err_code = 0; if (usbvision->isoc_measure_bandwidth_count < 2) { /* this gives an average bandwidth of 3 frames */ usbvision->isoc_measure_bandwidth_count++; return err_code; } if ((usbvision->isoc_packet_size > 0) && (usbvision->isoc_packet_count > 0)) { usbvision->used_bandwidth = usbvision->isoc_data_count / (usbvision->isoc_packet_count + usbvision->isoc_skip_count) * 100 / usbvision->isoc_packet_size; } usbvision->isoc_measure_bandwidth_count = 0; usbvision->isoc_data_count = 0; usbvision->isoc_packet_count = 0; usbvision->isoc_skip_count = 0; return err_code; } static int usbvision_adjust_compression(struct usb_usbvision *usbvision) { int err_code = 0; unsigned char buffer[6]; PDEBUG(DBG_IRQ, ""); if ((adjust_compression) && (usbvision->used_bandwidth > 0)) { usbvision->compr_level += (usbvision->used_bandwidth - 90) / 2; RESTRICT_TO_RANGE(usbvision->compr_level, 0, 100); if (usbvision->compr_level != usbvision->last_compr_level) { int distortion; if (usbvision->bridge_type == BRIDGE_NT1004 || usbvision->bridge_type == BRIDGE_NT1005) { buffer[0] = (unsigned char)(4 + 16 * usbvision->compr_level / 100); /* PCM Threshold 1 */ buffer[1] = (unsigned char)(4 + 8 * usbvision->compr_level / 100); /* PCM Threshold 2 */ distortion = 7 + 248 * usbvision->compr_level / 100; buffer[2] = (unsigned char)(distortion & 0xFF); /* Average distortion Threshold (inter) */ buffer[3] = (unsigned char)(distortion & 0xFF); /* Average distortion Threshold (intra) */ distortion = 1 + 42 * usbvision->compr_level / 100; buffer[4] = (unsigned char)(distortion & 0xFF); /* Maximum distortion Threshold (inter) */ buffer[5] = (unsigned char)(distortion & 0xFF); /* Maximum distortion Threshold (intra) */ } else { /* BRIDGE_NT1003 */ buffer[0] = (unsigned char)(4 + 16 * usbvision->compr_level / 100); /* PCM threshold 1 */ buffer[1] = (unsigned char)(4 + 8 * usbvision->compr_level / 100); /* PCM threshold 2 */ distortion = 2 + 253 * usbvision->compr_level / 100; buffer[2] = (unsigned char)(distortion & 0xFF); /* distortion threshold bit0-7 */ buffer[3] = 0; /* (unsigned char)((distortion >> 8) & 0x0F); distortion threshold bit 8-11 */ distortion = 0 + 43 * usbvision->compr_level / 100; buffer[4] = (unsigned char)(distortion & 0xFF); /* maximum distortion bit0-7 */ buffer[5] = 0; /* (unsigned char)((distortion >> 8) & 0x01); maximum distortion bit 8 */ } err_code = usbvision_write_reg_irq(usbvision, USBVISION_PCM_THR1, buffer, 6); if (err_code == 0) { PDEBUG(DBG_IRQ, "new compr params %#02x %#02x %#02x %#02x %#02x %#02x", buffer[0], buffer[1], buffer[2], buffer[3], buffer[4], buffer[5]); usbvision->last_compr_level = usbvision->compr_level; } } } return err_code; } static int usbvision_request_intra(struct usb_usbvision *usbvision) { int err_code = 0; unsigned char buffer[1]; PDEBUG(DBG_IRQ, ""); usbvision->request_intra = 1; buffer[0] = 1; usbvision_write_reg_irq(usbvision, USBVISION_FORCE_INTRA, buffer, 1); return err_code; } static int usbvision_unrequest_intra(struct usb_usbvision *usbvision) { int err_code = 0; unsigned char buffer[1]; PDEBUG(DBG_IRQ, ""); usbvision->request_intra = 0; buffer[0] = 0; usbvision_write_reg_irq(usbvision, USBVISION_FORCE_INTRA, buffer, 1); return err_code; } /******************************* * usbvision utility functions *******************************/ int usbvision_power_off(struct usb_usbvision *usbvision) { int err_code = 0; PDEBUG(DBG_FUNC, ""); err_code = 
usbvision_write_reg(usbvision, USBVISION_PWR_REG, USBVISION_SSPND_EN); if (err_code == 1) usbvision->power = 0; PDEBUG(DBG_FUNC, "%s: err_code %d", (err_code != 1) ? "ERROR" : "power is off", err_code); return err_code; } /* configure webcam image sensor using the serial port */ static int usbvision_init_webcam(struct usb_usbvision *usbvision) { int rc; int i; static char init_values[38][3] = { { 0x04, 0x12, 0x08 }, { 0x05, 0xff, 0xc8 }, { 0x06, 0x18, 0x07 }, { 0x07, 0x90, 0x00 }, { 0x09, 0x00, 0x00 }, { 0x0a, 0x00, 0x00 }, { 0x0b, 0x08, 0x00 }, { 0x0d, 0xcc, 0xcc }, { 0x0e, 0x13, 0x14 }, { 0x10, 0x9b, 0x83 }, { 0x11, 0x5a, 0x3f }, { 0x12, 0xe4, 0x73 }, { 0x13, 0x88, 0x84 }, { 0x14, 0x89, 0x80 }, { 0x15, 0x00, 0x20 }, { 0x16, 0x00, 0x00 }, { 0x17, 0xff, 0xa0 }, { 0x18, 0x6b, 0x20 }, { 0x19, 0x22, 0x40 }, { 0x1a, 0x10, 0x07 }, { 0x1b, 0x00, 0x47 }, { 0x1c, 0x03, 0xe0 }, { 0x1d, 0x00, 0x00 }, { 0x1e, 0x00, 0x00 }, { 0x1f, 0x00, 0x00 }, { 0x20, 0x00, 0x00 }, { 0x21, 0x00, 0x00 }, { 0x22, 0x00, 0x00 }, { 0x23, 0x00, 0x00 }, { 0x24, 0x00, 0x00 }, { 0x25, 0x00, 0x00 }, { 0x26, 0x00, 0x00 }, { 0x27, 0x00, 0x00 }, { 0x28, 0x00, 0x00 }, { 0x29, 0x00, 0x00 }, { 0x08, 0x80, 0x60 }, { 0x0f, 0x2d, 0x24 }, { 0x0c, 0x80, 0x80 } }; char value[3]; /* the only difference between PAL and NTSC init_values */ if (usbvision_device_data[usbvision->dev_model].video_norm == V4L2_STD_NTSC) init_values[4][1] = 0x34; for (i = 0; i < sizeof(init_values) / 3; i++) { usbvision_write_reg(usbvision, USBVISION_SER_MODE, USBVISION_SER_MODE_SOFT); memcpy(value, init_values[i], 3); rc = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1), USBVISION_OP_CODE, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT, 0, (__u16) USBVISION_SER_DAT1, value, 3, HZ); if (rc < 0) return rc; usbvision_write_reg(usbvision, USBVISION_SER_MODE, USBVISION_SER_MODE_SIO); /* write 3 bytes to the serial port using SIO mode */ usbvision_write_reg(usbvision, USBVISION_SER_CONT, 3 | 0x10); usbvision_write_reg(usbvision, USBVISION_IOPIN_REG, 0); usbvision_write_reg(usbvision, USBVISION_SER_MODE, USBVISION_SER_MODE_SOFT); usbvision_write_reg(usbvision, USBVISION_IOPIN_REG, USBVISION_IO_2); usbvision_write_reg(usbvision, USBVISION_SER_MODE, USBVISION_SER_MODE_SOFT | USBVISION_CLK_OUT); usbvision_write_reg(usbvision, USBVISION_SER_MODE, USBVISION_SER_MODE_SOFT | USBVISION_DAT_IO); usbvision_write_reg(usbvision, USBVISION_SER_MODE, USBVISION_SER_MODE_SOFT | USBVISION_CLK_OUT | USBVISION_DAT_IO); } return 0; } /* * usbvision_set_video_format() * */ static int usbvision_set_video_format(struct usb_usbvision *usbvision, int format) { static const char proc[] = "usbvision_set_video_format"; int rc; unsigned char value[2]; if (!USBVISION_IS_OPERATIONAL(usbvision)) return 0; PDEBUG(DBG_FUNC, "isoc_mode %#02x", format); if ((format != ISOC_MODE_YUV422) && (format != ISOC_MODE_YUV420) && (format != ISOC_MODE_COMPRESS)) { printk(KERN_ERR "usbvision: unknown video format %02x, using default YUV420", format); format = ISOC_MODE_YUV420; } value[0] = 0x0A; /* TODO: See the effect of the filter */ value[1] = format; /* Sets the VO_MODE register which follows FILT_CONT */ rc = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1), USBVISION_OP_CODE, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT, 0, (__u16) USBVISION_FILT_CONT, value, 2, HZ); if (rc < 0) { printk(KERN_ERR "%s: ERROR=%d. 
USBVISION stopped - " "reconnect or reload driver.\n", proc, rc); } usbvision->isoc_mode = format; return rc; } /* * usbvision_set_output() * */ int usbvision_set_output(struct usb_usbvision *usbvision, int width, int height) { int err_code = 0; int usb_width, usb_height; unsigned int frame_rate = 0, frame_drop = 0; unsigned char value[4]; if (!USBVISION_IS_OPERATIONAL(usbvision)) return 0; if (width > MAX_USB_WIDTH) { usb_width = width / 2; usbvision->stretch_width = 2; } else { usb_width = width; usbvision->stretch_width = 1; } if (height > MAX_USB_HEIGHT) { usb_height = height / 2; usbvision->stretch_height = 2; } else { usb_height = height; usbvision->stretch_height = 1; } RESTRICT_TO_RANGE(usb_width, MIN_FRAME_WIDTH, MAX_USB_WIDTH); usb_width &= ~(MIN_FRAME_WIDTH-1); RESTRICT_TO_RANGE(usb_height, MIN_FRAME_HEIGHT, MAX_USB_HEIGHT); usb_height &= ~(1); PDEBUG(DBG_FUNC, "usb %dx%d; screen %dx%d; stretch %dx%d", usb_width, usb_height, width, height, usbvision->stretch_width, usbvision->stretch_height); /* I'll not rewrite the same values */ if ((usb_width != usbvision->curwidth) || (usb_height != usbvision->curheight)) { value[0] = usb_width & 0xff; /* LSB */ value[1] = (usb_width >> 8) & 0x03; /* MSB */ value[2] = usb_height & 0xff; /* LSB */ value[3] = (usb_height >> 8) & 0x03; /* MSB */ err_code = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1), USBVISION_OP_CODE, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT, 0, (__u16) USBVISION_LXSIZE_O, value, 4, HZ); if (err_code < 0) { dev_err(&usbvision->dev->dev, "%s failed: error %d\n", __func__, err_code); return err_code; } usbvision->curwidth = usbvision->stretch_width * usb_width; usbvision->curheight = usbvision->stretch_height * usb_height; } if (usbvision->isoc_mode == ISOC_MODE_YUV422) frame_rate = (usbvision->isoc_packet_size * 1000) / (usb_width * usb_height * 2); else if (usbvision->isoc_mode == ISOC_MODE_YUV420) frame_rate = (usbvision->isoc_packet_size * 1000) / ((usb_width * usb_height * 12) / 8); else frame_rate = FRAMERATE_MAX; if (usbvision->tvnorm_id & V4L2_STD_625_50) frame_drop = frame_rate * 32 / 25 - 1; else if (usbvision->tvnorm_id & V4L2_STD_525_60) frame_drop = frame_rate * 32 / 30 - 1; RESTRICT_TO_RANGE(frame_drop, FRAMERATE_MIN, FRAMERATE_MAX); PDEBUG(DBG_FUNC, "frame_rate %d fps, frame_drop %d", frame_rate, frame_drop); frame_drop = FRAMERATE_MAX; /* We can allow the maximum here, because dropping is controlled */ if (usbvision_device_data[usbvision->dev_model].codec == CODEC_WEBCAM) { if (usbvision_device_data[usbvision->dev_model].video_norm == V4L2_STD_PAL) frame_drop = 25; else frame_drop = 30; } /* frame_drop = 7; => frame_phase = 1, 5, 9, 13, 17, 21, 25, 0, 4, 8, ... => frame_skip = 4; => frame_rate = (7 + 1) * 25 / 32 = 200 / 32 = 6.25; frame_drop = 9; => frame_phase = 1, 5, 8, 11, 14, 17, 21, 24, 27, 1, 4, 8, ... => frame_skip = 4, 3, 3, 3, 3, 4, 3, 3, 3, 3, 4, ... => frame_rate = (9 + 1) * 25 / 32 = 250 / 32 = 7.8125; */ err_code = usbvision_write_reg(usbvision, USBVISION_FRM_RATE, frame_drop); return err_code; } /* * usbvision_frames_alloc * allocate the required frames */ int usbvision_frames_alloc(struct usb_usbvision *usbvision, int number_of_frames) { int i; /* needs to be page aligned cause the buffers can be mapped individually! 
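 * Worked example (added; assumes the usual 4 KiB page size): a 320 x 240
 * frame in a 2-byte/pixel format needs 320 * 240 * 2 = 153600 bytes, which
 * PAGE_ALIGN() rounds up to 155648 (38 pages), so every frame starts on its
 * own page boundary and can be mmap()ed independently.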
*/ usbvision->max_frame_size = PAGE_ALIGN(usbvision->curwidth * usbvision->curheight * usbvision->palette.bytes_per_pixel); /* Try to do my best to allocate the frames the user wants in the remaining memory */ usbvision->num_frames = number_of_frames; while (usbvision->num_frames > 0) { usbvision->fbuf_size = usbvision->num_frames * usbvision->max_frame_size; usbvision->fbuf = usbvision_rvmalloc(usbvision->fbuf_size); if (usbvision->fbuf) break; usbvision->num_frames--; } spin_lock_init(&usbvision->queue_lock); init_waitqueue_head(&usbvision->wait_frame); init_waitqueue_head(&usbvision->wait_stream); /* Allocate all buffers */ for (i = 0; i < usbvision->num_frames; i++) { usbvision->frame[i].index = i; usbvision->frame[i].grabstate = frame_state_unused; usbvision->frame[i].data = usbvision->fbuf + i * usbvision->max_frame_size; /* * Set default sizes for read operation. */ usbvision->stretch_width = 1; usbvision->stretch_height = 1; usbvision->frame[i].width = usbvision->curwidth; usbvision->frame[i].height = usbvision->curheight; usbvision->frame[i].bytes_read = 0; } PDEBUG(DBG_FUNC, "allocated %d frames (%d bytes per frame)", usbvision->num_frames, usbvision->max_frame_size); return usbvision->num_frames; } /* * usbvision_frames_free * frees memory allocated for the frames */ void usbvision_frames_free(struct usb_usbvision *usbvision) { /* Have to free all that memory */ PDEBUG(DBG_FUNC, "free %d frames", usbvision->num_frames); if (usbvision->fbuf != NULL) { usbvision_rvfree(usbvision->fbuf, usbvision->fbuf_size); usbvision->fbuf = NULL; usbvision->num_frames = 0; } } /* * usbvision_empty_framequeues() * prepare queues for incoming and outgoing frames */ void usbvision_empty_framequeues(struct usb_usbvision *usbvision) { u32 i; INIT_LIST_HEAD(&(usbvision->inqueue)); INIT_LIST_HEAD(&(usbvision->outqueue)); for (i = 0; i < USBVISION_NUMFRAMES; i++) { usbvision->frame[i].grabstate = frame_state_unused; usbvision->frame[i].bytes_read = 0; } } /* * usbvision_stream_interrupt() * stops streaming */ int usbvision_stream_interrupt(struct usb_usbvision *usbvision) { int ret = 0; /* stop reading from the device */ usbvision->streaming = stream_interrupt; ret = wait_event_timeout(usbvision->wait_stream, (usbvision->streaming == stream_idle), msecs_to_jiffies(USBVISION_NUMSBUF*USBVISION_URB_FRAMES)); return ret; } /* * usbvision_set_compress_params() * */ static int usbvision_set_compress_params(struct usb_usbvision *usbvision) { static const char proc[] = "usbvision_set_compression_params: "; int rc; unsigned char value[6]; value[0] = 0x0F; /* Intra-Compression cycle */ value[1] = 0x01; /* Reg.45 one line per strip */ value[2] = 0x00; /* Reg.46 Force intra mode on all new frames */ value[3] = 0x00; /* Reg.47 FORCE_UP <- 0 normal operation (not force) */ value[4] = 0xA2; /* Reg.48 BUF_THR I'm not sure if this does something in uncompressed mode.
*/ value[5] = 0x00; /* Reg.49 DVI_YUV This has nothing to do with compression */ /* values captured for NT1004 */ /* value[0] = 0xFF; Never apply intra mode automatically */ /* value[1] = 0xF1; Use full frame height for virtual strip width; One line per strip */ /* value[2] = 0x01; Force intra mode on all new frames */ /* value[3] = 0x00; Strip size 400 Bytes; do not force up */ /* value[4] = 0xA2; */ if (!USBVISION_IS_OPERATIONAL(usbvision)) return 0; rc = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1), USBVISION_OP_CODE, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT, 0, (__u16) USBVISION_INTRA_CYC, value, 5, HZ); if (rc < 0) { printk(KERN_ERR "%sERROR=%d. USBVISION stopped - " "reconnect or reload driver.\n", proc, rc); return rc; } if (usbvision->bridge_type == BRIDGE_NT1004) { value[0] = 20; /* PCM Threshold 1 */ value[1] = 12; /* PCM Threshold 2 */ value[2] = 255; /* Distortion Threshold inter */ value[3] = 255; /* Distortion Threshold intra */ value[4] = 43; /* Max Distortion inter */ value[5] = 43; /* Max Distortion intra */ } else { value[0] = 20; /* PCM Threshold 1 */ value[1] = 12; /* PCM Threshold 2 */ value[2] = 255; /* Distortion Threshold d7-d0 */ value[3] = 0; /* Distortion Threshold d11-d8 */ value[4] = 43; /* Max Distortion d7-d0 */ value[5] = 0; /* Max Distortion d8 */ } if (!USBVISION_IS_OPERATIONAL(usbvision)) return 0; rc = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1), USBVISION_OP_CODE, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT, 0, (__u16) USBVISION_PCM_THR1, value, 6, HZ); if (rc < 0) { printk(KERN_ERR "%sERROR=%d. USBVISION stopped - " "reconnect or reload driver.\n", proc, rc); } return rc; } /* * usbvision_set_input() * * Set the input (saa711x, ...) size x y and other misc input params * I have no idea if these parameters are right * */ int usbvision_set_input(struct usb_usbvision *usbvision) { static const char proc[] = "usbvision_set_input: "; int rc; unsigned char value[8]; unsigned char dvi_yuv_value; if (!USBVISION_IS_OPERATIONAL(usbvision)) return 0; /* Set input format expected from decoder */ if (usbvision_device_data[usbvision->dev_model].vin_reg1_override) { value[0] = usbvision_device_data[usbvision->dev_model].vin_reg1; } else if (usbvision_device_data[usbvision->dev_model].codec == CODEC_SAA7113) { /* SAA7113 uses 8 bit output */ value[0] = USBVISION_8_422_SYNC; } else { /* I'm only sure about d2-d0 [010] 16 bit 4:2:2 using sync pulses * as that is how the saa7111 is configured */ value[0] = USBVISION_16_422_SYNC; /* | USBVISION_VSNC_POL | USBVISION_VCLK_POL);*/ } rc = usbvision_write_reg(usbvision, USBVISION_VIN_REG1, value[0]); if (rc < 0) { printk(KERN_ERR "%sERROR=%d. USBVISION stopped - " "reconnect or reload driver.\n", proc, rc); return rc; } if (usbvision->tvnorm_id & V4L2_STD_PAL) { value[0] = 0xC0; value[1] = 0x02; /* 0x02C0 -> 704 Input video line length */ value[2] = 0x20; value[3] = 0x01; /* 0x0120 -> 288 Input video n. of lines */ value[4] = 0x60; value[5] = 0x00; /* 0x0060 -> 96 Input video h offset */ value[6] = 0x16; value[7] = 0x00; /* 0x0016 -> 22 Input video v offset */ } else if (usbvision->tvnorm_id & V4L2_STD_SECAM) { value[0] = 0xC0; value[1] = 0x02; /* 0x02C0 -> 704 Input video line length */ value[2] = 0x20; value[3] = 0x01; /* 0x0120 -> 288 Input video n.
of lines */ value[4] = 0x01; value[5] = 0x00; /* 0x0001 -> 01 Input video h offset */ value[6] = 0x01; value[7] = 0x00; /* 0x0001 -> 01 Input video v offset */ } else { /* V4L2_STD_NTSC */ value[0] = 0xD0; value[1] = 0x02; /* 0x02D0 -> 720 Input video line length */ value[2] = 0xF0; value[3] = 0x00; /* 0x00F0 -> 240 Input video number of lines */ value[4] = 0x50; value[5] = 0x00; /* 0x0050 -> 80 Input video h offset */ value[6] = 0x10; value[7] = 0x00; /* 0x0010 -> 16 Input video v offset */ } /* webcam is only 480 pixels wide, both PAL and NTSC versions */ if (usbvision_device_data[usbvision->dev_model].codec == CODEC_WEBCAM) { value[0] = 0xe0; value[1] = 0x01; /* 0x01E0 -> 480 Input video line length */ } if (usbvision_device_data[usbvision->dev_model].x_offset >= 0) { value[4] = usbvision_device_data[usbvision->dev_model].x_offset & 0xff; value[5] = (usbvision_device_data[usbvision->dev_model].x_offset & 0x0300) >> 8; } if (adjust_x_offset != -1) { value[4] = adjust_x_offset & 0xff; value[5] = (adjust_x_offset & 0x0300) >> 8; } if (usbvision_device_data[usbvision->dev_model].y_offset >= 0) { value[6] = usbvision_device_data[usbvision->dev_model].y_offset & 0xff; value[7] = (usbvision_device_data[usbvision->dev_model].y_offset & 0x0300) >> 8; } if (adjust_y_offset != -1) { value[6] = adjust_y_offset & 0xff; value[7] = (adjust_y_offset & 0x0300) >> 8; } rc = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1), USBVISION_OP_CODE, /* USBVISION specific code */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT, 0, (__u16) USBVISION_LXSIZE_I, value, 8, HZ); if (rc < 0) { printk(KERN_ERR "%sERROR=%d. USBVISION stopped - " "reconnect or reload driver.\n", proc, rc); return rc; } dvi_yuv_value = 0x00; /* U comes after V, Ya comes after U/V, Yb comes after Ya */ if (usbvision_device_data[usbvision->dev_model].dvi_yuv_override) { dvi_yuv_value = usbvision_device_data[usbvision->dev_model].dvi_yuv; } else if (usbvision_device_data[usbvision->dev_model].codec == CODEC_SAA7113) { /* This changes as the fine sync control changes. Further investigation necessary */ dvi_yuv_value = 0x06; } return usbvision_write_reg(usbvision, USBVISION_DVI_YUV, dvi_yuv_value); } /* * usbvision_set_dram_settings() * * Set the buffer addresses needed by the usbvision DRAM to operate. * These values have been taken with usbsnoop.
* */ static int usbvision_set_dram_settings(struct usb_usbvision *usbvision) { int rc; unsigned char value[8]; if (usbvision->isoc_mode == ISOC_MODE_COMPRESS) { value[0] = 0x42; value[1] = 0x71; value[2] = 0xff; value[3] = 0x00; value[4] = 0x98; value[5] = 0xe0; value[6] = 0x71; value[7] = 0xff; /* UR: 0x0E200-0x3FFFF = 204288 Words (1 Word = 2 Byte) */ /* FDL: 0x00000-0x0E099 = 57498 Words */ /* VDW: 0x0E3FF-0x3FFFF */ } else { value[0] = 0x42; value[1] = 0x00; value[2] = 0xff; value[3] = 0x00; value[4] = 0x00; value[5] = 0x00; value[6] = 0x00; value[7] = 0xff; } /* These are the values of the address of the video buffer, * they have to be loaded into the USBVISION_DRM_PRM1-8 * * Start address of video output buffer for read: drm_prm1-2 -> 0x00000 * End address of video output buffer for read: drm_prm1-3 -> 0x1ffff * Start address of video frame delay buffer: drm_prm1-4 -> 0x20000 * Only used in compressed mode * End address of video frame delay buffer: drm_prm1-5-6 -> 0x3ffff * Only used in compressed mode * Start address of video output buffer for write: drm_prm1-7 -> 0x00000 * End address of video output buffer for write: drm_prm1-8 -> 0x1ffff */ if (!USBVISION_IS_OPERATIONAL(usbvision)) return 0; rc = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1), USBVISION_OP_CODE, /* USBVISION specific code */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT, 0, (__u16) USBVISION_DRM_PRM1, value, 8, HZ); if (rc < 0) { dev_err(&usbvision->dev->dev, "%s: ERROR=%d\n", __func__, rc); return rc; } /* Restart the video buffer logic */ rc = usbvision_write_reg(usbvision, USBVISION_DRM_CONT, USBVISION_RES_UR | USBVISION_RES_FDL | USBVISION_RES_VDW); if (rc < 0) return rc; rc = usbvision_write_reg(usbvision, USBVISION_DRM_CONT, 0x00); return rc; } /* * () * * Power on the device, enables suspend-resume logic * & reset the isoc End-Point * */ int usbvision_power_on(struct usb_usbvision *usbvision) { int err_code = 0; PDEBUG(DBG_FUNC, ""); usbvision_write_reg(usbvision, USBVISION_PWR_REG, USBVISION_SSPND_EN); usbvision_write_reg(usbvision, USBVISION_PWR_REG, USBVISION_SSPND_EN | USBVISION_RES2); if (usbvision_device_data[usbvision->dev_model].codec == CODEC_WEBCAM) { usbvision_write_reg(usbvision, USBVISION_VIN_REG1, USBVISION_16_422_SYNC | USBVISION_HVALID_PO); usbvision_write_reg(usbvision, USBVISION_VIN_REG2, USBVISION_NOHVALID | USBVISION_KEEP_BLANK); } usbvision_write_reg(usbvision, USBVISION_PWR_REG, USBVISION_SSPND_EN | USBVISION_PWR_VID); mdelay(10); err_code = usbvision_write_reg(usbvision, USBVISION_PWR_REG, USBVISION_SSPND_EN | USBVISION_PWR_VID | USBVISION_RES2); if (err_code == 1) usbvision->power = 1; PDEBUG(DBG_FUNC, "%s: err_code %d", (err_code < 0) ? 
"ERROR" : "power is on", err_code); return err_code; } /* * usbvision timer stuff */ /* to call usbvision_power_off from task queue */ static void call_usbvision_power_off(struct work_struct *work) { struct usb_usbvision *usbvision = container_of(work, struct usb_usbvision, power_off_work); PDEBUG(DBG_FUNC, ""); if (mutex_lock_interruptible(&usbvision->v4l2_lock)) return; if (usbvision->user == 0) { usbvision_i2c_unregister(usbvision); usbvision_power_off(usbvision); usbvision->initialized = 0; } mutex_unlock(&usbvision->v4l2_lock); } static void usbvision_power_off_timer(unsigned long data) { struct usb_usbvision *usbvision = (void *)data; PDEBUG(DBG_FUNC, ""); del_timer(&usbvision->power_off_timer); INIT_WORK(&usbvision->power_off_work, call_usbvision_power_off); (void) schedule_work(&usbvision->power_off_work); } void usbvision_init_power_off_timer(struct usb_usbvision *usbvision) { init_timer(&usbvision->power_off_timer); usbvision->power_off_timer.data = (long)usbvision; usbvision->power_off_timer.function = usbvision_power_off_timer; } void usbvision_set_power_off_timer(struct usb_usbvision *usbvision) { mod_timer(&usbvision->power_off_timer, jiffies + USBVISION_POWEROFF_TIME); } void usbvision_reset_power_off_timer(struct usb_usbvision *usbvision) { if (timer_pending(&usbvision->power_off_timer)) del_timer(&usbvision->power_off_timer); } /* * usbvision_begin_streaming() * Sure you have to put bit 7 to 0, if not incoming frames are droped, but no * idea about the rest */ int usbvision_begin_streaming(struct usb_usbvision *usbvision) { if (usbvision->isoc_mode == ISOC_MODE_COMPRESS) usbvision_init_compression(usbvision); return usbvision_write_reg(usbvision, USBVISION_VIN_REG2, USBVISION_NOHVALID | usbvision->vin_reg2_preset); } /* * usbvision_restart_isoc() * Not sure yet if touching here PWR_REG make loose the config */ int usbvision_restart_isoc(struct usb_usbvision *usbvision) { int ret; ret = usbvision_write_reg(usbvision, USBVISION_PWR_REG, USBVISION_SSPND_EN | USBVISION_PWR_VID); if (ret < 0) return ret; ret = usbvision_write_reg(usbvision, USBVISION_PWR_REG, USBVISION_SSPND_EN | USBVISION_PWR_VID | USBVISION_RES2); if (ret < 0) return ret; ret = usbvision_write_reg(usbvision, USBVISION_VIN_REG2, USBVISION_KEEP_BLANK | USBVISION_NOHVALID | usbvision->vin_reg2_preset); if (ret < 0) return ret; /* TODO: schedule timeout */ while ((usbvision_read_reg(usbvision, USBVISION_STATUS_REG) & 0x01) != 1) ; return 0; } int usbvision_audio_off(struct usb_usbvision *usbvision) { if (usbvision_write_reg(usbvision, USBVISION_IOPIN_REG, USBVISION_AUDIO_MUTE) < 0) { printk(KERN_ERR "usbvision_audio_off: can't write reg\n"); return -1; } usbvision->audio_mute = 0; usbvision->audio_channel = USBVISION_AUDIO_MUTE; return 0; } int usbvision_set_audio(struct usb_usbvision *usbvision, int audio_channel) { if (!usbvision->audio_mute) { if (usbvision_write_reg(usbvision, USBVISION_IOPIN_REG, audio_channel) < 0) { printk(KERN_ERR "usbvision_set_audio: can't write iopin register for audio switching\n"); return -1; } } usbvision->audio_channel = audio_channel; return 0; } int usbvision_setup(struct usb_usbvision *usbvision, int format) { if (usbvision_device_data[usbvision->dev_model].codec == CODEC_WEBCAM) usbvision_init_webcam(usbvision); usbvision_set_video_format(usbvision, format); usbvision_set_dram_settings(usbvision); usbvision_set_compress_params(usbvision); usbvision_set_input(usbvision); usbvision_set_output(usbvision, MAX_USB_WIDTH, MAX_USB_HEIGHT); usbvision_restart_isoc(usbvision); /* cosas 
del PCM */ return USBVISION_IS_OPERATIONAL(usbvision); } int usbvision_set_alternate(struct usb_usbvision *dev) { int err_code, prev_alt = dev->iface_alt; int i; dev->iface_alt = 0; for (i = 0; i < dev->num_alt; i++) if (dev->alt_max_pkt_size[i] > dev->alt_max_pkt_size[dev->iface_alt]) dev->iface_alt = i; if (dev->iface_alt != prev_alt) { dev->isoc_packet_size = dev->alt_max_pkt_size[dev->iface_alt]; PDEBUG(DBG_FUNC, "setting alternate %d with max_packet_size=%u", dev->iface_alt, dev->isoc_packet_size); err_code = usb_set_interface(dev->dev, dev->iface, dev->iface_alt); if (err_code < 0) { dev_err(&dev->dev->dev, "cannot change alternate number to %d (error=%i)\n", dev->iface_alt, err_code); return err_code; } } PDEBUG(DBG_ISOC, "ISO Packet Length:%d", dev->isoc_packet_size); return 0; } /* * usbvision_init_isoc() * */ int usbvision_init_isoc(struct usb_usbvision *usbvision) { struct usb_device *dev = usbvision->dev; int buf_idx, err_code, reg_value; int sb_size; if (!USBVISION_IS_OPERATIONAL(usbvision)) return -EFAULT; usbvision->cur_frame = NULL; scratch_reset(usbvision); /* Alternate interface 1 is is the biggest frame size */ err_code = usbvision_set_alternate(usbvision); if (err_code < 0) { usbvision->last_error = err_code; return -EBUSY; } sb_size = USBVISION_URB_FRAMES * usbvision->isoc_packet_size; reg_value = (16 - usbvision_read_reg(usbvision, USBVISION_ALTER_REG)) & 0x0F; usbvision->usb_bandwidth = reg_value >> 1; PDEBUG(DBG_ISOC, "USB Bandwidth Usage: %dMbit/Sec", usbvision->usb_bandwidth); /* We double buffer the Iso lists */ for (buf_idx = 0; buf_idx < USBVISION_NUMSBUF; buf_idx++) { int j, k; struct urb *urb; urb = usb_alloc_urb(USBVISION_URB_FRAMES, GFP_KERNEL); if (urb == NULL) { dev_err(&usbvision->dev->dev, "%s: usb_alloc_urb() failed\n", __func__); return -ENOMEM; } usbvision->sbuf[buf_idx].urb = urb; usbvision->sbuf[buf_idx].data = usb_alloc_coherent(usbvision->dev, sb_size, GFP_KERNEL, &urb->transfer_dma); urb->dev = dev; urb->context = usbvision; urb->pipe = usb_rcvisocpipe(dev, usbvision->video_endp); urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP; urb->interval = 1; urb->transfer_buffer = usbvision->sbuf[buf_idx].data; urb->complete = usbvision_isoc_irq; urb->number_of_packets = USBVISION_URB_FRAMES; urb->transfer_buffer_length = usbvision->isoc_packet_size * USBVISION_URB_FRAMES; for (j = k = 0; j < USBVISION_URB_FRAMES; j++, k += usbvision->isoc_packet_size) { urb->iso_frame_desc[j].offset = k; urb->iso_frame_desc[j].length = usbvision->isoc_packet_size; } } /* Submit all URBs */ for (buf_idx = 0; buf_idx < USBVISION_NUMSBUF; buf_idx++) { err_code = usb_submit_urb(usbvision->sbuf[buf_idx].urb, GFP_KERNEL); if (err_code) { dev_err(&usbvision->dev->dev, "%s: usb_submit_urb(%d) failed: error %d\n", __func__, buf_idx, err_code); } } usbvision->streaming = stream_idle; PDEBUG(DBG_ISOC, "%s: streaming=1 usbvision->video_endp=$%02x", __func__, usbvision->video_endp); return 0; } /* * usbvision_stop_isoc() * * This procedure stops streaming and deallocates URBs. Then it * activates zero-bandwidth alt. setting of the video interface. 
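* ("Zero-bandwidth" means alternate setting 0, whose isochronous max packet size is 0, so the host stops reserving iso bus bandwidth for the device.)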
* */ void usbvision_stop_isoc(struct usb_usbvision *usbvision) { int buf_idx, err_code, reg_value; int sb_size = USBVISION_URB_FRAMES * usbvision->isoc_packet_size; if ((usbvision->streaming == stream_off) || (usbvision->dev == NULL)) return; /* Unschedule all of the iso td's */ for (buf_idx = 0; buf_idx < USBVISION_NUMSBUF; buf_idx++) { usb_kill_urb(usbvision->sbuf[buf_idx].urb); if (usbvision->sbuf[buf_idx].data) { usb_free_coherent(usbvision->dev, sb_size, usbvision->sbuf[buf_idx].data, usbvision->sbuf[buf_idx].urb->transfer_dma); } usb_free_urb(usbvision->sbuf[buf_idx].urb); usbvision->sbuf[buf_idx].urb = NULL; } PDEBUG(DBG_ISOC, "%s: streaming=stream_off\n", __func__); usbvision->streaming = stream_off; if (!usbvision->remove_pending) { /* Set packet size to 0 */ usbvision->iface_alt = 0; err_code = usb_set_interface(usbvision->dev, usbvision->iface, usbvision->iface_alt); if (err_code < 0) { dev_err(&usbvision->dev->dev, "%s: usb_set_interface() failed: error %d\n", __func__, err_code); usbvision->last_error = err_code; } reg_value = (16-usbvision_read_reg(usbvision, USBVISION_ALTER_REG)) & 0x0F; usbvision->isoc_packet_size = (reg_value == 0) ? 0 : (reg_value * 64) - 1; PDEBUG(DBG_ISOC, "ISO Packet Length:%d", usbvision->isoc_packet_size); usbvision->usb_bandwidth = reg_value >> 1; PDEBUG(DBG_ISOC, "USB Bandwidth Usage: %dMbit/Sec", usbvision->usb_bandwidth); } } int usbvision_muxsel(struct usb_usbvision *usbvision, int channel) { /* inputs #0 and #3 are constant for every SAA711x. */ /* inputs #1 and #2 are variable for SAA7111 and SAA7113 */ int mode[4] = { SAA7115_COMPOSITE0, 0, 0, SAA7115_COMPOSITE3 }; int audio[] = { 1, 0, 0, 0 }; /* channel 0 is TV with audiochannel 1 (tuner mono) */ /* channel 1 is Composite with audio channel 0 (line in) */ /* channel 2 is S-Video with audio channel 0 (line in) */ /* channel 3 is additional video inputs to the device with audio channel 0 (line in) */ RESTRICT_TO_RANGE(channel, 0, usbvision->video_inputs); usbvision->ctl_input = channel; /* set the new channel */ /* Regular USB TV Tuners -> channel: 0 = Television, 1 = Composite, 2 = S-Video */ /* Four video input devices -> channel: 0 = Chan White, 1 = Chan Green, 2 = Chan Yellow, 3 = Chan Red */ switch (usbvision_device_data[usbvision->dev_model].codec) { case CODEC_SAA7113: mode[1] = SAA7115_COMPOSITE2; if (switch_svideo_input) { /* To handle problems with S-Video Input for * some devices. Use switch_svideo_input * parameter when loading the module.*/ mode[2] = SAA7115_COMPOSITE1; } else { mode[2] = SAA7115_SVIDEO1; } break; case CODEC_SAA7111: default: /* modes for saa7111 */ mode[1] = SAA7115_COMPOSITE1; mode[2] = SAA7115_SVIDEO1; break; } call_all(usbvision, video, s_routing, mode[channel], 0, 0); usbvision_set_audio(usbvision, audio[channel]); return 0; } /* * Overrides for Emacs so that we follow Linus's tabbing style. * --------------------------------------------------------------------------- * Local variables: * c-basic-offset: 8 * End: */
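/*
 * Editor's sketch (not part of the original driver): a minimal, hypothetical
 * caller showing the bring-up order implied by usbvision_setup() and
 * usbvision_init_isoc() above. The name example_usbvision_start() is an
 * assumption for illustration only.
 */
static int example_usbvision_start(struct usb_usbvision *usbvision, int format)
{
	/* program video format, DRAM, compression, input and output first;
	 * usbvision_setup() returns the operational state, not an errno */
	if (!usbvision_setup(usbvision, format))
		return -EIO;
	/* then allocate and submit the double-buffered iso URB lists */
	return usbvision_init_isoc(usbvision);
}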
gpl-2.0
lavvy/xbmc
tools/EventClients/Clients/WiiRemote/wiiuse_v0.12/src/dynamics.c
282
6925
/* * wiiuse * * Written By: * Michael Laforest < para > * Email: < thepara (--AT--) g m a i l [--DOT--] com > * * Copyright 2006-2007 * * This file is part of wiiuse. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * $Header$ * */ /** * @file * @brief Handles the dynamics of the wiimote. * * The file includes functions that handle the dynamics * of the wiimote. Such dynamics include orientation and * motion sensing. */ #include <stdio.h> #include <stdlib.h> #include <math.h> #ifdef WIN32 #include <float.h> #endif #include "definitions.h" #include "wiiuse_internal.h" #include "ir.h" #include "dynamics.h" /** * @brief Calculate the roll, pitch, yaw. * * @param ac An accelerometer (accel_t) structure. * @param accel [in] Pointer to a vec3b_t structure that holds the raw acceleration data. * @param orient [out] Pointer to a orient_t structure that will hold the orientation data. * @param rorient [out] Pointer to a orient_t structure that will hold the non-smoothed orientation data. * @param smooth If smoothing should be performed on the angles calculated. 1 to enable, 0 to disable. * * Given the raw acceleration data from the accelerometer struct, calculate * the orientation of the device and set it in the \a orient parameter. */ void calculate_orientation(struct accel_t* ac, struct vec3b_t* accel, struct orient_t* orient, int smooth) { float xg, yg, zg; float x, y, z; /* * roll - use atan(z / x) [ ranges from -180 to 180 ] * pitch - use atan(z / y) [ ranges from -180 to 180 ] * yaw - impossible to tell without IR */ /* yaw - set to 0, IR will take care of it if it's enabled */ orient->yaw = 0.0f; /* find out how much it has to move to be 1g */ xg = (float)ac->cal_g.x; yg = (float)ac->cal_g.y; zg = (float)ac->cal_g.z; /* find out how much it actually moved and normalize to +/- 1g */ x = ((float)accel->x - (float)ac->cal_zero.x) / xg; y = ((float)accel->y - (float)ac->cal_zero.y) / yg; z = ((float)accel->z - (float)ac->cal_zero.z) / zg; /* make sure x,y,z are between -1 and 1 for the tan functions */ if (x < -1.0f) x = -1.0f; else if (x > 1.0f) x = 1.0f; if (y < -1.0f) y = -1.0f; else if (y > 1.0f) y = 1.0f; if (z < -1.0f) z = -1.0f; else if (z > 1.0f) z = 1.0f; /* if it is over 1g then it is probably accelerating and not reliable */ if (abs(accel->x - ac->cal_zero.x) <= ac->cal_g.x) { /* roll */ x = RAD_TO_DEGREE(atan2f(x, z)); orient->roll = x; orient->a_roll = x; } if (abs(accel->y - ac->cal_zero.y) <= ac->cal_g.y) { /* pitch */ y = RAD_TO_DEGREE(atan2f(y, z)); orient->pitch = y; orient->a_pitch = y; } /* smooth the angles if enabled */ if (smooth) { apply_smoothing(ac, orient, SMOOTH_ROLL); apply_smoothing(ac, orient, SMOOTH_PITCH); } } /** * @brief Calculate the gravity forces on each axis. * * @param ac An accelerometer (accel_t) structure. * @param accel [in] Pointer to a vec3b_t structure that holds the raw acceleration data. 
* @param gforce [out] Pointer to a gforce_t structure that will hold the gravity force data. */ void calculate_gforce(struct accel_t* ac, struct vec3b_t* accel, struct gforce_t* gforce) { float xg, yg, zg; /* find out how much it has to move to be 1g */ xg = (float)ac->cal_g.x; yg = (float)ac->cal_g.y; zg = (float)ac->cal_g.z; /* find out how much it actually moved and normalize to +/- 1g */ gforce->x = ((float)accel->x - (float)ac->cal_zero.x) / xg; gforce->y = ((float)accel->y - (float)ac->cal_zero.y) / yg; gforce->z = ((float)accel->z - (float)ac->cal_zero.z) / zg; } /** * @brief Calculate the angle and magnitude of a joystick. * * @param js [out] Pointer to a joystick_t structure. * @param x The raw x-axis value. * @param y The raw y-axis value. */ void calc_joystick_state(struct joystick_t* js, float x, float y) { float rx, ry, ang; /* * Since the joystick center may not be exactly * (min + max) / 2, * the range from the min to the center and the center to the max * may be different. * Because of this, depending on whether the current x or y value is greater * or less than the associated axis center value, it needs to be interpolated * between the center and the minimum or maximum rather than between * the minimum and maximum. * * So we have something like this: * (x min) [-1] ---------*------ [0] (x center) [0] -------- [1] (x max) * Where the * is the current x value. * The range is therefore -1 to 1, 0 being the exact center rather than * the middle of min and max. */ if (x == js->center.x) rx = 0; else if (x >= js->center.x) rx = ((float)(x - js->center.x) / (float)(js->max.x - js->center.x)); else rx = ((float)(x - js->min.x) / (float)(js->center.x - js->min.x)) - 1.0f; if (y == js->center.y) ry = 0; else if (y >= js->center.y) ry = ((float)(y - js->center.y) / (float)(js->max.y - js->center.y)); else ry = ((float)(y - js->min.y) / (float)(js->center.y - js->min.y)) - 1.0f; /* calculate the joystick angle and magnitude */ ang = RAD_TO_DEGREE(atanf(ry / rx)); ang -= 90.0f; if (rx < 0.0f) ang -= 180.0f; js->ang = absf(ang); js->mag = (float) sqrt((rx * rx) + (ry * ry)); } void apply_smoothing(struct accel_t* ac, struct orient_t* orient, int type) { switch (type) { case SMOOTH_ROLL: { /* it's possible last iteration was nan or inf, so set it to 0 if that happened */ if (isnan(ac->st_roll) || isinf(ac->st_roll)) ac->st_roll = 0.0f; /* * If the sign changes (which will happen if going from -180 to 180) * or from (-1 to 1) then don't smooth, just use the new angle. */ if (((ac->st_roll < 0) && (orient->roll > 0)) || ((ac->st_roll > 0) && (orient->roll < 0))) { ac->st_roll = orient->roll; } else { orient->roll = ac->st_roll + (ac->st_alpha * (orient->a_roll - ac->st_roll)); ac->st_roll = orient->roll; } return; } case SMOOTH_PITCH: { if (isnan(ac->st_pitch) || isinf(ac->st_pitch)) ac->st_pitch = 0.0f; if (((ac->st_pitch < 0) && (orient->pitch > 0)) || ((ac->st_pitch > 0) && (orient->pitch < 0))) { ac->st_pitch = orient->pitch; } else { orient->pitch = ac->st_pitch + (ac->st_alpha * (orient->a_pitch - ac->st_pitch)); ac->st_pitch = orient->pitch; } return; } } }
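/*
 * Editor's sketch (not part of the original wiiuse source): a standalone
 * illustration of the exponential smoothing step used by apply_smoothing()
 * above, st = st + alpha * (sample - st). With alpha = 0.5 and a constant
 * input of 90 degrees, successive outputs from st = 0 are 45, 67.5,
 * 78.75, ... converging on 90.
 */
#include <stdio.h>

static float smooth_step(float st, float alpha, float sample)
{
	/* first-order low-pass: move a fraction alpha toward the new sample */
	return st + alpha * (sample - st);
}

int main(void)
{
	float st = 0.0f;
	int i;
	for (i = 0; i < 8; ++i) {
		st = smooth_step(st, 0.5f, 90.0f);
		printf("iteration %d: %.4f\n", i, st);
	}
	return 0;
}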
gpl-2.0
rayeleks/linux
drivers/staging/fwserial/fwserial.c
538
78743
/* * FireWire Serial driver * * Copyright (C) 2012 Peter Hurley <peter@hurleysoftware.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/sched.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/mod_devicetable.h> #include <linux/rculist.h> #include <linux/workqueue.h> #include <linux/ratelimit.h> #include <linux/bug.h> #include <linux/uaccess.h> #include "fwserial.h" #define be32_to_u64(hi, lo) ((u64)be32_to_cpu(hi) << 32 | be32_to_cpu(lo)) #define LINUX_VENDOR_ID 0xd00d1eU /* same id used in card root directory */ #define FWSERIAL_VERSION 0x00e81cU /* must be unique within LINUX_VENDOR_ID */ /* configurable options */ static int num_ttys = 4; /* # of std ttys to create per fw_card */ /* - doubles as loopback port index */ static bool auto_connect = true; /* try to VIRT_CABLE to every peer */ static bool create_loop_dev = true; /* create a loopback device for each card */ module_param_named(ttys, num_ttys, int, S_IRUGO | S_IWUSR); module_param_named(auto, auto_connect, bool, S_IRUGO | S_IWUSR); module_param_named(loop, create_loop_dev, bool, S_IRUGO | S_IWUSR); /* * Threshold below which the tty is woken for writing * - should be equal to WAKEUP_CHARS in drivers/tty/n_tty.c because * even if the writer is woken, n_tty_poll() won't set POLLOUT until * our fifo is below this level */ #define WAKEUP_CHARS 256 /** * fwserial_list: list of every fw_serial created for each fw_card * See discussion in fwserial_probe. */ static LIST_HEAD(fwserial_list); static DEFINE_MUTEX(fwserial_list_mutex); /** * port_table: array of tty ports allocated to each fw_card * * tty ports are allocated during probe when an fw_serial is first * created for a given fw_card. Ports are allocated in a contiguous block, * each block consisting of 'num_ports' ports. */ static struct fwtty_port *port_table[MAX_TOTAL_PORTS]; static DEFINE_MUTEX(port_table_lock); static bool port_table_corrupt; #define FWTTY_INVALID_INDEX MAX_TOTAL_PORTS #define loop_idx(port) (((port)->index) / num_ports) #define table_idx(loop) ((loop) * num_ports + num_ttys) /* total # of tty ports created per fw_card */ static int num_ports; /* slab used as pool for struct fwtty_transactions */ static struct kmem_cache *fwtty_txn_cache; struct tty_driver *fwtty_driver; static struct tty_driver *fwloop_driver; static struct dentry *fwserial_debugfs; struct fwtty_transaction; typedef void (*fwtty_transaction_cb)(struct fw_card *card, int rcode, void *data, size_t length, struct fwtty_transaction *txn); struct fwtty_transaction { struct fw_transaction fw_txn; fwtty_transaction_cb callback; struct fwtty_port *port; union { struct dma_pending dma_pended; }; }; #define to_device(a, b) (a->b) #define fwtty_err(p, fmt, ...) \ dev_err(to_device(p, device), fmt, ##__VA_ARGS__) #define fwtty_info(p, fmt, ...) \ dev_info(to_device(p, device), fmt, ##__VA_ARGS__) #define fwtty_notice(p, fmt, ...) \ dev_notice(to_device(p, device), fmt, ##__VA_ARGS__) #define fwtty_dbg(p, fmt, ...) 
\ dev_dbg(to_device(p, device), "%s: " fmt, __func__, ##__VA_ARGS__) #define fwtty_err_ratelimited(p, fmt, ...) \ dev_err_ratelimited(to_device(p, device), fmt, ##__VA_ARGS__) #ifdef DEBUG static inline void debug_short_write(struct fwtty_port *port, int c, int n) { int avail; if (n < c) { spin_lock_bh(&port->lock); avail = dma_fifo_avail(&port->tx_fifo); spin_unlock_bh(&port->lock); fwtty_dbg(port, "short write: avail:%d req:%d wrote:%d\n", avail, c, n); } } #else #define debug_short_write(port, c, n) #endif static struct fwtty_peer *__fwserial_peer_by_node_id(struct fw_card *card, int generation, int id); #ifdef FWTTY_PROFILING static void fwtty_profile_fifo(struct fwtty_port *port, unsigned *stat) { spin_lock_bh(&port->lock); fwtty_profile_data(stat, dma_fifo_avail(&port->tx_fifo)); spin_unlock_bh(&port->lock); } static void fwtty_dump_profile(struct seq_file *m, struct stats *stats) { /* for each stat, print sum of 0 to 2^k, then individually */ int k = 4; unsigned sum; int j; char t[10]; snprintf(t, 10, "< %d", 1 << k); seq_printf(m, "\n%14s %6s", " ", t); for (j = k + 1; j < DISTRIBUTION_MAX_INDEX; ++j) seq_printf(m, "%6d", 1 << j); ++k; for (j = 0, sum = 0; j <= k; ++j) sum += stats->reads[j]; seq_printf(m, "\n%14s: %6d", "reads", sum); for (j = k + 1; j <= DISTRIBUTION_MAX_INDEX; ++j) seq_printf(m, "%6d", stats->reads[j]); for (j = 0, sum = 0; j <= k; ++j) sum += stats->writes[j]; seq_printf(m, "\n%14s: %6d", "writes", sum); for (j = k + 1; j <= DISTRIBUTION_MAX_INDEX; ++j) seq_printf(m, "%6d", stats->writes[j]); for (j = 0, sum = 0; j <= k; ++j) sum += stats->txns[j]; seq_printf(m, "\n%14s: %6d", "txns", sum); for (j = k + 1; j <= DISTRIBUTION_MAX_INDEX; ++j) seq_printf(m, "%6d", stats->txns[j]); for (j = 0, sum = 0; j <= k; ++j) sum += stats->unthrottle[j]; seq_printf(m, "\n%14s: %6d", "avail @ unthr", sum); for (j = k + 1; j <= DISTRIBUTION_MAX_INDEX; ++j) seq_printf(m, "%6d", stats->unthrottle[j]); } #else #define fwtty_profile_fifo(port, stat) #define fwtty_dump_profile(m, stats) #endif /* * Returns the max receive packet size for the given node * Devices which are OHCI v1.0/ v1.1/ v1.2-draft or RFC 2734 compliant * are required by specification to support max_rec of 8 (512 bytes) or more. 
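* For example, max_rec = 8 yields 2 << 8 = 512 bytes, while the min() below caps anything above max_rec = 11 at 4096 bytes.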
*/ static inline int device_max_receive(struct fw_device *fw_device) { /* see IEEE 1394-2008 table 8-8 */ return min(2 << fw_device->max_rec, 4096); } static void fwtty_log_tx_error(struct fwtty_port *port, int rcode) { switch (rcode) { case RCODE_SEND_ERROR: fwtty_err_ratelimited(port, "card busy\n"); break; case RCODE_ADDRESS_ERROR: fwtty_err_ratelimited(port, "bad unit addr or write length\n"); break; case RCODE_DATA_ERROR: fwtty_err_ratelimited(port, "failed rx\n"); break; case RCODE_NO_ACK: fwtty_err_ratelimited(port, "missing ack\n"); break; case RCODE_BUSY: fwtty_err_ratelimited(port, "remote busy\n"); break; default: fwtty_err_ratelimited(port, "failed tx: %d\n", rcode); } } static void fwtty_txn_constructor(void *this) { struct fwtty_transaction *txn = this; init_timer(&txn->fw_txn.split_timeout_timer); } static void fwtty_common_callback(struct fw_card *card, int rcode, void *payload, size_t len, void *cb_data) { struct fwtty_transaction *txn = cb_data; struct fwtty_port *port = txn->port; if (port && rcode != RCODE_COMPLETE) fwtty_log_tx_error(port, rcode); if (txn->callback) txn->callback(card, rcode, payload, len, txn); kmem_cache_free(fwtty_txn_cache, txn); } static int fwtty_send_data_async(struct fwtty_peer *peer, int tcode, unsigned long long addr, void *payload, size_t len, fwtty_transaction_cb callback, struct fwtty_port *port) { struct fwtty_transaction *txn; int generation; txn = kmem_cache_alloc(fwtty_txn_cache, GFP_ATOMIC); if (!txn) return -ENOMEM; txn->callback = callback; txn->port = port; generation = peer->generation; smp_rmb(); fw_send_request(peer->serial->card, &txn->fw_txn, tcode, peer->node_id, generation, peer->speed, addr, payload, len, fwtty_common_callback, txn); return 0; } static void fwtty_send_txn_async(struct fwtty_peer *peer, struct fwtty_transaction *txn, int tcode, unsigned long long addr, void *payload, size_t len, fwtty_transaction_cb callback, struct fwtty_port *port) { int generation; txn->callback = callback; txn->port = port; generation = peer->generation; smp_rmb(); fw_send_request(peer->serial->card, &txn->fw_txn, tcode, peer->node_id, generation, peer->speed, addr, payload, len, fwtty_common_callback, txn); } static void __fwtty_restart_tx(struct fwtty_port *port) { int len, avail; len = dma_fifo_out_level(&port->tx_fifo); if (len) schedule_delayed_work(&port->drain, 0); avail = dma_fifo_avail(&port->tx_fifo); fwtty_dbg(port, "fifo len: %d avail: %d\n", len, avail); } static void fwtty_restart_tx(struct fwtty_port *port) { spin_lock_bh(&port->lock); __fwtty_restart_tx(port); spin_unlock_bh(&port->lock); } /** * fwtty_update_port_status - decodes & dispatches line status changes * * Note: in loopback, the port->lock is being held. Only use functions that * don't attempt to reclaim the port->lock. 
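* For example, when CTS rises while hw_stopped, the dispatch below restarts tx with __fwtty_restart_tx() in loopback (lock already held) and fwtty_restart_tx() otherwise.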
*/ static void fwtty_update_port_status(struct fwtty_port *port, unsigned status) { unsigned delta; struct tty_struct *tty; /* simulated LSR/MSR status from remote */ status &= ~MCTRL_MASK; delta = (port->mstatus ^ status) & ~MCTRL_MASK; delta &= ~(status & TIOCM_RNG); port->mstatus = status; if (delta & TIOCM_RNG) ++port->icount.rng; if (delta & TIOCM_DSR) ++port->icount.dsr; if (delta & TIOCM_CAR) ++port->icount.dcd; if (delta & TIOCM_CTS) ++port->icount.cts; fwtty_dbg(port, "status: %x delta: %x\n", status, delta); if (delta & TIOCM_CAR) { tty = tty_port_tty_get(&port->port); if (tty && !C_CLOCAL(tty)) { if (status & TIOCM_CAR) wake_up_interruptible(&port->port.open_wait); else schedule_work(&port->hangup); } tty_kref_put(tty); } if (delta & TIOCM_CTS) { tty = tty_port_tty_get(&port->port); if (tty && C_CRTSCTS(tty)) { if (tty->hw_stopped) { if (status & TIOCM_CTS) { tty->hw_stopped = 0; if (port->loopback) __fwtty_restart_tx(port); else fwtty_restart_tx(port); } } else { if (~status & TIOCM_CTS) tty->hw_stopped = 1; } } tty_kref_put(tty); } else if (delta & OOB_TX_THROTTLE) { tty = tty_port_tty_get(&port->port); if (tty) { if (tty->hw_stopped) { if (~status & OOB_TX_THROTTLE) { tty->hw_stopped = 0; if (port->loopback) __fwtty_restart_tx(port); else fwtty_restart_tx(port); } } else { if (status & OOB_TX_THROTTLE) tty->hw_stopped = 1; } } tty_kref_put(tty); } if (delta & (UART_LSR_BI << 24)) { if (status & (UART_LSR_BI << 24)) { port->break_last = jiffies; schedule_delayed_work(&port->emit_breaks, 0); } else { /* run emit_breaks one last time (if pending) */ mod_delayed_work(system_wq, &port->emit_breaks, 0); } } if (delta & (TIOCM_DSR | TIOCM_CAR | TIOCM_CTS | TIOCM_RNG)) wake_up_interruptible(&port->port.delta_msr_wait); } /** * __fwtty_port_line_status - generate 'line status' for indicated port * * This function returns a remote 'MSR' state based on the local 'MCR' state, * as if a null modem cable was attached. The actual status is a mangling * of TIOCM_* bits suitable for sending to a peer's status_addr. * * Note: caller must be holding port lock */ static unsigned __fwtty_port_line_status(struct fwtty_port *port) { unsigned status = 0; /* TODO: add module param to tie RNG to DTR as well */ if (port->mctrl & TIOCM_DTR) status |= TIOCM_DSR | TIOCM_CAR; if (port->mctrl & TIOCM_RTS) status |= TIOCM_CTS; if (port->mctrl & OOB_RX_THROTTLE) status |= OOB_TX_THROTTLE; /* emulate BRK as add'l line status */ if (port->break_ctl) status |= UART_LSR_BI << 24; return status; } /** * __fwtty_write_port_status - send the port line status to peer * * Note: caller must be holding the port lock. 
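* For example, with local DTR and RTS raised, the peer receives a status quadlet carrying TIOCM_DSR | TIOCM_CAR | TIOCM_CTS, per __fwtty_port_line_status() above.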
*/ static int __fwtty_write_port_status(struct fwtty_port *port) { struct fwtty_peer *peer; int err = -ENOENT; unsigned status = __fwtty_port_line_status(port); rcu_read_lock(); peer = rcu_dereference(port->peer); if (peer) { err = fwtty_send_data_async(peer, TCODE_WRITE_QUADLET_REQUEST, peer->status_addr, &status, sizeof(status), NULL, port); } rcu_read_unlock(); return err; } /** * fwtty_write_port_status - same as above but locked by port lock */ static int fwtty_write_port_status(struct fwtty_port *port) { int err; spin_lock_bh(&port->lock); err = __fwtty_write_port_status(port); spin_unlock_bh(&port->lock); return err; } static void fwtty_throttle_port(struct fwtty_port *port) { struct tty_struct *tty; unsigned old; tty = tty_port_tty_get(&port->port); if (!tty) return; spin_lock_bh(&port->lock); old = port->mctrl; port->mctrl |= OOB_RX_THROTTLE; if (C_CRTSCTS(tty)) port->mctrl &= ~TIOCM_RTS; if (~old & OOB_RX_THROTTLE) __fwtty_write_port_status(port); spin_unlock_bh(&port->lock); tty_kref_put(tty); } /** * fwtty_do_hangup - wait for ldisc to deliver all pending rx; only then hangup * * When the remote has finished tx, and all in-flight rx has been received * and pushed to the flip buffer, the remote may close its device. This will * drop DTR on the remote which will drop carrier here. Typically, the tty is * hung up when carrier is dropped or lost. * * However, there is a race between the hang up and the line discipline * delivering its data to the reader. A hangup will cause the ldisc to flush * (i.e., clear) the read buffer and flip buffer. Because of firewire's * relatively high throughput, the ldisc frequently lags well behind the driver, * resulting in lost data (which has already been received and written to * the flip buffer) when the remote closes its end. * * Unfortunately, since the flip buffer offers no direct method for determining * if it holds data, ensuring the ldisc has delivered all data is problematic.
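* The 50 ms sleep in fwtty_do_hangup() below is therefore a heuristic grace period for the ldisc to drain, not a guarantee.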
*/ /* FIXME: drop this workaround when __tty_hangup waits for ldisc completion */ static void fwtty_do_hangup(struct work_struct *work) { struct fwtty_port *port = to_port(work, hangup); struct tty_struct *tty; schedule_timeout_uninterruptible(msecs_to_jiffies(50)); tty = tty_port_tty_get(&port->port); if (tty) tty_vhangup(tty); tty_kref_put(tty); } static void fwtty_emit_breaks(struct work_struct *work) { struct fwtty_port *port = to_port(to_delayed_work(work), emit_breaks); static const char buf[16]; unsigned long now = jiffies; unsigned long elapsed = now - port->break_last; int n, t, c, brk = 0; /* generate breaks at the line rate (but at least 1) */ n = (elapsed * port->cps) / HZ + 1; port->break_last = now; fwtty_dbg(port, "sending %d brks\n", n); while (n) { t = min(n, 16); c = tty_insert_flip_string_fixed_flag(&port->port, buf, TTY_BREAK, t); n -= c; brk += c; if (c < t) break; } tty_flip_buffer_push(&port->port); if (port->mstatus & (UART_LSR_BI << 24)) schedule_delayed_work(&port->emit_breaks, FREQ_BREAKS); port->icount.brk += brk; } static int fwtty_rx(struct fwtty_port *port, unsigned char *data, size_t len) { int c, n = len; unsigned lsr; int err = 0; fwtty_dbg(port, "%d\n", n); fwtty_profile_data(port->stats.reads, n); if (port->write_only) { n = 0; goto out; } /* disregard break status; breaks are generated by emit_breaks work */ lsr = (port->mstatus >> 24) & ~UART_LSR_BI; if (port->overrun) lsr |= UART_LSR_OE; if (lsr & UART_LSR_OE) ++port->icount.overrun; lsr &= port->status_mask; if (lsr & ~port->ignore_mask & UART_LSR_OE) { if (!tty_insert_flip_char(&port->port, 0, TTY_OVERRUN)) { err = -EIO; goto out; } } port->overrun = false; if (lsr & port->ignore_mask & ~UART_LSR_OE) { /* TODO: don't drop SAK and Magic SysRq here */ n = 0; goto out; } c = tty_insert_flip_string_fixed_flag(&port->port, data, TTY_NORMAL, n); if (c > 0) tty_flip_buffer_push(&port->port); n -= c; if (n) { port->overrun = true; err = -EIO; fwtty_err_ratelimited(port, "flip buffer overrun\n"); } else { /* throttle the sender if remaining flip buffer space has * reached high watermark to avoid losing data which may be * in-flight. Since the AR request context is 32k, that much * data may have _already_ been acked. */ if (tty_buffer_space_avail(&port->port) < HIGH_WATERMARK) fwtty_throttle_port(port); } out: port->icount.rx += len; port->stats.lost += n; return err; } /** * fwtty_port_handler - bus address handler for port reads/writes * @parameters: fw_address_callback_t as specified by firewire core interface * * This handler is responsible for handling inbound read/write dma from remotes. 
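* Address map assumed by the dispatch below: a quadlet write at rx_handler.offset updates line status; block writes at rx_handler.offset + 4 carry serial data.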
*/ static void fwtty_port_handler(struct fw_card *card, struct fw_request *request, int tcode, int destination, int source, int generation, unsigned long long addr, void *data, size_t len, void *callback_data) { struct fwtty_port *port = callback_data; struct fwtty_peer *peer; int err; int rcode; /* Only accept rx from the peer virtual-cabled to this port */ rcu_read_lock(); peer = __fwserial_peer_by_node_id(card, generation, source); rcu_read_unlock(); if (!peer || peer != rcu_access_pointer(port->peer)) { rcode = RCODE_ADDRESS_ERROR; fwtty_err_ratelimited(port, "ignoring unauthenticated data\n"); goto respond; } switch (tcode) { case TCODE_WRITE_QUADLET_REQUEST: if (addr != port->rx_handler.offset || len != 4) { rcode = RCODE_ADDRESS_ERROR; } else { fwtty_update_port_status(port, *(unsigned *)data); rcode = RCODE_COMPLETE; } break; case TCODE_WRITE_BLOCK_REQUEST: if (addr != port->rx_handler.offset + 4 || len > port->rx_handler.length - 4) { rcode = RCODE_ADDRESS_ERROR; } else { err = fwtty_rx(port, data, len); switch (err) { case 0: rcode = RCODE_COMPLETE; break; case -EIO: rcode = RCODE_DATA_ERROR; break; default: rcode = RCODE_CONFLICT_ERROR; break; } } break; default: rcode = RCODE_TYPE_ERROR; } respond: fw_send_response(card, request, rcode); } /** * fwtty_tx_complete - callback for tx dma * @data: ignored, has no meaning for write txns * @length: ignored, has no meaning for write txns * * The writer must be woken here if the fifo has been emptied because it * may have slept if chars_in_buffer was != 0 */ static void fwtty_tx_complete(struct fw_card *card, int rcode, void *data, size_t length, struct fwtty_transaction *txn) { struct fwtty_port *port = txn->port; int len; fwtty_dbg(port, "rcode: %d\n", rcode); switch (rcode) { case RCODE_COMPLETE: spin_lock_bh(&port->lock); dma_fifo_out_complete(&port->tx_fifo, &txn->dma_pended); len = dma_fifo_level(&port->tx_fifo); spin_unlock_bh(&port->lock); port->icount.tx += txn->dma_pended.len; break; default: /* TODO: implement retries */ spin_lock_bh(&port->lock); dma_fifo_out_complete(&port->tx_fifo, &txn->dma_pended); len = dma_fifo_level(&port->tx_fifo); spin_unlock_bh(&port->lock); port->stats.dropped += txn->dma_pended.len; } if (len < WAKEUP_CHARS) tty_port_tty_wakeup(&port->port); } static int fwtty_tx(struct fwtty_port *port, bool drain) { struct fwtty_peer *peer; struct fwtty_transaction *txn; struct tty_struct *tty; int n, len; tty = tty_port_tty_get(&port->port); if (!tty) return -ENOENT; rcu_read_lock(); peer = rcu_dereference(port->peer); if (!peer) { n = -EIO; goto out; } if (test_and_set_bit(IN_TX, &port->flags)) { n = -EALREADY; goto out; } /* try to write as many dma transactions out as possible */ n = -EAGAIN; while (!tty->stopped && !tty->hw_stopped && !test_bit(STOP_TX, &port->flags)) { txn = kmem_cache_alloc(fwtty_txn_cache, GFP_ATOMIC); if (!txn) { n = -ENOMEM; break; } spin_lock_bh(&port->lock); n = dma_fifo_out_pend(&port->tx_fifo, &txn->dma_pended); spin_unlock_bh(&port->lock); fwtty_dbg(port, "out: %u rem: %d\n", txn->dma_pended.len, n); if (n < 0) { kmem_cache_free(fwtty_txn_cache, txn); if (n == -EAGAIN) { ++port->stats.tx_stall; } else if (n == -ENODATA) { fwtty_profile_data(port->stats.txns, 0); } else { ++port->stats.fifo_errs; fwtty_err_ratelimited(port, "fifo err: %d\n", n); } break; } fwtty_profile_data(port->stats.txns, txn->dma_pended.len); fwtty_send_txn_async(peer, txn, TCODE_WRITE_BLOCK_REQUEST, peer->fifo_addr, txn->dma_pended.data, txn->dma_pended.len, fwtty_tx_complete, port); ++port->stats.sent; /* 
* Stop tx if the 'last view' of the fifo is empty or if * this is the writer and there's not enough data to bother */ if (n == 0 || (!drain && n < WRITER_MINIMUM)) break; } if (n >= 0 || n == -EAGAIN || n == -ENOMEM || n == -ENODATA) { spin_lock_bh(&port->lock); len = dma_fifo_out_level(&port->tx_fifo); if (len) { unsigned long delay = (n == -ENOMEM) ? HZ : 1; schedule_delayed_work(&port->drain, delay); } len = dma_fifo_level(&port->tx_fifo); spin_unlock_bh(&port->lock); /* wakeup the writer */ if (drain && len < WAKEUP_CHARS) tty_wakeup(tty); } clear_bit(IN_TX, &port->flags); wake_up_interruptible(&port->wait_tx); out: rcu_read_unlock(); tty_kref_put(tty); return n; } static void fwtty_drain_tx(struct work_struct *work) { struct fwtty_port *port = to_port(to_delayed_work(work), drain); fwtty_tx(port, true); } static void fwtty_write_xchar(struct fwtty_port *port, char ch) { struct fwtty_peer *peer; ++port->stats.xchars; fwtty_dbg(port, "%02x\n", ch); rcu_read_lock(); peer = rcu_dereference(port->peer); if (peer) { fwtty_send_data_async(peer, TCODE_WRITE_BLOCK_REQUEST, peer->fifo_addr, &ch, sizeof(ch), NULL, port); } rcu_read_unlock(); } struct fwtty_port *fwtty_port_get(unsigned index) { struct fwtty_port *port; if (index >= MAX_TOTAL_PORTS) return NULL; mutex_lock(&port_table_lock); port = port_table[index]; if (port) kref_get(&port->serial->kref); mutex_unlock(&port_table_lock); return port; } EXPORT_SYMBOL(fwtty_port_get); static int fwtty_ports_add(struct fw_serial *serial) { int err = -EBUSY; int i, j; if (port_table_corrupt) return err; mutex_lock(&port_table_lock); for (i = 0; i + num_ports <= MAX_TOTAL_PORTS; i += num_ports) { if (!port_table[i]) { for (j = 0; j < num_ports; ++i, ++j) { serial->ports[j]->index = i; port_table[i] = serial->ports[j]; } err = 0; break; } } mutex_unlock(&port_table_lock); return err; } static void fwserial_destroy(struct kref *kref) { struct fw_serial *serial = to_serial(kref, kref); struct fwtty_port **ports = serial->ports; int j, i = ports[0]->index; synchronize_rcu(); mutex_lock(&port_table_lock); for (j = 0; j < num_ports; ++i, ++j) { port_table_corrupt |= port_table[i] != ports[j]; WARN_ONCE(port_table_corrupt, "port_table[%d]: %p != ports[%d]: %p", i, port_table[i], j, ports[j]); port_table[i] = NULL; } mutex_unlock(&port_table_lock); for (j = 0; j < num_ports; ++j) { fw_core_remove_address_handler(&ports[j]->rx_handler); tty_port_destroy(&ports[j]->port); kfree(ports[j]); } kfree(serial); } void fwtty_port_put(struct fwtty_port *port) { kref_put(&port->serial->kref, fwserial_destroy); } EXPORT_SYMBOL(fwtty_port_put); static void fwtty_port_dtr_rts(struct tty_port *tty_port, int on) { struct fwtty_port *port = to_port(tty_port, port); fwtty_dbg(port, "on/off: %d\n", on); spin_lock_bh(&port->lock); /* Don't change carrier state if this is a console */ if (!port->port.console) { if (on) port->mctrl |= TIOCM_DTR | TIOCM_RTS; else port->mctrl &= ~(TIOCM_DTR | TIOCM_RTS); } __fwtty_write_port_status(port); spin_unlock_bh(&port->lock); } /** * fwtty_port_carrier_raised: required tty_port operation * * This port operation is polled after a tty has been opened and is waiting for * carrier detect -- see drivers/tty/tty_port:tty_port_block_til_ready(). 
*/ static int fwtty_port_carrier_raised(struct tty_port *tty_port) { struct fwtty_port *port = to_port(tty_port, port); int rc; rc = (port->mstatus & TIOCM_CAR); fwtty_dbg(port, "%d\n", rc); return rc; } static unsigned set_termios(struct fwtty_port *port, struct tty_struct *tty) { unsigned baud, frame; baud = tty_termios_baud_rate(&tty->termios); tty_termios_encode_baud_rate(&tty->termios, baud, baud); /* compute bit count of 2 frames */ frame = 12 + ((C_CSTOPB(tty)) ? 4 : 2) + ((C_PARENB(tty)) ? 2 : 0); switch (C_CSIZE(tty)) { case CS5: frame -= (C_CSTOPB(tty)) ? 1 : 0; break; case CS6: frame += 2; break; case CS7: frame += 4; break; case CS8: frame += 6; break; } port->cps = (baud << 1) / frame; port->status_mask = UART_LSR_OE; if (_I_FLAG(tty, BRKINT | PARMRK)) port->status_mask |= UART_LSR_BI; port->ignore_mask = 0; if (I_IGNBRK(tty)) { port->ignore_mask |= UART_LSR_BI; if (I_IGNPAR(tty)) port->ignore_mask |= UART_LSR_OE; } port->write_only = !C_CREAD(tty); /* turn off echo and newline xlat if loopback */ if (port->loopback) { tty->termios.c_lflag &= ~(ECHO | ECHOE | ECHOK | ECHOKE | ECHONL | ECHOPRT | ECHOCTL); tty->termios.c_oflag &= ~ONLCR; } return baud; } static int fwtty_port_activate(struct tty_port *tty_port, struct tty_struct *tty) { struct fwtty_port *port = to_port(tty_port, port); unsigned baud; int err; set_bit(TTY_IO_ERROR, &tty->flags); err = dma_fifo_alloc(&port->tx_fifo, FWTTY_PORT_TXFIFO_LEN, cache_line_size(), port->max_payload, FWTTY_PORT_MAX_PEND_DMA, GFP_KERNEL); if (err) return err; spin_lock_bh(&port->lock); baud = set_termios(port, tty); /* if console, don't change carrier state */ if (!port->port.console) { port->mctrl = 0; if (baud != 0) port->mctrl = TIOCM_DTR | TIOCM_RTS; } if (C_CRTSCTS(tty) && ~port->mstatus & TIOCM_CTS) tty->hw_stopped = 1; __fwtty_write_port_status(port); spin_unlock_bh(&port->lock); clear_bit(TTY_IO_ERROR, &tty->flags); return 0; } /** * fwtty_port_shutdown * * Note: the tty port core ensures this is not the console and * manages TTY_IO_ERROR properly */ static void fwtty_port_shutdown(struct tty_port *tty_port) { struct fwtty_port *port = to_port(tty_port, port); /* TODO: cancel outstanding transactions */ cancel_delayed_work_sync(&port->emit_breaks); cancel_delayed_work_sync(&port->drain); spin_lock_bh(&port->lock); port->flags = 0; port->break_ctl = 0; port->overrun = 0; __fwtty_write_port_status(port); dma_fifo_free(&port->tx_fifo); spin_unlock_bh(&port->lock); } static int fwtty_open(struct tty_struct *tty, struct file *fp) { struct fwtty_port *port = tty->driver_data; return tty_port_open(&port->port, tty, fp); } static void fwtty_close(struct tty_struct *tty, struct file *fp) { struct fwtty_port *port = tty->driver_data; tty_port_close(&port->port, tty, fp); } static void fwtty_hangup(struct tty_struct *tty) { struct fwtty_port *port = tty->driver_data; tty_port_hangup(&port->port); } static void fwtty_cleanup(struct tty_struct *tty) { struct fwtty_port *port = tty->driver_data; tty->driver_data = NULL; fwtty_port_put(port); } static int fwtty_install(struct tty_driver *driver, struct tty_struct *tty) { struct fwtty_port *port = fwtty_port_get(tty->index); int err; err = tty_standard_install(driver, tty); if (!err) tty->driver_data = port; else fwtty_port_put(port); return err; } static int fwloop_install(struct tty_driver *driver, struct tty_struct *tty) { struct fwtty_port *port = fwtty_port_get(table_idx(tty->index)); int err; err = tty_standard_install(driver, tty); if (!err) tty->driver_data = port; else 
fwtty_port_put(port); return err; } static int fwtty_write(struct tty_struct *tty, const unsigned char *buf, int c) { struct fwtty_port *port = tty->driver_data; int n, len; fwtty_dbg(port, "%d\n", c); fwtty_profile_data(port->stats.writes, c); spin_lock_bh(&port->lock); n = dma_fifo_in(&port->tx_fifo, buf, c); len = dma_fifo_out_level(&port->tx_fifo); if (len < DRAIN_THRESHOLD) schedule_delayed_work(&port->drain, 1); spin_unlock_bh(&port->lock); if (len >= DRAIN_THRESHOLD) fwtty_tx(port, false); debug_short_write(port, c, n); return (n < 0) ? 0 : n; } static int fwtty_write_room(struct tty_struct *tty) { struct fwtty_port *port = tty->driver_data; int n; spin_lock_bh(&port->lock); n = dma_fifo_avail(&port->tx_fifo); spin_unlock_bh(&port->lock); fwtty_dbg(port, "%d\n", n); return n; } static int fwtty_chars_in_buffer(struct tty_struct *tty) { struct fwtty_port *port = tty->driver_data; int n; spin_lock_bh(&port->lock); n = dma_fifo_level(&port->tx_fifo); spin_unlock_bh(&port->lock); fwtty_dbg(port, "%d\n", n); return n; } static void fwtty_send_xchar(struct tty_struct *tty, char ch) { struct fwtty_port *port = tty->driver_data; fwtty_dbg(port, "%02x\n", ch); fwtty_write_xchar(port, ch); } static void fwtty_throttle(struct tty_struct *tty) { struct fwtty_port *port = tty->driver_data; /* * Ignore throttling (but not unthrottling). * It only makes sense to throttle when data will no longer be * accepted by the tty flip buffer. For example, it is * possible for received data to overflow the tty buffer long * before the line discipline ever has a chance to throttle the driver. * Additionally, the driver may have already completed the I/O * but the tty buffer is still emptying, so the line discipline is * throttling and unthrottling nothing. */ ++port->stats.throttled; } static void fwtty_unthrottle(struct tty_struct *tty) { struct fwtty_port *port = tty->driver_data; fwtty_dbg(port, "CRTSCTS: %d\n", C_CRTSCTS(tty) != 0); fwtty_profile_fifo(port, port->stats.unthrottle); spin_lock_bh(&port->lock); port->mctrl &= ~OOB_RX_THROTTLE; if (C_CRTSCTS(tty)) port->mctrl |= TIOCM_RTS; __fwtty_write_port_status(port); spin_unlock_bh(&port->lock); } static int check_msr_delta(struct fwtty_port *port, unsigned long mask, struct async_icount *prev) { struct async_icount now; int delta; now = port->icount; delta = ((mask & TIOCM_RNG && prev->rng != now.rng) || (mask & TIOCM_DSR && prev->dsr != now.dsr) || (mask & TIOCM_CAR && prev->dcd != now.dcd) || (mask & TIOCM_CTS && prev->cts != now.cts)); *prev = now; return delta; } static int wait_msr_change(struct fwtty_port *port, unsigned long mask) { struct async_icount prev; prev = port->icount; return wait_event_interruptible(port->port.delta_msr_wait, check_msr_delta(port, mask, &prev)); } static int get_serial_info(struct fwtty_port *port, struct serial_struct __user *info) { struct serial_struct tmp; memset(&tmp, 0, sizeof(tmp)); tmp.type = PORT_UNKNOWN; tmp.line = port->port.tty->index; tmp.flags = port->port.flags; tmp.xmit_fifo_size = FWTTY_PORT_TXFIFO_LEN; tmp.baud_base = 400000000; tmp.close_delay = port->port.close_delay; return (copy_to_user(info, &tmp, sizeof(*info))) ? 
-EFAULT : 0; } static int set_serial_info(struct fwtty_port *port, struct serial_struct __user *info) { struct serial_struct tmp; if (copy_from_user(&tmp, info, sizeof(tmp))) return -EFAULT; if (tmp.irq != 0 || tmp.port != 0 || tmp.custom_divisor != 0 || tmp.baud_base != 400000000) return -EPERM; if (!capable(CAP_SYS_ADMIN)) { if (((tmp.flags & ~ASYNC_USR_MASK) != (port->port.flags & ~ASYNC_USR_MASK))) return -EPERM; } else { port->port.close_delay = tmp.close_delay * HZ / 100; } return 0; } static int fwtty_ioctl(struct tty_struct *tty, unsigned cmd, unsigned long arg) { struct fwtty_port *port = tty->driver_data; int err; switch (cmd) { case TIOCGSERIAL: mutex_lock(&port->port.mutex); err = get_serial_info(port, (void __user *)arg); mutex_unlock(&port->port.mutex); break; case TIOCSSERIAL: mutex_lock(&port->port.mutex); err = set_serial_info(port, (void __user *)arg); mutex_unlock(&port->port.mutex); break; case TIOCMIWAIT: err = wait_msr_change(port, arg); break; default: err = -ENOIOCTLCMD; } return err; } static void fwtty_set_termios(struct tty_struct *tty, struct ktermios *old) { struct fwtty_port *port = tty->driver_data; unsigned baud; spin_lock_bh(&port->lock); baud = set_termios(port, tty); if ((baud == 0) && (old->c_cflag & CBAUD)) { port->mctrl &= ~(TIOCM_DTR | TIOCM_RTS); } else if ((baud != 0) && !(old->c_cflag & CBAUD)) { if (C_CRTSCTS(tty) || !test_bit(TTY_THROTTLED, &tty->flags)) port->mctrl |= TIOCM_DTR | TIOCM_RTS; else port->mctrl |= TIOCM_DTR; } __fwtty_write_port_status(port); spin_unlock_bh(&port->lock); if (old->c_cflag & CRTSCTS) { if (!C_CRTSCTS(tty)) { tty->hw_stopped = 0; fwtty_restart_tx(port); } } else if (C_CRTSCTS(tty) && ~port->mstatus & TIOCM_CTS) { tty->hw_stopped = 1; } } /** * fwtty_break_ctl - start/stop sending breaks * * Signals the remote to start or stop generating simulated breaks. * First, stop dequeueing from the fifo and wait for writer/drain to leave tx * before signalling the break line status. This guarantees any pending rx will * be queued to the line discipline before break is simulated on the remote. * Conversely, turning off break_ctl requires signalling the line status change, * then enabling tx. 
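* In short: break-on is STOP_TX -> wait for IN_TX to clear -> advertise break in line status; break-off is advertise line status -> reset the tx fifo -> clear STOP_TX.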
*/ static int fwtty_break_ctl(struct tty_struct *tty, int state) { struct fwtty_port *port = tty->driver_data; long ret; fwtty_dbg(port, "%d\n", state); if (state == -1) { set_bit(STOP_TX, &port->flags); ret = wait_event_interruptible_timeout(port->wait_tx, !test_bit(IN_TX, &port->flags), 10); if (ret == 0 || ret == -ERESTARTSYS) { clear_bit(STOP_TX, &port->flags); fwtty_restart_tx(port); return -EINTR; } } spin_lock_bh(&port->lock); port->break_ctl = (state == -1); __fwtty_write_port_status(port); spin_unlock_bh(&port->lock); if (state == 0) { spin_lock_bh(&port->lock); dma_fifo_reset(&port->tx_fifo); clear_bit(STOP_TX, &port->flags); spin_unlock_bh(&port->lock); } return 0; } static int fwtty_tiocmget(struct tty_struct *tty) { struct fwtty_port *port = tty->driver_data; unsigned tiocm; spin_lock_bh(&port->lock); tiocm = (port->mctrl & MCTRL_MASK) | (port->mstatus & ~MCTRL_MASK); spin_unlock_bh(&port->lock); fwtty_dbg(port, "%x\n", tiocm); return tiocm; } static int fwtty_tiocmset(struct tty_struct *tty, unsigned set, unsigned clear) { struct fwtty_port *port = tty->driver_data; fwtty_dbg(port, "set: %x clear: %x\n", set, clear); /* TODO: simulate loopback if TIOCM_LOOP set */ spin_lock_bh(&port->lock); port->mctrl &= ~(clear & MCTRL_MASK & 0xffff); port->mctrl |= set & MCTRL_MASK & 0xffff; __fwtty_write_port_status(port); spin_unlock_bh(&port->lock); return 0; } static int fwtty_get_icount(struct tty_struct *tty, struct serial_icounter_struct *icount) { struct fwtty_port *port = tty->driver_data; struct stats stats; memcpy(&stats, &port->stats, sizeof(stats)); if (port->port.console) (*port->fwcon_ops->stats)(&stats, port->con_data); icount->cts = port->icount.cts; icount->dsr = port->icount.dsr; icount->rng = port->icount.rng; icount->dcd = port->icount.dcd; icount->rx = port->icount.rx; icount->tx = port->icount.tx + stats.xchars; icount->frame = port->icount.frame; icount->overrun = port->icount.overrun; icount->parity = port->icount.parity; icount->brk = port->icount.brk; icount->buf_overrun = port->icount.overrun; return 0; } static void fwtty_proc_show_port(struct seq_file *m, struct fwtty_port *port) { struct stats stats; memcpy(&stats, &port->stats, sizeof(stats)); if (port->port.console) (*port->fwcon_ops->stats)(&stats, port->con_data); seq_printf(m, " addr:%012llx tx:%d rx:%d", port->rx_handler.offset, port->icount.tx + stats.xchars, port->icount.rx); seq_printf(m, " cts:%d dsr:%d rng:%d dcd:%d", port->icount.cts, port->icount.dsr, port->icount.rng, port->icount.dcd); seq_printf(m, " fe:%d oe:%d pe:%d brk:%d", port->icount.frame, port->icount.overrun, port->icount.parity, port->icount.brk); } static void fwtty_debugfs_show_port(struct seq_file *m, struct fwtty_port *port) { struct stats stats; memcpy(&stats, &port->stats, sizeof(stats)); if (port->port.console) (*port->fwcon_ops->stats)(&stats, port->con_data); seq_printf(m, " dr:%d st:%d err:%d lost:%d", stats.dropped, stats.tx_stall, stats.fifo_errs, stats.lost); seq_printf(m, " pkts:%d thr:%d", stats.sent, stats.throttled); if (port->port.console) { seq_puts(m, "\n "); (*port->fwcon_ops->proc_show)(m, port->con_data); } fwtty_dump_profile(m, &port->stats); } static void fwtty_debugfs_show_peer(struct seq_file *m, struct fwtty_peer *peer) { int generation = peer->generation; smp_rmb(); seq_printf(m, " %s:", dev_name(&peer->unit->device)); seq_printf(m, " node:%04x gen:%d", peer->node_id, generation); seq_printf(m, " sp:%d max:%d guid:%016llx", peer->speed, peer->max_payload, (unsigned long long) peer->guid); seq_printf(m, " 
mgmt:%012llx", (unsigned long long) peer->mgmt_addr); seq_printf(m, " addr:%012llx", (unsigned long long) peer->status_addr); seq_putc(m, '\n'); } static int fwtty_proc_show(struct seq_file *m, void *v) { struct fwtty_port *port; int i; seq_puts(m, "fwserinfo: 1.0 driver: 1.0\n"); for (i = 0; i < MAX_TOTAL_PORTS && (port = fwtty_port_get(i)); ++i) { seq_printf(m, "%2d:", i); if (capable(CAP_SYS_ADMIN)) fwtty_proc_show_port(m, port); fwtty_port_put(port); seq_puts(m, "\n"); } return 0; } static int fwtty_debugfs_stats_show(struct seq_file *m, void *v) { struct fw_serial *serial = m->private; struct fwtty_port *port; int i; for (i = 0; i < num_ports; ++i) { port = fwtty_port_get(serial->ports[i]->index); if (port) { seq_printf(m, "%2d:", port->index); fwtty_proc_show_port(m, port); fwtty_debugfs_show_port(m, port); fwtty_port_put(port); seq_puts(m, "\n"); } } return 0; } static int fwtty_debugfs_peers_show(struct seq_file *m, void *v) { struct fw_serial *serial = m->private; struct fwtty_peer *peer; rcu_read_lock(); seq_printf(m, "card: %s guid: %016llx\n", dev_name(serial->card->device), (unsigned long long) serial->card->guid); list_for_each_entry_rcu(peer, &serial->peer_list, list) fwtty_debugfs_show_peer(m, peer); rcu_read_unlock(); return 0; } static int fwtty_proc_open(struct inode *inode, struct file *fp) { return single_open(fp, fwtty_proc_show, NULL); } static int fwtty_stats_open(struct inode *inode, struct file *fp) { return single_open(fp, fwtty_debugfs_stats_show, inode->i_private); } static int fwtty_peers_open(struct inode *inode, struct file *fp) { return single_open(fp, fwtty_debugfs_peers_show, inode->i_private); } static const struct file_operations fwtty_stats_fops = { .owner = THIS_MODULE, .open = fwtty_stats_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations fwtty_peers_fops = { .owner = THIS_MODULE, .open = fwtty_peers_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations fwtty_proc_fops = { .owner = THIS_MODULE, .open = fwtty_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct tty_port_operations fwtty_port_ops = { .dtr_rts = fwtty_port_dtr_rts, .carrier_raised = fwtty_port_carrier_raised, .shutdown = fwtty_port_shutdown, .activate = fwtty_port_activate, }; static const struct tty_operations fwtty_ops = { .open = fwtty_open, .close = fwtty_close, .hangup = fwtty_hangup, .cleanup = fwtty_cleanup, .install = fwtty_install, .write = fwtty_write, .write_room = fwtty_write_room, .chars_in_buffer = fwtty_chars_in_buffer, .send_xchar = fwtty_send_xchar, .throttle = fwtty_throttle, .unthrottle = fwtty_unthrottle, .ioctl = fwtty_ioctl, .set_termios = fwtty_set_termios, .break_ctl = fwtty_break_ctl, .tiocmget = fwtty_tiocmget, .tiocmset = fwtty_tiocmset, .get_icount = fwtty_get_icount, .proc_fops = &fwtty_proc_fops, }; static const struct tty_operations fwloop_ops = { .open = fwtty_open, .close = fwtty_close, .hangup = fwtty_hangup, .cleanup = fwtty_cleanup, .install = fwloop_install, .write = fwtty_write, .write_room = fwtty_write_room, .chars_in_buffer = fwtty_chars_in_buffer, .send_xchar = fwtty_send_xchar, .throttle = fwtty_throttle, .unthrottle = fwtty_unthrottle, .ioctl = fwtty_ioctl, .set_termios = fwtty_set_termios, .break_ctl = fwtty_break_ctl, .tiocmget = fwtty_tiocmget, .tiocmset = fwtty_tiocmset, .get_icount = fwtty_get_icount, }; static inline int mgmt_pkt_expected_len(__be16 code) { static const struct 
fwserial_mgmt_pkt pkt; switch (be16_to_cpu(code)) { case FWSC_VIRT_CABLE_PLUG: return sizeof(pkt.hdr) + sizeof(pkt.plug_req); case FWSC_VIRT_CABLE_PLUG_RSP: /* | FWSC_RSP_OK */ return sizeof(pkt.hdr) + sizeof(pkt.plug_rsp); case FWSC_VIRT_CABLE_UNPLUG: case FWSC_VIRT_CABLE_UNPLUG_RSP: case FWSC_VIRT_CABLE_PLUG_RSP | FWSC_RSP_NACK: case FWSC_VIRT_CABLE_UNPLUG_RSP | FWSC_RSP_NACK: return sizeof(pkt.hdr); default: return -1; } } static inline void fill_plug_params(struct virt_plug_params *params, struct fwtty_port *port) { u64 status_addr = port->rx_handler.offset; u64 fifo_addr = port->rx_handler.offset + 4; size_t fifo_len = port->rx_handler.length - 4; params->status_hi = cpu_to_be32(status_addr >> 32); params->status_lo = cpu_to_be32(status_addr); params->fifo_hi = cpu_to_be32(fifo_addr >> 32); params->fifo_lo = cpu_to_be32(fifo_addr); params->fifo_len = cpu_to_be32(fifo_len); } static inline void fill_plug_req(struct fwserial_mgmt_pkt *pkt, struct fwtty_port *port) { pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_PLUG); pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code)); fill_plug_params(&pkt->plug_req, port); } static inline void fill_plug_rsp_ok(struct fwserial_mgmt_pkt *pkt, struct fwtty_port *port) { pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_PLUG_RSP); pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code)); fill_plug_params(&pkt->plug_rsp, port); } static inline void fill_plug_rsp_nack(struct fwserial_mgmt_pkt *pkt) { pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_PLUG_RSP | FWSC_RSP_NACK); pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code)); } static inline void fill_unplug_req(struct fwserial_mgmt_pkt *pkt) { pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_UNPLUG); pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code)); } static inline void fill_unplug_rsp_nack(struct fwserial_mgmt_pkt *pkt) { pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_UNPLUG_RSP | FWSC_RSP_NACK); pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code)); } static inline void fill_unplug_rsp_ok(struct fwserial_mgmt_pkt *pkt) { pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_UNPLUG_RSP); pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code)); } static void fwserial_virt_plug_complete(struct fwtty_peer *peer, struct virt_plug_params *params) { struct fwtty_port *port = peer->port; peer->status_addr = be32_to_u64(params->status_hi, params->status_lo); peer->fifo_addr = be32_to_u64(params->fifo_hi, params->fifo_lo); peer->fifo_len = be32_to_cpu(params->fifo_len); peer_set_state(peer, FWPS_ATTACHED); /* reconfigure tx_fifo optimally for this peer */ spin_lock_bh(&port->lock); port->max_payload = min(peer->max_payload, peer->fifo_len); dma_fifo_change_tx_limit(&port->tx_fifo, port->max_payload); spin_unlock_bh(&peer->port->lock); if (port->port.console && port->fwcon_ops->notify != NULL) (*port->fwcon_ops->notify)(FWCON_NOTIFY_ATTACH, port->con_data); fwtty_info(&peer->unit, "peer (guid:%016llx) connected on %s\n", (unsigned long long)peer->guid, dev_name(port->device)); } static inline int fwserial_send_mgmt_sync(struct fwtty_peer *peer, struct fwserial_mgmt_pkt *pkt) { int generation; int rcode, tries = 5; do { generation = peer->generation; smp_rmb(); rcode = fw_run_transaction(peer->serial->card, TCODE_WRITE_BLOCK_REQUEST, peer->node_id, generation, peer->speed, peer->mgmt_addr, pkt, be16_to_cpu(pkt->hdr.len)); if (rcode == RCODE_BUSY || rcode == RCODE_SEND_ERROR || rcode == RCODE_GENERATION) { fwtty_dbg(&peer->unit, "mgmt write error: %d\n", rcode); continue; } 
else { break; } } while (--tries > 0); return rcode; } /** * fwserial_claim_port - attempt to claim port @ index for peer * * Returns ptr to claimed port or error code (as ERR_PTR()) * Can sleep - must be called from process context */ static struct fwtty_port *fwserial_claim_port(struct fwtty_peer *peer, int index) { struct fwtty_port *port; if (index < 0 || index >= num_ports) return ERR_PTR(-EINVAL); /* must guarantee that previous port releases have completed */ synchronize_rcu(); port = peer->serial->ports[index]; spin_lock_bh(&port->lock); if (!rcu_access_pointer(port->peer)) rcu_assign_pointer(port->peer, peer); else port = ERR_PTR(-EBUSY); spin_unlock_bh(&port->lock); return port; } /** * fwserial_find_port - find avail port and claim for peer * * Returns ptr to claimed port or NULL if none avail * Can sleep - must be called from process context */ static struct fwtty_port *fwserial_find_port(struct fwtty_peer *peer) { struct fwtty_port **ports = peer->serial->ports; int i; /* must guarantee that previous port releases have completed */ synchronize_rcu(); /* TODO: implement optional GUID-to-specific port # matching */ /* find an unattached port (but not the loopback port, if present) */ for (i = 0; i < num_ttys; ++i) { spin_lock_bh(&ports[i]->lock); if (!ports[i]->peer) { /* claim port */ rcu_assign_pointer(ports[i]->peer, peer); spin_unlock_bh(&ports[i]->lock); return ports[i]; } spin_unlock_bh(&ports[i]->lock); } return NULL; } static void fwserial_release_port(struct fwtty_port *port, bool reset) { /* drop carrier (and all other line status) */ if (reset) fwtty_update_port_status(port, 0); spin_lock_bh(&port->lock); /* reset dma fifo max transmission size back to S100 */ port->max_payload = link_speed_to_max_payload(SCODE_100); dma_fifo_change_tx_limit(&port->tx_fifo, port->max_payload); RCU_INIT_POINTER(port->peer, NULL); spin_unlock_bh(&port->lock); if (port->port.console && port->fwcon_ops->notify != NULL) (*port->fwcon_ops->notify)(FWCON_NOTIFY_DETACH, port->con_data); } static void fwserial_plug_timeout(unsigned long data) { struct fwtty_peer *peer = (struct fwtty_peer *)data; struct fwtty_port *port; spin_lock_bh(&peer->lock); if (peer->state != FWPS_PLUG_PENDING) { spin_unlock_bh(&peer->lock); return; } port = peer_revert_state(peer); spin_unlock_bh(&peer->lock); if (port) fwserial_release_port(port, false); } /** * fwserial_connect_peer - initiate virtual cable with peer * * Returns 0 if VIRT_CABLE_PLUG request was successfully sent, * otherwise error code. Must be called from process context. 
*/ static int fwserial_connect_peer(struct fwtty_peer *peer) { struct fwtty_port *port; struct fwserial_mgmt_pkt *pkt; int err, rcode; pkt = kmalloc(sizeof(*pkt), GFP_KERNEL); if (!pkt) return -ENOMEM; port = fwserial_find_port(peer); if (!port) { fwtty_err(&peer->unit, "avail ports in use\n"); err = -EBUSY; goto free_pkt; } spin_lock_bh(&peer->lock); /* only initiate VIRT_CABLE_PLUG if peer is currently not attached */ if (peer->state != FWPS_NOT_ATTACHED) { err = -EBUSY; goto release_port; } peer->port = port; peer_set_state(peer, FWPS_PLUG_PENDING); fill_plug_req(pkt, peer->port); setup_timer(&peer->timer, fwserial_plug_timeout, (unsigned long)peer); mod_timer(&peer->timer, jiffies + VIRT_CABLE_PLUG_TIMEOUT); spin_unlock_bh(&peer->lock); rcode = fwserial_send_mgmt_sync(peer, pkt); spin_lock_bh(&peer->lock); if (peer->state == FWPS_PLUG_PENDING && rcode != RCODE_COMPLETE) { if (rcode == RCODE_CONFLICT_ERROR) err = -EAGAIN; else err = -EIO; goto cancel_timer; } spin_unlock_bh(&peer->lock); kfree(pkt); return 0; cancel_timer: del_timer(&peer->timer); peer_revert_state(peer); release_port: spin_unlock_bh(&peer->lock); fwserial_release_port(port, false); free_pkt: kfree(pkt); return err; } /** * fwserial_close_port - * HUP the tty (if the tty exists) and unregister the tty device. * Only used by the unit driver upon unit removal to disconnect and * cleanup all attached ports * * The port reference is put by fwtty_cleanup (if a reference was * ever taken). */ static void fwserial_close_port(struct tty_driver *driver, struct fwtty_port *port) { struct tty_struct *tty; mutex_lock(&port->port.mutex); tty = tty_port_tty_get(&port->port); if (tty) { tty_vhangup(tty); tty_kref_put(tty); } mutex_unlock(&port->port.mutex); if (driver == fwloop_driver) tty_unregister_device(driver, loop_idx(port)); else tty_unregister_device(driver, port->index); } /** * fwserial_lookup - finds first fw_serial associated with card * @card: fw_card to match * * NB: caller must be holding fwserial_list_mutex */ static struct fw_serial *fwserial_lookup(struct fw_card *card) { struct fw_serial *serial; list_for_each_entry(serial, &fwserial_list, list) { if (card == serial->card) return serial; } return NULL; } /** * __fwserial_lookup_rcu - finds first fw_serial associated with card * @card: fw_card to match * * NB: caller must be inside rcu_read_lock() section */ static struct fw_serial *__fwserial_lookup_rcu(struct fw_card *card) { struct fw_serial *serial; list_for_each_entry_rcu(serial, &fwserial_list, list) { if (card == serial->card) return serial; } return NULL; } /** * __fwserial_peer_by_node_id - finds a peer matching the given generation + id * * If a matching peer could not be found for the specified generation/node id, * this could be because: * a) the generation has changed and one of the nodes hasn't updated yet * b) the remote node has created its remote unit device before this * local node has created its corresponding remote unit device * In either case, the remote node should retry * * Note: caller must be in rcu_read_lock() section */ static struct fwtty_peer *__fwserial_peer_by_node_id(struct fw_card *card, int generation, int id) { struct fw_serial *serial; struct fwtty_peer *peer; serial = __fwserial_lookup_rcu(card); if (!serial) { /* * Something is very wrong - there should be a matching * fw_serial structure for every fw_card. Maybe the remote node * has created its remote unit device before this driver has * been probed for any unit devices... 
*/ fwtty_err(card, "unknown card (guid %016llx)\n", (unsigned long long) card->guid); return NULL; } list_for_each_entry_rcu(peer, &serial->peer_list, list) { int g = peer->generation; smp_rmb(); if (generation == g && id == peer->node_id) return peer; } return NULL; } #ifdef DEBUG static void __dump_peer_list(struct fw_card *card) { struct fw_serial *serial; struct fwtty_peer *peer; serial = __fwserial_lookup_rcu(card); if (!serial) return; list_for_each_entry_rcu(peer, &serial->peer_list, list) { int g = peer->generation; smp_rmb(); fwtty_dbg(card, "peer(%d:%x) guid: %016llx\n", g, peer->node_id, (unsigned long long) peer->guid); } } #else #define __dump_peer_list(s) #endif static void fwserial_auto_connect(struct work_struct *work) { struct fwtty_peer *peer = to_peer(to_delayed_work(work), connect); int err; err = fwserial_connect_peer(peer); if (err == -EAGAIN && ++peer->connect_retries < MAX_CONNECT_RETRIES) schedule_delayed_work(&peer->connect, CONNECT_RETRY_DELAY); } static void fwserial_peer_workfn(struct work_struct *work) { struct fwtty_peer *peer = to_peer(work, work); peer->workfn(work); } /** * fwserial_add_peer - add a newly probed 'serial' unit device as a 'peer' * @serial: aggregate representing the specific fw_card to add the peer to * @unit: 'peer' to create and add to peer_list of serial * * Adds a 'peer' (ie, a local or remote 'serial' unit device) to the list of * peers for a specific fw_card. Optionally, auto-attach this peer to an * available tty port. This function is called either directly or indirectly * as a result of a 'serial' unit device being created & probed. * * Note: this function is serialized with fwserial_remove_peer() by the * fwserial_list_mutex held in fwserial_probe(). * * A 1:1 correspondence between an fw_unit and an fwtty_peer is maintained * via the dev_set_drvdata() for the device of the fw_unit. 
*/ static int fwserial_add_peer(struct fw_serial *serial, struct fw_unit *unit) { struct device *dev = &unit->device; struct fw_device *parent = fw_parent_device(unit); struct fwtty_peer *peer; struct fw_csr_iterator ci; int key, val; int generation; peer = kzalloc(sizeof(*peer), GFP_KERNEL); if (!peer) return -ENOMEM; peer_set_state(peer, FWPS_NOT_ATTACHED); dev_set_drvdata(dev, peer); peer->unit = unit; peer->guid = (u64)parent->config_rom[3] << 32 | parent->config_rom[4]; peer->speed = parent->max_speed; peer->max_payload = min(device_max_receive(parent), link_speed_to_max_payload(peer->speed)); generation = parent->generation; smp_rmb(); peer->node_id = parent->node_id; smp_wmb(); peer->generation = generation; /* retrieve the mgmt bus addr from the unit directory */ fw_csr_iterator_init(&ci, unit->directory); while (fw_csr_iterator_next(&ci, &key, &val)) { if (key == (CSR_OFFSET | CSR_DEPENDENT_INFO)) { peer->mgmt_addr = CSR_REGISTER_BASE + 4 * val; break; } } if (peer->mgmt_addr == 0ULL) { /* * No mgmt address effectively disables VIRT_CABLE_PLUG - * this peer will not be able to attach to a remote */ peer_set_state(peer, FWPS_NO_MGMT_ADDR); } spin_lock_init(&peer->lock); peer->port = NULL; init_timer(&peer->timer); INIT_WORK(&peer->work, fwserial_peer_workfn); INIT_DELAYED_WORK(&peer->connect, fwserial_auto_connect); /* associate peer with specific fw_card */ peer->serial = serial; list_add_rcu(&peer->list, &serial->peer_list); fwtty_info(&peer->unit, "peer added (guid:%016llx)\n", (unsigned long long)peer->guid); /* identify the local unit & virt cable to loopback port */ if (parent->is_local) { serial->self = peer; if (create_loop_dev) { struct fwtty_port *port; port = fwserial_claim_port(peer, num_ttys); if (!IS_ERR(port)) { struct virt_plug_params params; spin_lock_bh(&peer->lock); peer->port = port; fill_plug_params(&params, port); fwserial_virt_plug_complete(peer, &params); spin_unlock_bh(&peer->lock); fwtty_write_port_status(port); } } } else if (auto_connect) { /* auto-attach to remote units only (if policy allows) */ schedule_delayed_work(&peer->connect, 1); } return 0; } /** * fwserial_remove_peer - remove a 'serial' unit device as a 'peer' * * Remove a 'peer' from its list of peers. This function is only * called by fwserial_remove() on bus removal of the unit device. * * Note: this function is serialized with fwserial_add_peer() by the * fwserial_list_mutex held in fwserial_remove(). */ static void fwserial_remove_peer(struct fwtty_peer *peer) { struct fwtty_port *port; spin_lock_bh(&peer->lock); peer_set_state(peer, FWPS_GONE); spin_unlock_bh(&peer->lock); cancel_delayed_work_sync(&peer->connect); cancel_work_sync(&peer->work); spin_lock_bh(&peer->lock); /* if this unit is the local unit, clear link */ if (peer == peer->serial->self) peer->serial->self = NULL; /* cancel the request timeout timer (if running) */ del_timer(&peer->timer); port = peer->port; peer->port = NULL; list_del_rcu(&peer->list); fwtty_info(&peer->unit, "peer removed (guid:%016llx)\n", (unsigned long long)peer->guid); spin_unlock_bh(&peer->lock); if (port) fwserial_release_port(port, true); synchronize_rcu(); kfree(peer); } /** * fwserial_create - init everything to create TTYs for a specific fw_card * @unit: fw_unit for first 'serial' unit device probed for this fw_card * * This function inits the aggregate structure (an fw_serial instance) * used to manage the TTY ports registered by a specific fw_card. Also, the * unit device is added as the first 'peer'. 
* * This unit device may represent a local unit device (as specified by the * config ROM unit directory) or it may represent a remote unit device * (as specified by the reading of the remote node's config ROM). * * Returns 0 to indicate "ownership" of the unit device, or a negative errno * value to indicate which error. */ static int fwserial_create(struct fw_unit *unit) { struct fw_device *parent = fw_parent_device(unit); struct fw_card *card = parent->card; struct fw_serial *serial; struct fwtty_port *port; struct device *tty_dev; int i, j; int err; serial = kzalloc(sizeof(*serial), GFP_KERNEL); if (!serial) return -ENOMEM; kref_init(&serial->kref); serial->card = card; INIT_LIST_HEAD(&serial->peer_list); for (i = 0; i < num_ports; ++i) { port = kzalloc(sizeof(*port), GFP_KERNEL); if (!port) { err = -ENOMEM; goto free_ports; } tty_port_init(&port->port); port->index = FWTTY_INVALID_INDEX; port->port.ops = &fwtty_port_ops; port->serial = serial; tty_buffer_set_limit(&port->port, 128 * 1024); spin_lock_init(&port->lock); INIT_DELAYED_WORK(&port->drain, fwtty_drain_tx); INIT_DELAYED_WORK(&port->emit_breaks, fwtty_emit_breaks); INIT_WORK(&port->hangup, fwtty_do_hangup); init_waitqueue_head(&port->wait_tx); port->max_payload = link_speed_to_max_payload(SCODE_100); dma_fifo_init(&port->tx_fifo); RCU_INIT_POINTER(port->peer, NULL); serial->ports[i] = port; /* get unique bus addr region for port's status & recv fifo */ port->rx_handler.length = FWTTY_PORT_RXFIFO_LEN + 4; port->rx_handler.address_callback = fwtty_port_handler; port->rx_handler.callback_data = port; /* * XXX: use custom memory region above cpu physical memory addrs * this will ease porting to 64-bit firewire adapters */ err = fw_core_add_address_handler(&port->rx_handler, &fw_high_memory_region); if (err) { kfree(port); goto free_ports; } } /* preserve i for error cleanup */ err = fwtty_ports_add(serial); if (err) { fwtty_err(&unit, "no space in port table\n"); goto free_ports; } for (j = 0; j < num_ttys; ++j) { tty_dev = tty_port_register_device(&serial->ports[j]->port, fwtty_driver, serial->ports[j]->index, card->device); if (IS_ERR(tty_dev)) { err = PTR_ERR(tty_dev); fwtty_err(&unit, "register tty device error (%d)\n", err); goto unregister_ttys; } serial->ports[j]->device = tty_dev; } /* preserve j for error cleanup */ if (create_loop_dev) { struct device *loop_dev; loop_dev = tty_port_register_device(&serial->ports[j]->port, fwloop_driver, loop_idx(serial->ports[j]), card->device); if (IS_ERR(loop_dev)) { err = PTR_ERR(loop_dev); fwtty_err(&unit, "create loop device failed (%d)\n", err); goto unregister_ttys; } serial->ports[j]->device = loop_dev; serial->ports[j]->loopback = true; } if (!IS_ERR_OR_NULL(fwserial_debugfs)) { serial->debugfs = debugfs_create_dir(dev_name(&unit->device), fwserial_debugfs); if (!IS_ERR_OR_NULL(serial->debugfs)) { debugfs_create_file("peers", 0444, serial->debugfs, serial, &fwtty_peers_fops); debugfs_create_file("stats", 0444, serial->debugfs, serial, &fwtty_stats_fops); } } list_add_rcu(&serial->list, &fwserial_list); fwtty_notice(&unit, "TTY over FireWire on device %s (guid %016llx)\n", dev_name(card->device), (unsigned long long) card->guid); err = fwserial_add_peer(serial, unit); if (!err) return 0; fwtty_err(&unit, "unable to add peer unit device (%d)\n", err); /* fall-through to error processing */ debugfs_remove_recursive(serial->debugfs); list_del_rcu(&serial->list); if (create_loop_dev) tty_unregister_device(fwloop_driver, loop_idx(serial->ports[j])); unregister_ttys: for (--j; j >= 0; 
--j) tty_unregister_device(fwtty_driver, serial->ports[j]->index); kref_put(&serial->kref, fwserial_destroy); return err; free_ports: for (--i; i >= 0; --i) { tty_port_destroy(&serial->ports[i]->port); kfree(serial->ports[i]); } kfree(serial); return err; } /** * fwserial_probe: bus probe function for firewire 'serial' unit devices * * A 'serial' unit device is created and probed as a result of: * - declaring a ieee1394 bus id table for 'devices' matching a fabricated * 'serial' unit specifier id * - adding a unit directory to the config ROM(s) for a 'serial' unit * * The firewire core registers unit devices by enumerating unit directories * of a node's config ROM after reading the config ROM when a new node is * added to the bus topology after a bus reset. * * The practical implications of this are: * - this probe is called for both local and remote nodes that have a 'serial' * unit directory in their config ROM (that matches the specifiers in * fwserial_id_table). * - no specific order is enforced for local vs. remote unit devices * * This unit driver copes with the lack of specific order in the same way the * firewire net driver does -- each probe, for either a local or remote unit * device, is treated as a 'peer' (has a struct fwtty_peer instance) and the * first peer created for a given fw_card (tracked by the global fwserial_list) * creates the underlying TTYs (aggregated in a fw_serial instance). * * NB: an early attempt to differentiate local & remote unit devices by creating * peers only for remote units and fw_serial instances (with their * associated TTY devices) only for local units was discarded. Managing * the peer lifetimes on device removal proved too complicated. * * fwserial_probe/fwserial_remove are effectively serialized by the * fwserial_list_mutex. This is necessary because the addition of the first peer * for a given fw_card will trigger the creation of the fw_serial for that * fw_card, which must not simultaneously contend with the removal of the * last peer for a given fw_card triggering the destruction of the same * fw_serial for the same fw_card. */ static int fwserial_probe(struct fw_unit *unit, const struct ieee1394_device_id *id) { struct fw_serial *serial; int err; mutex_lock(&fwserial_list_mutex); serial = fwserial_lookup(fw_parent_device(unit)->card); if (!serial) err = fwserial_create(unit); else err = fwserial_add_peer(serial, unit); mutex_unlock(&fwserial_list_mutex); return err; } /** * fwserial_remove: bus removal function for firewire 'serial' unit devices * * The corresponding 'peer' for this unit device is removed from the list of * peers for the associated fw_serial (which has a 1:1 correspondence with a * specific fw_card). If this is the last peer being removed, then trigger * the destruction of the underlying TTYs. 
*/ static void fwserial_remove(struct fw_unit *unit) { struct fwtty_peer *peer = dev_get_drvdata(&unit->device); struct fw_serial *serial = peer->serial; int i; mutex_lock(&fwserial_list_mutex); fwserial_remove_peer(peer); if (list_empty(&serial->peer_list)) { /* unlink from the fwserial_list here */ list_del_rcu(&serial->list); debugfs_remove_recursive(serial->debugfs); for (i = 0; i < num_ttys; ++i) fwserial_close_port(fwtty_driver, serial->ports[i]); if (create_loop_dev) fwserial_close_port(fwloop_driver, serial->ports[i]); kref_put(&serial->kref, fwserial_destroy); } mutex_unlock(&fwserial_list_mutex); } /** * fwserial_update: bus update function for firewire 'serial' unit devices * * Updates the new node_id and bus generation for this peer. Note that locking * is unnecessary; but careful memory barrier usage is important to enforce the * load and store order of generation & node_id. * * The fw-core orders the write of node_id before generation in the parent * fw_device to ensure that a stale node_id cannot be used with a current * bus generation. So the generation value must be read before the node_id. * * In turn, this orders the write of node_id before generation in the peer to * also ensure a stale node_id cannot be used with a current bus generation. */ static void fwserial_update(struct fw_unit *unit) { struct fw_device *parent = fw_parent_device(unit); struct fwtty_peer *peer = dev_get_drvdata(&unit->device); int generation; generation = parent->generation; smp_rmb(); peer->node_id = parent->node_id; smp_wmb(); peer->generation = generation; } static const struct ieee1394_device_id fwserial_id_table[] = { { .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, .specifier_id = LINUX_VENDOR_ID, .version = FWSERIAL_VERSION, }, { } }; static struct fw_driver fwserial_driver = { .driver = { .owner = THIS_MODULE, .name = KBUILD_MODNAME, .bus = &fw_bus_type, }, .probe = fwserial_probe, .update = fwserial_update, .remove = fwserial_remove, .id_table = fwserial_id_table, }; #define FW_UNIT_SPECIFIER(id) ((CSR_SPECIFIER_ID << 24) | (id)) #define FW_UNIT_VERSION(ver) ((CSR_VERSION << 24) | (ver)) #define FW_UNIT_ADDRESS(ofs) (((CSR_OFFSET | CSR_DEPENDENT_INFO) << 24) \ | (((ofs) - CSR_REGISTER_BASE) >> 2)) /* XXX: config ROM definitions could be improved with semi-automated offset * and length calculation */ #define FW_ROM_LEN(quads) ((quads) << 16) #define FW_ROM_DESCRIPTOR(ofs) (((CSR_LEAF | CSR_DESCRIPTOR) << 24) | (ofs)) struct fwserial_unit_directory_data { u32 len_crc; u32 unit_specifier; u32 unit_sw_version; u32 unit_addr_offset; u32 desc1_ofs; u32 desc1_len_crc; u32 desc1_data[5]; } __packed; static struct fwserial_unit_directory_data fwserial_unit_directory_data = { .len_crc = FW_ROM_LEN(4), .unit_specifier = FW_UNIT_SPECIFIER(LINUX_VENDOR_ID), .unit_sw_version = FW_UNIT_VERSION(FWSERIAL_VERSION), .desc1_ofs = FW_ROM_DESCRIPTOR(1), .desc1_len_crc = FW_ROM_LEN(5), .desc1_data = { 0x00000000, /* type = text */ 0x00000000, /* enc = ASCII, lang EN */ 0x4c696e75, /* 'Linux TTY' */ 0x78205454, 0x59000000, }, }; static struct fw_descriptor fwserial_unit_directory = { .length = sizeof(fwserial_unit_directory_data) / sizeof(u32), .key = (CSR_DIRECTORY | CSR_UNIT) << 24, .data = (u32 *)&fwserial_unit_directory_data, }; /* * The management address is in the unit space region but above other known * address users (to keep wild writes from causing havoc) */ static const struct fw_address_region fwserial_mgmt_addr_region = { .start = CSR_REGISTER_BASE + 0x1e0000ULL, .end =
0x1000000000000ULL, }; static struct fw_address_handler fwserial_mgmt_addr_handler; /** * fwserial_handle_plug_req - handle VIRT_CABLE_PLUG request work * @work: ptr to peer->work * * Attempts to complete the VIRT_CABLE_PLUG handshake sequence for this peer. * * This checks for a collided request-- ie, that a VIRT_CABLE_PLUG request was * already sent to this peer. If so, the collision is resolved by comparing * guid values; the loser sends the plug response. * * Note: if an error prevents a response, don't do anything -- the * remote will timeout its request. */ static void fwserial_handle_plug_req(struct work_struct *work) { struct fwtty_peer *peer = to_peer(work, work); struct virt_plug_params *plug_req = &peer->work_params.plug_req; struct fwtty_port *port; struct fwserial_mgmt_pkt *pkt; int rcode; pkt = kmalloc(sizeof(*pkt), GFP_KERNEL); if (!pkt) return; port = fwserial_find_port(peer); spin_lock_bh(&peer->lock); switch (peer->state) { case FWPS_NOT_ATTACHED: if (!port) { fwtty_err(&peer->unit, "no more ports avail\n"); fill_plug_rsp_nack(pkt); } else { peer->port = port; fill_plug_rsp_ok(pkt, peer->port); peer_set_state(peer, FWPS_PLUG_RESPONDING); /* don't release claimed port */ port = NULL; } break; case FWPS_PLUG_PENDING: if (peer->serial->card->guid > peer->guid) goto cleanup; /* We lost - hijack the already-claimed port and send ok */ del_timer(&peer->timer); fill_plug_rsp_ok(pkt, peer->port); peer_set_state(peer, FWPS_PLUG_RESPONDING); break; default: fill_plug_rsp_nack(pkt); } spin_unlock_bh(&peer->lock); if (port) fwserial_release_port(port, false); rcode = fwserial_send_mgmt_sync(peer, pkt); spin_lock_bh(&peer->lock); if (peer->state == FWPS_PLUG_RESPONDING) { if (rcode == RCODE_COMPLETE) { struct fwtty_port *tmp = peer->port; fwserial_virt_plug_complete(peer, plug_req); spin_unlock_bh(&peer->lock); fwtty_write_port_status(tmp); spin_lock_bh(&peer->lock); } else { fwtty_err(&peer->unit, "PLUG_RSP error (%d)\n", rcode); port = peer_revert_state(peer); } } cleanup: spin_unlock_bh(&peer->lock); if (port) fwserial_release_port(port, false); kfree(pkt); } static void fwserial_handle_unplug_req(struct work_struct *work) { struct fwtty_peer *peer = to_peer(work, work); struct fwtty_port *port = NULL; struct fwserial_mgmt_pkt *pkt; int rcode; pkt = kmalloc(sizeof(*pkt), GFP_KERNEL); if (!pkt) return; spin_lock_bh(&peer->lock); switch (peer->state) { case FWPS_ATTACHED: fill_unplug_rsp_ok(pkt); peer_set_state(peer, FWPS_UNPLUG_RESPONDING); break; case FWPS_UNPLUG_PENDING: if (peer->serial->card->guid > peer->guid) goto cleanup; /* We lost - send unplug rsp */ del_timer(&peer->timer); fill_unplug_rsp_ok(pkt); peer_set_state(peer, FWPS_UNPLUG_RESPONDING); break; default: fill_unplug_rsp_nack(pkt); } spin_unlock_bh(&peer->lock); rcode = fwserial_send_mgmt_sync(peer, pkt); spin_lock_bh(&peer->lock); if (peer->state == FWPS_UNPLUG_RESPONDING) { if (rcode != RCODE_COMPLETE) fwtty_err(&peer->unit, "UNPLUG_RSP error (%d)\n", rcode); port = peer_revert_state(peer); } cleanup: spin_unlock_bh(&peer->lock); if (port) fwserial_release_port(port, true); kfree(pkt); } static int fwserial_parse_mgmt_write(struct fwtty_peer *peer, struct fwserial_mgmt_pkt *pkt, unsigned long long addr, size_t len) { struct fwtty_port *port = NULL; bool reset = false; int rcode; if (addr != fwserial_mgmt_addr_handler.offset || len < sizeof(pkt->hdr)) return RCODE_ADDRESS_ERROR; if (len != be16_to_cpu(pkt->hdr.len) || len != mgmt_pkt_expected_len(pkt->hdr.code)) return RCODE_DATA_ERROR; spin_lock_bh(&peer->lock); if 
(peer->state == FWPS_GONE) { /* * This should never happen - it would mean that the * remote unit that just wrote this transaction was * already removed from the bus -- and the removal was * processed before we rec'd this transaction */ fwtty_err(&peer->unit, "peer already removed\n"); spin_unlock_bh(&peer->lock); return RCODE_ADDRESS_ERROR; } rcode = RCODE_COMPLETE; fwtty_dbg(&peer->unit, "mgmt: hdr.code: %04hx\n", pkt->hdr.code); switch (be16_to_cpu(pkt->hdr.code) & FWSC_CODE_MASK) { case FWSC_VIRT_CABLE_PLUG: if (work_pending(&peer->work)) { fwtty_err(&peer->unit, "plug req: busy\n"); rcode = RCODE_CONFLICT_ERROR; } else { peer->work_params.plug_req = pkt->plug_req; peer->workfn = fwserial_handle_plug_req; queue_work(system_unbound_wq, &peer->work); } break; case FWSC_VIRT_CABLE_PLUG_RSP: if (peer->state != FWPS_PLUG_PENDING) { rcode = RCODE_CONFLICT_ERROR; } else if (be16_to_cpu(pkt->hdr.code) & FWSC_RSP_NACK) { fwtty_notice(&peer->unit, "NACK plug rsp\n"); port = peer_revert_state(peer); } else { struct fwtty_port *tmp = peer->port; fwserial_virt_plug_complete(peer, &pkt->plug_rsp); spin_unlock_bh(&peer->lock); fwtty_write_port_status(tmp); spin_lock_bh(&peer->lock); } break; case FWSC_VIRT_CABLE_UNPLUG: if (work_pending(&peer->work)) { fwtty_err(&peer->unit, "unplug req: busy\n"); rcode = RCODE_CONFLICT_ERROR; } else { peer->workfn = fwserial_handle_unplug_req; queue_work(system_unbound_wq, &peer->work); } break; case FWSC_VIRT_CABLE_UNPLUG_RSP: if (peer->state != FWPS_UNPLUG_PENDING) { rcode = RCODE_CONFLICT_ERROR; } else { if (be16_to_cpu(pkt->hdr.code) & FWSC_RSP_NACK) fwtty_notice(&peer->unit, "NACK unplug?\n"); port = peer_revert_state(peer); reset = true; } break; default: fwtty_err(&peer->unit, "unknown mgmt code %d\n", be16_to_cpu(pkt->hdr.code)); rcode = RCODE_DATA_ERROR; } spin_unlock_bh(&peer->lock); if (port) fwserial_release_port(port, reset); return rcode; } /** * fwserial_mgmt_handler: bus address handler for mgmt requests * @parameters: fw_address_callback_t as specified by firewire core interface * * This handler is responsible for handling virtual cable requests from remotes * for all cards. 
*/ static void fwserial_mgmt_handler(struct fw_card *card, struct fw_request *request, int tcode, int destination, int source, int generation, unsigned long long addr, void *data, size_t len, void *callback_data) { struct fwserial_mgmt_pkt *pkt = data; struct fwtty_peer *peer; int rcode; rcu_read_lock(); peer = __fwserial_peer_by_node_id(card, generation, source); if (!peer) { fwtty_dbg(card, "peer(%d:%x) not found\n", generation, source); __dump_peer_list(card); rcode = RCODE_CONFLICT_ERROR; } else { switch (tcode) { case TCODE_WRITE_BLOCK_REQUEST: rcode = fwserial_parse_mgmt_write(peer, pkt, addr, len); break; default: rcode = RCODE_TYPE_ERROR; } } rcu_read_unlock(); fw_send_response(card, request, rcode); } static int __init fwserial_init(void) { int err, num_loops = !!(create_loop_dev); /* XXX: placeholder for a "firewire" debugfs node */ fwserial_debugfs = debugfs_create_dir(KBUILD_MODNAME, NULL); /* num_ttys/num_ports must not be set above the static alloc avail */ if (num_ttys + num_loops > MAX_CARD_PORTS) num_ttys = MAX_CARD_PORTS - num_loops; num_ports = num_ttys + num_loops; fwtty_driver = tty_alloc_driver(MAX_TOTAL_PORTS, TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV); if (IS_ERR(fwtty_driver)) { err = PTR_ERR(fwtty_driver); goto remove_debugfs; } fwtty_driver->driver_name = KBUILD_MODNAME; fwtty_driver->name = tty_dev_name; fwtty_driver->major = 0; fwtty_driver->minor_start = 0; fwtty_driver->type = TTY_DRIVER_TYPE_SERIAL; fwtty_driver->subtype = SERIAL_TYPE_NORMAL; fwtty_driver->init_termios = tty_std_termios; fwtty_driver->init_termios.c_cflag |= CLOCAL; tty_set_operations(fwtty_driver, &fwtty_ops); err = tty_register_driver(fwtty_driver); if (err) { pr_err("register tty driver failed (%d)\n", err); goto put_tty; } if (create_loop_dev) { fwloop_driver = tty_alloc_driver(MAX_TOTAL_PORTS / num_ports, TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV); if (IS_ERR(fwloop_driver)) { err = PTR_ERR(fwloop_driver); goto unregister_driver; } fwloop_driver->driver_name = KBUILD_MODNAME "_loop"; fwloop_driver->name = loop_dev_name; fwloop_driver->major = 0; fwloop_driver->minor_start = 0; fwloop_driver->type = TTY_DRIVER_TYPE_SERIAL; fwloop_driver->subtype = SERIAL_TYPE_NORMAL; fwloop_driver->init_termios = tty_std_termios; fwloop_driver->init_termios.c_cflag |= CLOCAL; tty_set_operations(fwloop_driver, &fwloop_ops); err = tty_register_driver(fwloop_driver); if (err) { pr_err("register loop driver failed (%d)\n", err); goto put_loop; } } fwtty_txn_cache = kmem_cache_create("fwtty_txn_cache", sizeof(struct fwtty_transaction), 0, 0, fwtty_txn_constructor); if (!fwtty_txn_cache) { err = -ENOMEM; goto unregister_loop; } /* * Ideally, this address handler would be registered per local node * (rather than the same handler for all local nodes). However, * since the firewire core requires the config rom descriptor *before* * the local unit device(s) are created, a single management handler * must suffice for all local serial units. 
*/ fwserial_mgmt_addr_handler.length = sizeof(struct fwserial_mgmt_pkt); fwserial_mgmt_addr_handler.address_callback = fwserial_mgmt_handler; err = fw_core_add_address_handler(&fwserial_mgmt_addr_handler, &fwserial_mgmt_addr_region); if (err) { pr_err("add management handler failed (%d)\n", err); goto destroy_cache; } fwserial_unit_directory_data.unit_addr_offset = FW_UNIT_ADDRESS(fwserial_mgmt_addr_handler.offset); err = fw_core_add_descriptor(&fwserial_unit_directory); if (err) { pr_err("add unit descriptor failed (%d)\n", err); goto remove_handler; } err = driver_register(&fwserial_driver.driver); if (err) { pr_err("register fwserial driver failed (%d)\n", err); goto remove_descriptor; } return 0; remove_descriptor: fw_core_remove_descriptor(&fwserial_unit_directory); remove_handler: fw_core_remove_address_handler(&fwserial_mgmt_addr_handler); destroy_cache: kmem_cache_destroy(fwtty_txn_cache); unregister_loop: if (create_loop_dev) tty_unregister_driver(fwloop_driver); put_loop: if (create_loop_dev) put_tty_driver(fwloop_driver); unregister_driver: tty_unregister_driver(fwtty_driver); put_tty: put_tty_driver(fwtty_driver); remove_debugfs: debugfs_remove_recursive(fwserial_debugfs); return err; } static void __exit fwserial_exit(void) { driver_unregister(&fwserial_driver.driver); fw_core_remove_descriptor(&fwserial_unit_directory); fw_core_remove_address_handler(&fwserial_mgmt_addr_handler); kmem_cache_destroy(fwtty_txn_cache); if (create_loop_dev) { tty_unregister_driver(fwloop_driver); put_tty_driver(fwloop_driver); } tty_unregister_driver(fwtty_driver); put_tty_driver(fwtty_driver); debugfs_remove_recursive(fwserial_debugfs); } module_init(fwserial_init); module_exit(fwserial_exit); MODULE_AUTHOR("Peter Hurley (peter@hurleysoftware.com)"); MODULE_DESCRIPTION("FireWire Serial TTY Driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(ieee1394, fwserial_id_table); MODULE_PARM_DESC(ttys, "Number of ttys to create for each local firewire node"); MODULE_PARM_DESC(auto, "Auto-connect a tty to each firewire node discovered"); MODULE_PARM_DESC(loop, "Create a loopback device, fwloop<n>, with ttys");
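For orientation, here is a minimal userspace counterpart to the tty interface this module registers. It is a sketch only: the /dev/fwtty0 node name is an assumption (fwtty_driver->name comes from the tty_dev_name setting, so the actual node depends on module configuration and udev).

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	/* assumed device node; real name follows tty_dev_name plus an index */
	int fd = open("/dev/fwtty0", O_RDWR | O_NOCTTY);
	struct termios tio;
	int mbits = TIOCM_DTR | TIOCM_RTS;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (tcgetattr(fd, &tio) == 0) {
		cfmakeraw(&tio);		/* raw 8-bit path end to end */
		tio.c_cflag |= CLOCAL;		/* matches the driver's init_termios default */
		tcsetattr(fd, TCSANOW, &tio);
	}
	ioctl(fd, TIOCMBIS, &mbits);	/* modem-bit set reaches fwtty_tiocmset() */
	write(fd, "hello over IEEE-1394\n", 21);
	close(fd);
	return 0;
}

Writes on the descriptor enter fwtty_write() and drain through the port's tx_fifo toward the fifo address the attached peer advertised during the virtual-cable handshake.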
gpl-2.0
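One detail worth pulling out of fwserial_handle_plug_req() above: when both nodes issue VIRT_CABLE_PLUG simultaneously, the collision is broken by comparing GUIDs, and the loser sends the plug response. A standalone restatement of that rule, with hypothetical GUID values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the tie-break in fwserial_handle_plug_req(): with a plug request
 * outstanding in both directions, the node with the larger GUID wins and
 * ignores the incoming request; the loser answers with PLUG_RSP ok. */
static bool local_wins_plug_collision(uint64_t local_guid, uint64_t peer_guid)
{
	return local_guid > peer_guid;
}

int main(void)
{
	uint64_t local  = 0x0814438400000defULL;	/* hypothetical GUIDs */
	uint64_t remote = 0x0814438400000abcULL;

	printf("local node %s\n", local_wins_plug_collision(local, remote)
	       ? "wins (keeps waiting for its own PLUG_RSP)"
	       : "loses (sends PLUG_RSP ok)");
	return 0;
}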
surdupetru/android_kernel_huawei_msm8916-caf
drivers/target/target_core_alua.c
794
55716
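The following record implements SPC-3 ALUA for the target core. As a reading aid for the REPORT TARGET PORT GROUPS emulation that opens the file, here is a hedged initiator-side sketch decoding one target port group descriptor header at the byte offsets the kernel code writes; the struct name and sample bytes are invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* Decodes the 8-byte descriptor header built by
 * target_emulate_report_target_port_groups(): byte 0 = PREF bit plus
 * asymmetric access state, byte 1 = supported-states bitmap
 * (T_SUP 0x80 .. AO_SUP 0x01), bytes 2-3 = TARGET PORT GROUP id,
 * byte 5 = status code, byte 7 = target port count. */
struct tpg_desc {
	int pref;		/* preferred target port bit */
	unsigned state;		/* primary access state (low nibble) */
	unsigned supported;	/* supported access states bitmap */
	unsigned group_id;
	unsigned status;
	unsigned port_count;
};

static struct tpg_desc parse_tpg_desc(const uint8_t d[8])
{
	struct tpg_desc t = {
		.pref       = !!(d[0] & 0x80),
		.state      = d[0] & 0x0f,
		.supported  = d[1],
		.group_id   = ((unsigned)d[2] << 8) | d[3],
		.status     = d[5],
		.port_count = d[7],
	};
	return t;
}

int main(void)
{
	/* hypothetical descriptor: preferred, state 0 (active/optimized),
	 * all states supported (0xcf), group id 1, status None, one port */
	const uint8_t d[8] = { 0x80, 0xcf, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01 };
	struct tpg_desc t = parse_tpg_desc(d);

	printf("pref=%d state=0x%x supported=0x%02x group=%u status=%u ports=%u\n",
	       t.pref, t.state, t.supported, t.group_id, t.status, t.port_count);
	return 0;
}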
/******************************************************************************* * Filename: target_core_alua.c * * This file contains SPC-3 compliant asymmetric logical unit assigntment (ALUA) * * (c) Copyright 2009-2012 RisingTide Systems LLC. * * Nicholas A. Bellinger <nab@kernel.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * ******************************************************************************/ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/configfs.h> #include <linux/export.h> #include <linux/file.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <asm/unaligned.h> #include <target/target_core_base.h> #include <target/target_core_backend.h> #include <target/target_core_fabric.h> #include <target/target_core_configfs.h> #include "target_core_internal.h" #include "target_core_alua.h" #include "target_core_ua.h" static sense_reason_t core_alua_check_transition(int state, int *primary); static int core_alua_set_tg_pt_secondary_state( struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, struct se_port *port, int explict, int offline); static u16 alua_lu_gps_counter; static u32 alua_lu_gps_count; static DEFINE_SPINLOCK(lu_gps_lock); static LIST_HEAD(lu_gps_list); struct t10_alua_lu_gp *default_lu_gp; /* * REPORT_TARGET_PORT_GROUPS * * See spc4r17 section 6.27 */ sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; struct se_port *port; struct t10_alua_tg_pt_gp *tg_pt_gp; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; unsigned char *buf; u32 rd_len = 0, off; int ext_hdr = (cmd->t_task_cdb[1] & 0x20); /* * Skip over RESERVED area to first Target port group descriptor * depending on the PARAMETER DATA FORMAT type.. */ if (ext_hdr != 0) off = 8; else off = 4; if (cmd->data_length < off) { pr_warn("REPORT TARGET PORT GROUPS allocation length %u too" " small for %s header\n", cmd->data_length, (ext_hdr) ? "extended" : "normal"); return TCM_INVALID_CDB_FIELD; } buf = transport_kmap_data_sg(cmd); if (!buf) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; spin_lock(&dev->t10_alua.tg_pt_gps_lock); list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list, tg_pt_gp_list) { /* * Check if the Target port group and Target port descriptor list * based on tg_pt_gp_members count will fit into the response payload. * Otherwise, bump rd_len to let the initiator know we have exceeded * the allocation length and the response is truncated. */ if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) > cmd->data_length) { rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4); continue; } /* * PREF: Preferred target port bit, determine if this * bit should be set for port group. 
*/ if (tg_pt_gp->tg_pt_gp_pref) buf[off] = 0x80; /* * Set the ASYMMETRIC ACCESS State */ buf[off++] |= (atomic_read( &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff); /* * Set supported ASYMMETRIC ACCESS State bits */ buf[off] = 0x80; /* T_SUP */ buf[off] |= 0x40; /* O_SUP */ buf[off] |= 0x8; /* U_SUP */ buf[off] |= 0x4; /* S_SUP */ buf[off] |= 0x2; /* AN_SUP */ buf[off++] |= 0x1; /* AO_SUP */ /* * TARGET PORT GROUP */ buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff); buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff); off++; /* Skip over Reserved */ /* * STATUS CODE */ buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff); /* * Vendor Specific field */ buf[off++] = 0x00; /* * TARGET PORT COUNT */ buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff); rd_len += 8; spin_lock(&tg_pt_gp->tg_pt_gp_lock); list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) { port = tg_pt_gp_mem->tg_pt; /* * Start Target Port descriptor format * * See spc4r17 section 6.2.7 Table 247 */ off += 2; /* Skip over Obsolete */ /* * Set RELATIVE TARGET PORT IDENTIFIER */ buf[off++] = ((port->sep_rtpi >> 8) & 0xff); buf[off++] = (port->sep_rtpi & 0xff); rd_len += 4; } spin_unlock(&tg_pt_gp->tg_pt_gp_lock); } spin_unlock(&dev->t10_alua.tg_pt_gps_lock); /* * Set the RETURN DATA LENGTH set in the header of the DataIN Payload */ put_unaligned_be32(rd_len, &buf[0]); /* * Fill in the Extended header parameter data format if requested */ if (ext_hdr != 0) { buf[4] = 0x10; /* * Set the implict transition time (in seconds) for the application * client to use as a base for it's transition timeout value. * * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN * this CDB was received upon to determine this value individually * for ALUA target port group. */ port = cmd->se_lun->lun_sep; tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; if (tg_pt_gp_mem) { spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; if (tg_pt_gp) buf[5] = tg_pt_gp->tg_pt_gp_implict_trans_secs; spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); } } transport_kunmap_data_sg(cmd); target_complete_cmd(cmd, GOOD); return 0; } /* * SET_TARGET_PORT_GROUPS for explict ALUA operation. * * See spc4r17 section 6.35 */ sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; struct se_port *port, *l_port = cmd->se_lun->lun_sep; struct se_node_acl *nacl = cmd->se_sess->se_node_acl; struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem; unsigned char *buf; unsigned char *ptr; sense_reason_t rc = TCM_NO_SENSE; u32 len = 4; /* Skip over RESERVED area in header */ int alua_access_state, primary = 0; u16 tg_pt_id, rtpi; if (!l_port) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; if (cmd->data_length < 4) { pr_warn("SET TARGET PORT GROUPS parameter list length %u too" " small\n", cmd->data_length); return TCM_INVALID_PARAMETER_LIST; } buf = transport_kmap_data_sg(cmd); if (!buf) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; /* * Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed * for the local tg_pt_gp. 
*/ l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem; if (!l_tg_pt_gp_mem) { pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n"); rc = TCM_UNSUPPORTED_SCSI_OPCODE; goto out; } spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp; if (!l_tg_pt_gp) { spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n"); rc = TCM_UNSUPPORTED_SCSI_OPCODE; goto out; } spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)) { pr_debug("Unable to process SET_TARGET_PORT_GROUPS" " while TPGS_EXPLICT_ALUA is disabled\n"); rc = TCM_UNSUPPORTED_SCSI_OPCODE; goto out; } ptr = &buf[4]; /* Skip over RESERVED area in header */ while (len < cmd->data_length) { bool found = false; alua_access_state = (ptr[0] & 0x0f); /* * Check the received ALUA access state, and determine if * the state is a primary or secondary target port asymmetric * access state. */ rc = core_alua_check_transition(alua_access_state, &primary); if (rc) { /* * If the SET TARGET PORT GROUPS attempts to establish * an invalid combination of target port asymmetric * access states or attempts to establish an * unsupported target port asymmetric access state, * then the command shall be terminated with CHECK * CONDITION status, with the sense key set to ILLEGAL * REQUEST, and the additional sense code set to INVALID * FIELD IN PARAMETER LIST. */ goto out; } /* * If the ASYMMETRIC ACCESS STATE field (see table 267) * specifies a primary target port asymmetric access state, * then the TARGET PORT GROUP OR TARGET PORT field specifies * a primary target port group for which the primary target * port asymmetric access state shall be changed. If the * ASYMMETRIC ACCESS STATE field specifies a secondary target * port asymmetric access state, then the TARGET PORT GROUP OR * TARGET PORT field specifies the relative target port * identifier (see 3.1.120) of the target port for which the * secondary target port asymmetric access state shall be * changed. */ if (primary) { tg_pt_id = get_unaligned_be16(ptr + 2); /* * Locate the matching target port group ID from * the global tg_pt_gp list */ spin_lock(&dev->t10_alua.tg_pt_gps_lock); list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list, tg_pt_gp_list) { if (!tg_pt_gp->tg_pt_gp_valid_id) continue; if (tg_pt_id != tg_pt_gp->tg_pt_gp_id) continue; atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); smp_mb__after_atomic(); spin_unlock(&dev->t10_alua.tg_pt_gps_lock); if (!core_alua_do_port_transition(tg_pt_gp, dev, l_port, nacl, alua_access_state, 1)) found = true; spin_lock(&dev->t10_alua.tg_pt_gps_lock); atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); smp_mb__after_atomic(); break; } spin_unlock(&dev->t10_alua.tg_pt_gps_lock); } else { /* * Extact the RELATIVE TARGET PORT IDENTIFIER to identify * the Target Port in question for the the incoming * SET_TARGET_PORT_GROUPS op. */ rtpi = get_unaligned_be16(ptr + 2); /* * Locate the matching relative target port identifier * for the struct se_device storage object. 
*/ spin_lock(&dev->se_port_lock); list_for_each_entry(port, &dev->dev_sep_list, sep_list) { if (port->sep_rtpi != rtpi) continue; tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; spin_unlock(&dev->se_port_lock); if (!core_alua_set_tg_pt_secondary_state( tg_pt_gp_mem, port, 1, 1)) found = true; spin_lock(&dev->se_port_lock); break; } spin_unlock(&dev->se_port_lock); } if (!found) { rc = TCM_INVALID_PARAMETER_LIST; goto out; } ptr += 4; len += 4; } out: transport_kunmap_data_sg(cmd); if (!rc) target_complete_cmd(cmd, GOOD); return rc; } static inline int core_alua_state_nonoptimized( struct se_cmd *cmd, unsigned char *cdb, int nonop_delay_msecs, u8 *alua_ascq) { /* * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked * later to determine if processing of this cmd needs to be * temporarily delayed for the Active/NonOptimized primary access state. */ cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED; cmd->alua_nonop_delay = nonop_delay_msecs; return 0; } static inline int core_alua_state_standby( struct se_cmd *cmd, unsigned char *cdb, u8 *alua_ascq) { /* * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by * spc4r17 section 5.9.2.4.4 */ switch (cdb[0]) { case INQUIRY: case LOG_SELECT: case LOG_SENSE: case MODE_SELECT: case MODE_SENSE: case REPORT_LUNS: case RECEIVE_DIAGNOSTIC: case SEND_DIAGNOSTIC: case READ_CAPACITY: return 0; case SERVICE_ACTION_IN: switch (cdb[1] & 0x1f) { case SAI_READ_CAPACITY_16: return 0; default: *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; return 1; } case MAINTENANCE_IN: switch (cdb[1] & 0x1f) { case MI_REPORT_TARGET_PGS: return 0; default: *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; return 1; } case MAINTENANCE_OUT: switch (cdb[1]) { case MO_SET_TARGET_PGS: return 0; default: *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; return 1; } case REQUEST_SENSE: case PERSISTENT_RESERVE_IN: case PERSISTENT_RESERVE_OUT: case READ_BUFFER: case WRITE_BUFFER: return 0; default: *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; return 1; } return 0; } static inline int core_alua_state_unavailable( struct se_cmd *cmd, unsigned char *cdb, u8 *alua_ascq) { /* * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by * spc4r17 section 5.9.2.4.5 */ switch (cdb[0]) { case INQUIRY: case REPORT_LUNS: return 0; case MAINTENANCE_IN: switch (cdb[1] & 0x1f) { case MI_REPORT_TARGET_PGS: return 0; default: *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; return 1; } case MAINTENANCE_OUT: switch (cdb[1]) { case MO_SET_TARGET_PGS: return 0; default: *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; return 1; } case REQUEST_SENSE: case READ_BUFFER: case WRITE_BUFFER: return 0; default: *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; return 1; } return 0; } static inline int core_alua_state_transition( struct se_cmd *cmd, unsigned char *cdb, u8 *alua_ascq) { /* * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITIO as defined by * spc4r17 section 5.9.2.5 */ switch (cdb[0]) { case INQUIRY: case REPORT_LUNS: return 0; case MAINTENANCE_IN: switch (cdb[1] & 0x1f) { case MI_REPORT_TARGET_PGS: return 0; default: *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION; return 1; } case REQUEST_SENSE: case READ_BUFFER: case WRITE_BUFFER: return 0; default: *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION; return 1; } return 0; } /* * return 1: Is used to signal LUN not accecsable, and check condition/not ready * return 0: Used to signal success * reutrn -1: Used to signal failure, and invalid cdb field */ sense_reason_t target_alua_state_check(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; unsigned char *cdb = cmd->t_task_cdb; struct 
se_lun *lun = cmd->se_lun; struct se_port *port = lun->lun_sep; struct t10_alua_tg_pt_gp *tg_pt_gp; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; int out_alua_state, nonop_delay_msecs; u8 alua_ascq; int ret; if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) return 0; if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) return 0; if (!port) return 0; /* * First, check for a struct se_port specific secondary ALUA target port * access state: OFFLINE */ if (atomic_read(&port->sep_tg_pt_secondary_offline)) { pr_debug("ALUA: Got secondary offline status for local" " target port\n"); alua_ascq = ASCQ_04H_ALUA_OFFLINE; ret = 1; goto out; } /* * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the * ALUA target port group, to obtain current ALUA access state. * Otherwise look for the underlying struct se_device association with * a ALUA logical unit group. */ tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs; spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); /* * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional * statement so the compiler knows explicitly to check this case first. * For the Optimized ALUA access state case, we want to process the * incoming fabric cmd ASAP.. */ if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED) return 0; switch (out_alua_state) { case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: ret = core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs, &alua_ascq); break; case ALUA_ACCESS_STATE_STANDBY: ret = core_alua_state_standby(cmd, cdb, &alua_ascq); break; case ALUA_ACCESS_STATE_UNAVAILABLE: ret = core_alua_state_unavailable(cmd, cdb, &alua_ascq); break; case ALUA_ACCESS_STATE_TRANSITION: ret = core_alua_state_transition(cmd, cdb, &alua_ascq); break; /* * OFFLINE is a secondary ALUA target port group access state, that is * handled above with struct se_port->sep_tg_pt_secondary_offline=1 */ case ALUA_ACCESS_STATE_OFFLINE: default: pr_err("Unknown ALUA access state: 0x%02x\n", out_alua_state); return TCM_INVALID_CDB_FIELD; } out: if (ret > 0) { /* * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; * The ALUA additional sense code qualifier (ASCQ) is determined * by the ALUA primary or secondary access state.. */ pr_debug("[%s]: ALUA TG Port not available, " "SenseKey: NOT_READY, ASC/ASCQ: " "0x04/0x%02x\n", cmd->se_tfo->get_fabric_name(), alua_ascq); cmd->scsi_asc = 0x04; cmd->scsi_ascq = alua_ascq; return TCM_CHECK_CONDITION_NOT_READY; } return 0; } /* * Check implict and explict ALUA state change request. */ static sense_reason_t core_alua_check_transition(int state, int *primary) { switch (state) { case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED: case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: case ALUA_ACCESS_STATE_STANDBY: case ALUA_ACCESS_STATE_UNAVAILABLE: /* * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are * defined as primary target port asymmetric access states. */ *primary = 1; break; case ALUA_ACCESS_STATE_OFFLINE: /* * OFFLINE state is defined as a secondary target port * asymmetric access state. 
*/ *primary = 0; break; default: pr_err("Unknown ALUA access state: 0x%02x\n", state); return TCM_INVALID_PARAMETER_LIST; } return 0; } static char *core_alua_dump_state(int state) { switch (state) { case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED: return "Active/Optimized"; case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: return "Active/NonOptimized"; case ALUA_ACCESS_STATE_STANDBY: return "Standby"; case ALUA_ACCESS_STATE_UNAVAILABLE: return "Unavailable"; case ALUA_ACCESS_STATE_OFFLINE: return "Offline"; default: return "Unknown"; } return NULL; } char *core_alua_dump_status(int status) { switch (status) { case ALUA_STATUS_NONE: return "None"; case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG: return "Altered by Explict STPG"; case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA: return "Altered by Implict ALUA"; default: return "Unknown"; } return NULL; } /* * Used by fabric modules to determine when we need to delay processing * for the Active/NonOptimized paths.. */ int core_alua_check_nonop_delay( struct se_cmd *cmd) { if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED)) return 0; if (in_interrupt()) return 0; /* * The ALUA Active/NonOptimized access state delay can be disabled * in via configfs with a value of zero */ if (!cmd->alua_nonop_delay) return 0; /* * struct se_cmd->alua_nonop_delay gets set by a target port group * defined interval in core_alua_state_nonoptimized() */ msleep_interruptible(cmd->alua_nonop_delay); return 0; } EXPORT_SYMBOL(core_alua_check_nonop_delay); /* * Called with tg_pt_gp->tg_pt_gp_md_mutex or tg_pt_gp_mem->sep_tg_pt_md_mutex * */ static int core_alua_write_tpg_metadata( const char *path, unsigned char *md_buf, u32 md_buf_len) { struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600); int ret; if (IS_ERR(file)) { pr_err("filp_open(%s) for ALUA metadata failed\n", path); return -ENODEV; } ret = kernel_write(file, md_buf, md_buf_len, 0); if (ret < 0) pr_err("Error writing ALUA metadata file: %s\n", path); fput(file); return (ret < 0) ? -EIO : 0; } /* * Called with tg_pt_gp->tg_pt_gp_md_mutex held */ static int core_alua_update_tpg_primary_metadata( struct t10_alua_tg_pt_gp *tg_pt_gp, int primary_state, unsigned char *md_buf) { struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn; char path[ALUA_METADATA_PATH_LEN]; int len; memset(path, 0, ALUA_METADATA_PATH_LEN); len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len, "tg_pt_gp_id=%hu\n" "alua_access_state=0x%02x\n" "alua_access_status=0x%02x\n", tg_pt_gp->tg_pt_gp_id, primary_state, tg_pt_gp->tg_pt_gp_alua_access_status); snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0], config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item)); return core_alua_write_tpg_metadata(path, md_buf, len); } static int core_alua_do_transition_tg_pt( struct t10_alua_tg_pt_gp *tg_pt_gp, struct se_port *l_port, struct se_node_acl *nacl, unsigned char *md_buf, int new_state, int explict) { struct se_dev_entry *se_deve; struct se_lun_acl *lacl; struct se_port *port; struct t10_alua_tg_pt_gp_member *mem; int old_state = 0; /* * Save the old primary ALUA access state, and set the current state * to ALUA_ACCESS_STATE_TRANSITION. */ old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, ALUA_ACCESS_STATE_TRANSITION); tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ? 
ALUA_STATUS_ALTERED_BY_EXPLICT_STPG : ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA; /* * Check for the optional ALUA primary state transition delay */ if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0) msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs); spin_lock(&tg_pt_gp->tg_pt_gp_lock); list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) { port = mem->tg_pt; /* * After an implicit target port asymmetric access state * change, a device server shall establish a unit attention * condition for the initiator port associated with every I_T * nexus with the additional sense code set to ASYMMETRIC * ACCESS STATE CHANGED. * * After an explicit target port asymmetric access state * change, a device server shall establish a unit attention * condition with the additional sense code set to ASYMMETRIC * ACCESS STATE CHANGED for the initiator port associated with * every I_T nexus other than the I_T nexus on which the SET * TARGET PORT GROUPS command was received. */ atomic_inc(&mem->tg_pt_gp_mem_ref_cnt); smp_mb__after_atomic(); spin_unlock(&tg_pt_gp->tg_pt_gp_lock); spin_lock_bh(&port->sep_alua_lock); list_for_each_entry(se_deve, &port->sep_alua_list, alua_port_list) { lacl = se_deve->se_lun_acl; /* * se_deve->se_lun_acl pointer may be NULL for an * entry created without explicit Node+MappedLUN ACLs */ if (!lacl) continue; if (explict && (nacl != NULL) && (nacl == lacl->se_lun_nacl) && (l_port != NULL) && (l_port == port)) continue; core_scsi3_ua_allocate(lacl->se_lun_nacl, se_deve->mapped_lun, 0x2A, ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED); } spin_unlock_bh(&port->sep_alua_lock); spin_lock(&tg_pt_gp->tg_pt_gp_lock); atomic_dec(&mem->tg_pt_gp_mem_ref_cnt); smp_mb__after_atomic(); } spin_unlock(&tg_pt_gp->tg_pt_gp_lock); /* * Update the ALUA metadata buf that has been allocated in * core_alua_do_port_transition(); this metadata will be written * to struct file. * * Note that there is the case where we do not want to update the * metadata when the saved metadata is being parsed in userspace * when setting the existing port access state and access status. * * Also note that the failure to write out the ALUA metadata to * struct file does NOT affect the actual ALUA transition. */ if (tg_pt_gp->tg_pt_gp_write_metadata) { mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex); core_alua_update_tpg_primary_metadata(tg_pt_gp, new_state, md_buf); mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex); } /* * Set the current primary ALUA access state to the requested new state */ atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state); pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" " from primary access state %s to %s\n", (explict) ?
"explict" : "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state), core_alua_dump_state(new_state)); return 0; } int core_alua_do_port_transition( struct t10_alua_tg_pt_gp *l_tg_pt_gp, struct se_device *l_dev, struct se_port *l_port, struct se_node_acl *l_nacl, int new_state, int explict) { struct se_device *dev; struct se_port *port; struct se_node_acl *nacl; struct t10_alua_lu_gp *lu_gp; struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem; struct t10_alua_tg_pt_gp *tg_pt_gp; unsigned char *md_buf; int primary; if (core_alua_check_transition(new_state, &primary) != 0) return -EINVAL; md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL); if (!md_buf) { pr_err("Unable to allocate buf for ALUA metadata\n"); return -ENOMEM; } local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem; spin_lock(&local_lu_gp_mem->lu_gp_mem_lock); lu_gp = local_lu_gp_mem->lu_gp; atomic_inc(&lu_gp->lu_gp_ref_cnt); smp_mb__after_atomic(); spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock); /* * For storage objects that are members of the 'default_lu_gp', * we only do transition on the passed *l_tg_pt_gp, and not * on all of the matching target port group IDs in default_lu_gp. */ if (!lu_gp->lu_gp_id) { /* * core_alua_do_transition_tg_pt() will always return * success. */ core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl, md_buf, new_state, explict); atomic_dec(&lu_gp->lu_gp_ref_cnt); smp_mb__after_atomic(); kfree(md_buf); return 0; } /* * For all other LU groups aside from 'default_lu_gp', walk all of * the associated storage objects looking for a matching target port * group ID from the local target port group. */ spin_lock(&lu_gp->lu_gp_lock); list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) { dev = lu_gp_mem->lu_gp_mem_dev; atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt); smp_mb__after_atomic(); spin_unlock(&lu_gp->lu_gp_lock); spin_lock(&dev->t10_alua.tg_pt_gps_lock); list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list, tg_pt_gp_list) { if (!tg_pt_gp->tg_pt_gp_valid_id) continue; /* * If the target port asymmetric access state * is changed for any target port group accessible via * a logical unit within a LU group, the target port * asymmetric access states for the same * target port group accessible via other logical units * in that LU group will also change. */ if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id) continue; if (l_tg_pt_gp == tg_pt_gp) { port = l_port; nacl = l_nacl; } else { port = NULL; nacl = NULL; } atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); smp_mb__after_atomic(); spin_unlock(&dev->t10_alua.tg_pt_gps_lock); /* * core_alua_do_transition_tg_pt() will always return * success. */ core_alua_do_transition_tg_pt(tg_pt_gp, port, nacl, md_buf, new_state, explict); spin_lock(&dev->t10_alua.tg_pt_gps_lock); atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); smp_mb__after_atomic(); } spin_unlock(&dev->t10_alua.tg_pt_gps_lock); spin_lock(&lu_gp->lu_gp_lock); atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt); smp_mb__after_atomic(); } spin_unlock(&lu_gp->lu_gp_lock); pr_debug("Successfully processed LU Group: %s all ALUA TG PT" " Group IDs: %hu %s transition to primary state: %s\n", config_item_name(&lu_gp->lu_gp_group.cg_item), l_tg_pt_gp->tg_pt_gp_id, (explict) ?
"explict" : "implict", core_alua_dump_state(new_state)); atomic_dec(&lu_gp->lu_gp_ref_cnt); smp_mb__after_atomic(); kfree(md_buf); return 0; } /* * Called with tg_pt_gp_mem->sep_tg_pt_md_mutex held */ static int core_alua_update_tpg_secondary_metadata( struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, struct se_port *port, unsigned char *md_buf, u32 md_buf_len) { struct se_portal_group *se_tpg = port->sep_tpg; char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN]; int len; memset(path, 0, ALUA_METADATA_PATH_LEN); memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN); len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s", se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg)); if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu", se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n" "alua_tg_pt_status=0x%02x\n", atomic_read(&port->sep_tg_pt_secondary_offline), port->sep_tg_pt_secondary_stat); snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u", se_tpg->se_tpg_tfo->get_fabric_name(), wwn, port->sep_lun->unpacked_lun); return core_alua_write_tpg_metadata(path, md_buf, len); } static int core_alua_set_tg_pt_secondary_state( struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, struct se_port *port, int explict, int offline) { struct t10_alua_tg_pt_gp *tg_pt_gp; unsigned char *md_buf; u32 md_buf_len; int trans_delay_msecs; spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; if (!tg_pt_gp) { spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); pr_err("Unable to complete secondary state" " transition\n"); return -EINVAL; } trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs; /* * Set the secondary ALUA target port access state to OFFLINE * or release the previously secondary state for struct se_port */ if (offline) atomic_set(&port->sep_tg_pt_secondary_offline, 1); else atomic_set(&port->sep_tg_pt_secondary_offline, 0); md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len; port->sep_tg_pt_secondary_stat = (explict) ? ALUA_STATUS_ALTERED_BY_EXPLICT_STPG : ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA; pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" " to secondary access state: %s\n", (explict) ? "explict" : "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE"); spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); /* * Do the optional transition delay after we set the secondary * ALUA access state. 
*/ if (trans_delay_msecs != 0) msleep_interruptible(trans_delay_msecs); /* * See if we need to update the ALUA fabric port metadata for * secondary state and status */ if (port->sep_tg_pt_secondary_write_md) { md_buf = kzalloc(md_buf_len, GFP_KERNEL); if (!md_buf) { pr_err("Unable to allocate md_buf for" " secondary ALUA access metadata\n"); return -ENOMEM; } mutex_lock(&port->sep_tg_pt_md_mutex); core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port, md_buf, md_buf_len); mutex_unlock(&port->sep_tg_pt_md_mutex); kfree(md_buf); } return 0; } struct t10_alua_lu_gp * core_alua_allocate_lu_gp(const char *name, int def_group) { struct t10_alua_lu_gp *lu_gp; lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL); if (!lu_gp) { pr_err("Unable to allocate struct t10_alua_lu_gp\n"); return ERR_PTR(-ENOMEM); } INIT_LIST_HEAD(&lu_gp->lu_gp_node); INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list); spin_lock_init(&lu_gp->lu_gp_lock); atomic_set(&lu_gp->lu_gp_ref_cnt, 0); if (def_group) { lu_gp->lu_gp_id = alua_lu_gps_counter++; lu_gp->lu_gp_valid_id = 1; alua_lu_gps_count++; } return lu_gp; } int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id) { struct t10_alua_lu_gp *lu_gp_tmp; u16 lu_gp_id_tmp; /* * The lu_gp->lu_gp_id may only be set once.. */ if (lu_gp->lu_gp_valid_id) { pr_warn("ALUA LU Group already has a valid ID," " ignoring request\n"); return -EINVAL; } spin_lock(&lu_gps_lock); if (alua_lu_gps_count == 0x0000ffff) { pr_err("Maximum ALUA alua_lu_gps_count:" " 0x0000ffff reached\n"); spin_unlock(&lu_gps_lock); kmem_cache_free(t10_alua_lu_gp_cache, lu_gp); return -ENOSPC; } again: lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id : alua_lu_gps_counter++; list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) { if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) { if (!lu_gp_id) goto again; pr_warn("ALUA Logical Unit Group ID: %hu" " already exists, ignoring request\n", lu_gp_id); spin_unlock(&lu_gps_lock); return -EINVAL; } } lu_gp->lu_gp_id = lu_gp_id_tmp; lu_gp->lu_gp_valid_id = 1; list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list); alua_lu_gps_count++; spin_unlock(&lu_gps_lock); return 0; } static struct t10_alua_lu_gp_member * core_alua_allocate_lu_gp_mem(struct se_device *dev) { struct t10_alua_lu_gp_member *lu_gp_mem; lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL); if (!lu_gp_mem) { pr_err("Unable to allocate struct t10_alua_lu_gp_member\n"); return ERR_PTR(-ENOMEM); } INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list); spin_lock_init(&lu_gp_mem->lu_gp_mem_lock); atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0); lu_gp_mem->lu_gp_mem_dev = dev; dev->dev_alua_lu_gp_mem = lu_gp_mem; return lu_gp_mem; } void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp) { struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp; /* * Once we have reached this point, config_item_put() has * already been called from target_core_alua_drop_lu_gp(). * * Here, we remove the *lu_gp from the global list so that * no associations can be made while we are releasing * struct t10_alua_lu_gp. */ spin_lock(&lu_gps_lock); list_del(&lu_gp->lu_gp_node); alua_lu_gps_count--; spin_unlock(&lu_gps_lock); /* * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name() * in target_core_configfs.c:target_core_store_alua_lu_gp() to be * released with core_alua_put_lu_gp_from_name() */ while (atomic_read(&lu_gp->lu_gp_ref_cnt)) cpu_relax(); /* * Release reference to struct t10_alua_lu_gp * from all associated * struct se_device. 
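* * Note the lock juggling in the loop below: lu_gp->lu_gp_lock is dropped * before each member's lu_gp_mem_lock is taken (the attach helper acquires * the group lock itself), then re-taken to continue the walk safely.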
*/ spin_lock(&lu_gp->lu_gp_lock); list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) { if (lu_gp_mem->lu_gp_assoc) { list_del(&lu_gp_mem->lu_gp_mem_list); lu_gp->lu_gp_members--; lu_gp_mem->lu_gp_assoc = 0; } spin_unlock(&lu_gp->lu_gp_lock); /* * * lu_gp_mem is associated with a single * struct se_device->dev_alua_lu_gp_mem, and is released when * struct se_device is released via core_alua_free_lu_gp_mem(). * * If the passed lu_gp does NOT match the default_lu_gp, assume * we want to re-associate a given lu_gp_mem with default_lu_gp. */ spin_lock(&lu_gp_mem->lu_gp_mem_lock); if (lu_gp != default_lu_gp) __core_alua_attach_lu_gp_mem(lu_gp_mem, default_lu_gp); else lu_gp_mem->lu_gp = NULL; spin_unlock(&lu_gp_mem->lu_gp_mem_lock); spin_lock(&lu_gp->lu_gp_lock); } spin_unlock(&lu_gp->lu_gp_lock); kmem_cache_free(t10_alua_lu_gp_cache, lu_gp); } void core_alua_free_lu_gp_mem(struct se_device *dev) { struct t10_alua_lu_gp *lu_gp; struct t10_alua_lu_gp_member *lu_gp_mem; lu_gp_mem = dev->dev_alua_lu_gp_mem; if (!lu_gp_mem) return; while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt)) cpu_relax(); spin_lock(&lu_gp_mem->lu_gp_mem_lock); lu_gp = lu_gp_mem->lu_gp; if (lu_gp) { spin_lock(&lu_gp->lu_gp_lock); if (lu_gp_mem->lu_gp_assoc) { list_del(&lu_gp_mem->lu_gp_mem_list); lu_gp->lu_gp_members--; lu_gp_mem->lu_gp_assoc = 0; } spin_unlock(&lu_gp->lu_gp_lock); lu_gp_mem->lu_gp = NULL; } spin_unlock(&lu_gp_mem->lu_gp_mem_lock); kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem); } struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name) { struct t10_alua_lu_gp *lu_gp; struct config_item *ci; spin_lock(&lu_gps_lock); list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) { if (!lu_gp->lu_gp_valid_id) continue; ci = &lu_gp->lu_gp_group.cg_item; if (!strcmp(config_item_name(ci), name)) { atomic_inc(&lu_gp->lu_gp_ref_cnt); spin_unlock(&lu_gps_lock); return lu_gp; } } spin_unlock(&lu_gps_lock); return NULL; } void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp) { spin_lock(&lu_gps_lock); atomic_dec(&lu_gp->lu_gp_ref_cnt); spin_unlock(&lu_gps_lock); } /* * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held */ void __core_alua_attach_lu_gp_mem( struct t10_alua_lu_gp_member *lu_gp_mem, struct t10_alua_lu_gp *lu_gp) { spin_lock(&lu_gp->lu_gp_lock); lu_gp_mem->lu_gp = lu_gp; lu_gp_mem->lu_gp_assoc = 1; list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list); lu_gp->lu_gp_members++; spin_unlock(&lu_gp->lu_gp_lock); } /* * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held */ void __core_alua_drop_lu_gp_mem( struct t10_alua_lu_gp_member *lu_gp_mem, struct t10_alua_lu_gp *lu_gp) { spin_lock(&lu_gp->lu_gp_lock); list_del(&lu_gp_mem->lu_gp_mem_list); lu_gp_mem->lu_gp = NULL; lu_gp_mem->lu_gp_assoc = 0; lu_gp->lu_gp_members--; spin_unlock(&lu_gp->lu_gp_lock); } struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev, const char *name, int def_group) { struct t10_alua_tg_pt_gp *tg_pt_gp; tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL); if (!tg_pt_gp) { pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n"); return NULL; } INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list); INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list); mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); tg_pt_gp->tg_pt_gp_dev = dev; tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN; atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, ALUA_ACCESS_STATE_ACTIVE_OPTMIZED); /* *
Enable both explicit and implicit ALUA support by default */ tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA; /* * Set the default Active/NonOptimized Delay in milliseconds */ tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS; tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS; tg_pt_gp->tg_pt_gp_implict_trans_secs = ALUA_DEFAULT_IMPLICT_TRANS_SECS; if (def_group) { spin_lock(&dev->t10_alua.tg_pt_gps_lock); tg_pt_gp->tg_pt_gp_id = dev->t10_alua.alua_tg_pt_gps_counter++; tg_pt_gp->tg_pt_gp_valid_id = 1; dev->t10_alua.alua_tg_pt_gps_count++; list_add_tail(&tg_pt_gp->tg_pt_gp_list, &dev->t10_alua.tg_pt_gps_list); spin_unlock(&dev->t10_alua.tg_pt_gps_lock); } return tg_pt_gp; } int core_alua_set_tg_pt_gp_id( struct t10_alua_tg_pt_gp *tg_pt_gp, u16 tg_pt_gp_id) { struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; struct t10_alua_tg_pt_gp *tg_pt_gp_tmp; u16 tg_pt_gp_id_tmp; /* * The tg_pt_gp->tg_pt_gp_id may only be set once.. */ if (tg_pt_gp->tg_pt_gp_valid_id) { pr_warn("ALUA TG PT Group already has a valid ID," " ignoring request\n"); return -EINVAL; } spin_lock(&dev->t10_alua.tg_pt_gps_lock); if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) { pr_err("Maximum ALUA alua_tg_pt_gps_count:" " 0x0000ffff reached\n"); spin_unlock(&dev->t10_alua.tg_pt_gps_lock); kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); return -ENOSPC; } again: tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id : dev->t10_alua.alua_tg_pt_gps_counter++; list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list, tg_pt_gp_list) { if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) { if (!tg_pt_gp_id) goto again; pr_err("ALUA Target Port Group ID: %hu already" " exists, ignoring request\n", tg_pt_gp_id); spin_unlock(&dev->t10_alua.tg_pt_gps_lock); return -EINVAL; } } tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp; tg_pt_gp->tg_pt_gp_valid_id = 1; list_add_tail(&tg_pt_gp->tg_pt_gp_list, &dev->t10_alua.tg_pt_gps_list); dev->t10_alua.alua_tg_pt_gps_count++; spin_unlock(&dev->t10_alua.tg_pt_gps_lock); return 0; } struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem( struct se_port *port) { struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache, GFP_KERNEL); if (!tg_pt_gp_mem) { pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n"); return ERR_PTR(-ENOMEM); } INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list); spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock); atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0); tg_pt_gp_mem->tg_pt = port; port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem; return tg_pt_gp_mem; } void core_alua_free_tg_pt_gp( struct t10_alua_tg_pt_gp *tg_pt_gp) { struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp; /* * Once we have reached this point, config_item_put() has already * been called from target_core_alua_drop_tg_pt_gp(). * * Here we remove *tg_pt_gp from the global list so that * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS * can be made while we are releasing struct t10_alua_tg_pt_gp. */ spin_lock(&dev->t10_alua.tg_pt_gps_lock); list_del(&tg_pt_gp->tg_pt_gp_list); dev->t10_alua.alua_tg_pt_gps_counter--; spin_unlock(&dev->t10_alua.tg_pt_gps_lock); /* * Allow a struct t10_alua_tg_pt_gp_member * referenced by * core_alua_get_tg_pt_gp_by_name() in * target_core_configfs.c:target_core_store_alua_tg_pt_gp() * to be released with core_alua_put_tg_pt_gp_from_name().
*/ while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt)) cpu_relax(); /* * Release reference to struct t10_alua_tg_pt_gp from all associated * struct se_port. */ spin_lock(&tg_pt_gp->tg_pt_gp_lock); list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp, &tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) { if (tg_pt_gp_mem->tg_pt_gp_assoc) { list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); tg_pt_gp->tg_pt_gp_members--; tg_pt_gp_mem->tg_pt_gp_assoc = 0; } spin_unlock(&tg_pt_gp->tg_pt_gp_lock); /* * tg_pt_gp_mem is associated with a single * se_port->sep_alua_tg_pt_gp_mem, and is released via * core_alua_free_tg_pt_gp_mem(). * * If the passed tg_pt_gp does NOT match the default_tg_pt_gp, * assume we want to re-associate a given tg_pt_gp_mem with * default_tg_pt_gp. */ spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) { __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, dev->t10_alua.default_tg_pt_gp); } else tg_pt_gp_mem->tg_pt_gp = NULL; spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); spin_lock(&tg_pt_gp->tg_pt_gp_lock); } spin_unlock(&tg_pt_gp->tg_pt_gp_lock); kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); } void core_alua_free_tg_pt_gp_mem(struct se_port *port) { struct t10_alua_tg_pt_gp *tg_pt_gp; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; if (!tg_pt_gp_mem) return; while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt)) cpu_relax(); spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; if (tg_pt_gp) { spin_lock(&tg_pt_gp->tg_pt_gp_lock); if (tg_pt_gp_mem->tg_pt_gp_assoc) { list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); tg_pt_gp->tg_pt_gp_members--; tg_pt_gp_mem->tg_pt_gp_assoc = 0; } spin_unlock(&tg_pt_gp->tg_pt_gp_lock); tg_pt_gp_mem->tg_pt_gp = NULL; } spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem); } static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name( struct se_device *dev, const char *name) { struct t10_alua_tg_pt_gp *tg_pt_gp; struct config_item *ci; spin_lock(&dev->t10_alua.tg_pt_gps_lock); list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list, tg_pt_gp_list) { if (!tg_pt_gp->tg_pt_gp_valid_id) continue; ci = &tg_pt_gp->tg_pt_gp_group.cg_item; if (!strcmp(config_item_name(ci), name)) { atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); spin_unlock(&dev->t10_alua.tg_pt_gps_lock); return tg_pt_gp; } } spin_unlock(&dev->t10_alua.tg_pt_gps_lock); return NULL; } static void core_alua_put_tg_pt_gp_from_name( struct t10_alua_tg_pt_gp *tg_pt_gp) { struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; spin_lock(&dev->t10_alua.tg_pt_gps_lock); atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); spin_unlock(&dev->t10_alua.tg_pt_gps_lock); } /* * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held */ void __core_alua_attach_tg_pt_gp_mem( struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, struct t10_alua_tg_pt_gp *tg_pt_gp) { spin_lock(&tg_pt_gp->tg_pt_gp_lock); tg_pt_gp_mem->tg_pt_gp = tg_pt_gp; tg_pt_gp_mem->tg_pt_gp_assoc = 1; list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list, &tg_pt_gp->tg_pt_gp_mem_list); tg_pt_gp->tg_pt_gp_members++; spin_unlock(&tg_pt_gp->tg_pt_gp_lock); } /* * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held */ static void __core_alua_drop_tg_pt_gp_mem( struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, struct t10_alua_tg_pt_gp *tg_pt_gp) { spin_lock(&tg_pt_gp->tg_pt_gp_lock); list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); tg_pt_gp_mem->tg_pt_gp = NULL; tg_pt_gp_mem->tg_pt_gp_assoc = 0;
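/* Membership is only unlinked here; the member struct itself stays allocated and is freed separately in core_alua_free_tg_pt_gp_mem(). */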
tg_pt_gp->tg_pt_gp_members--; spin_unlock(&tg_pt_gp->tg_pt_gp_lock); } ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page) { struct config_item *tg_pt_ci; struct t10_alua_tg_pt_gp *tg_pt_gp; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; ssize_t len = 0; tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; if (!tg_pt_gp_mem) return len; spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; if (tg_pt_gp) { tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item; len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:" " %hu\nTG Port Primary Access State: %s\nTG Port " "Primary Access Status: %s\nTG Port Secondary Access" " State: %s\nTG Port Secondary Access Status: %s\n", config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(atomic_read( &tg_pt_gp->tg_pt_gp_alua_access_state)), core_alua_dump_status( tg_pt_gp->tg_pt_gp_alua_access_status), (atomic_read(&port->sep_tg_pt_secondary_offline)) ? "Offline" : "None", core_alua_dump_status(port->sep_tg_pt_secondary_stat)); } spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); return len; } ssize_t core_alua_store_tg_pt_gp_info( struct se_port *port, const char *page, size_t count) { struct se_portal_group *tpg; struct se_lun *lun; struct se_device *dev = port->sep_lun->lun_se_dev; struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; unsigned char buf[TG_PT_GROUP_NAME_BUF]; int move = 0; tpg = port->sep_tpg; lun = port->sep_lun; tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; if (!tg_pt_gp_mem) return 0; if (count > TG_PT_GROUP_NAME_BUF) { pr_err("ALUA Target Port Group alias too large!\n"); return -EINVAL; } memset(buf, 0, TG_PT_GROUP_NAME_BUF); memcpy(buf, page, count); /* * Any ALUA target port group alias besides "NULL" means we will be * making a new group association. */ if (strcmp(strstrip(buf), "NULL")) { /* * core_alua_get_tg_pt_gp_by_name() will increment reference to * struct t10_alua_tg_pt_gp. This reference is released with * core_alua_put_tg_pt_gp_from_name() below. */ tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev, strstrip(buf)); if (!tg_pt_gp_new) return -ENODEV; } spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; if (tg_pt_gp) { /* * Clearing an existing tg_pt_gp association, and replacing * with the default_tg_pt_gp. */ if (!tg_pt_gp_new) { pr_debug("Target_Core_ConfigFS: Moving" " %s/tpgt_%hu/%s from ALUA Target Port Group:" " alua/%s, ID: %hu back to" " default_tg_pt_gp\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpg->se_tpg_tfo->tpg_get_tag(tpg), config_item_name(&lun->lun_group.cg_item), config_item_name( &tg_pt_gp->tg_pt_gp_group.cg_item), tg_pt_gp->tg_pt_gp_id); __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp); __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, dev->t10_alua.default_tg_pt_gp); spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); return count; } /* * Removing existing association of tg_pt_gp_mem with tg_pt_gp */ __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp); move = 1; } /* * Associate tg_pt_gp_mem with tg_pt_gp_new. */ __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new); spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA" " Target Port Group: alua/%s, ID: %hu\n", (move) ? 
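/* Illustrative usage (hypothetical configfs paths): writing a group name such as "tpg_a" to a LUN's alua_tg_pt_gp attribute re-associates the port and lands here; writing "NULL" reverts it to default_tg_pt_gp via the branch above. */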
"Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpg->se_tpg_tfo->tpg_get_tag(tpg), config_item_name(&lun->lun_group.cg_item), config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item), tg_pt_gp_new->tg_pt_gp_id); core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new); return count; } ssize_t core_alua_show_access_type( struct t10_alua_tg_pt_gp *tg_pt_gp, char *page) { if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) && (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) return sprintf(page, "Implict and Explict\n"); else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA) return sprintf(page, "Implict\n"); else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) return sprintf(page, "Explict\n"); else return sprintf(page, "None\n"); } ssize_t core_alua_store_access_type( struct t10_alua_tg_pt_gp *tg_pt_gp, const char *page, size_t count) { unsigned long tmp; int ret; ret = strict_strtoul(page, 0, &tmp); if (ret < 0) { pr_err("Unable to extract alua_access_type\n"); return -EINVAL; } if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) { pr_err("Illegal value for alua_access_type:" " %lu\n", tmp); return -EINVAL; } if (tmp == 3) tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA; else if (tmp == 2) tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA; else if (tmp == 1) tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA; else tg_pt_gp->tg_pt_gp_alua_access_type = 0; return count; } ssize_t core_alua_show_nonop_delay_msecs( struct t10_alua_tg_pt_gp *tg_pt_gp, char *page) { return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs); } ssize_t core_alua_store_nonop_delay_msecs( struct t10_alua_tg_pt_gp *tg_pt_gp, const char *page, size_t count) { unsigned long tmp; int ret; ret = strict_strtoul(page, 0, &tmp); if (ret < 0) { pr_err("Unable to extract nonop_delay_msecs\n"); return -EINVAL; } if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) { pr_err("Passed nonop_delay_msecs: %lu, exceeds" " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp, ALUA_MAX_NONOP_DELAY_MSECS); return -EINVAL; } tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp; return count; } ssize_t core_alua_show_trans_delay_msecs( struct t10_alua_tg_pt_gp *tg_pt_gp, char *page) { return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs); } ssize_t core_alua_store_trans_delay_msecs( struct t10_alua_tg_pt_gp *tg_pt_gp, const char *page, size_t count) { unsigned long tmp; int ret; ret = strict_strtoul(page, 0, &tmp); if (ret < 0) { pr_err("Unable to extract trans_delay_msecs\n"); return -EINVAL; } if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) { pr_err("Passed trans_delay_msecs: %lu, exceeds" " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp, ALUA_MAX_TRANS_DELAY_MSECS); return -EINVAL; } tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp; return count; } ssize_t core_alua_show_implict_trans_secs( struct t10_alua_tg_pt_gp *tg_pt_gp, char *page) { return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implict_trans_secs); } ssize_t core_alua_store_implict_trans_secs( struct t10_alua_tg_pt_gp *tg_pt_gp, const char *page, size_t count) { unsigned long tmp; int ret; ret = strict_strtoul(page, 0, &tmp); if (ret < 0) { pr_err("Unable to extract implict_trans_secs\n"); return -EINVAL; } if (tmp > ALUA_MAX_IMPLICT_TRANS_SECS) { pr_err("Passed implict_trans_secs: %lu, exceeds" " ALUA_MAX_IMPLICT_TRANS_SECS: %d\n", tmp, ALUA_MAX_IMPLICT_TRANS_SECS); return -EINVAL; } tg_pt_gp->tg_pt_gp_implict_trans_secs = (int)tmp; return count; } ssize_t core_alua_show_preferred_bit( struct t10_alua_tg_pt_gp *tg_pt_gp, 
char *page) { return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref); } ssize_t core_alua_store_preferred_bit( struct t10_alua_tg_pt_gp *tg_pt_gp, const char *page, size_t count) { unsigned long tmp; int ret; ret = strict_strtoul(page, 0, &tmp); if (ret < 0) { pr_err("Unable to extract preferred ALUA value\n"); return -EINVAL; } if ((tmp != 0) && (tmp != 1)) { pr_err("Illegal value for preferred ALUA: %lu\n", tmp); return -EINVAL; } tg_pt_gp->tg_pt_gp_pref = (int)tmp; return count; } ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page) { if (!lun->lun_sep) return -ENODEV; return sprintf(page, "%d\n", atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline)); } ssize_t core_alua_store_offline_bit( struct se_lun *lun, const char *page, size_t count) { struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; unsigned long tmp; int ret; if (!lun->lun_sep) return -ENODEV; ret = strict_strtoul(page, 0, &tmp); if (ret < 0) { pr_err("Unable to extract alua_tg_pt_offline value\n"); return -EINVAL; } if ((tmp != 0) && (tmp != 1)) { pr_err("Illegal value for alua_tg_pt_offline: %lu\n", tmp); return -EINVAL; } tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem; if (!tg_pt_gp_mem) { pr_err("Unable to locate *tg_pt_gp_mem\n"); return -EINVAL; } ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem, lun->lun_sep, 0, (int)tmp); if (ret < 0) return -EINVAL; return count; } ssize_t core_alua_show_secondary_status( struct se_lun *lun, char *page) { return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat); } ssize_t core_alua_store_secondary_status( struct se_lun *lun, const char *page, size_t count) { unsigned long tmp; int ret; ret = strict_strtoul(page, 0, &tmp); if (ret < 0) { pr_err("Unable to extract alua_tg_pt_status\n"); return -EINVAL; } if ((tmp != ALUA_STATUS_NONE) && (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { pr_err("Illegal value for alua_tg_pt_status: %lu\n", tmp); return -EINVAL; } lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp; return count; } ssize_t core_alua_show_secondary_write_metadata( struct se_lun *lun, char *page) { return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_write_md); } ssize_t core_alua_store_secondary_write_metadata( struct se_lun *lun, const char *page, size_t count) { unsigned long tmp; int ret; ret = strict_strtoul(page, 0, &tmp); if (ret < 0) { pr_err("Unable to extract alua_tg_pt_write_md\n"); return -EINVAL; } if ((tmp != 0) && (tmp != 1)) { pr_err("Illegal value for alua_tg_pt_write_md:" " %lu\n", tmp); return -EINVAL; } lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp; return count; } int core_setup_alua(struct se_device *dev) { if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV && !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) { struct t10_alua_lu_gp_member *lu_gp_mem; /* * Associate this struct se_device with the default ALUA * LUN Group. */ lu_gp_mem = core_alua_allocate_lu_gp_mem(dev); if (IS_ERR(lu_gp_mem)) return PTR_ERR(lu_gp_mem); spin_lock(&lu_gp_mem->lu_gp_mem_lock); __core_alua_attach_lu_gp_mem(lu_gp_mem, default_lu_gp); spin_unlock(&lu_gp_mem->lu_gp_mem_lock); pr_debug("%s: Adding to default ALUA LU Group:" " core/alua/lu_gps/default_lu_gp\n", dev->transport->name); } return 0; }
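/*
 * Illustrative configfs flow (a sketch only; exact paths depend on the
 * fabric and backstore layout, and attribute names mirror the show/store
 * handlers above and in target_core_configfs.c, not authoritative docs):
 *
 *   mkdir $DEV/alua/tpg_a                       -> core_alua_allocate_tg_pt_gp()
 *   echo 1 > $DEV/alua/tpg_a/tg_pt_gp_id        -> core_alua_set_tg_pt_gp_id()
 *   echo 1 > $DEV/alua/tpg_a/alua_write_metadata -> persist state across restarts
 *                                                   via core_alua_update_tpg_primary_metadata()
 */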
gpl-2.0
crewrktablets/android_kernel_odys_neox_3.0.8
drivers/net/sky2.c
794
133431
/* * New driver for Marvell Yukon 2 chipset. * Based on earlier sk98lin, and skge driver. * * This driver intentionally does not support all the features * of the original driver such as link fail-over and link management because * those should be done at higher levels. * * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/crc32.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/dma-mapping.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/pci.h> #include <linux/ip.h> #include <linux/slab.h> #include <net/ip.h> #include <linux/tcp.h> #include <linux/in.h> #include <linux/delay.h> #include <linux/workqueue.h> #include <linux/if_vlan.h> #include <linux/prefetch.h> #include <linux/debugfs.h> #include <linux/mii.h> #include <asm/irq.h> #include "sky2.h" #define DRV_NAME "sky2" #define DRV_VERSION "1.28" /* * The Yukon II chipset takes 64 bit command blocks (called list elements) * that are organized into three (receive, transmit, status) different rings * similar to Tigon3. */ #define RX_LE_SIZE 1024 #define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le)) #define RX_MAX_PENDING (RX_LE_SIZE/6 - 2) #define RX_DEF_PENDING RX_MAX_PENDING /* This is the worst case number of transmit list elements for a single skb: VLAN:GSO + CKSUM + Data + skb_frags * DMA */ #define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1)) #define TX_MIN_PENDING (MAX_SKB_TX_LE+1) #define TX_MAX_PENDING 1024 #define TX_DEF_PENDING 127 #define TX_WATCHDOG (5 * HZ) #define NAPI_WEIGHT 64 #define PHY_RETRIES 1000 #define SKY2_EEPROM_MAGIC 0x9955aabb #define RING_NEXT(x, s) (((x)+1) & ((s)-1)) static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN; static int debug = -1; /* defaults above */ module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); static int copybreak __read_mostly = 128; module_param(copybreak, int, 0); MODULE_PARM_DESC(copybreak, "Receive copy threshold"); static int disable_msi = 0; module_param(disable_msi, int, 0); MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = { { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */ { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */ { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E01) }, /* SK-9E21M */ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, /* DGE-550SX */ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) }, /* DGE-560SX */ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B03) }, /* DGE-550T */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, /* 88E8021 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 
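/* the remaining entries cover the Marvell Yukon-2 family (88E80xx/88ECxxx variants); MODULE_DEVICE_TABLE(pci, ...) below exports this list so the driver is auto-loaded on a matching PCI ID */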
0x4341) }, /* 88E8022 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, /* 88E8061 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) }, /* 88E8062 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) }, /* 88E8021 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) }, /* 88E8022 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) }, /* 88E8061 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) }, /* 88E8062 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) }, /* 88E8035 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) }, /* 88E8036 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) }, /* 88E8038 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) }, /* 88E8039 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4354) }, /* 88E8040 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4355) }, /* 88E8040T */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4356) }, /* 88EC033 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4357) }, /* 88E8042 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x435A) }, /* 88E8048 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) }, /* 88E8052 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4365) }, /* 88E8070 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) }, /* 88EC042 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436A) }, /* 88E8058 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */ { 0 } }; MODULE_DEVICE_TABLE(pci, sky2_id_table); /* Avoid conditionals by using array */ static const unsigned txqaddr[] = { Q_XA1, Q_XA2 }; static const unsigned rxqaddr[] = { Q_R1, Q_R2 }; static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 }; static void sky2_set_multicast(struct net_device *dev); /* Access to PHY via serial interconnect */ static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val) { int i; gma_write16(hw, port, GM_SMI_DATA, val); gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg)); for (i = 0; i < PHY_RETRIES; i++) { u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL); if (ctrl == 0xffff) goto io_error; if (!(ctrl & GM_SMI_CT_BUSY)) return 0; udelay(10); } dev_warn(&hw->pdev->dev, "%s: phy write timeout\n", hw->dev[port]->name); return -ETIMEDOUT; io_error: dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name); return -EIO; } static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val) { int i; gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); for (i = 0; i < PHY_RETRIES; i++) { u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL); if (ctrl == 0xffff) goto io_error; if (ctrl & GM_SMI_CT_RD_VAL) { *val = gma_read16(hw, port, GM_SMI_DATA); return 0; } udelay(10); } dev_warn(&hw->pdev->dev, "%s: phy read timeout\n", hw->dev[port]->name); return -ETIMEDOUT; io_error: dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name); return -EIO; } static inline u16 gm_phy_read(struct 
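/* convenience wrapper: the error code from __gm_phy_read() is ignored here, so on a PHY I/O failure the returned value is undefined; callers treat PHY reads as best-effort */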
sky2_hw *hw, unsigned port, u16 reg) { u16 v; __gm_phy_read(hw, port, reg, &v); return v; } static void sky2_power_on(struct sky2_hw *hw) { /* switch power to VCC (WA for VAUX problem) */ sky2_write8(hw, B0_POWER_CTRL, PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); /* disable Core Clock Division, */ sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS); if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1) /* enable bits are inverted */ sky2_write8(hw, B2_Y2_CLK_GATE, Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS); else sky2_write8(hw, B2_Y2_CLK_GATE, 0); if (hw->flags & SKY2_HW_ADV_POWER_CTL) { u32 reg; sky2_pci_write32(hw, PCI_DEV_REG3, 0); reg = sky2_pci_read32(hw, PCI_DEV_REG4); /* set all bits to 0 except bits 15..12 and 8 */ reg &= P_ASPM_CONTROL_MSK; sky2_pci_write32(hw, PCI_DEV_REG4, reg); reg = sky2_pci_read32(hw, PCI_DEV_REG5); /* set all bits to 0 except bits 28 & 27 */ reg &= P_CTL_TIM_VMAIN_AV_MSK; sky2_pci_write32(hw, PCI_DEV_REG5, reg); sky2_pci_write32(hw, PCI_CFG_REG_1, 0); sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON); /* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */ reg = sky2_read32(hw, B2_GP_IO); reg |= GLB_GPIO_STAT_RACE_DIS; sky2_write32(hw, B2_GP_IO, reg); sky2_read32(hw, B2_GP_IO); } /* Turn on "driver loaded" LED */ sky2_write16(hw, B0_CTST, Y2_LED_STAT_ON); } static void sky2_power_aux(struct sky2_hw *hw) { if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1) sky2_write8(hw, B2_Y2_CLK_GATE, 0); else /* enable bits are inverted */ sky2_write8(hw, B2_Y2_CLK_GATE, Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS); /* switch power to VAUX if supported and PME from D3cold */ if ( (sky2_read32(hw, B0_CTST) & Y2_VAUX_AVAIL) && pci_pme_capable(hw->pdev, PCI_D3cold)) sky2_write8(hw, B0_POWER_CTRL, (PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF)); /* turn off "driver loaded LED" */ sky2_write16(hw, B0_CTST, Y2_LED_STAT_OFF); } static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port) { u16 reg; /* disable all GMAC IRQ's */ sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */ gma_write16(hw, port, GM_MC_ADDR_H2, 0); gma_write16(hw, port, GM_MC_ADDR_H3, 0); gma_write16(hw, port, GM_MC_ADDR_H4, 0); reg = gma_read16(hw, port, GM_RX_CTRL); reg |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA; gma_write16(hw, port, GM_RX_CTRL, reg); } /* flow control to advertise bits */ static const u16 copper_fc_adv[] = { [FC_NONE] = 0, [FC_TX] = PHY_M_AN_ASP, [FC_RX] = PHY_M_AN_PC, [FC_BOTH] = PHY_M_AN_PC | PHY_M_AN_ASP, }; /* flow control to advertise bits when using 1000BaseX */ static const u16 fiber_fc_adv[] = { [FC_NONE] = PHY_M_P_NO_PAUSE_X, [FC_TX] = PHY_M_P_ASYM_MD_X, [FC_RX] = PHY_M_P_SYM_MD_X, [FC_BOTH] = PHY_M_P_BOTH_MD_X, }; /* flow control to GMA disable bits */ static const u16 gm_fc_disable[] = { [FC_NONE] = GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS, [FC_TX] = GM_GPCR_FC_RX_DIS, [FC_RX] = GM_GPCR_FC_TX_DIS, [FC_BOTH] = 0, }; static void sky2_phy_init(struct sky2_hw *hw, unsigned port) { struct sky2_port *sky2 = netdev_priv(hw->dev[port]); u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg; if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED) && !(hw->flags & SKY2_HW_NEWER_PHY)) { u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | PHY_M_EC_MAC_S_MSK); ectrl |= 
PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ); /* on PHY 88E1040 Rev.D0 (and newer) downshift control changed */ if (hw->chip_id == CHIP_ID_YUKON_EC) /* set downshift counter to 3x and enable downshift */ ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA; else /* set master & slave downshift counter to 1x */ ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1); gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl); } ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); if (sky2_is_copper(hw)) { if (!(hw->flags & SKY2_HW_GIGABIT)) { /* enable automatic crossover */ ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1; if (hw->chip_id == CHIP_ID_YUKON_FE_P && hw->chip_rev == CHIP_REV_YU_FE2_A0) { u16 spec; /* Enable Class A driver for FE+ A0 */ spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2); spec |= PHY_M_FESC_SEL_CL_A; gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec); } } else { /* disable energy detect */ ctrl &= ~PHY_M_PC_EN_DET_MSK; /* enable automatic crossover */ ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO); /* downshift on PHY 88E1112 and 88E1149 is changed */ if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED) && (hw->flags & SKY2_HW_NEWER_PHY)) { /* set downshift counter to 3x and enable downshift */ ctrl &= ~PHY_M_PC_DSC_MSK; ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA; } } } else { /* workaround for deviation #4.88 (CRC errors) */ /* disable Automatic Crossover */ ctrl &= ~PHY_M_PC_MDIX_MSK; } gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); /* special setup for PHY 88E1112 Fiber */ if (hw->chip_id == CHIP_ID_YUKON_XL && (hw->flags & SKY2_HW_FIBRE_PHY)) { pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); /* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2); ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); ctrl &= ~PHY_M_MAC_MD_MSK; ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX); gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); if (hw->pmd_type == 'P') { /* select page 1 to access Fiber registers */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1); /* for SFP-module set SIGDET polarity to low */ ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); ctrl |= PHY_M_FIB_SIGD_POL; gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); } gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); } ctrl = PHY_CT_RESET; ct1000 = 0; adv = PHY_AN_CSMA; reg = 0; if (sky2->flags & SKY2_FLAG_AUTO_SPEED) { if (sky2_is_copper(hw)) { if (sky2->advertising & ADVERTISED_1000baseT_Full) ct1000 |= PHY_M_1000C_AFD; if (sky2->advertising & ADVERTISED_1000baseT_Half) ct1000 |= PHY_M_1000C_AHD; if (sky2->advertising & ADVERTISED_100baseT_Full) adv |= PHY_M_AN_100_FD; if (sky2->advertising & ADVERTISED_100baseT_Half) adv |= PHY_M_AN_100_HD; if (sky2->advertising & ADVERTISED_10baseT_Full) adv |= PHY_M_AN_10_FD; if (sky2->advertising & ADVERTISED_10baseT_Half) adv |= PHY_M_AN_10_HD; } else { /* special defines for FIBER (88E1040S only) */ if (sky2->advertising & ADVERTISED_1000baseT_Full) adv |= PHY_M_AN_1000X_AFD; if (sky2->advertising & ADVERTISED_1000baseT_Half) adv |= PHY_M_AN_1000X_AHD; } /* Restart Auto-negotiation */ ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG; } else { /* forced speed/duplex settings */ ct1000 = PHY_M_1000C_MSE; /* Disable auto update for duplex flow control and duplex */ reg |= GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_SPD_DIS; switch (sky2->speed) { case SPEED_1000: ctrl |= PHY_CT_SP1000; reg |= GM_GPCR_SPEED_1000; break; case SPEED_100: ctrl |= PHY_CT_SP100; reg |= GM_GPCR_SPEED_100; break; } if (sky2->duplex == DUPLEX_FULL) { reg |= GM_GPCR_DUP_FULL; ctrl |= PHY_CT_DUP_MD; } else if (sky2->speed < SPEED_1000) 
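/* 802.3x pause frames require full duplex, so flow control is forced off on forced half-duplex 10/100 links */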
sky2->flow_mode = FC_NONE; } if (sky2->flags & SKY2_FLAG_AUTO_PAUSE) { if (sky2_is_copper(hw)) adv |= copper_fc_adv[sky2->flow_mode]; else adv |= fiber_fc_adv[sky2->flow_mode]; } else { reg |= GM_GPCR_AU_FCT_DIS; reg |= gm_fc_disable[sky2->flow_mode]; /* Forward pause packets to GMAC? */ if (sky2->flow_mode & FC_RX) sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON); else sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); } gma_write16(hw, port, GM_GP_CTRL, reg); if (hw->flags & SKY2_HW_GIGABIT) gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000); gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv); gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); /* Setup Phy LED's */ ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS); ledover = 0; switch (hw->chip_id) { case CHIP_ID_YUKON_FE: /* on 88E3082 these bits are at 11..9 (shifted left) */ ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1; ctrl = gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR); /* delete ACT LED control bits */ ctrl &= ~PHY_M_FELP_LED1_MSK; /* change ACT LED control to blink mode */ ctrl |= PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL); gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl); break; case CHIP_ID_YUKON_FE_P: /* Enable Link Partner Next Page */ ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); ctrl |= PHY_M_PC_ENA_LIP_NP; /* disable Energy Detect and enable scrambler */ ctrl &= ~(PHY_M_PC_ENA_ENE_DT | PHY_M_PC_DIS_SCRAMB); gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); /* set LED2 -> ACT, LED1 -> LINK, LED0 -> SPEED */ ctrl = PHY_M_FELP_LED2_CTRL(LED_PAR_CTRL_ACT_BL) | PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_LINK) | PHY_M_FELP_LED0_CTRL(LED_PAR_CTRL_SPEED); gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl); break; case CHIP_ID_YUKON_XL: pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); /* select page 3 to access LED control register */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); /* set LED Function Control register */ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */ PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */ PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */ PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */ /* set Polarity Control register */ gm_phy_write(hw, port, PHY_MARV_PHY_STAT, (PHY_M_POLC_LS1_P_MIX(4) | PHY_M_POLC_IS0_P_MIX(4) | PHY_M_POLC_LOS_CTRL(2) | PHY_M_POLC_INIT_CTRL(2) | PHY_M_POLC_STA1_CTRL(2) | PHY_M_POLC_STA0_CTRL(2))); /* restore page register */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); break; case CHIP_ID_YUKON_EC_U: case CHIP_ID_YUKON_EX: case CHIP_ID_YUKON_SUPR: pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); /* select page 3 to access LED control register */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); /* set LED Function Control register */ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */ PHY_M_LEDC_INIT_CTRL(8) | /* 10 Mbps */ PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */ PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */ /* set Blink Rate in LED Timer Control Register */ gm_phy_write(hw, port, PHY_MARV_INT_MASK, ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS)); /* restore page register */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); break; default: /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */ ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL; /* turn off the Rx LED (LED_RX) */ ledover |= PHY_M_LED_MO_RX(MO_LED_OFF); } if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_UL_2) { /* apply fixes in PHY AFE */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255); /* increase differential signal amplitude in 10BASE-T */ gm_phy_write(hw, port, 0x18, 0xaa99); 
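/* the 0x18/0x17 pairs below appear to be undocumented Marvell AFE test-register values from vendor errata; treat them as opaque magic */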
gm_phy_write(hw, port, 0x17, 0x2011); if (hw->chip_id == CHIP_ID_YUKON_EC_U) { /* fix for IEEE A/B Symmetry failure in 1000BASE-T */ gm_phy_write(hw, port, 0x18, 0xa204); gm_phy_write(hw, port, 0x17, 0x2002); } /* set page register to 0 */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); } else if (hw->chip_id == CHIP_ID_YUKON_FE_P && hw->chip_rev == CHIP_REV_YU_FE2_A0) { /* apply workaround for integrated resistors calibration */ gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17); gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60); } else if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) { /* apply fixes in PHY AFE */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff); /* apply RDAC termination workaround */ gm_phy_write(hw, port, 24, 0x2800); gm_phy_write(hw, port, 23, 0x2001); /* set page register back to 0 */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); } else if (hw->chip_id != CHIP_ID_YUKON_EX && hw->chip_id < CHIP_ID_YUKON_SUPR) { /* no effect on Yukon-XL */ gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); if (!(sky2->flags & SKY2_FLAG_AUTO_SPEED) || sky2->speed == SPEED_100) { /* turn on 100 Mbps LED (LED_LINK100) */ ledover |= PHY_M_LED_MO_100(MO_LED_ON); } if (ledover) gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover); } /* Enable phy interrupt on auto-negotiation complete (or link up) */ if (sky2->flags & SKY2_FLAG_AUTO_SPEED) gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL); else gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK); } static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD }; static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA }; static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port) { u32 reg1; sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); reg1 &= ~phy_power[port]; if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1) reg1 |= coma_mode[port]; sky2_pci_write32(hw, PCI_DEV_REG1, reg1); sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); sky2_pci_read32(hw, PCI_DEV_REG1); if (hw->chip_id == CHIP_ID_YUKON_FE) gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_ANE); else if (hw->flags & SKY2_HW_ADV_POWER_CTL) sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR); } static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port) { u32 reg1; u16 ctrl; /* release GPHY Control reset */ sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR); /* release GMAC reset */ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); if (hw->flags & SKY2_HW_NEWER_PHY) { /* select page 2 to access MAC control register */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2); ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); /* allow GMII Power Down */ ctrl &= ~PHY_M_MAC_GMIF_PUP; gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); /* set page register back to 0 */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); } /* setup General Purpose Control Register */ gma_write16(hw, port, GM_GP_CTRL, GM_GPCR_FL_PASS | GM_GPCR_SPEED_100 | GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS | GM_GPCR_AU_SPD_DIS); if (hw->chip_id != CHIP_ID_YUKON_EC) { if (hw->chip_id == CHIP_ID_YUKON_EC_U) { /* select page 2 to access MAC control register */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2); ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); /* enable Power Down */ ctrl |= PHY_M_PC_POW_D_ENA; gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); /* set page register back to 0 */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); } /* set IEEE compatible Power Down Mode (dev. 
#4.99) */ gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN); } sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */ sky2_pci_write32(hw, PCI_DEV_REG1, reg1); sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); } /* Enable Rx/Tx */ static void sky2_enable_rx_tx(struct sky2_port *sky2) { struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; u16 reg; reg = gma_read16(hw, port, GM_GP_CTRL); reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; gma_write16(hw, port, GM_GP_CTRL, reg); } /* Force a renegotiation */ static void sky2_phy_reinit(struct sky2_port *sky2) { spin_lock_bh(&sky2->phy_lock); sky2_phy_init(sky2->hw, sky2->port); sky2_enable_rx_tx(sky2); spin_unlock_bh(&sky2->phy_lock); } /* Put device in state to listen for Wake On Lan */ static void sky2_wol_init(struct sky2_port *sky2) { struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; enum flow_control save_mode; u16 ctrl; /* Bring hardware out of reset */ sky2_write16(hw, B0_CTST, CS_RST_CLR); sky2_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR); sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR); sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); /* Force to 10/100 * sky2_reset will re-enable on resume */ save_mode = sky2->flow_mode; ctrl = sky2->advertising; sky2->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full); sky2->flow_mode = FC_NONE; spin_lock_bh(&sky2->phy_lock); sky2_phy_power_up(hw, port); sky2_phy_init(hw, port); spin_unlock_bh(&sky2->phy_lock); sky2->flow_mode = save_mode; sky2->advertising = ctrl; /* Set GMAC to no flow control and auto update for speed/duplex */ gma_write16(hw, port, GM_GP_CTRL, GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA| GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS); /* Set WOL address */ memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR), sky2->netdev->dev_addr, ETH_ALEN); /* Turn on appropriate WOL control bits */ sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT); ctrl = 0; if (sky2->wol & WAKE_PHY) ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT; else ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT; if (sky2->wol & WAKE_MAGIC) ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT; else ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT; ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT; sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl); /* Disable PiG firmware */ sky2_write16(hw, B0_CTST, Y2_HW_WOL_OFF); /* block receiver */ sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); } static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port) { struct net_device *dev = hw->dev[port]; if ( (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev != CHIP_REV_YU_EX_A0) || hw->chip_id >= CHIP_ID_YUKON_FE_P) { /* Yukon-Extreme B0 and further Extreme devices */ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA); } else if (dev->mtu > ETH_DATA_LEN) { /* set Tx GMAC FIFO Almost Empty Threshold */ sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR), (ECU_JUMBO_WM << 16) | ECU_AE_THR); sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS); } else sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA); } static void sky2_mac_init(struct sky2_hw *hw, unsigned port) { struct sky2_port *sky2 = netdev_priv(hw->dev[port]); u16 reg; u32 rx_reg; int i; const u8 *addr = hw->dev[port]->dev_addr; sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); sky2_write8(hw, SK_REG(port, 
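/* the set-then-clear pair pulses the GPHY reset line before the MAC is brought up */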
GPHY_CTRL), GPC_RST_CLR); sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == CHIP_REV_YU_XL_A0 && port == 1) { /* WA DEV_472 -- looks like crossed wires on port 2 */ /* clear GMAC 1 Control reset */ sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR); do { sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_SET); sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_CLR); } while (gm_phy_read(hw, 1, PHY_MARV_ID0) != PHY_MARV_ID0_VAL || gm_phy_read(hw, 1, PHY_MARV_ID1) != PHY_MARV_ID1_Y2 || gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0); } sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC)); /* Enable Transmit FIFO Underrun */ sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK); spin_lock_bh(&sky2->phy_lock); sky2_phy_power_up(hw, port); sky2_phy_init(hw, port); spin_unlock_bh(&sky2->phy_lock); /* MIB clear */ reg = gma_read16(hw, port, GM_PHY_ADDR); gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR); for (i = GM_MIB_CNT_BASE; i <= GM_MIB_CNT_END; i += 4) gma_read16(hw, port, i); gma_write16(hw, port, GM_PHY_ADDR, reg); /* transmit control */ gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); /* receive control reg: unicast + multicast + no FCS */ gma_write16(hw, port, GM_RX_CTRL, GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA); /* transmit flow control */ gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff); /* transmit parameter */ gma_write16(hw, port, GM_TX_PARAM, TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF)); /* serial mode register */ reg = DATA_BLIND_VAL(DATA_BLIND_DEF) | GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF); if (hw->dev[port]->mtu > ETH_DATA_LEN) reg |= GM_SMOD_JUMBO_ENA; if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == CHIP_REV_YU_EC_U_B1) reg |= GM_NEW_FLOW_CTRL; gma_write16(hw, port, GM_SERIAL_MODE, reg); /* virtual address for data */ gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr); /* physical address: used for pause frames */ gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr); /* ignore counter overflows */ gma_write16(hw, port, GM_TX_IRQ_MSK, 0); gma_write16(hw, port, GM_RX_IRQ_MSK, 0); gma_write16(hw, port, GM_TR_IRQ_MSK, 0); /* Configure Rx MAC FIFO */ sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); rx_reg = GMF_OPER_ON | GMF_RX_F_FL_ON; if (hw->chip_id == CHIP_ID_YUKON_EX || hw->chip_id == CHIP_ID_YUKON_FE_P) rx_reg |= GMF_RX_OVER_ON; sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg); if (hw->chip_id == CHIP_ID_YUKON_XL) { /* Hardware errata - clear flush mask */ sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), 0); } else { /* Flush Rx MAC FIFO on any flow control or error */ sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR); } /* Set threshold to 0xa (64 bytes) + 1 to workaround pause bug */ reg = RX_GMF_FL_THR_DEF + 1; /* Another magic mystery workaround from sk98lin */ if (hw->chip_id == CHIP_ID_YUKON_FE_P && hw->chip_rev == CHIP_REV_YU_FE2_A0) reg = 0x178; sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), reg); /* Configure Tx MAC FIFO */ sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); /* On chips without ram buffer, pause is controlled by MAC level */ if (!(hw->flags & SKY2_HW_RAM_BUFFER)) { /* Pause threshold is scaled by 8 in bytes */ if (hw->chip_id == CHIP_ID_YUKON_FE_P && hw->chip_rev == CHIP_REV_YU_FE2_A0) reg = 1568 / 8; else reg = 1024 / 8; sky2_write16(hw, SK_REG(port, RX_GMF_UP_THR), reg); sky2_write16(hw, SK_REG(port, 
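/* watermarks are written in 8-byte units (e.g. 1024 / 8 == 128 -> 1 KB); the upper threshold above starts pause generation and the lower one here ends it */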
RX_GMF_LP_THR), 768 / 8); sky2_set_tx_stfwd(hw, port); } if (hw->chip_id == CHIP_ID_YUKON_FE_P && hw->chip_rev == CHIP_REV_YU_FE2_A0) { /* disable dynamic watermark */ reg = sky2_read16(hw, SK_REG(port, TX_GMF_EA)); reg &= ~TX_DYN_WM_ENA; sky2_write16(hw, SK_REG(port, TX_GMF_EA), reg); } } /* Assign Ram Buffer allocation to queue */ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space) { u32 end; /* convert from K bytes to qwords used for hw register */ start *= 1024/8; space *= 1024/8; end = start + space - 1; sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR); sky2_write32(hw, RB_ADDR(q, RB_START), start); sky2_write32(hw, RB_ADDR(q, RB_END), end); sky2_write32(hw, RB_ADDR(q, RB_WP), start); sky2_write32(hw, RB_ADDR(q, RB_RP), start); if (q == Q_R1 || q == Q_R2) { u32 tp = space - space/4; /* On receive queues set the thresholds: * give the receiver priority when > 3/4 full, * send pause when down to 2K */ sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp); sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2); tp = space - 2048/8; sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp); sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4); } else { /* Enable store & forward on Tx queues because * Tx FIFO is only 1K on Yukon */ sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD); } sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD); sky2_read8(hw, RB_ADDR(q, RB_CTRL)); } /* Setup Bus Memory Interface */ static void sky2_qset(struct sky2_hw *hw, u16 q) { sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET); sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT); sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON); sky2_write32(hw, Q_ADDR(q, Q_WM), BMU_WM_DEFAULT); } /* Setup prefetch unit registers. This is the interface between * hardware and driver list elements */ static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr, dma_addr_t addr, u32 last) { sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR); sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), upper_32_bits(addr)); sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), lower_32_bits(addr)); sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last); sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON); sky2_read32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL)); } static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot) { struct sky2_tx_le *le = sky2->tx_le + *slot; *slot = RING_NEXT(*slot, sky2->tx_ring_size); le->ctrl = 0; return le; } static void tx_init(struct sky2_port *sky2) { struct sky2_tx_le *le; sky2->tx_prod = sky2->tx_cons = 0; sky2->tx_tcpsum = 0; sky2->tx_last_mss = 0; le = get_tx_le(sky2, &sky2->tx_prod); le->addr = 0; le->opcode = OP_ADDR64 | HW_OWNER; sky2->tx_last_upper = 0; } /* Update chip's next pointer */ static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx) { /* Make sure writes to descriptors are complete before we tell hardware */ wmb(); sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx); /* Synchronize I/O since another processor may write to the tail */ mmiowb(); } static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2) { struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put; sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE); le->ctrl = 0; return le; } static unsigned sky2_get_rx_threshold(struct sky2_port *sky2) { unsigned size; /* Space needed for frame data + headers rounded up */ size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8); /* Stopping point for hardware 
truncation */ return (size - 8) / sizeof(u32); } static unsigned sky2_get_rx_data_size(struct sky2_port *sky2) { struct rx_ring_info *re; unsigned size; /* Space needed for frame data + headers rounded up */ size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8); sky2->rx_nfrags = size >> PAGE_SHIFT; BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr)); /* Compute residue after pages */ size -= sky2->rx_nfrags << PAGE_SHIFT; /* Optimize to handle small packets and headers */ if (size < copybreak) size = copybreak; if (size < ETH_HLEN) size = ETH_HLEN; return size; } /* Build description to hardware for one receive segment */ static void sky2_rx_add(struct sky2_port *sky2, u8 op, dma_addr_t map, unsigned len) { struct sky2_rx_le *le; if (sizeof(dma_addr_t) > sizeof(u32)) { le = sky2_next_rx(sky2); le->addr = cpu_to_le32(upper_32_bits(map)); le->opcode = OP_ADDR64 | HW_OWNER; } le = sky2_next_rx(sky2); le->addr = cpu_to_le32(lower_32_bits(map)); le->length = cpu_to_le16(len); le->opcode = op | HW_OWNER; } /* Build description to hardware for one possibly fragmented skb */ static void sky2_rx_submit(struct sky2_port *sky2, const struct rx_ring_info *re) { int i; sky2_rx_add(sky2, OP_PACKET, re->data_addr, sky2->rx_data_size); for (i = 0; i < skb_shinfo(re->skb)->nr_frags; i++) sky2_rx_add(sky2, OP_BUFFER, re->frag_addr[i], PAGE_SIZE); } static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re, unsigned size) { struct sk_buff *skb = re->skb; int i; re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE); if (pci_dma_mapping_error(pdev, re->data_addr)) goto mapping_error; dma_unmap_len_set(re, data_size, size); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; re->frag_addr[i] = pci_map_page(pdev, frag->page, frag->page_offset, frag->size, PCI_DMA_FROMDEVICE); if (pci_dma_mapping_error(pdev, re->frag_addr[i])) goto map_page_error; } return 0; map_page_error: while (--i >= 0) { pci_unmap_page(pdev, re->frag_addr[i], skb_shinfo(skb)->frags[i].size, PCI_DMA_FROMDEVICE); } pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size), PCI_DMA_FROMDEVICE); mapping_error: if (net_ratelimit()) dev_warn(&pdev->dev, "%s: rx mapping error\n", skb->dev->name); return -EIO; } static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re) { struct sk_buff *skb = re->skb; int i; pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size), PCI_DMA_FROMDEVICE); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) pci_unmap_page(pdev, re->frag_addr[i], skb_shinfo(skb)->frags[i].size, PCI_DMA_FROMDEVICE); } /* Tell chip where to start receive checksum. * Actually has two checksums, but set both same to avoid possible byte * order problems. */ static void rx_set_checksum(struct sky2_port *sky2) { struct sky2_rx_le *le = sky2_next_rx(sky2); le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN); le->ctrl = 0; le->opcode = OP_TCPSTART | HW_OWNER; sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), (sky2->netdev->features & NETIF_F_RXCSUM) ? 
BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); } /* Enable/disable receive hash calculation (RSS) */ static void rx_set_rss(struct net_device *dev, u32 features) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; int i, nkeys = 4; /* Supports IPv6 and other modes */ if (hw->flags & SKY2_HW_NEW_LE) { nkeys = 10; sky2_write32(hw, SK_REG(sky2->port, RSS_CFG), HASH_ALL); } /* Program RSS initial values */ if (features & NETIF_F_RXHASH) { u32 key[nkeys]; get_random_bytes(key, nkeys * sizeof(u32)); for (i = 0; i < nkeys; i++) sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4), key[i]); /* Need to turn on (undocumented) flag to make hashing work */ sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_STFW_ENA); sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), BMU_ENA_RX_RSS_HASH); } else sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), BMU_DIS_RX_RSS_HASH); } /* * The RX Stop command will not work for Yukon-2 if the BMU does not * reach the end of packet and since we can't make sure that we have * incoming data, we must reset the BMU while it is not doing a DMA * transfer. Since it is possible that the RX path is still active, * the RX RAM buffer will be stopped first, so any possible incoming * data will not trigger a DMA. After the RAM buffer is stopped, the * BMU is polled until any DMA in progress is ended and only then it * will be reset. */ static void sky2_rx_stop(struct sky2_port *sky2) { struct sky2_hw *hw = sky2->hw; unsigned rxq = rxqaddr[sky2->port]; int i; /* disable the RAM Buffer receive queue */ sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD); for (i = 0; i < 0xffff; i++) if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL)) == sky2_read8(hw, RB_ADDR(rxq, Q_RL))) goto stopped; netdev_warn(sky2->netdev, "receiver stop failed\n"); stopped: sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST); /* reset the Rx prefetch unit */ sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); mmiowb(); } /* Clean out receive buffer area, assumes receiver hardware stopped */ static void sky2_rx_clean(struct sky2_port *sky2) { unsigned i; memset(sky2->rx_le, 0, RX_LE_BYTES); for (i = 0; i < sky2->rx_pending; i++) { struct rx_ring_info *re = sky2->rx_ring + i; if (re->skb) { sky2_rx_unmap_skb(sky2->hw->pdev, re); kfree_skb(re->skb); re->skb = NULL; } } } /* Basic MII support */ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct mii_ioctl_data *data = if_mii(ifr); struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; int err = -EOPNOTSUPP; if (!netif_running(dev)) return -ENODEV; /* Phy still in reset */ switch (cmd) { case SIOCGMIIPHY: data->phy_id = PHY_ADDR_MARV; /* fallthru */ case SIOCGMIIREG: { u16 val = 0; spin_lock_bh(&sky2->phy_lock); err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val); spin_unlock_bh(&sky2->phy_lock); data->val_out = val; break; } case SIOCSMIIREG: spin_lock_bh(&sky2->phy_lock); err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f, data->val_in); spin_unlock_bh(&sky2->phy_lock); break; } return err; } #define SKY2_VLAN_OFFLOADS (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO) static void sky2_vlan_mode(struct net_device *dev, u32 features) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; u16 port = sky2->port; if (features & NETIF_F_HW_VLAN_RX) sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_ON); else sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF); if (features & NETIF_F_HW_VLAN_TX) { sky2_write32(hw, 
SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_ON); dev->vlan_features |= SKY2_VLAN_OFFLOADS; } else { sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF); /* Can't do transmit offload of vlan without hw vlan */ dev->vlan_features &= ~SKY2_VLAN_OFFLOADS; } } /* Amount of required worst case padding in rx buffer */ static inline unsigned sky2_rx_pad(const struct sky2_hw *hw) { return (hw->flags & SKY2_HW_RAM_BUFFER) ? 8 : 2; } /* * Allocate an skb for receiving. If the MTU is large enough * make the skb non-linear with a fragment list of pages. */ static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2) { struct sk_buff *skb; int i; skb = netdev_alloc_skb(sky2->netdev, sky2->rx_data_size + sky2_rx_pad(sky2->hw)); if (!skb) goto nomem; if (sky2->hw->flags & SKY2_HW_RAM_BUFFER) { unsigned char *start; /* * Workaround for a bug in the FIFO that causes a hang * if the receive buffer is not 64 byte aligned. * The buffer returned from netdev_alloc_skb is * aligned except if slab debugging is enabled. */ start = PTR_ALIGN(skb->data, 8); skb_reserve(skb, start - skb->data); } else skb_reserve(skb, NET_IP_ALIGN); for (i = 0; i < sky2->rx_nfrags; i++) { struct page *page = alloc_page(GFP_ATOMIC); if (!page) goto free_partial; skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE); } return skb; free_partial: kfree_skb(skb); nomem: return NULL; } static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq) { sky2_put_idx(sky2->hw, rxq, sky2->rx_put); } static int sky2_alloc_rx_skbs(struct sky2_port *sky2) { struct sky2_hw *hw = sky2->hw; unsigned i; sky2->rx_data_size = sky2_get_rx_data_size(sky2); /* Fill Rx ring */ for (i = 0; i < sky2->rx_pending; i++) { struct rx_ring_info *re = sky2->rx_ring + i; re->skb = sky2_rx_alloc(sky2); if (!re->skb) return -ENOMEM; if (sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size)) { dev_kfree_skb(re->skb); re->skb = NULL; return -ENOMEM; } } return 0; } /* * Setup receiver buffer pool. * In the normal case this ends up creating one list element per skb * in the receive ring. Worst case if using large MTU and each * allocation falls on a different 64 bit region, that results * in 6 list elements per ring entry. * One element is used for checksum enable/disable, and one * extra to avoid wrap. */ static void sky2_rx_start(struct sky2_port *sky2) { struct sky2_hw *hw = sky2->hw; struct rx_ring_info *re; unsigned rxq = rxqaddr[sky2->port]; unsigned i, thresh; sky2->rx_put = sky2->rx_next = 0; sky2_qset(hw, rxq); /* On PCI express lowering the watermark gives better performance */ if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP)) sky2_write32(hw, Q_ADDR(rxq, Q_WM), BMU_WM_PEX); /* These chips have no ram buffer? * MAC Rx RAM Read is controlled by hardware */ if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev > CHIP_REV_YU_EC_U_A0) sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS); sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1); if (!(hw->flags & SKY2_HW_NEW_LE)) rx_set_checksum(sky2); if (!(hw->flags & SKY2_HW_RSS_BROKEN)) rx_set_rss(sky2->netdev, sky2->netdev->features); /* submit Rx ring */ for (i = 0; i < sky2->rx_pending; i++) { re = sky2->rx_ring + i; sky2_rx_submit(sky2, re); } /* * The receiver hangs if it receives frames larger than the * packet buffer. As a workaround, truncate oversize frames, but * the register is limited to 9 bits, so if you do frames > 2052 * you better get the MTU right! 
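* (A worked example, derived from sky2_get_rx_threshold() above: with the standard 1500-byte MTU, size = roundup(1500 + ETH_HLEN + VLAN_HLEN, 8) = 1520 bytes, so thresh = (1520 - 8) / sizeof(u32) = 378 words, which fits; the 9-bit register tops out at 0x1ff words, i.e. 0x1ff * 4 + 8 = 2052 bytes, hence the limit quoted above.) 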
*/ thresh = sky2_get_rx_threshold(sky2); if (thresh > 0x1ff) sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF); else { sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), thresh); sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON); } /* Tell chip about available buffers */ sky2_rx_update(sky2, rxq); if (hw->chip_id == CHIP_ID_YUKON_EX || hw->chip_id == CHIP_ID_YUKON_SUPR) { /* * Disable flushing of non-ASF packets; * must be done after initializing the BMUs; * drivers without ASF support should do this too, otherwise * it may happen that they cannot run on ASF devices; * remember that the MAC FIFO isn't reset during initialization. */ sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_MACSEC_FLUSH_OFF); } if (hw->chip_id >= CHIP_ID_YUKON_SUPR) { /* Enable RX Home Address & Routing Header checksum fix */ sky2_write16(hw, SK_REG(sky2->port, RX_GMF_FL_CTRL), RX_IPV6_SA_MOB_ENA | RX_IPV6_DA_MOB_ENA); /* Enable TX Home Address & Routing Header checksum fix */ sky2_write32(hw, Q_ADDR(txqaddr[sky2->port], Q_TEST), TBMU_TEST_HOME_ADD_FIX_EN | TBMU_TEST_ROUTING_ADD_FIX_EN); } } static int sky2_alloc_buffers(struct sky2_port *sky2) { struct sky2_hw *hw = sky2->hw; /* must be a power of 2 */ sky2->tx_le = pci_alloc_consistent(hw->pdev, sky2->tx_ring_size * sizeof(struct sky2_tx_le), &sky2->tx_le_map); if (!sky2->tx_le) goto nomem; sky2->tx_ring = kcalloc(sky2->tx_ring_size, sizeof(struct tx_ring_info), GFP_KERNEL); if (!sky2->tx_ring) goto nomem; sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES, &sky2->rx_le_map); if (!sky2->rx_le) goto nomem; memset(sky2->rx_le, 0, RX_LE_BYTES); sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct rx_ring_info), GFP_KERNEL); if (!sky2->rx_ring) goto nomem; return sky2_alloc_rx_skbs(sky2); nomem: return -ENOMEM; } static void sky2_free_buffers(struct sky2_port *sky2) { struct sky2_hw *hw = sky2->hw; sky2_rx_clean(sky2); if (sky2->rx_le) { pci_free_consistent(hw->pdev, RX_LE_BYTES, sky2->rx_le, sky2->rx_le_map); sky2->rx_le = NULL; } if (sky2->tx_le) { pci_free_consistent(hw->pdev, sky2->tx_ring_size * sizeof(struct sky2_tx_le), sky2->tx_le, sky2->tx_le_map); sky2->tx_le = NULL; } kfree(sky2->tx_ring); kfree(sky2->rx_ring); sky2->tx_ring = NULL; sky2->rx_ring = NULL; } static void sky2_hw_up(struct sky2_port *sky2) { struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; u32 ramsize; int cap; struct net_device *otherdev = hw->dev[sky2->port^1]; tx_init(sky2); /* * On a dual port PCI-X card, there is a problem where status * can be received out of order due to split transactions */ if (otherdev && netif_running(otherdev) && (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) { u16 cmd; cmd = sky2_pci_read16(hw, cap + PCI_X_CMD); cmd &= ~PCI_X_CMD_MAX_SPLIT; sky2_pci_write16(hw, cap + PCI_X_CMD, cmd); } sky2_mac_init(hw, port); /* Register is number of 4K blocks on internal RAM buffer. */ ramsize = sky2_read8(hw, B2_E_0) * 4; if (ramsize > 0) { u32 rxspace; netdev_dbg(sky2->netdev, "ram buffer %dK\n", ramsize); if (ramsize < 16) rxspace = ramsize / 2; else rxspace = 8 + (2*(ramsize - 16))/3; sky2_ramset(hw, rxqaddr[port], 0, rxspace); sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace); /* Make sure SyncQ is disabled */ sky2_write8(hw, RB_ADDR(port == 0 ? 
Q_XS1 : Q_XS2, RB_CTRL), RB_RST_SET); } sky2_qset(hw, txqaddr[port]); /* This is copied from sk98lin 10.0.5.3; no one tells me about errata */ if (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev == CHIP_REV_YU_EX_B0) sky2_write32(hw, Q_ADDR(txqaddr[port], Q_TEST), F_TX_CHK_AUTO_OFF); /* Set almost empty threshold */ if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == CHIP_REV_YU_EC_U_A0) sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), ECU_TXFF_LEV); sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, sky2->tx_ring_size - 1); sky2_vlan_mode(sky2->netdev, sky2->netdev->features); netdev_update_features(sky2->netdev); sky2_rx_start(sky2); } /* Bring up network interface. */ static int sky2_up(struct net_device *dev) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; u32 imask; int err; netif_carrier_off(dev); err = sky2_alloc_buffers(sky2); if (err) goto err_out; sky2_hw_up(sky2); /* Enable interrupts from phy/mac for port */ imask = sky2_read32(hw, B0_IMSK); imask |= portirq_msk[port]; sky2_write32(hw, B0_IMSK, imask); sky2_read32(hw, B0_IMSK); netif_info(sky2, ifup, dev, "enabling interface\n"); return 0; err_out: sky2_free_buffers(sky2); return err; } /* Modular subtraction in ring */ static inline int tx_inuse(const struct sky2_port *sky2) { return (sky2->tx_prod - sky2->tx_cons) & (sky2->tx_ring_size - 1); } /* Number of list elements available for next tx */ static inline int tx_avail(const struct sky2_port *sky2) { return sky2->tx_pending - tx_inuse(sky2); } /* Estimate of number of transmit list elements required */ static unsigned tx_le_req(const struct sk_buff *skb) { unsigned count; count = (skb_shinfo(skb)->nr_frags + 1) * (sizeof(dma_addr_t) / sizeof(u32)); if (skb_is_gso(skb)) ++count; else if (sizeof(dma_addr_t) == sizeof(u32)) ++count; /* possible vlan */ if (skb->ip_summed == CHECKSUM_PARTIAL) ++count; return count; } static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re) { if (re->flags & TX_MAP_SINGLE) pci_unmap_single(pdev, dma_unmap_addr(re, mapaddr), dma_unmap_len(re, maplen), PCI_DMA_TODEVICE); else if (re->flags & TX_MAP_PAGE) pci_unmap_page(pdev, dma_unmap_addr(re, mapaddr), dma_unmap_len(re, maplen), PCI_DMA_TODEVICE); re->flags = 0; } /* * Put one packet in ring for transmit. * A single packet can generate multiple list elements, and * the number of ring elements will probably be less than the number * of list elements used. 
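* For example, per tx_le_req() above, each buffer takes one element (two with 64-bit DMA, to carry the OP_ADDR64 high-address element), plus one for the MSS element when doing TSO and one for the checksum element, so even a linear TSO frame with checksum offload can take four elements. 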
*/ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; struct sky2_tx_le *le = NULL; struct tx_ring_info *re; unsigned i, len; dma_addr_t mapping; u32 upper; u16 slot; u16 mss; u8 ctrl; if (unlikely(tx_avail(sky2) < tx_le_req(skb))) return NETDEV_TX_BUSY; len = skb_headlen(skb); mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); if (pci_dma_mapping_error(hw->pdev, mapping)) goto mapping_error; slot = sky2->tx_prod; netif_printk(sky2, tx_queued, KERN_DEBUG, dev, "tx queued, slot %u, len %d\n", slot, skb->len); /* Send high bits if needed */ upper = upper_32_bits(mapping); if (upper != sky2->tx_last_upper) { le = get_tx_le(sky2, &slot); le->addr = cpu_to_le32(upper); sky2->tx_last_upper = upper; le->opcode = OP_ADDR64 | HW_OWNER; } /* Check for TCP Segmentation Offload */ mss = skb_shinfo(skb)->gso_size; if (mss != 0) { if (!(hw->flags & SKY2_HW_NEW_LE)) mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb); if (mss != sky2->tx_last_mss) { le = get_tx_le(sky2, &slot); le->addr = cpu_to_le32(mss); if (hw->flags & SKY2_HW_NEW_LE) le->opcode = OP_MSS | HW_OWNER; else le->opcode = OP_LRGLEN | HW_OWNER; sky2->tx_last_mss = mss; } } ctrl = 0; /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */ if (vlan_tx_tag_present(skb)) { if (!le) { le = get_tx_le(sky2, &slot); le->addr = 0; le->opcode = OP_VLAN|HW_OWNER; } else le->opcode |= OP_VLAN; le->length = cpu_to_be16(vlan_tx_tag_get(skb)); ctrl |= INS_VLAN; } /* Handle TCP checksum offload */ if (skb->ip_summed == CHECKSUM_PARTIAL) { /* On Yukon EX (some versions) encoding change. */ if (hw->flags & SKY2_HW_AUTO_TX_SUM) ctrl |= CALSUM; /* auto checksum */ else { const unsigned offset = skb_transport_offset(skb); u32 tcpsum; tcpsum = offset << 16; /* sum start */ tcpsum |= offset + skb->csum_offset; /* sum write */ ctrl |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM; if (ip_hdr(skb)->protocol == IPPROTO_UDP) ctrl |= UDPTCP; if (tcpsum != sky2->tx_tcpsum) { sky2->tx_tcpsum = tcpsum; le = get_tx_le(sky2, &slot); le->addr = cpu_to_le32(tcpsum); le->length = 0; /* initial checksum value */ le->ctrl = 1; /* one packet */ le->opcode = OP_TCPLISW | HW_OWNER; } } } re = sky2->tx_ring + slot; re->flags = TX_MAP_SINGLE; dma_unmap_addr_set(re, mapaddr, mapping); dma_unmap_len_set(re, maplen, len); le = get_tx_le(sky2, &slot); le->addr = cpu_to_le32(lower_32_bits(mapping)); le->length = cpu_to_le16(len); le->ctrl = ctrl; le->opcode = mss ? 
(OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset, frag->size, PCI_DMA_TODEVICE); if (pci_dma_mapping_error(hw->pdev, mapping)) goto mapping_unwind; upper = upper_32_bits(mapping); if (upper != sky2->tx_last_upper) { le = get_tx_le(sky2, &slot); le->addr = cpu_to_le32(upper); sky2->tx_last_upper = upper; le->opcode = OP_ADDR64 | HW_OWNER; } re = sky2->tx_ring + slot; re->flags = TX_MAP_PAGE; dma_unmap_addr_set(re, mapaddr, mapping); dma_unmap_len_set(re, maplen, frag->size); le = get_tx_le(sky2, &slot); le->addr = cpu_to_le32(lower_32_bits(mapping)); le->length = cpu_to_le16(frag->size); le->ctrl = ctrl; le->opcode = OP_BUFFER | HW_OWNER; } re->skb = skb; le->ctrl |= EOP; sky2->tx_prod = slot; if (tx_avail(sky2) <= MAX_SKB_TX_LE) netif_stop_queue(dev); sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod); return NETDEV_TX_OK; mapping_unwind: for (i = sky2->tx_prod; i != slot; i = RING_NEXT(i, sky2->tx_ring_size)) { re = sky2->tx_ring + i; sky2_tx_unmap(hw->pdev, re); } mapping_error: if (net_ratelimit()) dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); dev_kfree_skb(skb); return NETDEV_TX_OK; } /* * Free ring elements starting at tx_cons until "done" * * NB: * 1. The hardware will tell us about partial completion of multi-part * buffers so make sure not to free the skb too early. * 2. This may run in parallel with start_xmit because it only * looks at the tail of the FIFO (tx_cons), not * the head (tx_prod) */ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) { struct net_device *dev = sky2->netdev; unsigned idx; BUG_ON(done >= sky2->tx_ring_size); for (idx = sky2->tx_cons; idx != done; idx = RING_NEXT(idx, sky2->tx_ring_size)) { struct tx_ring_info *re = sky2->tx_ring + idx; struct sk_buff *skb = re->skb; sky2_tx_unmap(sky2->hw->pdev, re); if (skb) { netif_printk(sky2, tx_done, KERN_DEBUG, dev, "tx done %u\n", idx); u64_stats_update_begin(&sky2->tx_stats.syncp); ++sky2->tx_stats.packets; sky2->tx_stats.bytes += skb->len; u64_stats_update_end(&sky2->tx_stats.syncp); re->skb = NULL; dev_kfree_skb_any(skb); sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size); } } sky2->tx_cons = idx; smp_mb(); } static void sky2_tx_reset(struct sky2_hw *hw, unsigned port) { /* Disable Force Sync bit and Enable Alloc bit */ sky2_write8(hw, SK_REG(port, TXA_CTRL), TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); /* Stop Interval Timer and Limit Counter of Tx Arbiter */ sky2_write32(hw, SK_REG(port, TXA_ITI_INI), 0L); sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L); /* Reset the PCI FIFO of the async Tx queue */ sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_RST_SET | BMU_FIFO_RST); /* Reset the Tx prefetch units */ sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL), PREF_UNIT_RST_SET); sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET); sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); } static void sky2_hw_down(struct sky2_port *sky2) { struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; u16 ctrl; /* Force flow control off */ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); /* Stop transmitter */ sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP); sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR)); sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET | RB_DIS_OP_MD); ctrl = gma_read16(hw, port, GM_GP_CTRL); ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA); 
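/* Disable Rx/Tx in the GMA control register before resetting the PHY and MAC */ 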
gma_write16(hw, port, GM_GP_CTRL, ctrl); sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); /* Workaround shared GMAC reset */ if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 && port == 0 && hw->dev[1] && netif_running(hw->dev[1]))) sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); /* Force any delayed status interrupt and NAPI */ sky2_write32(hw, STAT_LEV_TIMER_CNT, 0); sky2_write32(hw, STAT_TX_TIMER_CNT, 0); sky2_write32(hw, STAT_ISR_TIMER_CNT, 0); sky2_read8(hw, STAT_ISR_TIMER_CTRL); sky2_rx_stop(sky2); spin_lock_bh(&sky2->phy_lock); sky2_phy_power_down(hw, port); spin_unlock_bh(&sky2->phy_lock); sky2_tx_reset(hw, port); /* Free any pending frames stuck in HW queue */ sky2_tx_complete(sky2, sky2->tx_prod); } /* Network shutdown */ static int sky2_down(struct net_device *dev) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; /* Never really got started! */ if (!sky2->tx_le) return 0; netif_info(sky2, ifdown, dev, "disabling interface\n"); /* Disable port IRQ */ sky2_write32(hw, B0_IMSK, sky2_read32(hw, B0_IMSK) & ~portirq_msk[sky2->port]); sky2_read32(hw, B0_IMSK); synchronize_irq(hw->pdev->irq); napi_synchronize(&hw->napi); sky2_hw_down(sky2); sky2_free_buffers(sky2); return 0; } static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux) { if (hw->flags & SKY2_HW_FIBRE_PHY) return SPEED_1000; if (!(hw->flags & SKY2_HW_GIGABIT)) { if (aux & PHY_M_PS_SPEED_100) return SPEED_100; else return SPEED_10; } switch (aux & PHY_M_PS_SPEED_MSK) { case PHY_M_PS_SPEED_1000: return SPEED_1000; case PHY_M_PS_SPEED_100: return SPEED_100; default: return SPEED_10; } } static void sky2_link_up(struct sky2_port *sky2) { struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; static const char *fc_name[] = { [FC_NONE] = "none", [FC_TX] = "tx", [FC_RX] = "rx", [FC_BOTH] = "both", }; sky2_enable_rx_tx(sky2); gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK); netif_carrier_on(sky2->netdev); mod_timer(&hw->watchdog_timer, jiffies + 1); /* Turn on link LED */ sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF); netif_info(sky2, link, sky2->netdev, "Link is up at %d Mbps, %s duplex, flow control %s\n", sky2->speed, sky2->duplex == DUPLEX_FULL ? "full" : "half", fc_name[sky2->flow_status]); } static void sky2_link_down(struct sky2_port *sky2) { struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; u16 reg; gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0); reg = gma_read16(hw, port, GM_GP_CTRL); reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); gma_write16(hw, port, GM_GP_CTRL, reg); netif_carrier_off(sky2->netdev); /* Turn off link LED */ sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF); netif_info(sky2, link, sky2->netdev, "Link is down\n"); sky2_phy_init(hw, port); } static enum flow_control sky2_flow(int rx, int tx) { if (rx) return tx ? FC_BOTH : FC_RX; else return tx ? FC_TX : FC_NONE; } static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux) { struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; u16 advert, lpa; advert = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV); lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP); if (lpa & PHY_M_AN_RF) { netdev_err(sky2->netdev, "remote fault\n"); return -1; } if (!(aux & PHY_M_PS_SPDUP_RES)) { netdev_err(sky2->netdev, "speed/duplex mismatch\n"); return -1; } sky2->speed = sky2_phy_speed(hw, aux); sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? 
DUPLEX_FULL : DUPLEX_HALF; /* Since the pause result bits seem to be in different positions on * different chips, look at the registers. */ if (hw->flags & SKY2_HW_FIBRE_PHY) { /* Shift for bits in fiber PHY */ advert &= ~(ADVERTISE_PAUSE_CAP|ADVERTISE_PAUSE_ASYM); lpa &= ~(LPA_PAUSE_CAP|LPA_PAUSE_ASYM); if (advert & ADVERTISE_1000XPAUSE) advert |= ADVERTISE_PAUSE_CAP; if (advert & ADVERTISE_1000XPSE_ASYM) advert |= ADVERTISE_PAUSE_ASYM; if (lpa & LPA_1000XPAUSE) lpa |= LPA_PAUSE_CAP; if (lpa & LPA_1000XPAUSE_ASYM) lpa |= LPA_PAUSE_ASYM; } sky2->flow_status = FC_NONE; if (advert & ADVERTISE_PAUSE_CAP) { if (lpa & LPA_PAUSE_CAP) sky2->flow_status = FC_BOTH; else if (advert & ADVERTISE_PAUSE_ASYM) sky2->flow_status = FC_RX; } else if (advert & ADVERTISE_PAUSE_ASYM) { if ((lpa & LPA_PAUSE_CAP) && (lpa & LPA_PAUSE_ASYM)) sky2->flow_status = FC_TX; } if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000 && !(hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX)) sky2->flow_status = FC_NONE; if (sky2->flow_status & FC_TX) sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON); else sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); return 0; } /* Interrupt from PHY */ static void sky2_phy_intr(struct sky2_hw *hw, unsigned port) { struct net_device *dev = hw->dev[port]; struct sky2_port *sky2 = netdev_priv(dev); u16 istatus, phystat; if (!netif_running(dev)) return; spin_lock(&sky2->phy_lock); istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT); phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT); netif_info(sky2, intr, sky2->netdev, "phy interrupt status 0x%x 0x%x\n", istatus, phystat); if (istatus & PHY_M_IS_AN_COMPL) { if (sky2_autoneg_done(sky2, phystat) == 0 && !netif_carrier_ok(dev)) sky2_link_up(sky2); goto out; } if (istatus & PHY_M_IS_LSP_CHANGE) sky2->speed = sky2_phy_speed(hw, phystat); if (istatus & PHY_M_IS_DUP_CHANGE) sky2->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF; if (istatus & PHY_M_IS_LST_CHANGE) { if (phystat & PHY_M_PS_LINK_UP) sky2_link_up(sky2); else sky2_link_down(sky2); } out: spin_unlock(&sky2->phy_lock); } /* Special quick link interrupt (Yukon-2 Optima only) */ static void sky2_qlink_intr(struct sky2_hw *hw) { struct sky2_port *sky2 = netdev_priv(hw->dev[0]); u32 imask; u16 phy; /* disable irq */ imask = sky2_read32(hw, B0_IMSK); imask &= ~Y2_IS_PHY_QLNK; sky2_write32(hw, B0_IMSK, imask); /* reset PHY Link Detect */ phy = sky2_pci_read16(hw, PSM_CONFIG_REG4); sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1); sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); sky2_link_up(sky2); } /* Transmit timeout is only called if we are running, carrier is up * and tx queue is full (stopped). */ static void sky2_tx_timeout(struct net_device *dev) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; netif_err(sky2, timer, dev, "tx timeout\n"); netdev_printk(KERN_DEBUG, dev, "transmit ring %u .. %u report=%u done=%u\n", sky2->tx_cons, sky2->tx_prod, sky2_read16(hw, sky2->port == 0 ? 
STAT_TXA1_RIDX : STAT_TXA2_RIDX), sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE))); /* can't restart safely under softirq */ schedule_work(&hw->restart_work); } static int sky2_change_mtu(struct net_device *dev, int new_mtu) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; int err; u16 ctl, mode; u32 imask; /* MTU size outside the spec */ if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) return -EINVAL; /* MTU > 1500 on yukon FE and FE+ not allowed */ if (new_mtu > ETH_DATA_LEN && (hw->chip_id == CHIP_ID_YUKON_FE || hw->chip_id == CHIP_ID_YUKON_FE_P)) return -EINVAL; if (!netif_running(dev)) { dev->mtu = new_mtu; netdev_update_features(dev); return 0; } imask = sky2_read32(hw, B0_IMSK); sky2_write32(hw, B0_IMSK, 0); dev->trans_start = jiffies; /* prevent tx timeout */ napi_disable(&hw->napi); netif_tx_disable(dev); synchronize_irq(hw->pdev->irq); if (!(hw->flags & SKY2_HW_RAM_BUFFER)) sky2_set_tx_stfwd(hw, port); ctl = gma_read16(hw, port, GM_GP_CTRL); gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA); sky2_rx_stop(sky2); sky2_rx_clean(sky2); dev->mtu = new_mtu; netdev_update_features(dev); mode = DATA_BLIND_VAL(DATA_BLIND_DEF) | GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF); if (dev->mtu > ETH_DATA_LEN) mode |= GM_SMOD_JUMBO_ENA; gma_write16(hw, port, GM_SERIAL_MODE, mode); sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD); err = sky2_alloc_rx_skbs(sky2); if (!err) sky2_rx_start(sky2); else sky2_rx_clean(sky2); sky2_write32(hw, B0_IMSK, imask); sky2_read32(hw, B0_Y2_SP_LISR); napi_enable(&hw->napi); if (err) dev_close(dev); else { gma_write16(hw, port, GM_GP_CTRL, ctl); netif_wake_queue(dev); } return err; } /* For small packets, just reuse the existing skb for the next receive */ static struct sk_buff *receive_copy(struct sky2_port *sky2, const struct rx_ring_info *re, unsigned length) { struct sk_buff *skb; skb = netdev_alloc_skb_ip_align(sky2->netdev, length); if (likely(skb)) { pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr, length, PCI_DMA_FROMDEVICE); skb_copy_from_linear_data(re->skb, skb->data, length); skb->ip_summed = re->skb->ip_summed; skb->csum = re->skb->csum; pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr, length, PCI_DMA_FROMDEVICE); re->skb->ip_summed = CHECKSUM_NONE; skb_put(skb, length); } return skb; } /* Adjust length of skb with fragments to match received data */ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space, unsigned int length) { int i, num_frags; unsigned int size; /* put header into skb */ size = min(length, hdr_space); skb->tail += size; skb->len += size; length -= size; num_frags = skb_shinfo(skb)->nr_frags; for (i = 0; i < num_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; if (length == 0) { /* don't need this page */ __free_page(frag->page); --skb_shinfo(skb)->nr_frags; } else { size = min(length, (unsigned) PAGE_SIZE); frag->size = size; skb->data_len += size; skb->truesize += size; skb->len += size; length -= size; } } } /* Normal packet - take skb from ring element and put in a new one */ static struct sk_buff *receive_new(struct sky2_port *sky2, struct rx_ring_info *re, unsigned int length) { struct sk_buff *skb; struct rx_ring_info nre; unsigned hdr_space = sky2->rx_data_size; nre.skb = sky2_rx_alloc(sky2); if (unlikely(!nre.skb)) goto nobuf; if (sky2_rx_map_skb(sky2->hw->pdev, &nre, hdr_space)) goto nomap; skb = re->skb; sky2_rx_unmap_skb(sky2->hw->pdev, re); prefetch(skb->data); *re = nre; if (skb_shinfo(skb)->nr_frags) 
skb_put_frags(skb, hdr_space, length); else skb_put(skb, length); return skb; nomap: dev_kfree_skb(nre.skb); nobuf: return NULL; } /* * Receive one packet. * For larger packets, get new buffer. */ static struct sk_buff *sky2_receive(struct net_device *dev, u16 length, u32 status) { struct sky2_port *sky2 = netdev_priv(dev); struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next; struct sk_buff *skb = NULL; u16 count = (status & GMR_FS_LEN) >> 16; if (status & GMR_FS_VLAN) count -= VLAN_HLEN; /* Account for vlan tag */ netif_printk(sky2, rx_status, KERN_DEBUG, dev, "rx slot %u status 0x%x len %d\n", sky2->rx_next, status, length); sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending; prefetch(sky2->rx_ring + sky2->rx_next); /* This chip has hardware problems that generate bogus status. * So do only marginal checking and expect higher level protocols * to handle crap frames. */ if (sky2->hw->chip_id == CHIP_ID_YUKON_FE_P && sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0 && length != count) goto okay; if (status & GMR_FS_ANY_ERR) goto error; if (!(status & GMR_FS_RX_OK)) goto resubmit; /* if length reported by DMA does not match PHY, packet was truncated */ if (length != count) goto error; okay: if (length < copybreak) skb = receive_copy(sky2, re, length); else skb = receive_new(sky2, re, length); dev->stats.rx_dropped += (skb == NULL); resubmit: sky2_rx_submit(sky2, re); return skb; error: ++dev->stats.rx_errors; if (net_ratelimit()) netif_info(sky2, rx_err, dev, "rx error, status 0x%x length %d\n", status, length); goto resubmit; } /* Transmit complete */ static inline void sky2_tx_done(struct net_device *dev, u16 last) { struct sky2_port *sky2 = netdev_priv(dev); if (netif_running(dev)) { sky2_tx_complete(sky2, last); /* Wake unless it's detached, and called e.g. from sky2_down() */ if (tx_avail(sky2) > MAX_SKB_TX_LE + 4) netif_wake_queue(dev); } } static inline void sky2_skb_rx(const struct sky2_port *sky2, u32 status, struct sk_buff *skb) { if (status & GMR_FS_VLAN) __vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag)); if (skb->ip_summed == CHECKSUM_NONE) netif_receive_skb(skb); else napi_gro_receive(&sky2->hw->napi, skb); } static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port, unsigned packets, unsigned bytes) { struct net_device *dev = hw->dev[port]; struct sky2_port *sky2 = netdev_priv(dev); if (packets == 0) return; u64_stats_update_begin(&sky2->rx_stats.syncp); sky2->rx_stats.packets += packets; sky2->rx_stats.bytes += bytes; u64_stats_update_end(&sky2->rx_stats.syncp); dev->last_rx = jiffies; sky2_rx_update(netdev_priv(dev), rxqaddr[port]); } static void sky2_rx_checksum(struct sky2_port *sky2, u32 status) { /* If this happens then the driver is assuming the wrong format for this chip type */ BUG_ON(sky2->hw->flags & SKY2_HW_NEW_LE); /* Both checksum counters are programmed to start at * the same offset, so unless there is a problem they * should match. This failure is an early indication that * hardware receive checksumming won't work. 
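* (Both halves are started at ETH_HLEN by rx_set_checksum() above, which is why comparing the two 16-bit halves of the status word below is a valid sanity check.) 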
*/ if (likely((u16)(status >> 16) == (u16)status)) { struct sk_buff *skb = sky2->rx_ring[sky2->rx_next].skb; skb->ip_summed = CHECKSUM_COMPLETE; skb->csum = le16_to_cpu(status); } else { dev_notice(&sky2->hw->pdev->dev, "%s: receive checksum problem (status = %#x)\n", sky2->netdev->name, status); /* Disable checksum offload * It will be reenabled on next ndo_set_features, but if it's * really broken, will get disabled again */ sky2->netdev->features &= ~NETIF_F_RXCSUM; sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), BMU_DIS_RX_CHKSUM); } } static void sky2_rx_hash(struct sky2_port *sky2, u32 status) { struct sk_buff *skb; skb = sky2->rx_ring[sky2->rx_next].skb; skb->rxhash = le32_to_cpu(status); } /* Process status response ring */ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) { int work_done = 0; unsigned int total_bytes[2] = { 0 }; unsigned int total_packets[2] = { 0 }; rmb(); do { struct sky2_port *sky2; struct sky2_status_le *le = hw->st_le + hw->st_idx; unsigned port; struct net_device *dev; struct sk_buff *skb; u32 status; u16 length; u8 opcode = le->opcode; if (!(opcode & HW_OWNER)) break; hw->st_idx = RING_NEXT(hw->st_idx, hw->st_size); port = le->css & CSS_LINK_BIT; dev = hw->dev[port]; sky2 = netdev_priv(dev); length = le16_to_cpu(le->length); status = le32_to_cpu(le->status); le->opcode = 0; switch (opcode & ~HW_OWNER) { case OP_RXSTAT: total_packets[port]++; total_bytes[port] += length; skb = sky2_receive(dev, length, status); if (!skb) break; /* This chip reports checksum status differently */ if (hw->flags & SKY2_HW_NEW_LE) { if ((dev->features & NETIF_F_RXCSUM) && (le->css & (CSS_ISIPV4 | CSS_ISIPV6)) && (le->css & CSS_TCPUDPCSOK)) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb->ip_summed = CHECKSUM_NONE; } skb->protocol = eth_type_trans(skb, dev); sky2_skb_rx(sky2, status, skb); /* Stop after net poll weight */ if (++work_done >= to_do) goto exit_loop; break; case OP_RXVLAN: sky2->rx_tag = length; break; case OP_RXCHKSVLAN: sky2->rx_tag = length; /* fall through */ case OP_RXCHKS: if (likely(dev->features & NETIF_F_RXCSUM)) sky2_rx_checksum(sky2, status); break; case OP_RSS_HASH: sky2_rx_hash(sky2, status); break; case OP_TXINDEXLE: /* TX index reports status for both ports */ sky2_tx_done(hw->dev[0], status & 0xfff); if (hw->dev[1]) sky2_tx_done(hw->dev[1], ((status >> 24) & 0xff) | (u16)(length & 0xf) << 8); break; default: if (net_ratelimit()) pr_warning("unknown status opcode 0x%x\n", opcode); } } while (hw->st_idx != idx); /* Fully processed status ring so clear irq */ sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); exit_loop: sky2_rx_done(hw, 0, total_packets[0], total_bytes[0]); sky2_rx_done(hw, 1, total_packets[1], total_bytes[1]); return work_done; } static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status) { struct net_device *dev = hw->dev[port]; if (net_ratelimit()) netdev_info(dev, "hw error interrupt status 0x%x\n", status); if (status & Y2_IS_PAR_RD1) { if (net_ratelimit()) netdev_err(dev, "ram data read parity error\n"); /* Clear IRQ */ sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR); } if (status & Y2_IS_PAR_WR1) { if (net_ratelimit()) netdev_err(dev, "ram data write parity error\n"); sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR); } if (status & Y2_IS_PAR_MAC1) { if (net_ratelimit()) netdev_err(dev, "MAC parity error\n"); sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE); } if (status & Y2_IS_PAR_RX1) { if (net_ratelimit()) netdev_err(dev, "RX parity error\n"); 
sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR); } if (status & Y2_IS_TCP_TXA1) { if (net_ratelimit()) netdev_err(dev, "TCP segmentation error\n"); sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP); } } static void sky2_hw_intr(struct sky2_hw *hw) { struct pci_dev *pdev = hw->pdev; u32 status = sky2_read32(hw, B0_HWE_ISRC); u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK); status &= hwmsk; if (status & Y2_IS_TIST_OV) sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) { u16 pci_err; sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); pci_err = sky2_pci_read16(hw, PCI_STATUS); if (net_ratelimit()) dev_err(&pdev->dev, "PCI hardware error (0x%x)\n", pci_err); sky2_pci_write16(hw, PCI_STATUS, pci_err | PCI_STATUS_ERROR_BITS); sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); } if (status & Y2_IS_PCI_EXP) { /* PCI-Express uncorrectable error occurred */ u32 err; sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, 0xfffffffful); if (net_ratelimit()) dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err); sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); } if (status & Y2_HWE_L1_MASK) sky2_hw_error(hw, 0, status); status >>= 8; if (status & Y2_HWE_L1_MASK) sky2_hw_error(hw, 1, status); } static void sky2_mac_intr(struct sky2_hw *hw, unsigned port) { struct net_device *dev = hw->dev[port]; struct sky2_port *sky2 = netdev_priv(dev); u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC)); netif_info(sky2, intr, dev, "mac interrupt status 0x%x\n", status); if (status & GM_IS_RX_CO_OV) gma_read16(hw, port, GM_RX_IRQ_SRC); if (status & GM_IS_TX_CO_OV) gma_read16(hw, port, GM_TX_IRQ_SRC); if (status & GM_IS_RX_FF_OR) { ++dev->stats.rx_fifo_errors; sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO); } if (status & GM_IS_TX_FF_UR) { ++dev->stats.tx_fifo_errors; sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU); } } /* This should never happen; it is a bug. 
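* A descriptor error here most likely means the prefetch unit fetched a list element the driver had not yet handed over; the get/put indices printed below should help narrow it down. 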
*/ static void sky2_le_error(struct sky2_hw *hw, unsigned port, u16 q) { struct net_device *dev = hw->dev[port]; u16 idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX)); dev_err(&hw->pdev->dev, "%s: descriptor error q=%#x get=%u put=%u\n", dev->name, (unsigned) q, (unsigned) idx, (unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX))); sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK); } static int sky2_rx_hung(struct net_device *dev) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; unsigned rxq = rxqaddr[port]; u32 mac_rp = sky2_read32(hw, SK_REG(port, RX_GMF_RP)); u8 mac_lev = sky2_read8(hw, SK_REG(port, RX_GMF_RLEV)); u8 fifo_rp = sky2_read8(hw, Q_ADDR(rxq, Q_RP)); u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL)); /* If idle and MAC or PCI is stuck */ if (sky2->check.last == dev->last_rx && ((mac_rp == sky2->check.mac_rp && mac_lev != 0 && mac_lev >= sky2->check.mac_lev) || /* Check if the PCI RX path is hung */ (fifo_rp == sky2->check.fifo_rp && fifo_lev != 0 && fifo_lev >= sky2->check.fifo_lev))) { netdev_printk(KERN_DEBUG, dev, "hung mac %d:%d fifo %d (%d:%d)\n", mac_lev, mac_rp, fifo_lev, fifo_rp, sky2_read8(hw, Q_ADDR(rxq, Q_WP))); return 1; } else { sky2->check.last = dev->last_rx; sky2->check.mac_rp = mac_rp; sky2->check.mac_lev = mac_lev; sky2->check.fifo_rp = fifo_rp; sky2->check.fifo_lev = fifo_lev; return 0; } } static void sky2_watchdog(unsigned long arg) { struct sky2_hw *hw = (struct sky2_hw *) arg; /* Check for lost IRQ once a second */ if (sky2_read32(hw, B0_ISRC)) { napi_schedule(&hw->napi); } else { int i, active = 0; for (i = 0; i < hw->ports; i++) { struct net_device *dev = hw->dev[i]; if (!netif_running(dev)) continue; ++active; /* For chips with Rx FIFO, check if stuck */ if ((hw->flags & SKY2_HW_RAM_BUFFER) && sky2_rx_hung(dev)) { netdev_info(dev, "receiver hang detected\n"); schedule_work(&hw->restart_work); return; } } if (active == 0) return; } mod_timer(&hw->watchdog_timer, round_jiffies(jiffies + HZ)); } /* Hardware/software error handling */ static void sky2_err_intr(struct sky2_hw *hw, u32 status) { if (net_ratelimit()) dev_warn(&hw->pdev->dev, "error interrupt status=%#x\n", status); if (status & Y2_IS_HW_ERR) sky2_hw_intr(hw); if (status & Y2_IS_IRQ_MAC1) sky2_mac_intr(hw, 0); if (status & Y2_IS_IRQ_MAC2) sky2_mac_intr(hw, 1); if (status & Y2_IS_CHK_RX1) sky2_le_error(hw, 0, Q_R1); if (status & Y2_IS_CHK_RX2) sky2_le_error(hw, 1, Q_R2); if (status & Y2_IS_CHK_TXA1) sky2_le_error(hw, 0, Q_XA1); if (status & Y2_IS_CHK_TXA2) sky2_le_error(hw, 1, Q_XA2); } static int sky2_poll(struct napi_struct *napi, int work_limit) { struct sky2_hw *hw = container_of(napi, struct sky2_hw, napi); u32 status = sky2_read32(hw, B0_Y2_SP_EISR); int work_done = 0; u16 idx; if (unlikely(status & Y2_IS_ERROR)) sky2_err_intr(hw, status); if (status & Y2_IS_IRQ_PHY1) sky2_phy_intr(hw, 0); if (status & Y2_IS_IRQ_PHY2) sky2_phy_intr(hw, 1); if (status & Y2_IS_PHY_QLNK) sky2_qlink_intr(hw); while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) { work_done += sky2_status_intr(hw, work_limit - work_done, idx); if (work_done >= work_limit) goto done; } napi_complete(napi); sky2_read32(hw, B0_Y2_SP_LISR); done: return work_done; } static irqreturn_t sky2_intr(int irq, void *dev_id) { struct sky2_hw *hw = dev_id; u32 status; /* Reading this register masks interrupts as a side effect */ status = sky2_read32(hw, B0_Y2_SP_ISRC2); if (status == 0 || status == ~0) return IRQ_NONE; prefetch(&hw->st_le[hw->st_idx]); napi_schedule(&hw->napi); return 
IRQ_HANDLED; } #ifdef CONFIG_NET_POLL_CONTROLLER static void sky2_netpoll(struct net_device *dev) { struct sky2_port *sky2 = netdev_priv(dev); napi_schedule(&sky2->hw->napi); } #endif /* Chip internal frequency for clock calculations */ static u32 sky2_mhz(const struct sky2_hw *hw) { switch (hw->chip_id) { case CHIP_ID_YUKON_EC: case CHIP_ID_YUKON_EC_U: case CHIP_ID_YUKON_EX: case CHIP_ID_YUKON_SUPR: case CHIP_ID_YUKON_UL_2: case CHIP_ID_YUKON_OPT: return 125; case CHIP_ID_YUKON_FE: return 100; case CHIP_ID_YUKON_FE_P: return 50; case CHIP_ID_YUKON_XL: return 156; default: BUG(); } } static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us) { return sky2_mhz(hw) * us; } static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk) { return clk / sky2_mhz(hw); } static int __devinit sky2_init(struct sky2_hw *hw) { u8 t8; /* Enable all clocks and check for bad PCI access */ sky2_pci_write32(hw, PCI_DEV_REG3, 0); sky2_write8(hw, B0_CTST, CS_RST_CLR); hw->chip_id = sky2_read8(hw, B2_CHIP_ID); hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4; switch (hw->chip_id) { case CHIP_ID_YUKON_XL: hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY; if (hw->chip_rev < CHIP_REV_YU_XL_A2) hw->flags |= SKY2_HW_RSS_BROKEN; break; case CHIP_ID_YUKON_EC_U: hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY | SKY2_HW_ADV_POWER_CTL; break; case CHIP_ID_YUKON_EX: hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY | SKY2_HW_NEW_LE | SKY2_HW_ADV_POWER_CTL; /* New transmit checksum */ if (hw->chip_rev != CHIP_REV_YU_EX_B0) hw->flags |= SKY2_HW_AUTO_TX_SUM; break; case CHIP_ID_YUKON_EC: /* This rev is really old, and requires untested workarounds */ if (hw->chip_rev == CHIP_REV_YU_EC_A1) { dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n"); return -EOPNOTSUPP; } hw->flags = SKY2_HW_GIGABIT | SKY2_HW_RSS_BROKEN; break; case CHIP_ID_YUKON_FE: hw->flags = SKY2_HW_RSS_BROKEN; break; case CHIP_ID_YUKON_FE_P: hw->flags = SKY2_HW_NEWER_PHY | SKY2_HW_NEW_LE | SKY2_HW_AUTO_TX_SUM | SKY2_HW_ADV_POWER_CTL; /* The workaround for status conflicts with VLAN tag detection. 
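* (FE+ A0 only; the SKY2_HW_VLAN_BROKEN flag set just below keeps hardware VLAN acceleration off on that revision.) 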
*/ if (hw->chip_rev == CHIP_REV_YU_FE2_A0) hw->flags |= SKY2_HW_VLAN_BROKEN; break; case CHIP_ID_YUKON_SUPR: hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY | SKY2_HW_NEW_LE | SKY2_HW_AUTO_TX_SUM | SKY2_HW_ADV_POWER_CTL; break; case CHIP_ID_YUKON_UL_2: hw->flags = SKY2_HW_GIGABIT | SKY2_HW_ADV_POWER_CTL; break; case CHIP_ID_YUKON_OPT: hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEW_LE | SKY2_HW_ADV_POWER_CTL; break; default: dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n", hw->chip_id); return -EOPNOTSUPP; } hw->pmd_type = sky2_read8(hw, B2_PMD_TYP); if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P') hw->flags |= SKY2_HW_FIBRE_PHY; hw->ports = 1; t8 = sky2_read8(hw, B2_Y2_HW_RES); if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) { if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC)) ++hw->ports; } if (sky2_read8(hw, B2_E_0)) hw->flags |= SKY2_HW_RAM_BUFFER; return 0; } static void sky2_reset(struct sky2_hw *hw) { struct pci_dev *pdev = hw->pdev; u16 status; int i, cap; u32 hwe_mask = Y2_HWE_ALL_MASK; /* disable ASF */ if (hw->chip_id == CHIP_ID_YUKON_EX || hw->chip_id == CHIP_ID_YUKON_SUPR) { sky2_write32(hw, CPU_WDOG, 0); status = sky2_read16(hw, HCU_CCSR); status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE | HCU_CCSR_UC_STATE_MSK); /* * CPU clock divider shouldn't be used because * - ASF firmware may malfunction * - Yukon-Supreme: Parallel FLASH doesn't support divided clocks */ status &= ~HCU_CCSR_CPU_CLK_DIVIDE_MSK; sky2_write16(hw, HCU_CCSR, status); sky2_write32(hw, CPU_WDOG, 0); } else sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE); /* do a SW reset */ sky2_write8(hw, B0_CTST, CS_RST_SET); sky2_write8(hw, B0_CTST, CS_RST_CLR); /* allow writes to PCI config */ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); /* clear PCI errors, if any */ status = sky2_pci_read16(hw, PCI_STATUS); status |= PCI_STATUS_ERROR_BITS; sky2_pci_write16(hw, PCI_STATUS, status); sky2_write8(hw, B0_CTST, CS_MRST_CLR); cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); if (cap) { sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, 0xfffffffful); /* If the error bit is stuck on, ignore it */ if (sky2_read32(hw, B0_HWE_ISRC) & Y2_IS_PCI_EXP) dev_info(&pdev->dev, "ignoring stuck error report bit\n"); else hwe_mask |= Y2_IS_PCI_EXP; } sky2_power_on(hw); sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); for (i = 0; i < hw->ports; i++) { sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR); if (hw->chip_id == CHIP_ID_YUKON_EX || hw->chip_id == CHIP_ID_YUKON_SUPR) sky2_write16(hw, SK_REG(i, GMAC_CTRL), GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON | GMC_BYP_RETR_ON); } if (hw->chip_id == CHIP_ID_YUKON_SUPR && hw->chip_rev > CHIP_REV_YU_SU_B0) { /* enable MACSec clock gating */ sky2_pci_write32(hw, PCI_DEV_REG3, P_CLK_MACSEC_DIS); } if (hw->chip_id == CHIP_ID_YUKON_OPT) { u16 reg; u32 msk; if (hw->chip_rev == 0) { /* disable PCI-E PHY power down (set PHY reg 0x80, bit 7) */ sky2_write32(hw, Y2_PEX_PHY_DATA, (0x80UL << 16) | (1 << 7)); /* set PHY Link Detect Timer to 1.1 second (11x 100ms) */ reg = 10; } else { /* set PHY Link Detect Timer to 0.4 second (4x 100ms) */ reg = 3; } reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE; /* reset PHY Link Detect */ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); sky2_pci_write16(hw, PSM_CONFIG_REG4, reg | PSM_CONFIG_REG4_RST_PHY_LINK_DETECT); sky2_pci_write16(hw, PSM_CONFIG_REG4, reg); /* enable PHY Quick Link */ msk = sky2_read32(hw, B0_IMSK); msk |= 
Y2_IS_PHY_QLNK; sky2_write32(hw, B0_IMSK, msk); /* check if PSMv2 was running before */ reg = sky2_pci_read16(hw, PSM_CONFIG_REG3); if (reg & PCI_EXP_LNKCTL_ASPMC) { cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); /* restore the PCIe Link Control register */ sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg); } sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */ sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16)); } /* Clear I2C IRQ noise */ sky2_write32(hw, B2_I2C_IRQ, 1); /* turn off hardware timer (unused) */ sky2_write8(hw, B2_TI_CTRL, TIM_STOP); sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ); /* Turn off descriptor polling */ sky2_write32(hw, B28_DPT_CTRL, DPT_STOP); /* Turn off receive timestamp */ sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP); sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); /* enable the Tx Arbiters */ for (i = 0; i < hw->ports; i++) sky2_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB); /* Initialize ram interface */ for (i = 0; i < hw->ports; i++) { sky2_write8(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR); sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53); sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53); sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS1), SK_RI_TO_53); sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R1), SK_RI_TO_53); sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA1), SK_RI_TO_53); sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS1), SK_RI_TO_53); sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R2), SK_RI_TO_53); sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA2), SK_RI_TO_53); sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS2), SK_RI_TO_53); sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R2), SK_RI_TO_53); sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA2), SK_RI_TO_53); sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53); } sky2_write32(hw, B0_HWE_IMSK, hwe_mask); for (i = 0; i < hw->ports; i++) sky2_gmac_reset(hw, i); memset(hw->st_le, 0, hw->st_size * sizeof(struct sky2_status_le)); hw->st_idx = 0; sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET); sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR); sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma); sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32); /* Set the list last index */ sky2_write16(hw, STAT_LAST_IDX, hw->st_size - 1); sky2_write16(hw, STAT_TX_IDX_TH, 10); sky2_write8(hw, STAT_FIFO_WM, 16); /* set Status-FIFO ISR watermark */ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0) sky2_write8(hw, STAT_FIFO_ISR_WM, 4); else sky2_write8(hw, STAT_FIFO_ISR_WM, 16); sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000)); sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20)); sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100)); /* enable status unit */ sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON); sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START); sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START); sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START); } /* Take device down (offline). * Equivalent to doing dev_stop() but this does not * inform upper layers of the transition. 
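* Paired with sky2_reattach() below, which brings the interface back up if it was running. 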
*/ static void sky2_detach(struct net_device *dev) { if (netif_running(dev)) { netif_tx_lock(dev); netif_device_detach(dev); /* stop txq */ netif_tx_unlock(dev); sky2_down(dev); } } /* Bring device back after doing sky2_detach */ static int sky2_reattach(struct net_device *dev) { int err = 0; if (netif_running(dev)) { err = sky2_up(dev); if (err) { netdev_info(dev, "could not restart %d\n", err); dev_close(dev); } else { netif_device_attach(dev); sky2_set_multicast(dev); } } return err; } static void sky2_all_down(struct sky2_hw *hw) { int i; sky2_read32(hw, B0_IMSK); sky2_write32(hw, B0_IMSK, 0); synchronize_irq(hw->pdev->irq); napi_disable(&hw->napi); for (i = 0; i < hw->ports; i++) { struct net_device *dev = hw->dev[i]; struct sky2_port *sky2 = netdev_priv(dev); if (!netif_running(dev)) continue; netif_carrier_off(dev); netif_tx_disable(dev); sky2_hw_down(sky2); } } static void sky2_all_up(struct sky2_hw *hw) { u32 imask = Y2_IS_BASE; int i; for (i = 0; i < hw->ports; i++) { struct net_device *dev = hw->dev[i]; struct sky2_port *sky2 = netdev_priv(dev); if (!netif_running(dev)) continue; sky2_hw_up(sky2); sky2_set_multicast(dev); imask |= portirq_msk[i]; netif_wake_queue(dev); } sky2_write32(hw, B0_IMSK, imask); sky2_read32(hw, B0_IMSK); sky2_read32(hw, B0_Y2_SP_LISR); napi_enable(&hw->napi); } static void sky2_restart(struct work_struct *work) { struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work); rtnl_lock(); sky2_all_down(hw); sky2_reset(hw); sky2_all_up(hw); rtnl_unlock(); } static inline u8 sky2_wol_supported(const struct sky2_hw *hw) { return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0; } static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { const struct sky2_port *sky2 = netdev_priv(dev); wol->supported = sky2_wol_supported(sky2->hw); wol->wolopts = sky2->wol; } static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; bool enable_wakeup = false; int i; if ((wol->wolopts & ~sky2_wol_supported(sky2->hw)) || !device_can_wakeup(&hw->pdev->dev)) return -EOPNOTSUPP; sky2->wol = wol->wolopts; for (i = 0; i < hw->ports; i++) { struct net_device *dev = hw->dev[i]; struct sky2_port *sky2 = netdev_priv(dev); if (sky2->wol) enable_wakeup = true; } device_set_wakeup_enable(&hw->pdev->dev, enable_wakeup); return 0; } static u32 sky2_supported_modes(const struct sky2_hw *hw) { if (sky2_is_copper(hw)) { u32 modes = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full; if (hw->flags & SKY2_HW_GIGABIT) modes |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; return modes; } else return SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; } static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; ecmd->transceiver = XCVR_INTERNAL; ecmd->supported = sky2_supported_modes(hw); ecmd->phy_address = PHY_ADDR_MARV; if (sky2_is_copper(hw)) { ecmd->port = PORT_TP; ethtool_cmd_speed_set(ecmd, sky2->speed); ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_TP; } else { ethtool_cmd_speed_set(ecmd, SPEED_1000); ecmd->port = PORT_FIBRE; ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE; } ecmd->advertising = sky2->advertising; ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED) ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; ecmd->duplex = sky2->duplex; return 0; } static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) { struct sky2_port *sky2 = netdev_priv(dev); const struct sky2_hw *hw = sky2->hw; u32 supported = sky2_supported_modes(hw); if (ecmd->autoneg == AUTONEG_ENABLE) { if (ecmd->advertising & ~supported) return -EINVAL; if (sky2_is_copper(hw)) sky2->advertising = ecmd->advertising | ADVERTISED_TP | ADVERTISED_Autoneg; else sky2->advertising = ecmd->advertising | ADVERTISED_FIBRE | ADVERTISED_Autoneg; sky2->flags |= SKY2_FLAG_AUTO_SPEED; sky2->duplex = -1; sky2->speed = -1; } else { u32 setting; u32 speed = ethtool_cmd_speed(ecmd); switch (speed) { case SPEED_1000: if (ecmd->duplex == DUPLEX_FULL) setting = SUPPORTED_1000baseT_Full; else if (ecmd->duplex == DUPLEX_HALF) setting = SUPPORTED_1000baseT_Half; else return -EINVAL; break; case SPEED_100: if (ecmd->duplex == DUPLEX_FULL) setting = SUPPORTED_100baseT_Full; else if (ecmd->duplex == DUPLEX_HALF) setting = SUPPORTED_100baseT_Half; else return -EINVAL; break; case SPEED_10: if (ecmd->duplex == DUPLEX_FULL) setting = SUPPORTED_10baseT_Full; else if (ecmd->duplex == DUPLEX_HALF) setting = SUPPORTED_10baseT_Half; else return -EINVAL; break; default: return -EINVAL; } if ((setting & supported) == 0) return -EINVAL; sky2->speed = speed; sky2->duplex = ecmd->duplex; sky2->flags &= ~SKY2_FLAG_AUTO_SPEED; } if (netif_running(dev)) { sky2_phy_reinit(sky2); sky2_set_multicast(dev); } return 0; } static void sky2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct sky2_port *sky2 = netdev_priv(dev); strcpy(info->driver, DRV_NAME); strcpy(info->version, DRV_VERSION); strcpy(info->fw_version, "N/A"); strcpy(info->bus_info, pci_name(sky2->hw->pdev)); } static const struct sky2_stat { char name[ETH_GSTRING_LEN]; u16 offset; } sky2_stats[] = { { "tx_bytes", GM_TXO_OK_HI }, { "rx_bytes", GM_RXO_OK_HI }, { "tx_broadcast", GM_TXF_BC_OK }, { "rx_broadcast", GM_RXF_BC_OK }, { "tx_multicast", GM_TXF_MC_OK }, { "rx_multicast", GM_RXF_MC_OK }, { "tx_unicast", GM_TXF_UC_OK }, { "rx_unicast", GM_RXF_UC_OK }, { "tx_mac_pause", GM_TXF_MPAUSE }, { "rx_mac_pause", GM_RXF_MPAUSE }, { "collisions", GM_TXF_COL }, { "late_collision",GM_TXF_LAT_COL }, { "aborted", GM_TXF_ABO_COL }, { "single_collisions", GM_TXF_SNG_COL }, { "multi_collisions", GM_TXF_MUL_COL }, { "rx_short", GM_RXF_SHT }, { "rx_runt", GM_RXE_FRAG }, { "rx_64_byte_packets", GM_RXF_64B }, { "rx_65_to_127_byte_packets", GM_RXF_127B }, { "rx_128_to_255_byte_packets", GM_RXF_255B }, { "rx_256_to_511_byte_packets", GM_RXF_511B }, { "rx_512_to_1023_byte_packets", GM_RXF_1023B }, { "rx_1024_to_1518_byte_packets", GM_RXF_1518B }, { "rx_1518_to_max_byte_packets", GM_RXF_MAX_SZ }, { "rx_too_long", GM_RXF_LNG_ERR }, { "rx_fifo_overflow", GM_RXE_FIFO_OV }, { "rx_jabber", GM_RXF_JAB_PKT }, { "rx_fcs_error", GM_RXF_FCS_ERR }, { "tx_64_byte_packets", GM_TXF_64B }, { "tx_65_to_127_byte_packets", GM_TXF_127B }, { "tx_128_to_255_byte_packets", GM_TXF_255B }, { "tx_256_to_511_byte_packets", GM_TXF_511B }, { "tx_512_to_1023_byte_packets", GM_TXF_1023B }, { "tx_1024_to_1518_byte_packets", GM_TXF_1518B }, { "tx_1519_to_max_byte_packets", GM_TXF_MAX_SZ }, { "tx_fifo_underrun", GM_TXE_FIFO_UR }, }; static u32 sky2_get_msglevel(struct net_device *netdev) { struct sky2_port *sky2 = netdev_priv(netdev); return sky2->msg_enable; } static int sky2_nway_reset(struct net_device *dev) { struct sky2_port *sky2 = netdev_priv(dev); if (!netif_running(dev) || 
!(sky2->flags & SKY2_FLAG_AUTO_SPEED)) return -EINVAL; sky2_phy_reinit(sky2); sky2_set_multicast(dev); return 0; } static void sky2_phy_stats(struct sky2_port *sky2, u64 * data, unsigned count) { struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; int i; data[0] = get_stats64(hw, port, GM_TXO_OK_LO); data[1] = get_stats64(hw, port, GM_RXO_OK_LO); for (i = 2; i < count; i++) data[i] = get_stats32(hw, port, sky2_stats[i].offset); } static void sky2_set_msglevel(struct net_device *netdev, u32 value) { struct sky2_port *sky2 = netdev_priv(netdev); sky2->msg_enable = value; } static int sky2_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(sky2_stats); default: return -EOPNOTSUPP; } } static void sky2_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 * data) { struct sky2_port *sky2 = netdev_priv(dev); sky2_phy_stats(sky2, data, ARRAY_SIZE(sky2_stats)); } static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data) { int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ARRAY_SIZE(sky2_stats); i++) memcpy(data + i * ETH_GSTRING_LEN, sky2_stats[i].name, ETH_GSTRING_LEN); break; } } static int sky2_set_mac_address(struct net_device *dev, void *p) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; const struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); memcpy_toio(hw->regs + B2_MAC_1 + port * 8, dev->dev_addr, ETH_ALEN); memcpy_toio(hw->regs + B2_MAC_2 + port * 8, dev->dev_addr, ETH_ALEN); /* virtual address for data */ gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr); /* physical address: used for pause frames */ gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr); return 0; } static inline void sky2_add_filter(u8 filter[8], const u8 *addr) { u32 bit; bit = ether_crc(ETH_ALEN, addr) & 63; filter[bit >> 3] |= 1 << (bit & 7); } static void sky2_set_multicast(struct net_device *dev) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; struct netdev_hw_addr *ha; u16 reg; u8 filter[8]; int rx_pause; static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 }; rx_pause = (sky2->flow_status == FC_RX || sky2->flow_status == FC_BOTH); memset(filter, 0, sizeof(filter)); reg = gma_read16(hw, port, GM_RX_CTRL); reg |= GM_RXCR_UCF_ENA; if (dev->flags & IFF_PROMISC) /* promiscuous */ reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); else if (dev->flags & IFF_ALLMULTI) memset(filter, 0xff, sizeof(filter)); else if (netdev_mc_empty(dev) && !rx_pause) reg &= ~GM_RXCR_MCF_ENA; else { reg |= GM_RXCR_MCF_ENA; if (rx_pause) sky2_add_filter(filter, pause_mc_addr); netdev_for_each_mc_addr(ha, dev) sky2_add_filter(filter, ha->addr); } gma_write16(hw, port, GM_MC_ADDR_H1, (u16) filter[0] | ((u16) filter[1] << 8)); gma_write16(hw, port, GM_MC_ADDR_H2, (u16) filter[2] | ((u16) filter[3] << 8)); gma_write16(hw, port, GM_MC_ADDR_H3, (u16) filter[4] | ((u16) filter[5] << 8)); gma_write16(hw, port, GM_MC_ADDR_H4, (u16) filter[6] | ((u16) filter[7] << 8)); gma_write16(hw, port, GM_RX_CTRL, reg); } static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; unsigned int start; u64 _bytes, _packets; do { start = u64_stats_fetch_begin_bh(&sky2->rx_stats.syncp); 
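/* snapshot the counters here; the loop retries below if a writer updated syncp concurrently */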
_bytes = sky2->rx_stats.bytes; _packets = sky2->rx_stats.packets; } while (u64_stats_fetch_retry_bh(&sky2->rx_stats.syncp, start)); stats->rx_packets = _packets; stats->rx_bytes = _bytes; do { start = u64_stats_fetch_begin_bh(&sky2->tx_stats.syncp); _bytes = sky2->tx_stats.bytes; _packets = sky2->tx_stats.packets; } while (u64_stats_fetch_retry_bh(&sky2->tx_stats.syncp, start)); stats->tx_packets = _packets; stats->tx_bytes = _bytes; stats->multicast = get_stats32(hw, port, GM_RXF_MC_OK) + get_stats32(hw, port, GM_RXF_BC_OK); stats->collisions = get_stats32(hw, port, GM_TXF_COL); stats->rx_length_errors = get_stats32(hw, port, GM_RXF_LNG_ERR); stats->rx_crc_errors = get_stats32(hw, port, GM_RXF_FCS_ERR); stats->rx_frame_errors = get_stats32(hw, port, GM_RXF_SHT) + get_stats32(hw, port, GM_RXE_FRAG); stats->rx_over_errors = get_stats32(hw, port, GM_RXE_FIFO_OV); stats->rx_dropped = dev->stats.rx_dropped; stats->rx_fifo_errors = dev->stats.rx_fifo_errors; stats->tx_fifo_errors = dev->stats.tx_fifo_errors; return stats; } /* Can have one global because blinking is controlled by * ethtool and that is always under RTNL mutex */ static void sky2_led(struct sky2_port *sky2, enum led_mode mode) { struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; spin_lock_bh(&sky2->phy_lock); if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX || hw->chip_id == CHIP_ID_YUKON_SUPR) { u16 pg; pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); switch (mode) { case MO_LED_OFF: gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, PHY_M_LEDC_LOS_CTRL(8) | PHY_M_LEDC_INIT_CTRL(8) | PHY_M_LEDC_STA1_CTRL(8) | PHY_M_LEDC_STA0_CTRL(8)); break; case MO_LED_ON: gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, PHY_M_LEDC_LOS_CTRL(9) | PHY_M_LEDC_INIT_CTRL(9) | PHY_M_LEDC_STA1_CTRL(9) | PHY_M_LEDC_STA0_CTRL(9)); break; case MO_LED_BLINK: gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, PHY_M_LEDC_LOS_CTRL(0xa) | PHY_M_LEDC_INIT_CTRL(0xa) | PHY_M_LEDC_STA1_CTRL(0xa) | PHY_M_LEDC_STA0_CTRL(0xa)); break; case MO_LED_NORM: gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, PHY_M_LEDC_LOS_CTRL(1) | PHY_M_LEDC_INIT_CTRL(8) | PHY_M_LEDC_STA1_CTRL(7) | PHY_M_LEDC_STA0_CTRL(7)); } gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); } else gm_phy_write(hw, port, PHY_MARV_LED_OVER, PHY_M_LED_MO_DUP(mode) | PHY_M_LED_MO_10(mode) | PHY_M_LED_MO_100(mode) | PHY_M_LED_MO_1000(mode) | PHY_M_LED_MO_RX(mode) | PHY_M_LED_MO_TX(mode)); spin_unlock_bh(&sky2->phy_lock); } /* blink LEDs for finding board */ static int sky2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) { struct sky2_port *sky2 = netdev_priv(dev); switch (state) { case ETHTOOL_ID_ACTIVE: return 1; /* cycle on/off once per second */ case ETHTOOL_ID_INACTIVE: sky2_led(sky2, MO_LED_NORM); break; case ETHTOOL_ID_ON: sky2_led(sky2, MO_LED_ON); break; case ETHTOOL_ID_OFF: sky2_led(sky2, MO_LED_OFF); break; } return 0; } static void sky2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *ecmd) { struct sky2_port *sky2 = netdev_priv(dev); switch (sky2->flow_mode) { case FC_NONE: ecmd->tx_pause = ecmd->rx_pause = 0; break; case FC_TX: ecmd->tx_pause = 1, ecmd->rx_pause = 0; break; case FC_RX: ecmd->tx_pause = 0, ecmd->rx_pause = 1; break; case FC_BOTH: ecmd->tx_pause = ecmd->rx_pause = 1; } ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_PAUSE) ?
AUTONEG_ENABLE : AUTONEG_DISABLE; } static int sky2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *ecmd) { struct sky2_port *sky2 = netdev_priv(dev); if (ecmd->autoneg == AUTONEG_ENABLE) sky2->flags |= SKY2_FLAG_AUTO_PAUSE; else sky2->flags &= ~SKY2_FLAG_AUTO_PAUSE; sky2->flow_mode = sky2_flow(ecmd->rx_pause, ecmd->tx_pause); if (netif_running(dev)) sky2_phy_reinit(sky2); return 0; } static int sky2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ecmd) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_STOP) ecmd->tx_coalesce_usecs = 0; else { u32 clks = sky2_read32(hw, STAT_TX_TIMER_INI); ecmd->tx_coalesce_usecs = sky2_clk2us(hw, clks); } ecmd->tx_max_coalesced_frames = sky2_read16(hw, STAT_TX_IDX_TH); if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_STOP) ecmd->rx_coalesce_usecs = 0; else { u32 clks = sky2_read32(hw, STAT_LEV_TIMER_INI); ecmd->rx_coalesce_usecs = sky2_clk2us(hw, clks); } ecmd->rx_max_coalesced_frames = sky2_read8(hw, STAT_FIFO_WM); if (sky2_read8(hw, STAT_ISR_TIMER_CTRL) == TIM_STOP) ecmd->rx_coalesce_usecs_irq = 0; else { u32 clks = sky2_read32(hw, STAT_ISR_TIMER_INI); ecmd->rx_coalesce_usecs_irq = sky2_clk2us(hw, clks); } ecmd->rx_max_coalesced_frames_irq = sky2_read8(hw, STAT_FIFO_ISR_WM); return 0; } /* Note: this affects both ports */ static int sky2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ecmd) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; const u32 tmax = sky2_clk2us(hw, 0x0ffffff); if (ecmd->tx_coalesce_usecs > tmax || ecmd->rx_coalesce_usecs > tmax || ecmd->rx_coalesce_usecs_irq > tmax) return -EINVAL; if (ecmd->tx_max_coalesced_frames >= sky2->tx_ring_size-1) return -EINVAL; if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING) return -EINVAL; if (ecmd->rx_max_coalesced_frames_irq > RX_MAX_PENDING) return -EINVAL; if (ecmd->tx_coalesce_usecs == 0) sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP); else { sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, ecmd->tx_coalesce_usecs)); sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START); } sky2_write16(hw, STAT_TX_IDX_TH, ecmd->tx_max_coalesced_frames); if (ecmd->rx_coalesce_usecs == 0) sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP); else { sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, ecmd->rx_coalesce_usecs)); sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START); } sky2_write8(hw, STAT_FIFO_WM, ecmd->rx_max_coalesced_frames); if (ecmd->rx_coalesce_usecs_irq == 0) sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_STOP); else { sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, ecmd->rx_coalesce_usecs_irq)); sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START); } sky2_write8(hw, STAT_FIFO_ISR_WM, ecmd->rx_max_coalesced_frames_irq); return 0; } static void sky2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) { struct sky2_port *sky2 = netdev_priv(dev); ering->rx_max_pending = RX_MAX_PENDING; ering->rx_mini_max_pending = 0; ering->rx_jumbo_max_pending = 0; ering->tx_max_pending = TX_MAX_PENDING; ering->rx_pending = sky2->rx_pending; ering->rx_mini_pending = 0; ering->rx_jumbo_pending = 0; ering->tx_pending = sky2->tx_pending; } static int sky2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) { struct sky2_port *sky2 = netdev_priv(dev); if (ering->rx_pending > RX_MAX_PENDING || ering->rx_pending < 8 || ering->tx_pending < TX_MIN_PENDING || ering->tx_pending > TX_MAX_PENDING) return -EINVAL; sky2_detach(dev); sky2->rx_pending =
ering->rx_pending; sky2->tx_pending = ering->tx_pending; sky2->tx_ring_size = roundup_pow_of_two(sky2->tx_pending+1); return sky2_reattach(dev); } static int sky2_get_regs_len(struct net_device *dev) { return 0x4000; } static int sky2_reg_access_ok(struct sky2_hw *hw, unsigned int b) { /* This complicated switch statement is to make sure we * only access regions that are unreserved. * Some blocks are only valid on dual port cards. */ switch (b) { /* second port */ case 5: /* Tx Arbiter 2 */ case 9: /* RX2 */ case 14 ... 15: /* TX2 */ case 17: case 19: /* Ram Buffer 2 */ case 22 ... 23: /* Tx Ram Buffer 2 */ case 25: /* Rx MAC Fifo 2 */ case 27: /* Tx MAC Fifo 2 */ case 31: /* GPHY 2 */ case 40 ... 47: /* Pattern Ram 2 */ case 52: case 54: /* TCP Segmentation 2 */ case 112 ... 116: /* GMAC 2 */ return hw->ports > 1; case 0: /* Control */ case 2: /* Mac address */ case 4: /* Tx Arbiter 1 */ case 7: /* PCI express reg */ case 8: /* RX1 */ case 12 ... 13: /* TX1 */ case 16: case 18:/* Rx Ram Buffer 1 */ case 20 ... 21: /* Tx Ram Buffer 1 */ case 24: /* Rx MAC Fifo 1 */ case 26: /* Tx MAC Fifo 1 */ case 28 ... 29: /* Descriptor and status unit */ case 30: /* GPHY 1*/ case 32 ... 39: /* Pattern Ram 1 */ case 48: case 50: /* TCP Segmentation 1 */ case 56 ... 60: /* PCI space */ case 80 ... 84: /* GMAC 1 */ return 1; default: return 0; } } /* * Returns copy of control register region * Note: ethtool_get_regs always provides full size (16k) buffer */ static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) { const struct sky2_port *sky2 = netdev_priv(dev); const void __iomem *io = sky2->hw->regs; unsigned int b; regs->version = 1; for (b = 0; b < 128; b++) { /* skip poisonous diagnostic ram region in block 3 */ if (b == 3) memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10); else if (sky2_reg_access_ok(sky2->hw, b)) memcpy_fromio(p, io, 128); else memset(p, 0, 128); p += 128; io += 128; } } static int sky2_get_eeprom_len(struct net_device *dev) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; u16 reg2; reg2 = sky2_pci_read16(hw, PCI_DEV_REG2); return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); } static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy) { unsigned long start = jiffies; while ( (sky2_pci_read16(hw, cap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F) == busy) { /* Can take up to 10.6 ms for write */ if (time_after(jiffies, start + HZ/4)) { dev_err(&hw->pdev->dev, "VPD cycle timed out\n"); return -ETIMEDOUT; } mdelay(1); } return 0; } static int sky2_vpd_read(struct sky2_hw *hw, int cap, void *data, u16 offset, size_t length) { int rc = 0; while (length > 0) { u32 val; sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset); rc = sky2_vpd_wait(hw, cap, 0); if (rc) break; val = sky2_pci_read32(hw, cap + PCI_VPD_DATA); memcpy(data, &val, min(sizeof(val), length)); offset += sizeof(u32); data += sizeof(u32); length -= sizeof(u32); } return rc; } static int sky2_vpd_write(struct sky2_hw *hw, int cap, const void *data, u16 offset, unsigned int length) { unsigned int i; int rc = 0; for (i = 0; i < length; i += sizeof(u32)) { u32 val = *(u32 *)(data + i); sky2_pci_write32(hw, cap + PCI_VPD_DATA, val); sky2_pci_write32(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F); rc = sky2_vpd_wait(hw, cap, PCI_VPD_ADDR_F); if (rc) break; } return rc; } static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) { struct sky2_port *sky2 = netdev_priv(dev); int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD); if (!cap)
return -EINVAL; eeprom->magic = SKY2_EEPROM_MAGIC; return sky2_vpd_read(sky2->hw, cap, data, eeprom->offset, eeprom->len); } static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) { struct sky2_port *sky2 = netdev_priv(dev); int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD); if (!cap) return -EINVAL; if (eeprom->magic != SKY2_EEPROM_MAGIC) return -EINVAL; /* Partial writes not supported */ if ((eeprom->offset & 3) || (eeprom->len & 3)) return -EINVAL; return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len); } static u32 sky2_fix_features(struct net_device *dev, u32 features) { const struct sky2_port *sky2 = netdev_priv(dev); const struct sky2_hw *hw = sky2->hw; /* In order to do Jumbo packets on these chips, need to turn off the * transmit store/forward. Therefore checksum offload won't work. */ if (dev->mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U) features &= ~(NETIF_F_TSO|NETIF_F_SG|NETIF_F_ALL_CSUM); return features; } static int sky2_set_features(struct net_device *dev, u32 features) { struct sky2_port *sky2 = netdev_priv(dev); u32 changed = dev->features ^ features; if (changed & NETIF_F_RXCSUM) { u32 on = features & NETIF_F_RXCSUM; sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); } if (changed & NETIF_F_RXHASH) rx_set_rss(dev, features); if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)) sky2_vlan_mode(dev, features); return 0; } static const struct ethtool_ops sky2_ethtool_ops = { .get_settings = sky2_get_settings, .set_settings = sky2_set_settings, .get_drvinfo = sky2_get_drvinfo, .get_wol = sky2_get_wol, .set_wol = sky2_set_wol, .get_msglevel = sky2_get_msglevel, .set_msglevel = sky2_set_msglevel, .nway_reset = sky2_nway_reset, .get_regs_len = sky2_get_regs_len, .get_regs = sky2_get_regs, .get_link = ethtool_op_get_link, .get_eeprom_len = sky2_get_eeprom_len, .get_eeprom = sky2_get_eeprom, .set_eeprom = sky2_set_eeprom, .get_strings = sky2_get_strings, .get_coalesce = sky2_get_coalesce, .set_coalesce = sky2_set_coalesce, .get_ringparam = sky2_get_ringparam, .set_ringparam = sky2_set_ringparam, .get_pauseparam = sky2_get_pauseparam, .set_pauseparam = sky2_set_pauseparam, .set_phys_id = sky2_set_phys_id, .get_sset_count = sky2_get_sset_count, .get_ethtool_stats = sky2_get_ethtool_stats, }; #ifdef CONFIG_SKY2_DEBUG static struct dentry *sky2_debug; /* * Read and parse the first part of Vital Product Data */ #define VPD_SIZE 128 #define VPD_MAGIC 0x82 static const struct vpd_tag { char tag[2]; char *label; } vpd_tags[] = { { "PN", "Part Number" }, { "EC", "Engineering Level" }, { "MN", "Manufacturer" }, { "SN", "Serial Number" }, { "YA", "Asset Tag" }, { "VL", "First Error Log Message" }, { "VF", "Second Error Log Message" }, { "VB", "Boot Agent ROM Configuration" }, { "VE", "EFI UNDI Configuration" }, }; static void sky2_show_vpd(struct seq_file *seq, struct sky2_hw *hw) { size_t vpd_size; loff_t offs; u8 len; unsigned char *buf; u16 reg2; reg2 = sky2_pci_read16(hw, PCI_DEV_REG2); vpd_size = 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); seq_printf(seq, "%s Product Data\n", pci_name(hw->pdev)); buf = kmalloc(vpd_size, GFP_KERNEL); if (!buf) { seq_puts(seq, "no memory!\n"); return; } if (pci_read_vpd(hw->pdev, 0, vpd_size, buf) < 0) { seq_puts(seq, "VPD read failed\n"); goto out; } if (buf[0] != VPD_MAGIC) { seq_printf(seq, "VPD tag mismatch: %#x\n", buf[0]); goto out; } len = buf[1]; if (len == 0 || len > vpd_size - 4) { seq_printf(seq, "Invalid id 
length: %d\n", len); goto out; } seq_printf(seq, "%.*s\n", len, buf + 3); offs = len + 3; while (offs < vpd_size - 4) { int i; if (!memcmp("RW", buf + offs, 2)) /* end marker */ break; len = buf[offs + 2]; if (offs + len + 3 >= vpd_size) break; for (i = 0; i < ARRAY_SIZE(vpd_tags); i++) { if (!memcmp(vpd_tags[i].tag, buf + offs, 2)) { seq_printf(seq, " %s: %.*s\n", vpd_tags[i].label, len, buf + offs + 3); break; } } offs += len + 3; } out: kfree(buf); } static int sky2_debug_show(struct seq_file *seq, void *v) { struct net_device *dev = seq->private; const struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; unsigned idx, last; int sop; sky2_show_vpd(seq, hw); seq_printf(seq, "\nIRQ src=%x mask=%x control=%x\n", sky2_read32(hw, B0_ISRC), sky2_read32(hw, B0_IMSK), sky2_read32(hw, B0_Y2_SP_ICR)); if (!netif_running(dev)) { seq_printf(seq, "network not running\n"); return 0; } napi_disable(&hw->napi); last = sky2_read16(hw, STAT_PUT_IDX); seq_printf(seq, "Status ring %u\n", hw->st_size); if (hw->st_idx == last) seq_puts(seq, "Status ring (empty)\n"); else { seq_puts(seq, "Status ring\n"); for (idx = hw->st_idx; idx != last && idx < hw->st_size; idx = RING_NEXT(idx, hw->st_size)) { const struct sky2_status_le *le = hw->st_le + idx; seq_printf(seq, "[%d] %#x %d %#x\n", idx, le->opcode, le->length, le->status); } seq_puts(seq, "\n"); } seq_printf(seq, "Tx ring pending=%u...%u report=%d done=%d\n", sky2->tx_cons, sky2->tx_prod, sky2_read16(hw, port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX), sky2_read16(hw, Q_ADDR(txqaddr[port], Q_DONE))); /* Dump contents of tx ring */ sop = 1; for (idx = sky2->tx_next; idx != sky2->tx_prod && idx < sky2->tx_ring_size; idx = RING_NEXT(idx, sky2->tx_ring_size)) { const struct sky2_tx_le *le = sky2->tx_le + idx; u32 a = le32_to_cpu(le->addr); if (sop) seq_printf(seq, "%u:", idx); sop = 0; switch (le->opcode & ~HW_OWNER) { case OP_ADDR64: seq_printf(seq, " %#x:", a); break; case OP_LRGLEN: seq_printf(seq, " mtu=%d", a); break; case OP_VLAN: seq_printf(seq, " vlan=%d", be16_to_cpu(le->length)); break; case OP_TCPLISW: seq_printf(seq, " csum=%#x", a); break; case OP_LARGESEND: seq_printf(seq, " tso=%#x(%d)", a, le16_to_cpu(le->length)); break; case OP_PACKET: seq_printf(seq, " %#x(%d)", a, le16_to_cpu(le->length)); break; case OP_BUFFER: seq_printf(seq, " frag=%#x(%d)", a, le16_to_cpu(le->length)); break; default: seq_printf(seq, " op=%#x,%#x(%d)", le->opcode, a, le16_to_cpu(le->length)); } if (le->ctrl & EOP) { seq_putc(seq, '\n'); sop = 1; } } seq_printf(seq, "\nRx ring hw get=%d put=%d last=%d\n", sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_GET_IDX)), sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_PUT_IDX)), sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_LAST_IDX))); sky2_read32(hw, B0_Y2_SP_LISR); napi_enable(&hw->napi); return 0; } static int sky2_debug_open(struct inode *inode, struct file *file) { return single_open(file, sky2_debug_show, inode->i_private); } static const struct file_operations sky2_debug_fops = { .owner = THIS_MODULE, .open = sky2_debug_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /* * Use network device events to create/remove/rename * debugfs file entries */ static int sky2_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = ptr; struct sky2_port *sky2 = netdev_priv(dev); if (dev->netdev_ops->ndo_open != sky2_up || !sky2_debug) return NOTIFY_DONE; switch (event) { case NETDEV_CHANGENAME: if 
(sky2->debugfs) { sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs, sky2_debug, dev->name); } break; case NETDEV_GOING_DOWN: if (sky2->debugfs) { netdev_printk(KERN_DEBUG, dev, "remove debugfs\n"); debugfs_remove(sky2->debugfs); sky2->debugfs = NULL; } break; case NETDEV_UP: sky2->debugfs = debugfs_create_file(dev->name, S_IRUGO, sky2_debug, dev, &sky2_debug_fops); if (IS_ERR(sky2->debugfs)) sky2->debugfs = NULL; } return NOTIFY_DONE; } static struct notifier_block sky2_notifier = { .notifier_call = sky2_device_event, }; static __init void sky2_debug_init(void) { struct dentry *ent; ent = debugfs_create_dir("sky2", NULL); if (!ent || IS_ERR(ent)) return; sky2_debug = ent; register_netdevice_notifier(&sky2_notifier); } static __exit void sky2_debug_cleanup(void) { if (sky2_debug) { unregister_netdevice_notifier(&sky2_notifier); debugfs_remove(sky2_debug); sky2_debug = NULL; } } #else #define sky2_debug_init() #define sky2_debug_cleanup() #endif /* Two copies of network device operations to handle special case of not allowing netpoll on second port */ static const struct net_device_ops sky2_netdev_ops[2] = { { .ndo_open = sky2_up, .ndo_stop = sky2_down, .ndo_start_xmit = sky2_xmit_frame, .ndo_do_ioctl = sky2_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = sky2_set_mac_address, .ndo_set_multicast_list = sky2_set_multicast, .ndo_change_mtu = sky2_change_mtu, .ndo_fix_features = sky2_fix_features, .ndo_set_features = sky2_set_features, .ndo_tx_timeout = sky2_tx_timeout, .ndo_get_stats64 = sky2_get_stats, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = sky2_netpoll, #endif }, { .ndo_open = sky2_up, .ndo_stop = sky2_down, .ndo_start_xmit = sky2_xmit_frame, .ndo_do_ioctl = sky2_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = sky2_set_mac_address, .ndo_set_multicast_list = sky2_set_multicast, .ndo_change_mtu = sky2_change_mtu, .ndo_fix_features = sky2_fix_features, .ndo_set_features = sky2_set_features, .ndo_tx_timeout = sky2_tx_timeout, .ndo_get_stats64 = sky2_get_stats, }, }; /* Initialize network device */ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port, int highmem, int wol) { struct sky2_port *sky2; struct net_device *dev = alloc_etherdev(sizeof(*sky2)); if (!dev) { dev_err(&hw->pdev->dev, "etherdev alloc failed\n"); return NULL; } SET_NETDEV_DEV(dev, &hw->pdev->dev); dev->irq = hw->pdev->irq; SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops); dev->watchdog_timeo = TX_WATCHDOG; dev->netdev_ops = &sky2_netdev_ops[port]; sky2 = netdev_priv(dev); sky2->netdev = dev; sky2->hw = hw; sky2->msg_enable = netif_msg_init(debug, default_msg); /* Auto speed and flow control */ sky2->flags = SKY2_FLAG_AUTO_SPEED | SKY2_FLAG_AUTO_PAUSE; if (hw->chip_id != CHIP_ID_YUKON_XL) dev->hw_features |= NETIF_F_RXCSUM; sky2->flow_mode = FC_BOTH; sky2->duplex = -1; sky2->speed = -1; sky2->advertising = sky2_supported_modes(hw); sky2->wol = wol; spin_lock_init(&sky2->phy_lock); sky2->tx_pending = TX_DEF_PENDING; sky2->tx_ring_size = roundup_pow_of_two(TX_DEF_PENDING+1); sky2->rx_pending = RX_DEF_PENDING; hw->dev[port] = dev; sky2->port = port; dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO; if (highmem) dev->features |= NETIF_F_HIGHDMA; /* Enable receive hashing unless hardware is known broken */ if (!(hw->flags & SKY2_HW_RSS_BROKEN)) dev->hw_features |= NETIF_F_RXHASH; if (!(hw->flags & SKY2_HW_VLAN_BROKEN)) { dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; dev->vlan_features |= SKY2_VLAN_OFFLOADS; } 
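/* everything advertised in hw_features starts out enabled; ethtool can later toggle individual bits through sky2_set_features() */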
dev->features |= dev->hw_features; /* read the mac address */ memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN); memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); return dev; } static void __devinit sky2_show_addr(struct net_device *dev) { const struct sky2_port *sky2 = netdev_priv(dev); netif_info(sky2, probe, dev, "addr %pM\n", dev->dev_addr); } /* Handle software interrupt used during MSI test */ static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id) { struct sky2_hw *hw = dev_id; u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2); if (status == 0) return IRQ_NONE; if (status & Y2_IS_IRQ_SW) { hw->flags |= SKY2_HW_USE_MSI; wake_up(&hw->msi_wait); sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ); } sky2_write32(hw, B0_Y2_SP_ICR, 2); return IRQ_HANDLED; } /* Test interrupt path by forcing a software IRQ */ static int __devinit sky2_test_msi(struct sky2_hw *hw) { struct pci_dev *pdev = hw->pdev; int err; init_waitqueue_head(&hw->msi_wait); sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW); err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw); if (err) { dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); return err; } sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ); sky2_read8(hw, B0_CTST); wait_event_timeout(hw->msi_wait, (hw->flags & SKY2_HW_USE_MSI), HZ/10); if (!(hw->flags & SKY2_HW_USE_MSI)) { /* MSI test failed, go back to INTx mode */ dev_info(&pdev->dev, "No interrupt generated using MSI, " "switching to INTx mode.\n"); err = -EOPNOTSUPP; sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ); } sky2_write32(hw, B0_IMSK, 0); sky2_read32(hw, B0_IMSK); free_irq(pdev->irq, hw); return err; } /* This driver supports yukon2 chipset only */ static const char *sky2_name(u8 chipid, char *buf, int sz) { const char *name[] = { "XL", /* 0xb3 */ "EC Ultra", /* 0xb4 */ "Extreme", /* 0xb5 */ "EC", /* 0xb6 */ "FE", /* 0xb7 */ "FE+", /* 0xb8 */ "Supreme", /* 0xb9 */ "UL 2", /* 0xba */ "Unknown", /* 0xbb */ "Optima", /* 0xbc */ }; if (chipid >= CHIP_ID_YUKON_XL && chipid <= CHIP_ID_YUKON_OPT) strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz); else snprintf(buf, sz, "(chip %#x)", chipid); return buf; } static int __devinit sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *dev; struct sky2_hw *hw; int err, using_dac = 0, wol_default; u32 reg; char buf1[16]; err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "cannot enable PCI device\n"); goto err_out; } /* Get configuration information * Note: only regular PCI config access once to test for HW issues * other PCI access through shared memory for speed and to * avoid MMCONFIG problems.
*/ err = pci_read_config_dword(pdev, PCI_DEV_REG2, &reg); if (err) { dev_err(&pdev->dev, "PCI read config failed\n"); goto err_out; } if (~reg == 0) { dev_err(&pdev->dev, "PCI configuration read error\n"); goto err_out; } err = pci_request_regions(pdev, DRV_NAME); if (err) { dev_err(&pdev->dev, "cannot obtain PCI resources\n"); goto err_out_disable; } pci_set_master(pdev); if (sizeof(dma_addr_t) > sizeof(u32) && !(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))) { using_dac = 1; err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (err < 0) { dev_err(&pdev->dev, "unable to obtain 64 bit DMA " "for consistent allocations\n"); goto err_out_free_regions; } } else { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "no usable DMA configuration\n"); goto err_out_free_regions; } } #ifdef __BIG_ENDIAN /* The sk98lin vendor driver uses hardware byte swapping but * this driver uses software swapping. */ reg &= ~PCI_REV_DESC; err = pci_write_config_dword(pdev, PCI_DEV_REG2, reg); if (err) { dev_err(&pdev->dev, "PCI write config failed\n"); goto err_out_free_regions; } #endif wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0; err = -ENOMEM; hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:") + strlen(pci_name(pdev)) + 1, GFP_KERNEL); if (!hw) { dev_err(&pdev->dev, "cannot allocate hardware struct\n"); goto err_out_free_regions; } hw->pdev = pdev; sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev)); hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); if (!hw->regs) { dev_err(&pdev->dev, "cannot map device registers\n"); goto err_out_free_hw; } err = sky2_init(hw); if (err) goto err_out_iounmap; /* ring for status responses */ hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING); hw->st_le = pci_alloc_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le), &hw->st_dma); if (!hw->st_le) goto err_out_reset; dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n", sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev); sky2_reset(hw); dev = sky2_init_netdev(hw, 0, using_dac, wol_default); if (!dev) { err = -ENOMEM; goto err_out_free_pci; } if (!disable_msi && pci_enable_msi(pdev) == 0) { err = sky2_test_msi(hw); if (err == -EOPNOTSUPP) pci_disable_msi(pdev); else if (err) goto err_out_free_netdev; } err = register_netdev(dev); if (err) { dev_err(&pdev->dev, "cannot register net device\n"); goto err_out_free_netdev; } netif_carrier_off(dev); netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT); err = request_irq(pdev->irq, sky2_intr, (hw->flags & SKY2_HW_USE_MSI) ? 
0 : IRQF_SHARED, hw->irq_name, hw); if (err) { dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); goto err_out_unregister; } sky2_write32(hw, B0_IMSK, Y2_IS_BASE); napi_enable(&hw->napi); sky2_show_addr(dev); if (hw->ports > 1) { struct net_device *dev1; err = -ENOMEM; dev1 = sky2_init_netdev(hw, 1, using_dac, wol_default); if (dev1 && (err = register_netdev(dev1)) == 0) sky2_show_addr(dev1); else { dev_warn(&pdev->dev, "register of second port failed (%d)\n", err); hw->dev[1] = NULL; hw->ports = 1; if (dev1) free_netdev(dev1); } } setup_timer(&hw->watchdog_timer, sky2_watchdog, (unsigned long) hw); INIT_WORK(&hw->restart_work, sky2_restart); pci_set_drvdata(pdev, hw); pdev->d3_delay = 150; return 0; err_out_unregister: if (hw->flags & SKY2_HW_USE_MSI) pci_disable_msi(pdev); unregister_netdev(dev); err_out_free_netdev: free_netdev(dev); err_out_free_pci: pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le), hw->st_le, hw->st_dma); err_out_reset: sky2_write8(hw, B0_CTST, CS_RST_SET); err_out_iounmap: iounmap(hw->regs); err_out_free_hw: kfree(hw); err_out_free_regions: pci_release_regions(pdev); err_out_disable: pci_disable_device(pdev); err_out: pci_set_drvdata(pdev, NULL); return err; } static void __devexit sky2_remove(struct pci_dev *pdev) { struct sky2_hw *hw = pci_get_drvdata(pdev); int i; if (!hw) return; del_timer_sync(&hw->watchdog_timer); cancel_work_sync(&hw->restart_work); for (i = hw->ports-1; i >= 0; --i) unregister_netdev(hw->dev[i]); sky2_write32(hw, B0_IMSK, 0); sky2_power_aux(hw); sky2_write8(hw, B0_CTST, CS_RST_SET); sky2_read8(hw, B0_CTST); free_irq(pdev->irq, hw); if (hw->flags & SKY2_HW_USE_MSI) pci_disable_msi(pdev); pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le), hw->st_le, hw->st_dma); pci_release_regions(pdev); pci_disable_device(pdev); for (i = hw->ports-1; i >= 0; --i) free_netdev(hw->dev[i]); iounmap(hw->regs); kfree(hw); pci_set_drvdata(pdev, NULL); } static int sky2_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct sky2_hw *hw = pci_get_drvdata(pdev); int i; if (!hw) return 0; del_timer_sync(&hw->watchdog_timer); cancel_work_sync(&hw->restart_work); rtnl_lock(); sky2_all_down(hw); for (i = 0; i < hw->ports; i++) { struct net_device *dev = hw->dev[i]; struct sky2_port *sky2 = netdev_priv(dev); if (sky2->wol) sky2_wol_init(sky2); } sky2_power_aux(hw); rtnl_unlock(); return 0; } #ifdef CONFIG_PM_SLEEP static int sky2_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct sky2_hw *hw = pci_get_drvdata(pdev); int err; if (!hw) return 0; /* Re-enable all clocks */ err = pci_write_config_dword(pdev, PCI_DEV_REG3, 0); if (err) { dev_err(&pdev->dev, "PCI write config failed\n"); goto out; } rtnl_lock(); sky2_reset(hw); sky2_all_up(hw); rtnl_unlock(); return 0; out: dev_err(&pdev->dev, "resume failed (%d)\n", err); pci_disable_device(pdev); return err; } static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume); #define SKY2_PM_OPS (&sky2_pm_ops) #else #define SKY2_PM_OPS NULL #endif static void sky2_shutdown(struct pci_dev *pdev) { sky2_suspend(&pdev->dev); pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev)); pci_set_power_state(pdev, PCI_D3hot); } static struct pci_driver sky2_driver = { .name = DRV_NAME, .id_table = sky2_id_table, .probe = sky2_probe, .remove = __devexit_p(sky2_remove), .shutdown = sky2_shutdown, .driver.pm = SKY2_PM_OPS, }; static int __init sky2_init_module(void) { pr_info("driver version " DRV_VERSION "\n"); sky2_debug_init(); return 
pci_register_driver(&sky2_driver); } static void __exit sky2_cleanup_module(void) { pci_unregister_driver(&sky2_driver); sky2_debug_cleanup(); } module_init(sky2_init_module); module_exit(sky2_cleanup_module); MODULE_DESCRIPTION("Marvell Yukon 2 Gigabit Ethernet driver"); MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION);
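/*
 * A minimal sketch, not part of the driver, of the u64_stats retry
 * pattern that sky2_get_stats() above uses to read 64-bit byte/packet
 * counters consistently on 32-bit SMP (see <linux/u64_stats_sync.h>).
 * "struct example_stats" is a hypothetical stand-in for the driver's
 * rx_stats/tx_stats pairs.
 */
struct example_stats {
	u64 bytes;
	u64 packets;
	struct u64_stats_sync syncp;
};

static inline void example_stats_read(const struct example_stats *s,
				      u64 *bytes, u64 *packets)
{
	unsigned int start;

	do {
		/* re-read until the writer-side sequence count is stable */
		start = u64_stats_fetch_begin_bh(&s->syncp);
		*bytes = s->bytes;
		*packets = s->packets;
	} while (u64_stats_fetch_retry_bh(&s->syncp, start));
}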
gpl-2.0
p350user/ICY-PECAN-KERNEL
arch/powerpc/mm/dma-noncoherent.c
1050
10180
/* * PowerPC version derived from arch/arm/mm/consistent.c * Copyright (C) 2001 Dan Malek (dmalek@jlc.net) * * Copyright (C) 2000 Russell King * * Consistent memory allocators. Used for DMA devices that want to * share uncached memory with the processor core. The function return * is the virtual address and 'dma_handle' is the physical address. * Mostly stolen from the ARM port, with some changes for PowerPC. * -- Dan * * Reorganized to get rid of the arch-specific consistent_* functions * and provide non-coherent implementations for the DMA API. -Matt * * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent() * implementation. This is pulled straight from ARM and barely * modified. -Matt * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/highmem.h> #include <linux/dma-mapping.h> #include <asm/tlbflush.h> #include "mmu_decl.h" /* * This address range defaults to a value that is safe for all * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It * can be further configured for specific applications under * the "Advanced Setup" menu. -Matt */ #define CONSISTENT_BASE (IOREMAP_TOP) #define CONSISTENT_END (CONSISTENT_BASE + CONFIG_CONSISTENT_SIZE) #define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT) /* * This is the page table (2MB) covering uncached, DMA consistent allocations */ static DEFINE_SPINLOCK(consistent_lock); /* * VM region handling support. * * This should become something generic, handling VM region allocations for * vmalloc and similar (ioremap, module space, etc). * * I envisage vmalloc()'s supporting vm_struct becoming: * * struct vm_struct { * struct vm_region region; * unsigned long flags; * struct page **pages; * unsigned int nr_pages; * unsigned long phys_addr; * }; * * get_vm_area() would then call vm_region_alloc with an appropriate * struct vm_region head (eg): * * struct vm_region vmalloc_head = { * .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list), * .vm_start = VMALLOC_START, * .vm_end = VMALLOC_END, * }; * * However, vmalloc_head.vm_start is variable (typically, it is dependent on * the amount of RAM found at boot time.) I would imagine that get_vm_area() * would have to initialise this each time prior to calling vm_region_alloc(). */ struct ppc_vm_region { struct list_head vm_list; unsigned long vm_start; unsigned long vm_end; }; static struct ppc_vm_region consistent_head = { .vm_list = LIST_HEAD_INIT(consistent_head.vm_list), .vm_start = CONSISTENT_BASE, .vm_end = CONSISTENT_END, }; static struct ppc_vm_region * ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp) { unsigned long addr = head->vm_start, end = head->vm_end - size; unsigned long flags; struct ppc_vm_region *c, *new; new = kmalloc(sizeof(struct ppc_vm_region), gfp); if (!new) goto out; spin_lock_irqsave(&consistent_lock, flags); list_for_each_entry(c, &head->vm_list, vm_list) { if ((addr + size) < addr) goto nospc; if ((addr + size) <= c->vm_start) goto found; addr = c->vm_end; if (addr > end) goto nospc; } found: /* * Insert this entry _before_ the one we found. 
*/ list_add_tail(&new->vm_list, &c->vm_list); new->vm_start = addr; new->vm_end = addr + size; spin_unlock_irqrestore(&consistent_lock, flags); return new; nospc: spin_unlock_irqrestore(&consistent_lock, flags); kfree(new); out: return NULL; } static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr) { struct ppc_vm_region *c; list_for_each_entry(c, &head->vm_list, vm_list) { if (c->vm_start == addr) goto out; } c = NULL; out: return c; } /* * Allocate DMA-coherent memory space and return both the kernel remapped * virtual and bus address for that space. */ void * __dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) { struct page *page; struct ppc_vm_region *c; unsigned long order; u64 mask = ISA_DMA_THRESHOLD, limit; if (dev) { mask = dev->coherent_dma_mask; /* * Sanity check the DMA mask - it must be non-zero, and * must be able to be satisfied by a DMA allocation. */ if (mask == 0) { dev_warn(dev, "coherent DMA mask is unset\n"); goto no_page; } if ((~mask) & ISA_DMA_THRESHOLD) { dev_warn(dev, "coherent DMA mask %#llx is smaller " "than system GFP_DMA mask %#llx\n", mask, (unsigned long long)ISA_DMA_THRESHOLD); goto no_page; } } size = PAGE_ALIGN(size); limit = (mask + 1) & ~mask; if ((limit && size >= limit) || size >= (CONSISTENT_END - CONSISTENT_BASE)) { printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n", size, mask); return NULL; } order = get_order(size); /* Might be useful if we ever have a real legacy DMA zone... */ if (mask != 0xffffffff) gfp |= GFP_DMA; page = alloc_pages(gfp, order); if (!page) goto no_page; /* * Invalidate any data that might be lurking in the * kernel direct-mapped region for device DMA. */ { unsigned long kaddr = (unsigned long)page_address(page); memset(page_address(page), 0, size); flush_dcache_range(kaddr, kaddr + size); } /* * Allocate a virtual address in the consistent mapping region. */ c = ppc_vm_region_alloc(&consistent_head, size, gfp & ~(__GFP_DMA | __GFP_HIGHMEM)); if (c) { unsigned long vaddr = c->vm_start; struct page *end = page + (1 << order); split_page(page, order); /* * Set the "dma handle" */ *handle = page_to_phys(page); do { SetPageReserved(page); map_page(vaddr, page_to_phys(page), pgprot_noncached(PAGE_KERNEL)); page++; vaddr += PAGE_SIZE; } while (size -= PAGE_SIZE); /* * Free the otherwise unused pages. */ while (page < end) { __free_page(page); page++; } return (void *)c->vm_start; } if (page) __free_pages(page, order); no_page: return NULL; } EXPORT_SYMBOL(__dma_alloc_coherent); /* * free a page as defined by the above mapping. 
*/ void __dma_free_coherent(size_t size, void *vaddr) { struct ppc_vm_region *c; unsigned long flags, addr; size = PAGE_ALIGN(size); spin_lock_irqsave(&consistent_lock, flags); c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr); if (!c) goto no_area; if ((c->vm_end - c->vm_start) != size) { printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n", __func__, c->vm_end - c->vm_start, size); dump_stack(); size = c->vm_end - c->vm_start; } addr = c->vm_start; do { pte_t *ptep; unsigned long pfn; ptep = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(addr), addr), addr), addr); if (!pte_none(*ptep) && pte_present(*ptep)) { pfn = pte_pfn(*ptep); pte_clear(&init_mm, addr, ptep); if (pfn_valid(pfn)) { struct page *page = pfn_to_page(pfn); ClearPageReserved(page); __free_page(page); } } addr += PAGE_SIZE; } while (size -= PAGE_SIZE); flush_tlb_kernel_range(c->vm_start, c->vm_end); list_del(&c->vm_list); spin_unlock_irqrestore(&consistent_lock, flags); kfree(c); return; no_area: spin_unlock_irqrestore(&consistent_lock, flags); printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n", __func__, vaddr); dump_stack(); } EXPORT_SYMBOL(__dma_free_coherent); /* * make an area consistent. */ void __dma_sync(void *vaddr, size_t size, int direction) { unsigned long start = (unsigned long)vaddr; unsigned long end = start + size; switch (direction) { case DMA_NONE: BUG(); case DMA_FROM_DEVICE: /* * invalidate only when cache-line aligned otherwise there is * the potential for discarding uncommitted data from the cache */ if ((start & (L1_CACHE_BYTES - 1)) || (size & (L1_CACHE_BYTES - 1))) flush_dcache_range(start, end); else invalidate_dcache_range(start, end); break; case DMA_TO_DEVICE: /* writeback only */ clean_dcache_range(start, end); break; case DMA_BIDIRECTIONAL: /* writeback and invalidate */ flush_dcache_range(start, end); break; } } EXPORT_SYMBOL(__dma_sync); #ifdef CONFIG_HIGHMEM /* * __dma_sync_page() implementation for systems using highmem. * In this case, each page of a buffer must be kmapped/kunmapped * in order to have a virtual address for __dma_sync(). This must * not sleep so kmap_atomic()/kunmap_atomic() are used. * * Note: yes, it is possible and correct to have a buffer extend * beyond the first page. */ static inline void __dma_sync_page_highmem(struct page *page, unsigned long offset, size_t size, int direction) { size_t seg_size = min((size_t)(PAGE_SIZE - offset), size); size_t cur_size = seg_size; unsigned long flags, start, seg_offset = offset; int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE; int seg_nr = 0; local_irq_save(flags); do { start = (unsigned long)kmap_atomic(page + seg_nr, KM_PPC_SYNC_PAGE) + seg_offset; /* Sync this buffer segment */ __dma_sync((void *)start, seg_size, direction); kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE); seg_nr++; /* Calculate next buffer segment size */ seg_size = min((size_t)PAGE_SIZE, size - cur_size); /* Add the segment size to our running total */ cur_size += seg_size; seg_offset = 0; } while (seg_nr < nr_segs); local_irq_restore(flags); } #endif /* CONFIG_HIGHMEM */ /* * __dma_sync_page makes memory consistent. 
It is identical to __dma_sync, but * takes a struct page instead of a virtual address */ void __dma_sync_page(struct page *page, unsigned long offset, size_t size, int direction) { #ifdef CONFIG_HIGHMEM __dma_sync_page_highmem(page, offset, size, direction); #else unsigned long start = (unsigned long)page_address(page) + offset; __dma_sync((void *)start, size, direction); #endif } EXPORT_SYMBOL(__dma_sync_page);
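/*
 * Usage sketch (hypothetical caller, not part of this file): on a
 * CONFIG_NOT_COHERENT_CACHE platform the generic DMA API is backed by
 * the helpers above, so a driver just uses the usual calls:
 */
static void *example_alloc_ring(struct device *dev, size_t size,
				dma_addr_t *dma)
{
	/* routed to __dma_alloc_coherent(): pages are remapped uncached
	 * in the CONSISTENT_BASE..CONSISTENT_END window */
	return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
}

static void example_free_ring(struct device *dev, size_t size,
			      void *cpu_addr, dma_addr_t dma)
{
	/* routed to __dma_free_coherent(): unmaps and frees the pages */
	dma_free_coherent(dev, size, cpu_addr, dma);
}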
gpl-2.0
spairal/linux-for-lobster
arch/um/os-Linux/util.c
1818
2764
/* * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <stdio.h> #include <stdlib.h> #include <errno.h> #include <signal.h> #include <string.h> #include <termios.h> #include <wait.h> #include <sys/mman.h> #include <sys/utsname.h> #include "kern_constants.h" #include "os.h" #include "user.h" void stack_protections(unsigned long address) { if (mprotect((void *) address, UM_THREAD_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC) < 0) panic("protecting stack failed, errno = %d", errno); } int raw(int fd) { struct termios tt; int err; CATCH_EINTR(err = tcgetattr(fd, &tt)); if (err < 0) return -errno; cfmakeraw(&tt); CATCH_EINTR(err = tcsetattr(fd, TCSADRAIN, &tt)); if (err < 0) return -errno; /* * XXX tcsetattr could have applied only some changes * (and cfmakeraw() is a set of changes) */ return 0; } void setup_machinename(char *machine_out) { struct utsname host; uname(&host); #ifdef UML_CONFIG_UML_X86 # ifndef UML_CONFIG_64BIT if (!strcmp(host.machine, "x86_64")) { strcpy(machine_out, "i686"); return; } # else if (!strcmp(host.machine, "i686")) { strcpy(machine_out, "x86_64"); return; } # endif #endif strcpy(machine_out, host.machine); } void setup_hostinfo(char *buf, int len) { struct utsname host; uname(&host); snprintf(buf, len, "%s %s %s %s %s", host.sysname, host.nodename, host.release, host.version, host.machine); } void os_dump_core(void) { int pid; signal(SIGSEGV, SIG_DFL); /* * We are about to SIGTERM this entire process group to ensure that * nothing is around to run after the kernel exits. The * kernel wants to abort, not die through SIGTERM, so we * ignore it here. */ signal(SIGTERM, SIG_IGN); kill(0, SIGTERM); /* * Most of the other processes associated with this UML are * likely stopped, so give them a SIGCONT so they see the * SIGTERM. */ kill(0, SIGCONT); /* * Now, having sent signals to everyone but us, make sure they * die by ptrace. Processes can survive what's been done to * them so far - the mechanism I understand is receiving a * SIGSEGV and segfaulting immediately upon return. There is * always a SIGSEGV pending, and (I'm guessing) signals are * processed in numeric order so the SIGTERM (signal 15 vs * SIGSEGV being signal 11) is never handled. * * Run a waitpid loop until we get some kind of error. * Hopefully, it's ECHILD, but there's not a lot we can do if * it's something else. Tell os_kill_ptraced_process not to * wait for the child to report its death because there's * nothing reasonable to do if that fails. */ while ((pid = waitpid(-1, NULL, WNOHANG | __WALL)) > 0) os_kill_ptraced_process(pid, 0); abort(); }
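/*
 * A small usage sketch (hypothetical, not part of this file): raw()
 * above returns 0 or -errno like the other os-Linux helpers, so a
 * caller setting up a raw console fd would do:
 */
static int example_setup_console(int fd)
{
	int err;

	err = raw(fd);
	if (err)
		printk(UM_KERN_ERR "raw() failed on fd %d, err = %d\n",
		       fd, -err);
	return err;
}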
gpl-2.0
bq/aquaris-E5-4G
drivers/usb/gadget/hid.c
2330
6410
/* * hid.c -- HID Composite driver * * Based on multi.c * * Copyright (C) 2010 Fabien Chouteau <fabien.chouteau@barco.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/list.h> #include <linux/module.h> #include <linux/usb/composite.h> #include "gadget_chips.h" #define DRIVER_DESC "HID Gadget" #define DRIVER_VERSION "2010/03/16" /*-------------------------------------------------------------------------*/ #define HIDG_VENDOR_NUM 0x0525 /* XXX NetChip */ #define HIDG_PRODUCT_NUM 0xa4ac /* Linux-USB HID gadget */ /*-------------------------------------------------------------------------*/ /* * kbuild is not very cooperative with respect to linking separately * compiled library objects into one module. So for now we won't use * separate compilation ... ensuring init/exit sections work to shrink * the runtime footprint, and giving us at least some parts of what * a "gcc --combine ... part1.c part2.c part3.c ... " build would. */ #include "f_hid.c" struct hidg_func_node { struct list_head node; struct hidg_func_descriptor *func; }; static LIST_HEAD(hidg_func_list); /*-------------------------------------------------------------------------*/ USB_GADGET_COMPOSITE_OPTIONS(); static struct usb_device_descriptor device_desc = { .bLength = sizeof device_desc, .bDescriptorType = USB_DT_DEVICE, .bcdUSB = cpu_to_le16(0x0200), /* .bDeviceClass = USB_CLASS_COMM, */ /* .bDeviceSubClass = 0, */ /* .bDeviceProtocol = 0, */ .bDeviceClass = USB_CLASS_PER_INTERFACE, .bDeviceSubClass = 0, .bDeviceProtocol = 0, /* .bMaxPacketSize0 = f(hardware) */ /* Vendor and product id can be overridden by module parameters. */ .idVendor = cpu_to_le16(HIDG_VENDOR_NUM), .idProduct = cpu_to_le16(HIDG_PRODUCT_NUM), /* .bcdDevice = f(hardware) */ /* .iManufacturer = DYNAMIC */ /* .iProduct = DYNAMIC */ /* NO SERIAL NUMBER */ .bNumConfigurations = 1, }; static struct usb_otg_descriptor otg_descriptor = { .bLength = sizeof otg_descriptor, .bDescriptorType = USB_DT_OTG, /* REVISIT SRP-only hardware is possible, although * it would not be called "OTG" ... 
*/ .bmAttributes = USB_OTG_SRP | USB_OTG_HNP, }; static const struct usb_descriptor_header *otg_desc[] = { (struct usb_descriptor_header *) &otg_descriptor, NULL, }; /* string IDs are assigned dynamically */ static struct usb_string strings_dev[] = { [USB_GADGET_MANUFACTURER_IDX].s = "", [USB_GADGET_PRODUCT_IDX].s = DRIVER_DESC, [USB_GADGET_SERIAL_IDX].s = "", { } /* end of list */ }; static struct usb_gadget_strings stringtab_dev = { .language = 0x0409, /* en-us */ .strings = strings_dev, }; static struct usb_gadget_strings *dev_strings[] = { &stringtab_dev, NULL, }; /****************************** Configurations ******************************/ static int __init do_config(struct usb_configuration *c) { struct hidg_func_node *e; int func = 0, status = 0; if (gadget_is_otg(c->cdev->gadget)) { c->descriptors = otg_desc; c->bmAttributes |= USB_CONFIG_ATT_WAKEUP; } list_for_each_entry(e, &hidg_func_list, node) { status = hidg_bind_config(c, e->func, func++); if (status) break; } return status; } static struct usb_configuration config_driver = { .label = "HID Gadget", .bConfigurationValue = 1, /* .iConfiguration = DYNAMIC */ .bmAttributes = USB_CONFIG_ATT_SELFPOWER, }; /****************************** Gadget Bind ******************************/ static int __init hid_bind(struct usb_composite_dev *cdev) { struct usb_gadget *gadget = cdev->gadget; struct list_head *tmp; int status, funcs = 0; list_for_each(tmp, &hidg_func_list) funcs++; if (!funcs) return -ENODEV; /* set up HID */ status = ghid_setup(cdev->gadget, funcs); if (status < 0) return status; /* Allocate string descriptor numbers ... note that string * contents can be overridden by the composite_dev glue. */ status = usb_string_ids_tab(cdev, strings_dev); if (status < 0) return status; device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id; device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id; /* register our configuration */ status = usb_add_config(cdev, &config_driver, do_config); if (status < 0) return status; usb_composite_overwrite_options(cdev, &coverwrite); dev_info(&gadget->dev, DRIVER_DESC ", version: " DRIVER_VERSION "\n"); return 0; } static int __exit hid_unbind(struct usb_composite_dev *cdev) { ghid_cleanup(); return 0; } static int __init hidg_plat_driver_probe(struct platform_device *pdev) { struct hidg_func_descriptor *func = pdev->dev.platform_data; struct hidg_func_node *entry; if (!func) { dev_err(&pdev->dev, "Platform data missing\n"); return -ENODEV; } entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; entry->func = func; list_add_tail(&entry->node, &hidg_func_list); return 0; } static int hidg_plat_driver_remove(struct platform_device *pdev) { struct hidg_func_node *e, *n; list_for_each_entry_safe(e, n, &hidg_func_list, node) { list_del(&e->node); kfree(e); } return 0; } /****************************** Some noise ******************************/ static __refdata struct usb_composite_driver hidg_driver = { .name = "g_hid", .dev = &device_desc, .strings = dev_strings, .max_speed = USB_SPEED_HIGH, .bind = hid_bind, .unbind = __exit_p(hid_unbind), }; static struct platform_driver hidg_plat_driver = { .remove = hidg_plat_driver_remove, .driver = { .owner = THIS_MODULE, .name = "hidg", }, }; MODULE_DESCRIPTION(DRIVER_DESC); MODULE_AUTHOR("Fabien Chouteau, Peter Korsgaard"); MODULE_LICENSE("GPL"); static int __init hidg_init(void) { int status; status = platform_driver_probe(&hidg_plat_driver, hidg_plat_driver_probe); if (status < 0) return status; status = 
usb_composite_probe(&hidg_driver); if (status < 0) platform_driver_unregister(&hidg_plat_driver); return status; } module_init(hidg_init); static void __exit hidg_cleanup(void) { platform_driver_unregister(&hidg_plat_driver); usb_composite_unregister(&hidg_driver); } module_exit(hidg_cleanup);
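/*
 * A board-file sketch in the spirit of Documentation/usb/gadget_hid.txt
 * (hypothetical values, not part of this file): hidg_plat_driver_probe()
 * above expects a struct hidg_func_descriptor (from <linux/usb/g_hid.h>)
 * as platform data on a "hidg" platform device, e.g. for a keyboard
 * with 8-byte reports:
 */
static struct hidg_func_descriptor example_hid_data = {
	.subclass		= 0, /* no subclass */
	.protocol		= 1, /* keyboard */
	.report_length		= 8,
	.report_desc_length	= 63,
	.report_desc		= {
		0x05, 0x01,	/* USAGE_PAGE (Generic Desktop) */
		0x09, 0x06,	/* USAGE (Keyboard) */
		0xa1, 0x01,	/* COLLECTION (Application) */
		/* ... the remaining descriptor bytes are elided ... */
		0xc0		/* END_COLLECTION */
	},
};

static struct platform_device example_hid = {
	.name			= "hidg",
	.id			= 0,
	.num_resources		= 0,
	.resource		= 0,
	.dev = {
		.platform_data	= &example_hid_data,
	},
};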
gpl-2.0
1N4148/agni
arch/tile/kernel/tile-desc_64.c
2586
96616
/* This define is BFD_RELOC_##x for real bfd, or -1 for everyone else. */ #define BFD_RELOC(x) -1 /* Special registers. */ #define TREG_LR 55 #define TREG_SN 56 #define TREG_ZERO 63 /* FIXME: Rename this. */ #include <asm/opcode-tile_64.h> #include <linux/stddef.h> const struct tilegx_opcode tilegx_opcodes[334] = { { "bpt", TILEGX_OPC_BPT, 0x2, 0, TREG_ZERO, 0, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "info", TILEGX_OPC_INFO, 0xf, 1, TREG_ZERO, 1, { { 0 }, { 1 }, { 2 }, { 3 }, { 0, } }, }, { "infol", TILEGX_OPC_INFOL, 0x3, 1, TREG_ZERO, 1, { { 4 }, { 5 }, { 0, }, { 0, }, { 0, } }, }, { "move", TILEGX_OPC_MOVE, 0xf, 2, TREG_ZERO, 1, { { 6, 7 }, { 8, 9 }, { 10, 11 }, { 12, 13 }, { 0, } }, }, { "movei", TILEGX_OPC_MOVEI, 0xf, 2, TREG_ZERO, 1, { { 6, 0 }, { 8, 1 }, { 10, 2 }, { 12, 3 }, { 0, } }, }, { "moveli", TILEGX_OPC_MOVELI, 0x3, 2, TREG_ZERO, 1, { { 6, 4 }, { 8, 5 }, { 0, }, { 0, }, { 0, } }, }, { "prefetch", TILEGX_OPC_PREFETCH, 0x12, 1, TREG_ZERO, 1, { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } }, }, { "prefetch_add_l1", TILEGX_OPC_PREFETCH_ADD_L1, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } }, }, { "prefetch_add_l1_fault", TILEGX_OPC_PREFETCH_ADD_L1_FAULT, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } }, }, { "prefetch_add_l2", TILEGX_OPC_PREFETCH_ADD_L2, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } }, }, { "prefetch_add_l2_fault", TILEGX_OPC_PREFETCH_ADD_L2_FAULT, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } }, }, { "prefetch_add_l3", TILEGX_OPC_PREFETCH_ADD_L3, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } }, }, { "prefetch_add_l3_fault", TILEGX_OPC_PREFETCH_ADD_L3_FAULT, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } }, }, { "prefetch_l1", TILEGX_OPC_PREFETCH_L1, 0x12, 1, TREG_ZERO, 1, { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } }, }, { "prefetch_l1_fault", TILEGX_OPC_PREFETCH_L1_FAULT, 0x12, 1, TREG_ZERO, 1, { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } }, }, { "prefetch_l2", TILEGX_OPC_PREFETCH_L2, 0x12, 1, TREG_ZERO, 1, { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } }, }, { "prefetch_l2_fault", TILEGX_OPC_PREFETCH_L2_FAULT, 0x12, 1, TREG_ZERO, 1, { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } }, }, { "prefetch_l3", TILEGX_OPC_PREFETCH_L3, 0x12, 1, TREG_ZERO, 1, { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } }, }, { "prefetch_l3_fault", TILEGX_OPC_PREFETCH_L3_FAULT, 0x12, 1, TREG_ZERO, 1, { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } }, }, { "raise", TILEGX_OPC_RAISE, 0x2, 0, TREG_ZERO, 1, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "add", TILEGX_OPC_ADD, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } }, }, { "addi", TILEGX_OPC_ADDI, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 0 }, { 8, 9, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } }, }, { "addli", TILEGX_OPC_ADDLI, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 4 }, { 8, 9, 5 }, { 0, }, { 0, }, { 0, } }, }, { "addx", TILEGX_OPC_ADDX, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } }, }, { "addxi", TILEGX_OPC_ADDXI, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 0 }, { 8, 9, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } }, }, { "addxli", TILEGX_OPC_ADDXLI, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 4 }, { 8, 9, 5 }, { 0, }, { 0, }, { 0, } }, }, { "addxsc", TILEGX_OPC_ADDXSC, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "and", TILEGX_OPC_AND, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } }, }, { "andi", TILEGX_OPC_ANDI, 0xf, 3, 
TREG_ZERO, 1, { { 6, 7, 0 }, { 8, 9, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } }, }, { "beqz", TILEGX_OPC_BEQZ, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } }, }, { "beqzt", TILEGX_OPC_BEQZT, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bfexts", TILEGX_OPC_BFEXTS, 0x1, 4, TREG_ZERO, 1, { { 6, 7, 21, 22 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "bfextu", TILEGX_OPC_BFEXTU, 0x1, 4, TREG_ZERO, 1, { { 6, 7, 21, 22 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "bfins", TILEGX_OPC_BFINS, 0x1, 4, TREG_ZERO, 1, { { 23, 7, 21, 22 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "bgez", TILEGX_OPC_BGEZ, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bgezt", TILEGX_OPC_BGEZT, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bgtz", TILEGX_OPC_BGTZ, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bgtzt", TILEGX_OPC_BGTZT, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } }, }, { "blbc", TILEGX_OPC_BLBC, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } }, }, { "blbct", TILEGX_OPC_BLBCT, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } }, }, { "blbs", TILEGX_OPC_BLBS, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } }, }, { "blbst", TILEGX_OPC_BLBST, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } }, }, { "blez", TILEGX_OPC_BLEZ, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } }, }, { "blezt", TILEGX_OPC_BLEZT, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bltz", TILEGX_OPC_BLTZ, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bltzt", TILEGX_OPC_BLTZT, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bnez", TILEGX_OPC_BNEZ, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bnezt", TILEGX_OPC_BNEZT, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } }, }, { "clz", TILEGX_OPC_CLZ, 0x5, 2, TREG_ZERO, 1, { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } }, }, { "cmoveqz", TILEGX_OPC_CMOVEQZ, 0x5, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } }, }, { "cmovnez", TILEGX_OPC_CMOVNEZ, 0x5, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } }, }, { "cmpeq", TILEGX_OPC_CMPEQ, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } }, }, { "cmpeqi", TILEGX_OPC_CMPEQI, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 0 }, { 8, 9, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } }, }, { "cmpexch", TILEGX_OPC_CMPEXCH, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "cmpexch4", TILEGX_OPC_CMPEXCH4, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "cmples", TILEGX_OPC_CMPLES, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } }, }, { "cmpleu", TILEGX_OPC_CMPLEU, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } }, }, { "cmplts", TILEGX_OPC_CMPLTS, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } }, }, { "cmpltsi", TILEGX_OPC_CMPLTSI, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 0 }, { 8, 9, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } }, }, { "cmpltu", TILEGX_OPC_CMPLTU, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } }, }, { "cmpltui", TILEGX_OPC_CMPLTUI, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 
0, } }, }, { "cmpne", TILEGX_OPC_CMPNE, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } }, }, { "cmul", TILEGX_OPC_CMUL, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "cmula", TILEGX_OPC_CMULA, 0x1, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "cmulaf", TILEGX_OPC_CMULAF, 0x1, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "cmulf", TILEGX_OPC_CMULF, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "cmulfr", TILEGX_OPC_CMULFR, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "cmulh", TILEGX_OPC_CMULH, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "cmulhr", TILEGX_OPC_CMULHR, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "crc32_32", TILEGX_OPC_CRC32_32, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "crc32_8", TILEGX_OPC_CRC32_8, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "ctz", TILEGX_OPC_CTZ, 0x5, 2, TREG_ZERO, 1, { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } }, }, { "dblalign", TILEGX_OPC_DBLALIGN, 0x1, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "dblalign2", TILEGX_OPC_DBLALIGN2, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "dblalign4", TILEGX_OPC_DBLALIGN4, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "dblalign6", TILEGX_OPC_DBLALIGN6, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "drain", TILEGX_OPC_DRAIN, 0x2, 0, TREG_ZERO, 0, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "dtlbpr", TILEGX_OPC_DTLBPR, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } }, }, { "exch", TILEGX_OPC_EXCH, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "exch4", TILEGX_OPC_EXCH4, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "fdouble_add_flags", TILEGX_OPC_FDOUBLE_ADD_FLAGS, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "fdouble_addsub", TILEGX_OPC_FDOUBLE_ADDSUB, 0x1, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "fdouble_mul_flags", TILEGX_OPC_FDOUBLE_MUL_FLAGS, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "fdouble_pack1", TILEGX_OPC_FDOUBLE_PACK1, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "fdouble_pack2", TILEGX_OPC_FDOUBLE_PACK2, 0x1, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "fdouble_sub_flags", TILEGX_OPC_FDOUBLE_SUB_FLAGS, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "fdouble_unpack_max", TILEGX_OPC_FDOUBLE_UNPACK_MAX, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "fdouble_unpack_min", TILEGX_OPC_FDOUBLE_UNPACK_MIN, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "fetchadd", TILEGX_OPC_FETCHADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "fetchadd4", TILEGX_OPC_FETCHADD4, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "fetchaddgez", TILEGX_OPC_FETCHADDGEZ, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "fetchaddgez4", TILEGX_OPC_FETCHADDGEZ4, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "fetchand", TILEGX_OPC_FETCHAND, 
0x2, 3, TREG_ZERO, 1, { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "fetchand4", TILEGX_OPC_FETCHAND4, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "fetchor", TILEGX_OPC_FETCHOR, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "fetchor4", TILEGX_OPC_FETCHOR4, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "finv", TILEGX_OPC_FINV, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } }, }, { "flush", TILEGX_OPC_FLUSH, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } }, }, { "flushwb", TILEGX_OPC_FLUSHWB, 0x2, 0, TREG_ZERO, 1, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "fnop", TILEGX_OPC_FNOP, 0xf, 0, TREG_ZERO, 1, { { }, { }, { }, { }, { 0, } }, }, { "fsingle_add1", TILEGX_OPC_FSINGLE_ADD1, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "fsingle_addsub2", TILEGX_OPC_FSINGLE_ADDSUB2, 0x1, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "fsingle_mul1", TILEGX_OPC_FSINGLE_MUL1, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "fsingle_mul2", TILEGX_OPC_FSINGLE_MUL2, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "fsingle_pack1", TILEGX_OPC_FSINGLE_PACK1, 0x5, 2, TREG_ZERO, 1, { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } }, }, { "fsingle_pack2", TILEGX_OPC_FSINGLE_PACK2, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "fsingle_sub1", TILEGX_OPC_FSINGLE_SUB1, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "icoh", TILEGX_OPC_ICOH, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } }, }, { "ill", TILEGX_OPC_ILL, 0xa, 0, TREG_ZERO, 1, { { 0, }, { }, { 0, }, { }, { 0, } }, }, { "inv", TILEGX_OPC_INV, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } }, }, { "iret", TILEGX_OPC_IRET, 0x2, 0, TREG_ZERO, 1, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "j", TILEGX_OPC_J, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 25 }, { 0, }, { 0, }, { 0, } }, }, { "jal", TILEGX_OPC_JAL, 0x2, 1, TREG_LR, 1, { { 0, }, { 25 }, { 0, }, { 0, }, { 0, } }, }, { "jalr", TILEGX_OPC_JALR, 0xa, 1, TREG_LR, 1, { { 0, }, { 9 }, { 0, }, { 13 }, { 0, } }, }, { "jalrp", TILEGX_OPC_JALRP, 0xa, 1, TREG_LR, 1, { { 0, }, { 9 }, { 0, }, { 13 }, { 0, } }, }, { "jr", TILEGX_OPC_JR, 0xa, 1, TREG_ZERO, 1, { { 0, }, { 9 }, { 0, }, { 13 }, { 0, } }, }, { "jrp", TILEGX_OPC_JRP, 0xa, 1, TREG_ZERO, 1, { { 0, }, { 9 }, { 0, }, { 13 }, { 0, } }, }, { "ld", TILEGX_OPC_LD, 0x12, 2, TREG_ZERO, 1, { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } }, }, { "ld1s", TILEGX_OPC_LD1S, 0x12, 2, TREG_ZERO, 1, { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } }, }, { "ld1s_add", TILEGX_OPC_LD1S_ADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } }, }, { "ld1u", TILEGX_OPC_LD1U, 0x12, 2, TREG_ZERO, 1, { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } }, }, { "ld1u_add", TILEGX_OPC_LD1U_ADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } }, }, { "ld2s", TILEGX_OPC_LD2S, 0x12, 2, TREG_ZERO, 1, { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } }, }, { "ld2s_add", TILEGX_OPC_LD2S_ADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } }, }, { "ld2u", TILEGX_OPC_LD2U, 0x12, 2, TREG_ZERO, 1, { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } }, }, { "ld2u_add", TILEGX_OPC_LD2U_ADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } }, }, { "ld4s", TILEGX_OPC_LD4S, 0x12, 2, TREG_ZERO, 1, { { 0, }, { 
8, 9 }, { 0, }, { 0, }, { 26, 14 } }, }, { "ld4s_add", TILEGX_OPC_LD4S_ADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } }, }, { "ld4u", TILEGX_OPC_LD4U, 0x12, 2, TREG_ZERO, 1, { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } }, }, { "ld4u_add", TILEGX_OPC_LD4U_ADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } }, }, { "ld_add", TILEGX_OPC_LD_ADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } }, }, { "ldna", TILEGX_OPC_LDNA, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } }, }, { "ldna_add", TILEGX_OPC_LDNA_ADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } }, }, { "ldnt", TILEGX_OPC_LDNT, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } }, }, { "ldnt1s", TILEGX_OPC_LDNT1S, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } }, }, { "ldnt1s_add", TILEGX_OPC_LDNT1S_ADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } }, }, { "ldnt1u", TILEGX_OPC_LDNT1U, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } }, }, { "ldnt1u_add", TILEGX_OPC_LDNT1U_ADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } }, }, { "ldnt2s", TILEGX_OPC_LDNT2S, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } }, }, { "ldnt2s_add", TILEGX_OPC_LDNT2S_ADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } }, }, { "ldnt2u", TILEGX_OPC_LDNT2U, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } }, }, { "ldnt2u_add", TILEGX_OPC_LDNT2U_ADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } }, }, { "ldnt4s", TILEGX_OPC_LDNT4S, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } }, }, { "ldnt4s_add", TILEGX_OPC_LDNT4S_ADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } }, }, { "ldnt4u", TILEGX_OPC_LDNT4U, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } }, }, { "ldnt4u_add", TILEGX_OPC_LDNT4U_ADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } }, }, { "ldnt_add", TILEGX_OPC_LDNT_ADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } }, }, { "lnk", TILEGX_OPC_LNK, 0xa, 1, TREG_ZERO, 1, { { 0, }, { 8 }, { 0, }, { 12 }, { 0, } }, }, { "mf", TILEGX_OPC_MF, 0x2, 0, TREG_ZERO, 1, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "mfspr", TILEGX_OPC_MFSPR, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 8, 27 }, { 0, }, { 0, }, { 0, } }, }, { "mm", TILEGX_OPC_MM, 0x1, 4, TREG_ZERO, 1, { { 23, 7, 21, 22 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mnz", TILEGX_OPC_MNZ, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } }, }, { "mtspr", TILEGX_OPC_MTSPR, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 28, 9 }, { 0, }, { 0, }, { 0, } }, }, { "mul_hs_hs", TILEGX_OPC_MUL_HS_HS, 0x5, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } }, }, { "mul_hs_hu", TILEGX_OPC_MUL_HS_HU, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mul_hs_ls", TILEGX_OPC_MUL_HS_LS, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mul_hs_lu", TILEGX_OPC_MUL_HS_LU, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mul_hu_hu", TILEGX_OPC_MUL_HU_HU, 0x5, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } }, }, { "mul_hu_ls", TILEGX_OPC_MUL_HU_LS, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mul_hu_lu", TILEGX_OPC_MUL_HU_LU, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, 
{ 0, }, { 0, }, { 0, }, { 0, } }, }, { "mul_ls_ls", TILEGX_OPC_MUL_LS_LS, 0x5, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } }, }, { "mul_ls_lu", TILEGX_OPC_MUL_LS_LU, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mul_lu_lu", TILEGX_OPC_MUL_LU_LU, 0x5, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } }, }, { "mula_hs_hs", TILEGX_OPC_MULA_HS_HS, 0x5, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } }, }, { "mula_hs_hu", TILEGX_OPC_MULA_HS_HU, 0x1, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mula_hs_ls", TILEGX_OPC_MULA_HS_LS, 0x1, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mula_hs_lu", TILEGX_OPC_MULA_HS_LU, 0x1, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mula_hu_hu", TILEGX_OPC_MULA_HU_HU, 0x5, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } }, }, { "mula_hu_ls", TILEGX_OPC_MULA_HU_LS, 0x1, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mula_hu_lu", TILEGX_OPC_MULA_HU_LU, 0x1, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mula_ls_ls", TILEGX_OPC_MULA_LS_LS, 0x5, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } }, }, { "mula_ls_lu", TILEGX_OPC_MULA_LS_LU, 0x1, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mula_lu_lu", TILEGX_OPC_MULA_LU_LU, 0x5, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } }, }, { "mulax", TILEGX_OPC_MULAX, 0x5, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } }, }, { "mulx", TILEGX_OPC_MULX, 0x5, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } }, }, { "mz", TILEGX_OPC_MZ, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } }, }, { "nap", TILEGX_OPC_NAP, 0x2, 0, TREG_ZERO, 0, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "nop", TILEGX_OPC_NOP, 0xf, 0, TREG_ZERO, 1, { { }, { }, { }, { }, { 0, } }, }, { "nor", TILEGX_OPC_NOR, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } }, }, { "or", TILEGX_OPC_OR, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } }, }, { "ori", TILEGX_OPC_ORI, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } }, }, { "pcnt", TILEGX_OPC_PCNT, 0x5, 2, TREG_ZERO, 1, { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } }, }, { "revbits", TILEGX_OPC_REVBITS, 0x5, 2, TREG_ZERO, 1, { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } }, }, { "revbytes", TILEGX_OPC_REVBYTES, 0x5, 2, TREG_ZERO, 1, { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } }, }, { "rotl", TILEGX_OPC_ROTL, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } }, }, { "rotli", TILEGX_OPC_ROTLI, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 29 }, { 8, 9, 30 }, { 10, 11, 31 }, { 12, 13, 32 }, { 0, } }, }, { "shl", TILEGX_OPC_SHL, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } }, }, { "shl16insli", TILEGX_OPC_SHL16INSLI, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 4 }, { 8, 9, 5 }, { 0, }, { 0, }, { 0, } }, }, { "shl1add", TILEGX_OPC_SHL1ADD, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } }, }, { "shl1addx", TILEGX_OPC_SHL1ADDX, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } }, }, { "shl2add", TILEGX_OPC_SHL2ADD, 0xf, 3, 
TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } }, }, { "shl2addx", TILEGX_OPC_SHL2ADDX, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } }, }, { "shl3add", TILEGX_OPC_SHL3ADD, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } }, }, { "shl3addx", TILEGX_OPC_SHL3ADDX, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } }, }, { "shli", TILEGX_OPC_SHLI, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 29 }, { 8, 9, 30 }, { 10, 11, 31 }, { 12, 13, 32 }, { 0, } }, }, { "shlx", TILEGX_OPC_SHLX, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "shlxi", TILEGX_OPC_SHLXI, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } }, }, { "shrs", TILEGX_OPC_SHRS, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } }, }, { "shrsi", TILEGX_OPC_SHRSI, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 29 }, { 8, 9, 30 }, { 10, 11, 31 }, { 12, 13, 32 }, { 0, } }, }, { "shru", TILEGX_OPC_SHRU, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } }, }, { "shrui", TILEGX_OPC_SHRUI, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 29 }, { 8, 9, 30 }, { 10, 11, 31 }, { 12, 13, 32 }, { 0, } }, }, { "shrux", TILEGX_OPC_SHRUX, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "shruxi", TILEGX_OPC_SHRUXI, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } }, }, { "shufflebytes", TILEGX_OPC_SHUFFLEBYTES, 0x1, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "st", TILEGX_OPC_ST, 0x12, 2, TREG_ZERO, 1, { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 14, 33 } }, }, { "st1", TILEGX_OPC_ST1, 0x12, 2, TREG_ZERO, 1, { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 14, 33 } }, }, { "st1_add", TILEGX_OPC_ST1_ADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } }, }, { "st2", TILEGX_OPC_ST2, 0x12, 2, TREG_ZERO, 1, { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 14, 33 } }, }, { "st2_add", TILEGX_OPC_ST2_ADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } }, }, { "st4", TILEGX_OPC_ST4, 0x12, 2, TREG_ZERO, 1, { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 14, 33 } }, }, { "st4_add", TILEGX_OPC_ST4_ADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } }, }, { "st_add", TILEGX_OPC_ST_ADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } }, }, { "stnt", TILEGX_OPC_STNT, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "stnt1", TILEGX_OPC_STNT1, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "stnt1_add", TILEGX_OPC_STNT1_ADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } }, }, { "stnt2", TILEGX_OPC_STNT2, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "stnt2_add", TILEGX_OPC_STNT2_ADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } }, }, { "stnt4", TILEGX_OPC_STNT4, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "stnt4_add", TILEGX_OPC_STNT4_ADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } }, }, { "stnt_add", TILEGX_OPC_STNT_ADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } }, }, { "sub", TILEGX_OPC_SUB, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } }, }, { "subx", TILEGX_OPC_SUBX, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 
8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } }, }, { "subxsc", TILEGX_OPC_SUBXSC, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "swint0", TILEGX_OPC_SWINT0, 0x2, 0, TREG_ZERO, 0, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "swint1", TILEGX_OPC_SWINT1, 0x2, 0, TREG_ZERO, 0, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "swint2", TILEGX_OPC_SWINT2, 0x2, 0, TREG_ZERO, 0, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "swint3", TILEGX_OPC_SWINT3, 0x2, 0, TREG_ZERO, 0, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "tblidxb0", TILEGX_OPC_TBLIDXB0, 0x5, 2, TREG_ZERO, 1, { { 23, 7 }, { 0, }, { 24, 11 }, { 0, }, { 0, } }, }, { "tblidxb1", TILEGX_OPC_TBLIDXB1, 0x5, 2, TREG_ZERO, 1, { { 23, 7 }, { 0, }, { 24, 11 }, { 0, }, { 0, } }, }, { "tblidxb2", TILEGX_OPC_TBLIDXB2, 0x5, 2, TREG_ZERO, 1, { { 23, 7 }, { 0, }, { 24, 11 }, { 0, }, { 0, } }, }, { "tblidxb3", TILEGX_OPC_TBLIDXB3, 0x5, 2, TREG_ZERO, 1, { { 23, 7 }, { 0, }, { 24, 11 }, { 0, }, { 0, } }, }, { "v1add", TILEGX_OPC_V1ADD, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v1addi", TILEGX_OPC_V1ADDI, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } }, }, { "v1adduc", TILEGX_OPC_V1ADDUC, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v1adiffu", TILEGX_OPC_V1ADIFFU, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "v1avgu", TILEGX_OPC_V1AVGU, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "v1cmpeq", TILEGX_OPC_V1CMPEQ, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v1cmpeqi", TILEGX_OPC_V1CMPEQI, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } }, }, { "v1cmples", TILEGX_OPC_V1CMPLES, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v1cmpleu", TILEGX_OPC_V1CMPLEU, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v1cmplts", TILEGX_OPC_V1CMPLTS, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v1cmpltsi", TILEGX_OPC_V1CMPLTSI, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } }, }, { "v1cmpltu", TILEGX_OPC_V1CMPLTU, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v1cmpltui", TILEGX_OPC_V1CMPLTUI, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } }, }, { "v1cmpne", TILEGX_OPC_V1CMPNE, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v1ddotpu", TILEGX_OPC_V1DDOTPU, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "v1ddotpua", TILEGX_OPC_V1DDOTPUA, 0x1, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "v1ddotpus", TILEGX_OPC_V1DDOTPUS, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "v1ddotpusa", TILEGX_OPC_V1DDOTPUSA, 0x1, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "v1dotp", TILEGX_OPC_V1DOTP, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "v1dotpa", TILEGX_OPC_V1DOTPA, 0x1, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "v1dotpu", TILEGX_OPC_V1DOTPU, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "v1dotpua", TILEGX_OPC_V1DOTPUA, 0x1, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "v1dotpus", TILEGX_OPC_V1DOTPUS, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, 
{ 0, }, { 0, }, { 0, } }, }, { "v1dotpusa", TILEGX_OPC_V1DOTPUSA, 0x1, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "v1int_h", TILEGX_OPC_V1INT_H, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v1int_l", TILEGX_OPC_V1INT_L, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v1maxu", TILEGX_OPC_V1MAXU, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v1maxui", TILEGX_OPC_V1MAXUI, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } }, }, { "v1minu", TILEGX_OPC_V1MINU, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v1minui", TILEGX_OPC_V1MINUI, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } }, }, { "v1mnz", TILEGX_OPC_V1MNZ, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v1multu", TILEGX_OPC_V1MULTU, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "v1mulu", TILEGX_OPC_V1MULU, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "v1mulus", TILEGX_OPC_V1MULUS, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "v1mz", TILEGX_OPC_V1MZ, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v1sadau", TILEGX_OPC_V1SADAU, 0x1, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "v1sadu", TILEGX_OPC_V1SADU, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "v1shl", TILEGX_OPC_V1SHL, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v1shli", TILEGX_OPC_V1SHLI, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } }, }, { "v1shrs", TILEGX_OPC_V1SHRS, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v1shrsi", TILEGX_OPC_V1SHRSI, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } }, }, { "v1shru", TILEGX_OPC_V1SHRU, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v1shrui", TILEGX_OPC_V1SHRUI, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } }, }, { "v1sub", TILEGX_OPC_V1SUB, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v1subuc", TILEGX_OPC_V1SUBUC, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v2add", TILEGX_OPC_V2ADD, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v2addi", TILEGX_OPC_V2ADDI, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } }, }, { "v2addsc", TILEGX_OPC_V2ADDSC, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v2adiffs", TILEGX_OPC_V2ADIFFS, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "v2avgs", TILEGX_OPC_V2AVGS, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "v2cmpeq", TILEGX_OPC_V2CMPEQ, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v2cmpeqi", TILEGX_OPC_V2CMPEQI, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } }, }, { "v2cmples", TILEGX_OPC_V2CMPLES, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v2cmpleu", TILEGX_OPC_V2CMPLEU, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v2cmplts", TILEGX_OPC_V2CMPLTS, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, 
{ 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v2cmpltsi", TILEGX_OPC_V2CMPLTSI, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } }, }, { "v2cmpltu", TILEGX_OPC_V2CMPLTU, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v2cmpltui", TILEGX_OPC_V2CMPLTUI, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } }, }, { "v2cmpne", TILEGX_OPC_V2CMPNE, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v2dotp", TILEGX_OPC_V2DOTP, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "v2dotpa", TILEGX_OPC_V2DOTPA, 0x1, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "v2int_h", TILEGX_OPC_V2INT_H, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v2int_l", TILEGX_OPC_V2INT_L, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v2maxs", TILEGX_OPC_V2MAXS, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v2maxsi", TILEGX_OPC_V2MAXSI, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } }, }, { "v2mins", TILEGX_OPC_V2MINS, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v2minsi", TILEGX_OPC_V2MINSI, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } }, }, { "v2mnz", TILEGX_OPC_V2MNZ, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v2mulfsc", TILEGX_OPC_V2MULFSC, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "v2muls", TILEGX_OPC_V2MULS, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "v2mults", TILEGX_OPC_V2MULTS, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "v2mz", TILEGX_OPC_V2MZ, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v2packh", TILEGX_OPC_V2PACKH, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v2packl", TILEGX_OPC_V2PACKL, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v2packuc", TILEGX_OPC_V2PACKUC, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v2sadas", TILEGX_OPC_V2SADAS, 0x1, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "v2sadau", TILEGX_OPC_V2SADAU, 0x1, 3, TREG_ZERO, 1, { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "v2sads", TILEGX_OPC_V2SADS, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "v2sadu", TILEGX_OPC_V2SADU, 0x1, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "v2shl", TILEGX_OPC_V2SHL, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v2shli", TILEGX_OPC_V2SHLI, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } }, }, { "v2shlsc", TILEGX_OPC_V2SHLSC, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v2shrs", TILEGX_OPC_V2SHRS, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v2shrsi", TILEGX_OPC_V2SHRSI, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } }, }, { "v2shru", TILEGX_OPC_V2SHRU, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, }, { "v2shrui", TILEGX_OPC_V2SHRUI, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } }, }, { "v2sub", TILEGX_OPC_V2SUB, 0x3, 3, TREG_ZERO, 1, { { 
6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, },
 { "v2subsc", TILEGX_OPC_V2SUBSC, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, },
 { "v4add", TILEGX_OPC_V4ADD, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, },
 { "v4addsc", TILEGX_OPC_V4ADDSC, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, },
 { "v4int_h", TILEGX_OPC_V4INT_H, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, },
 { "v4int_l", TILEGX_OPC_V4INT_L, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, },
 { "v4packsc", TILEGX_OPC_V4PACKSC, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, },
 { "v4shl", TILEGX_OPC_V4SHL, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, },
 { "v4shlsc", TILEGX_OPC_V4SHLSC, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, },
 { "v4shrs", TILEGX_OPC_V4SHRS, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, },
 { "v4shru", TILEGX_OPC_V4SHRU, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, },
 { "v4sub", TILEGX_OPC_V4SUB, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, },
 { "v4subsc", TILEGX_OPC_V4SUBSC, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } }, },
 { "wh64", TILEGX_OPC_WH64, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } }, },
 { "xor", TILEGX_OPC_XOR, 0xf, 3, TREG_ZERO, 1, { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } }, },
 { "xori", TILEGX_OPC_XORI, 0x3, 3, TREG_ZERO, 1, { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } }, },
 { NULL, TILEGX_OPC_NONE, 0, 0, TREG_ZERO, 0, { { 0, } }, }
};

#define BITFIELD(start, size) ((start) | (((1 << (size)) - 1) << 6))
#define CHILD(array_index) (TILEGX_OPC_NONE + (array_index))

static const unsigned short decode_X0_fsm[936] =
{
  BITFIELD(22, 9) /* index 0 */,
  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
  CHILD(513), CHILD(513), CHILD(513), CHILD(513),
  CHILD(513), CHILD(513), CHILD(513), CHILD(513),
  CHILD(513), CHILD(513), CHILD(513), CHILD(513),
  CHILD(513), CHILD(513), CHILD(513), CHILD(513),
  CHILD(513), CHILD(513), CHILD(513), CHILD(513),
  CHILD(513), CHILD(513), CHILD(513), CHILD(513),
  CHILD(513), CHILD(513), CHILD(513), CHILD(513),
  CHILD(513), CHILD(513), CHILD(513), CHILD(513),
  CHILD(513), CHILD(513), CHILD(513), CHILD(513),
  CHILD(513), CHILD(513),
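  /*
   * Layout note: decode_X0_fsm is a packed decision tree, not a flat
   * lookup table. Each node starts with a BITFIELD(start, size)
   * descriptor (start bit in the low 6 bits, a (2^size - 1) mask in
   * the high bits) followed by 2^size entries, one per possible value
   * of that instruction-word field. An entry <= TILEGX_OPC_NONE is a
   * leaf (the decoded mnemonic, or "no such instruction"); a CHILD(i)
   * entry points at the node beginning at fsm[i]. As a minimal sketch
   * of the walk (names here are illustrative, not kernel API):
   *
   *   unsigned short decode(const unsigned short fsm[],
   *                         tilegx_bundle_bits bits)
   *   {
   *           unsigned short next;
   *           int index = 0;
   *
   *           for (;;) {
   *                   unsigned short bitspec = fsm[index];
   *                   unsigned int field =
   *                           (unsigned int)(bits >> (bitspec & 63))
   *                           & (bitspec >> 6);
   *
   *                   next = fsm[index + 1 + field];
   *                   if (next <= TILEGX_OPC_NONE)
   *                           return next;  // leaf: a TILEGX_OPC_* value
   *                   index = next - TILEGX_OPC_NONE;
   *           }
   *   }
   *
   * For the root above, BITFIELD(22, 9) extracts a 9-bit field, so the
   * node holds 512 entries and the next node starts at index 513,
   * matching the CHILD(513) references.
   */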
CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_BFEXTS, TILEGX_OPC_BFEXTS, TILEGX_OPC_BFEXTS, TILEGX_OPC_BFEXTS, TILEGX_OPC_BFEXTU, TILEGX_OPC_BFEXTU, TILEGX_OPC_BFEXTU, TILEGX_OPC_BFEXTU, TILEGX_OPC_BFINS, TILEGX_OPC_BFINS, TILEGX_OPC_BFINS, TILEGX_OPC_BFINS, TILEGX_OPC_MM, TILEGX_OPC_MM, TILEGX_OPC_MM, TILEGX_OPC_MM, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, CHILD(528), CHILD(578), CHILD(583), CHILD(588), CHILD(593), CHILD(598), TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, 
TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, CHILD(603), CHILD(620), CHILD(637), CHILD(654), CHILD(671), CHILD(703), CHILD(797), CHILD(814), CHILD(831), CHILD(848), CHILD(865), TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, CHILD(889), TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), BITFIELD(6, 2) /* index 513 */, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, CHILD(518), BITFIELD(8, 2) /* index 518 */, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, CHILD(523), BITFIELD(10, 2) /* index 523 */, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_MOVELI, BITFIELD(20, 2) /* index 528 */, TILEGX_OPC_NONE, CHILD(533), TILEGX_OPC_ADDXI, CHILD(548), BITFIELD(6, 2) /* index 533 */, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(538), BITFIELD(8, 2) /* index 538 */, 
TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(543), BITFIELD(10, 2) /* index 543 */, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_MOVEI, BITFIELD(0, 2) /* index 548 */, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(553), BITFIELD(2, 2) /* index 553 */, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(558), BITFIELD(4, 2) /* index 558 */, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(563), BITFIELD(6, 2) /* index 563 */, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(568), BITFIELD(8, 2) /* index 568 */, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(573), BITFIELD(10, 2) /* index 573 */, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_INFO, BITFIELD(20, 2) /* index 578 */, TILEGX_OPC_CMPEQI, TILEGX_OPC_CMPLTSI, TILEGX_OPC_CMPLTUI, TILEGX_OPC_ORI, BITFIELD(20, 2) /* index 583 */, TILEGX_OPC_V1ADDI, TILEGX_OPC_V1CMPEQI, TILEGX_OPC_V1CMPLTSI, TILEGX_OPC_V1CMPLTUI, BITFIELD(20, 2) /* index 588 */, TILEGX_OPC_V1MAXUI, TILEGX_OPC_V1MINUI, TILEGX_OPC_V2ADDI, TILEGX_OPC_V2CMPEQI, BITFIELD(20, 2) /* index 593 */, TILEGX_OPC_V2CMPLTSI, TILEGX_OPC_V2CMPLTUI, TILEGX_OPC_V2MAXSI, TILEGX_OPC_V2MINSI, BITFIELD(20, 2) /* index 598 */, TILEGX_OPC_XORI, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, BITFIELD(18, 4) /* index 603 */, TILEGX_OPC_NONE, TILEGX_OPC_ADDXSC, TILEGX_OPC_ADDX, TILEGX_OPC_ADD, TILEGX_OPC_AND, TILEGX_OPC_CMOVEQZ, TILEGX_OPC_CMOVNEZ, TILEGX_OPC_CMPEQ, TILEGX_OPC_CMPLES, TILEGX_OPC_CMPLEU, TILEGX_OPC_CMPLTS, TILEGX_OPC_CMPLTU, TILEGX_OPC_CMPNE, TILEGX_OPC_CMULAF, TILEGX_OPC_CMULA, TILEGX_OPC_CMULFR, BITFIELD(18, 4) /* index 620 */, TILEGX_OPC_CMULF, TILEGX_OPC_CMULHR, TILEGX_OPC_CMULH, TILEGX_OPC_CMUL, TILEGX_OPC_CRC32_32, TILEGX_OPC_CRC32_8, TILEGX_OPC_DBLALIGN2, TILEGX_OPC_DBLALIGN4, TILEGX_OPC_DBLALIGN6, TILEGX_OPC_DBLALIGN, TILEGX_OPC_FDOUBLE_ADDSUB, TILEGX_OPC_FDOUBLE_ADD_FLAGS, TILEGX_OPC_FDOUBLE_MUL_FLAGS, TILEGX_OPC_FDOUBLE_PACK1, TILEGX_OPC_FDOUBLE_PACK2, TILEGX_OPC_FDOUBLE_SUB_FLAGS, BITFIELD(18, 4) /* index 637 */, TILEGX_OPC_FDOUBLE_UNPACK_MAX, TILEGX_OPC_FDOUBLE_UNPACK_MIN, TILEGX_OPC_FSINGLE_ADD1, TILEGX_OPC_FSINGLE_ADDSUB2, TILEGX_OPC_FSINGLE_MUL1, TILEGX_OPC_FSINGLE_MUL2, TILEGX_OPC_FSINGLE_PACK2, TILEGX_OPC_FSINGLE_SUB1, TILEGX_OPC_MNZ, TILEGX_OPC_MULAX, TILEGX_OPC_MULA_HS_HS, TILEGX_OPC_MULA_HS_HU, TILEGX_OPC_MULA_HS_LS, TILEGX_OPC_MULA_HS_LU, TILEGX_OPC_MULA_HU_HU, TILEGX_OPC_MULA_HU_LS, BITFIELD(18, 4) /* index 654 */, TILEGX_OPC_MULA_HU_LU, TILEGX_OPC_MULA_LS_LS, TILEGX_OPC_MULA_LS_LU, TILEGX_OPC_MULA_LU_LU, TILEGX_OPC_MULX, TILEGX_OPC_MUL_HS_HS, TILEGX_OPC_MUL_HS_HU, TILEGX_OPC_MUL_HS_LS, TILEGX_OPC_MUL_HS_LU, TILEGX_OPC_MUL_HU_HU, TILEGX_OPC_MUL_HU_LS, TILEGX_OPC_MUL_HU_LU, TILEGX_OPC_MUL_LS_LS, TILEGX_OPC_MUL_LS_LU, TILEGX_OPC_MUL_LU_LU, TILEGX_OPC_MZ, BITFIELD(18, 4) /* index 671 */, TILEGX_OPC_NOR, CHILD(688), TILEGX_OPC_ROTL, TILEGX_OPC_SHL1ADDX, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL2ADDX, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL3ADDX, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHLX, TILEGX_OPC_SHL, TILEGX_OPC_SHRS, TILEGX_OPC_SHRUX, TILEGX_OPC_SHRU, TILEGX_OPC_SHUFFLEBYTES, TILEGX_OPC_SUBXSC, BITFIELD(12, 2) /* index 688 */, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(693), BITFIELD(14, 2) /* index 693 */, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(698), BITFIELD(16, 2) /* index 698 */, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_MOVE, BITFIELD(18, 4) /* index 703 */, TILEGX_OPC_SUBX, TILEGX_OPC_SUB, CHILD(720), TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADD, 
TILEGX_OPC_V1ADIFFU, TILEGX_OPC_V1AVGU, TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1DDOTPUSA, TILEGX_OPC_V1DDOTPUS, TILEGX_OPC_V1DOTPA, BITFIELD(12, 4) /* index 720 */, TILEGX_OPC_NONE, CHILD(737), CHILD(742), CHILD(747), CHILD(752), CHILD(757), CHILD(762), CHILD(767), CHILD(772), CHILD(777), CHILD(782), CHILD(787), CHILD(792), TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, BITFIELD(16, 2) /* index 737 */, TILEGX_OPC_CLZ, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, BITFIELD(16, 2) /* index 742 */, TILEGX_OPC_CTZ, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, BITFIELD(16, 2) /* index 747 */, TILEGX_OPC_FNOP, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, BITFIELD(16, 2) /* index 752 */, TILEGX_OPC_FSINGLE_PACK1, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, BITFIELD(16, 2) /* index 757 */, TILEGX_OPC_NOP, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, BITFIELD(16, 2) /* index 762 */, TILEGX_OPC_PCNT, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, BITFIELD(16, 2) /* index 767 */, TILEGX_OPC_REVBITS, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, BITFIELD(16, 2) /* index 772 */, TILEGX_OPC_REVBYTES, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, BITFIELD(16, 2) /* index 777 */, TILEGX_OPC_TBLIDXB0, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, BITFIELD(16, 2) /* index 782 */, TILEGX_OPC_TBLIDXB1, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, BITFIELD(16, 2) /* index 787 */, TILEGX_OPC_TBLIDXB2, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, BITFIELD(16, 2) /* index 792 */, TILEGX_OPC_TBLIDXB3, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, BITFIELD(18, 4) /* index 797 */, TILEGX_OPC_V1DOTPUSA, TILEGX_OPC_V1DOTPUS, TILEGX_OPC_V1DOTP, TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_L, TILEGX_OPC_V1MAXU, TILEGX_OPC_V1MINU, TILEGX_OPC_V1MNZ, TILEGX_OPC_V1MULTU, TILEGX_OPC_V1MULUS, TILEGX_OPC_V1MULU, TILEGX_OPC_V1MZ, TILEGX_OPC_V1SADAU, TILEGX_OPC_V1SADU, TILEGX_OPC_V1SHL, TILEGX_OPC_V1SHRS, BITFIELD(18, 4) /* index 814 */, TILEGX_OPC_V1SHRU, TILEGX_OPC_V1SUBUC, TILEGX_OPC_V1SUB, TILEGX_OPC_V2ADDSC, TILEGX_OPC_V2ADD, TILEGX_OPC_V2ADIFFS, TILEGX_OPC_V2AVGS, TILEGX_OPC_V2CMPEQ, TILEGX_OPC_V2CMPLES, TILEGX_OPC_V2CMPLEU, TILEGX_OPC_V2CMPLTS, TILEGX_OPC_V2CMPLTU, TILEGX_OPC_V2CMPNE, TILEGX_OPC_V2DOTPA, TILEGX_OPC_V2DOTP, TILEGX_OPC_V2INT_H, BITFIELD(18, 4) /* index 831 */, TILEGX_OPC_V2INT_L, TILEGX_OPC_V2MAXS, TILEGX_OPC_V2MINS, TILEGX_OPC_V2MNZ, TILEGX_OPC_V2MULFSC, TILEGX_OPC_V2MULS, TILEGX_OPC_V2MULTS, TILEGX_OPC_V2MZ, TILEGX_OPC_V2PACKH, TILEGX_OPC_V2PACKL, TILEGX_OPC_V2PACKUC, TILEGX_OPC_V2SADAS, TILEGX_OPC_V2SADAU, TILEGX_OPC_V2SADS, TILEGX_OPC_V2SADU, TILEGX_OPC_V2SHLSC, BITFIELD(18, 4) /* index 848 */, TILEGX_OPC_V2SHL, TILEGX_OPC_V2SHRS, TILEGX_OPC_V2SHRU, TILEGX_OPC_V2SUBSC, TILEGX_OPC_V2SUB, TILEGX_OPC_V4ADDSC, TILEGX_OPC_V4ADD, TILEGX_OPC_V4INT_H, TILEGX_OPC_V4INT_L, TILEGX_OPC_V4PACKSC, TILEGX_OPC_V4SHLSC, TILEGX_OPC_V4SHL, TILEGX_OPC_V4SHRS, TILEGX_OPC_V4SHRU, TILEGX_OPC_V4SUBSC, TILEGX_OPC_V4SUB, BITFIELD(18, 3) /* index 865 */, CHILD(874), CHILD(877), CHILD(880), CHILD(883), CHILD(886), TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, BITFIELD(21, 1) /* index 874 */, TILEGX_OPC_XOR, TILEGX_OPC_NONE, BITFIELD(21, 1) /* index 877 */, TILEGX_OPC_V1DDOTPUA, TILEGX_OPC_NONE, BITFIELD(21, 1) /* index 880 */, TILEGX_OPC_V1DDOTPU, TILEGX_OPC_NONE, BITFIELD(21, 1) /* index 883 */, TILEGX_OPC_V1DOTPUA, TILEGX_OPC_NONE, BITFIELD(21, 1) /* index 886 */, 
TILEGX_OPC_V1DOTPU, TILEGX_OPC_NONE, BITFIELD(18, 4) /* index 889 */, TILEGX_OPC_NONE, TILEGX_OPC_ROTLI, TILEGX_OPC_SHLI, TILEGX_OPC_SHLXI, TILEGX_OPC_SHRSI, TILEGX_OPC_SHRUI, TILEGX_OPC_SHRUXI, TILEGX_OPC_V1SHLI, TILEGX_OPC_V1SHRSI, TILEGX_OPC_V1SHRUI, TILEGX_OPC_V2SHLI, TILEGX_OPC_V2SHRSI, TILEGX_OPC_V2SHRUI, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, BITFIELD(0, 2) /* index 906 */, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, CHILD(911), BITFIELD(2, 2) /* index 911 */, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, CHILD(916), BITFIELD(4, 2) /* index 916 */, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, CHILD(921), BITFIELD(6, 2) /* index 921 */, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, CHILD(926), BITFIELD(8, 2) /* index 926 */, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, CHILD(931), BITFIELD(10, 2) /* index 931 */, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_INFOL, }; static const unsigned short decode_X1_fsm[1206] = { BITFIELD(53, 9) /* index 0 */, CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, 
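  /*
   * decode_X1_fsm uses exactly the same node encoding as
   * decode_X0_fsm; only the bit positions move, since the X1
   * instruction slot sits 31 bits higher in the 64-bit bundle
   * (compare the BITFIELD(53, 9) root here with X0's BITFIELD(22, 9),
   * and the X1 immediate fields at bits 31..42 with X0's at
   * bits 0..11).
   */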
TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_BEQZT, TILEGX_OPC_BEQZT, TILEGX_OPC_BEQZ, TILEGX_OPC_BEQZ, TILEGX_OPC_BGEZT, TILEGX_OPC_BGEZT, TILEGX_OPC_BGEZ, TILEGX_OPC_BGEZ, TILEGX_OPC_BGTZT, TILEGX_OPC_BGTZT, TILEGX_OPC_BGTZ, TILEGX_OPC_BGTZ, TILEGX_OPC_BLBCT, TILEGX_OPC_BLBCT, TILEGX_OPC_BLBC, TILEGX_OPC_BLBC, TILEGX_OPC_BLBST, TILEGX_OPC_BLBST, TILEGX_OPC_BLBS, TILEGX_OPC_BLBS, TILEGX_OPC_BLEZT, TILEGX_OPC_BLEZT, TILEGX_OPC_BLEZ, TILEGX_OPC_BLEZ, TILEGX_OPC_BLTZT, TILEGX_OPC_BLTZT, TILEGX_OPC_BLTZ, TILEGX_OPC_BLTZ, TILEGX_OPC_BNEZT, TILEGX_OPC_BNEZT, TILEGX_OPC_BNEZ, TILEGX_OPC_BNEZ, CHILD(528), CHILD(578), CHILD(598), CHILD(663), CHILD(683), CHILD(688), CHILD(693), CHILD(698), CHILD(703), CHILD(708), CHILD(713), CHILD(718), TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, CHILD(723), CHILD(740), CHILD(772), CHILD(789), CHILD(1108), CHILD(1125), CHILD(1142), TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, 
TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, CHILD(1159), TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), BITFIELD(37, 2) /* index 513 */, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, CHILD(518), BITFIELD(39, 2) /* index 518 */, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, CHILD(523), BITFIELD(41, 2) /* index 523 */, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_MOVELI, BITFIELD(51, 2) /* index 528 */, TILEGX_OPC_NONE, CHILD(533), TILEGX_OPC_ADDXI, CHILD(548), BITFIELD(37, 2) /* index 533 */, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(538), BITFIELD(39, 2) /* index 538 */, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(543), BITFIELD(41, 2) /* index 543 */, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_MOVEI, BITFIELD(31, 2) /* index 548 */, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(553), BITFIELD(33, 2) /* index 553 */, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(558), BITFIELD(35, 2) /* index 558 */, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(563), BITFIELD(37, 2) /* index 563 */, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(568), BITFIELD(39, 2) /* index 568 */, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(573), BITFIELD(41, 2) /* index 573 */, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_INFO, BITFIELD(51, 2) /* index 578 */, TILEGX_OPC_CMPEQI, TILEGX_OPC_CMPLTSI, TILEGX_OPC_CMPLTUI, CHILD(583), BITFIELD(31, 2) /* index 583 */, TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, 
TILEGX_OPC_LD1S_ADD, CHILD(588), BITFIELD(33, 2) /* index 588 */, TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, CHILD(593), BITFIELD(35, 2) /* index 593 */, TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, TILEGX_OPC_PREFETCH_ADD_L1_FAULT, BITFIELD(51, 2) /* index 598 */, CHILD(603), CHILD(618), CHILD(633), CHILD(648), BITFIELD(31, 2) /* index 603 */, TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, CHILD(608), BITFIELD(33, 2) /* index 608 */, TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, CHILD(613), BITFIELD(35, 2) /* index 613 */, TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, TILEGX_OPC_PREFETCH_ADD_L1, BITFIELD(31, 2) /* index 618 */, TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, CHILD(623), BITFIELD(33, 2) /* index 623 */, TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, CHILD(628), BITFIELD(35, 2) /* index 628 */, TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, TILEGX_OPC_PREFETCH_ADD_L2_FAULT, BITFIELD(31, 2) /* index 633 */, TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, CHILD(638), BITFIELD(33, 2) /* index 638 */, TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, CHILD(643), BITFIELD(35, 2) /* index 643 */, TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, TILEGX_OPC_PREFETCH_ADD_L2, BITFIELD(31, 2) /* index 648 */, TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, CHILD(653), BITFIELD(33, 2) /* index 653 */, TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, CHILD(658), BITFIELD(35, 2) /* index 658 */, TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, TILEGX_OPC_PREFETCH_ADD_L3_FAULT, BITFIELD(51, 2) /* index 663 */, CHILD(668), TILEGX_OPC_LDNT1S_ADD, TILEGX_OPC_LDNT1U_ADD, TILEGX_OPC_LDNT2S_ADD, BITFIELD(31, 2) /* index 668 */, TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, CHILD(673), BITFIELD(33, 2) /* index 673 */, TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, CHILD(678), BITFIELD(35, 2) /* index 678 */, TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, TILEGX_OPC_PREFETCH_ADD_L3, BITFIELD(51, 2) /* index 683 */, TILEGX_OPC_LDNT2U_ADD, TILEGX_OPC_LDNT4S_ADD, TILEGX_OPC_LDNT4U_ADD, TILEGX_OPC_LDNT_ADD, BITFIELD(51, 2) /* index 688 */, TILEGX_OPC_LD_ADD, TILEGX_OPC_LDNA_ADD, TILEGX_OPC_MFSPR, TILEGX_OPC_MTSPR, BITFIELD(51, 2) /* index 693 */, TILEGX_OPC_ORI, TILEGX_OPC_ST1_ADD, TILEGX_OPC_ST2_ADD, TILEGX_OPC_ST4_ADD, BITFIELD(51, 2) /* index 698 */, TILEGX_OPC_STNT1_ADD, TILEGX_OPC_STNT2_ADD, TILEGX_OPC_STNT4_ADD, TILEGX_OPC_STNT_ADD, BITFIELD(51, 2) /* index 703 */, TILEGX_OPC_ST_ADD, TILEGX_OPC_V1ADDI, TILEGX_OPC_V1CMPEQI, TILEGX_OPC_V1CMPLTSI, BITFIELD(51, 2) /* index 708 */, TILEGX_OPC_V1CMPLTUI, TILEGX_OPC_V1MAXUI, TILEGX_OPC_V1MINUI, TILEGX_OPC_V2ADDI, BITFIELD(51, 2) /* index 713 */, TILEGX_OPC_V2CMPEQI, TILEGX_OPC_V2CMPLTSI, TILEGX_OPC_V2CMPLTUI, TILEGX_OPC_V2MAXSI, BITFIELD(51, 2) /* index 718 */, TILEGX_OPC_V2MINSI, TILEGX_OPC_XORI, TILEGX_OPC_NONE, TILEGX_OPC_NONE, BITFIELD(49, 4) /* index 723 */, TILEGX_OPC_NONE, TILEGX_OPC_ADDXSC, TILEGX_OPC_ADDX, TILEGX_OPC_ADD, TILEGX_OPC_AND, TILEGX_OPC_CMPEQ, TILEGX_OPC_CMPEXCH4, TILEGX_OPC_CMPEXCH, TILEGX_OPC_CMPLES, TILEGX_OPC_CMPLEU, TILEGX_OPC_CMPLTS, TILEGX_OPC_CMPLTU, TILEGX_OPC_CMPNE, TILEGX_OPC_DBLALIGN2, TILEGX_OPC_DBLALIGN4, TILEGX_OPC_DBLALIGN6, BITFIELD(49, 4) /* index 740 */, TILEGX_OPC_EXCH4, TILEGX_OPC_EXCH, TILEGX_OPC_FETCHADD4, TILEGX_OPC_FETCHADDGEZ4, 
TILEGX_OPC_FETCHADDGEZ, TILEGX_OPC_FETCHADD, TILEGX_OPC_FETCHAND4, TILEGX_OPC_FETCHAND, TILEGX_OPC_FETCHOR4, TILEGX_OPC_FETCHOR, TILEGX_OPC_MNZ, TILEGX_OPC_MZ, TILEGX_OPC_NOR, CHILD(757), TILEGX_OPC_ROTL, TILEGX_OPC_SHL1ADDX, BITFIELD(43, 2) /* index 757 */, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(762), BITFIELD(45, 2) /* index 762 */, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(767), BITFIELD(47, 2) /* index 767 */, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_MOVE, BITFIELD(49, 4) /* index 772 */, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL2ADDX, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL3ADDX, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHLX, TILEGX_OPC_SHL, TILEGX_OPC_SHRS, TILEGX_OPC_SHRUX, TILEGX_OPC_SHRU, TILEGX_OPC_ST1, TILEGX_OPC_ST2, TILEGX_OPC_ST4, TILEGX_OPC_STNT1, TILEGX_OPC_STNT2, TILEGX_OPC_STNT4, BITFIELD(46, 7) /* index 789 */, TILEGX_OPC_STNT, TILEGX_OPC_STNT, TILEGX_OPC_STNT, TILEGX_OPC_STNT, TILEGX_OPC_STNT, TILEGX_OPC_STNT, TILEGX_OPC_STNT, TILEGX_OPC_STNT, TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBX, TILEGX_OPC_SUBX, TILEGX_OPC_SUBX, TILEGX_OPC_SUBX, TILEGX_OPC_SUBX, TILEGX_OPC_SUBX, TILEGX_OPC_SUBX, TILEGX_OPC_SUBX, TILEGX_OPC_SUB, TILEGX_OPC_SUB, TILEGX_OPC_SUB, TILEGX_OPC_SUB, TILEGX_OPC_SUB, TILEGX_OPC_SUB, TILEGX_OPC_SUB, TILEGX_OPC_SUB, CHILD(918), CHILD(927), CHILD(1006), CHILD(1090), CHILD(1099), TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD, TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L, BITFIELD(43, 3) /* index 918 */, TILEGX_OPC_NONE, TILEGX_OPC_DRAIN, TILEGX_OPC_DTLBPR, TILEGX_OPC_FINV, TILEGX_OPC_FLUSHWB, TILEGX_OPC_FLUSH, TILEGX_OPC_FNOP, TILEGX_OPC_ICOH, BITFIELD(43, 3) /* index 927 */, CHILD(936), TILEGX_OPC_INV, TILEGX_OPC_IRET, TILEGX_OPC_JALRP, TILEGX_OPC_JALR, 
TILEGX_OPC_JRP, TILEGX_OPC_JR, CHILD(991), BITFIELD(31, 2) /* index 936 */, CHILD(941), CHILD(966), TILEGX_OPC_ILL, TILEGX_OPC_ILL, BITFIELD(33, 2) /* index 941 */, TILEGX_OPC_ILL, TILEGX_OPC_ILL, TILEGX_OPC_ILL, CHILD(946), BITFIELD(35, 2) /* index 946 */, TILEGX_OPC_ILL, CHILD(951), TILEGX_OPC_ILL, TILEGX_OPC_ILL, BITFIELD(37, 2) /* index 951 */, TILEGX_OPC_ILL, CHILD(956), TILEGX_OPC_ILL, TILEGX_OPC_ILL, BITFIELD(39, 2) /* index 956 */, TILEGX_OPC_ILL, CHILD(961), TILEGX_OPC_ILL, TILEGX_OPC_ILL, BITFIELD(41, 2) /* index 961 */, TILEGX_OPC_ILL, TILEGX_OPC_ILL, TILEGX_OPC_BPT, TILEGX_OPC_ILL, BITFIELD(33, 2) /* index 966 */, TILEGX_OPC_ILL, TILEGX_OPC_ILL, TILEGX_OPC_ILL, CHILD(971), BITFIELD(35, 2) /* index 971 */, TILEGX_OPC_ILL, CHILD(976), TILEGX_OPC_ILL, TILEGX_OPC_ILL, BITFIELD(37, 2) /* index 976 */, TILEGX_OPC_ILL, CHILD(981), TILEGX_OPC_ILL, TILEGX_OPC_ILL, BITFIELD(39, 2) /* index 981 */, TILEGX_OPC_ILL, CHILD(986), TILEGX_OPC_ILL, TILEGX_OPC_ILL, BITFIELD(41, 2) /* index 986 */, TILEGX_OPC_ILL, TILEGX_OPC_ILL, TILEGX_OPC_RAISE, TILEGX_OPC_ILL, BITFIELD(31, 2) /* index 991 */, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, CHILD(996), BITFIELD(33, 2) /* index 996 */, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, CHILD(1001), BITFIELD(35, 2) /* index 1001 */, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_PREFETCH_L1_FAULT, BITFIELD(43, 3) /* index 1006 */, CHILD(1015), CHILD(1030), CHILD(1045), CHILD(1060), CHILD(1075), TILEGX_OPC_LDNA, TILEGX_OPC_LDNT1S, TILEGX_OPC_LDNT1U, BITFIELD(31, 2) /* index 1015 */, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, CHILD(1020), BITFIELD(33, 2) /* index 1020 */, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, CHILD(1025), BITFIELD(35, 2) /* index 1025 */, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_PREFETCH, BITFIELD(31, 2) /* index 1030 */, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, CHILD(1035), BITFIELD(33, 2) /* index 1035 */, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, CHILD(1040), BITFIELD(35, 2) /* index 1040 */, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_PREFETCH_L2_FAULT, BITFIELD(31, 2) /* index 1045 */, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, CHILD(1050), BITFIELD(33, 2) /* index 1050 */, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, CHILD(1055), BITFIELD(35, 2) /* index 1055 */, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_PREFETCH_L2, BITFIELD(31, 2) /* index 1060 */, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, CHILD(1065), BITFIELD(33, 2) /* index 1065 */, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, CHILD(1070), BITFIELD(35, 2) /* index 1070 */, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_PREFETCH_L3_FAULT, BITFIELD(31, 2) /* index 1075 */, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, CHILD(1080), BITFIELD(33, 2) /* index 1080 */, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, CHILD(1085), BITFIELD(35, 2) /* index 1085 */, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_PREFETCH_L3, BITFIELD(43, 3) /* index 1090 */, TILEGX_OPC_LDNT2S, TILEGX_OPC_LDNT2U, TILEGX_OPC_LDNT4S, TILEGX_OPC_LDNT4U, TILEGX_OPC_LDNT, TILEGX_OPC_LD, TILEGX_OPC_LNK, TILEGX_OPC_MF, BITFIELD(43, 3) /* index 1099 */, TILEGX_OPC_NAP, TILEGX_OPC_NOP, TILEGX_OPC_SWINT0, TILEGX_OPC_SWINT1, TILEGX_OPC_SWINT2, TILEGX_OPC_SWINT3, TILEGX_OPC_WH64, TILEGX_OPC_NONE, BITFIELD(49, 4) /* index 1108 */, TILEGX_OPC_V1MAXU, TILEGX_OPC_V1MINU, TILEGX_OPC_V1MNZ, TILEGX_OPC_V1MZ, TILEGX_OPC_V1SHL, 
TILEGX_OPC_V1SHRS, TILEGX_OPC_V1SHRU, TILEGX_OPC_V1SUBUC, TILEGX_OPC_V1SUB, TILEGX_OPC_V2ADDSC, TILEGX_OPC_V2ADD, TILEGX_OPC_V2CMPEQ, TILEGX_OPC_V2CMPLES, TILEGX_OPC_V2CMPLEU, TILEGX_OPC_V2CMPLTS, TILEGX_OPC_V2CMPLTU, BITFIELD(49, 4) /* index 1125 */, TILEGX_OPC_V2CMPNE, TILEGX_OPC_V2INT_H, TILEGX_OPC_V2INT_L, TILEGX_OPC_V2MAXS, TILEGX_OPC_V2MINS, TILEGX_OPC_V2MNZ, TILEGX_OPC_V2MZ, TILEGX_OPC_V2PACKH, TILEGX_OPC_V2PACKL, TILEGX_OPC_V2PACKUC, TILEGX_OPC_V2SHLSC, TILEGX_OPC_V2SHL, TILEGX_OPC_V2SHRS, TILEGX_OPC_V2SHRU, TILEGX_OPC_V2SUBSC, TILEGX_OPC_V2SUB, BITFIELD(49, 4) /* index 1142 */, TILEGX_OPC_V4ADDSC, TILEGX_OPC_V4ADD, TILEGX_OPC_V4INT_H, TILEGX_OPC_V4INT_L, TILEGX_OPC_V4PACKSC, TILEGX_OPC_V4SHLSC, TILEGX_OPC_V4SHL, TILEGX_OPC_V4SHRS, TILEGX_OPC_V4SHRU, TILEGX_OPC_V4SUBSC, TILEGX_OPC_V4SUB, TILEGX_OPC_XOR, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, BITFIELD(49, 4) /* index 1159 */, TILEGX_OPC_NONE, TILEGX_OPC_ROTLI, TILEGX_OPC_SHLI, TILEGX_OPC_SHLXI, TILEGX_OPC_SHRSI, TILEGX_OPC_SHRUI, TILEGX_OPC_SHRUXI, TILEGX_OPC_V1SHLI, TILEGX_OPC_V1SHRSI, TILEGX_OPC_V1SHRUI, TILEGX_OPC_V2SHLI, TILEGX_OPC_V2SHRSI, TILEGX_OPC_V2SHRUI, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, BITFIELD(31, 2) /* index 1176 */, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, CHILD(1181), BITFIELD(33, 2) /* index 1181 */, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, CHILD(1186), BITFIELD(35, 2) /* index 1186 */, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, CHILD(1191), BITFIELD(37, 2) /* index 1191 */, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, CHILD(1196), BITFIELD(39, 2) /* index 1196 */, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, CHILD(1201), BITFIELD(41, 2) /* index 1201 */, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_INFOL, }; static const unsigned short decode_Y0_fsm[178] = { BITFIELD(27, 4) /* index 0 */, CHILD(17), TILEGX_OPC_ADDXI, CHILD(32), TILEGX_OPC_CMPEQI, TILEGX_OPC_CMPLTSI, CHILD(62), CHILD(67), CHILD(118), CHILD(123), CHILD(128), CHILD(133), CHILD(153), CHILD(158), CHILD(163), CHILD(168), CHILD(173), BITFIELD(6, 2) /* index 17 */, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(22), BITFIELD(8, 2) /* index 22 */, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(27), BITFIELD(10, 2) /* index 27 */, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_MOVEI, BITFIELD(0, 2) /* index 32 */, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(37), BITFIELD(2, 2) /* index 37 */, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(42), BITFIELD(4, 2) /* index 42 */, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(47), BITFIELD(6, 2) /* index 47 */, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(52), BITFIELD(8, 2) /* index 52 */, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(57), BITFIELD(10, 2) /* index 57 */, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_INFO, BITFIELD(18, 2) /* index 62 */, TILEGX_OPC_ADDX, TILEGX_OPC_ADD, TILEGX_OPC_SUBX, TILEGX_OPC_SUB, BITFIELD(15, 5) /* index 67 */, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL3ADD, 
TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, CHILD(100), CHILD(109), TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, BITFIELD(12, 3) /* index 100 */, TILEGX_OPC_NONE, TILEGX_OPC_CLZ, TILEGX_OPC_CTZ, TILEGX_OPC_FNOP, TILEGX_OPC_FSINGLE_PACK1, TILEGX_OPC_NOP, TILEGX_OPC_PCNT, TILEGX_OPC_REVBITS, BITFIELD(12, 3) /* index 109 */, TILEGX_OPC_REVBYTES, TILEGX_OPC_TBLIDXB0, TILEGX_OPC_TBLIDXB1, TILEGX_OPC_TBLIDXB2, TILEGX_OPC_TBLIDXB3, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, BITFIELD(18, 2) /* index 118 */, TILEGX_OPC_CMPLES, TILEGX_OPC_CMPLEU, TILEGX_OPC_CMPLTS, TILEGX_OPC_CMPLTU, BITFIELD(18, 2) /* index 123 */, TILEGX_OPC_CMPEQ, TILEGX_OPC_CMPNE, TILEGX_OPC_MULAX, TILEGX_OPC_MULX, BITFIELD(18, 2) /* index 128 */, TILEGX_OPC_CMOVEQZ, TILEGX_OPC_CMOVNEZ, TILEGX_OPC_MNZ, TILEGX_OPC_MZ, BITFIELD(18, 2) /* index 133 */, TILEGX_OPC_AND, TILEGX_OPC_NOR, CHILD(138), TILEGX_OPC_XOR, BITFIELD(12, 2) /* index 138 */, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(143), BITFIELD(14, 2) /* index 143 */, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(148), BITFIELD(16, 2) /* index 148 */, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_MOVE, BITFIELD(18, 2) /* index 153 */, TILEGX_OPC_ROTL, TILEGX_OPC_SHL, TILEGX_OPC_SHRS, TILEGX_OPC_SHRU, BITFIELD(18, 2) /* index 158 */, TILEGX_OPC_NONE, TILEGX_OPC_SHL1ADDX, TILEGX_OPC_SHL2ADDX, TILEGX_OPC_SHL3ADDX, BITFIELD(18, 2) /* index 163 */, TILEGX_OPC_MUL_HS_HS, TILEGX_OPC_MUL_HU_HU, TILEGX_OPC_MUL_LS_LS, TILEGX_OPC_MUL_LU_LU, BITFIELD(18, 2) /* index 168 */, TILEGX_OPC_MULA_HS_HS, TILEGX_OPC_MULA_HU_HU, TILEGX_OPC_MULA_LS_LS, TILEGX_OPC_MULA_LU_LU, BITFIELD(18, 2) /* index 173 */, TILEGX_OPC_ROTLI, TILEGX_OPC_SHLI, TILEGX_OPC_SHRSI, TILEGX_OPC_SHRUI, }; static const unsigned short decode_Y1_fsm[167] = { BITFIELD(58, 4) /* index 0 */, TILEGX_OPC_NONE, CHILD(17), TILEGX_OPC_ADDXI, CHILD(32), TILEGX_OPC_CMPEQI, TILEGX_OPC_CMPLTSI, CHILD(62), CHILD(67), CHILD(117), CHILD(122), CHILD(127), CHILD(132), CHILD(152), CHILD(157), CHILD(162), TILEGX_OPC_NONE, BITFIELD(37, 2) /* index 17 */, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(22), BITFIELD(39, 2) /* index 22 */, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(27), BITFIELD(41, 2) /* index 27 */, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_MOVEI, BITFIELD(31, 2) /* index 32 */, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(37), BITFIELD(33, 2) /* index 37 */, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(42), BITFIELD(35, 2) /* index 42 */, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(47), BITFIELD(37, 2) /* index 47 */, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(52), BITFIELD(39, 2) /* index 52 */, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(57), BITFIELD(41, 2) /* index 57 */, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_INFO, BITFIELD(49, 2) /* index 62 */, TILEGX_OPC_ADDX, TILEGX_OPC_ADD, TILEGX_OPC_SUBX, TILEGX_OPC_SUB, BITFIELD(47, 4) /* index 67 */, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, CHILD(84), TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, BITFIELD(43, 3) /* index 84 */, CHILD(93), CHILD(96), CHILD(99), CHILD(102), 
CHILD(105), CHILD(108), CHILD(111), CHILD(114), BITFIELD(46, 1) /* index 93 */, TILEGX_OPC_NONE, TILEGX_OPC_FNOP, BITFIELD(46, 1) /* index 96 */, TILEGX_OPC_NONE, TILEGX_OPC_ILL, BITFIELD(46, 1) /* index 99 */, TILEGX_OPC_NONE, TILEGX_OPC_JALRP, BITFIELD(46, 1) /* index 102 */, TILEGX_OPC_NONE, TILEGX_OPC_JALR, BITFIELD(46, 1) /* index 105 */, TILEGX_OPC_NONE, TILEGX_OPC_JRP, BITFIELD(46, 1) /* index 108 */, TILEGX_OPC_NONE, TILEGX_OPC_JR, BITFIELD(46, 1) /* index 111 */, TILEGX_OPC_NONE, TILEGX_OPC_LNK, BITFIELD(46, 1) /* index 114 */, TILEGX_OPC_NONE, TILEGX_OPC_NOP, BITFIELD(49, 2) /* index 117 */, TILEGX_OPC_CMPLES, TILEGX_OPC_CMPLEU, TILEGX_OPC_CMPLTS, TILEGX_OPC_CMPLTU, BITFIELD(49, 2) /* index 122 */, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_CMPEQ, TILEGX_OPC_CMPNE, BITFIELD(49, 2) /* index 127 */, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_MNZ, TILEGX_OPC_MZ, BITFIELD(49, 2) /* index 132 */, TILEGX_OPC_AND, TILEGX_OPC_NOR, CHILD(137), TILEGX_OPC_XOR, BITFIELD(43, 2) /* index 137 */, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(142), BITFIELD(45, 2) /* index 142 */, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(147), BITFIELD(47, 2) /* index 147 */, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_MOVE, BITFIELD(49, 2) /* index 152 */, TILEGX_OPC_ROTL, TILEGX_OPC_SHL, TILEGX_OPC_SHRS, TILEGX_OPC_SHRU, BITFIELD(49, 2) /* index 157 */, TILEGX_OPC_NONE, TILEGX_OPC_SHL1ADDX, TILEGX_OPC_SHL2ADDX, TILEGX_OPC_SHL3ADDX, BITFIELD(49, 2) /* index 162 */, TILEGX_OPC_ROTLI, TILEGX_OPC_SHLI, TILEGX_OPC_SHRSI, TILEGX_OPC_SHRUI, }; static const unsigned short decode_Y2_fsm[118] = { BITFIELD(62, 2) /* index 0 */, TILEGX_OPC_NONE, CHILD(5), CHILD(66), CHILD(109), BITFIELD(55, 3) /* index 5 */, CHILD(14), CHILD(14), CHILD(14), CHILD(17), CHILD(40), CHILD(40), CHILD(40), CHILD(43), BITFIELD(26, 1) /* index 14 */, TILEGX_OPC_LD1S, TILEGX_OPC_LD1U, BITFIELD(26, 1) /* index 17 */, CHILD(20), CHILD(30), BITFIELD(51, 2) /* index 20 */, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, CHILD(25), BITFIELD(53, 2) /* index 25 */, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_PREFETCH_L1_FAULT, BITFIELD(51, 2) /* index 30 */, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, CHILD(35), BITFIELD(53, 2) /* index 35 */, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_PREFETCH, BITFIELD(26, 1) /* index 40 */, TILEGX_OPC_LD2S, TILEGX_OPC_LD2U, BITFIELD(26, 1) /* index 43 */, CHILD(46), CHILD(56), BITFIELD(51, 2) /* index 46 */, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, CHILD(51), BITFIELD(53, 2) /* index 51 */, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_PREFETCH_L2_FAULT, BITFIELD(51, 2) /* index 56 */, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, CHILD(61), BITFIELD(53, 2) /* index 61 */, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_PREFETCH_L2, BITFIELD(56, 2) /* index 66 */, CHILD(71), CHILD(74), CHILD(90), CHILD(93), BITFIELD(26, 1) /* index 71 */, TILEGX_OPC_NONE, TILEGX_OPC_LD4S, BITFIELD(26, 1) /* index 74 */, TILEGX_OPC_NONE, CHILD(77), BITFIELD(51, 2) /* index 77 */, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, CHILD(82), BITFIELD(53, 2) /* index 82 */, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, CHILD(87), BITFIELD(55, 1) /* index 87 */, TILEGX_OPC_LD4S, TILEGX_OPC_PREFETCH_L3_FAULT, BITFIELD(26, 1) /* index 90 */, TILEGX_OPC_LD4U, TILEGX_OPC_LD, BITFIELD(26, 1) /* index 93 */, CHILD(96), TILEGX_OPC_LD, BITFIELD(51, 2) /* index 96 */, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, 
CHILD(101), BITFIELD(53, 2) /* index 101 */, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, CHILD(106), BITFIELD(55, 1) /* index 106 */, TILEGX_OPC_LD4U, TILEGX_OPC_PREFETCH_L3, BITFIELD(26, 1) /* index 109 */, CHILD(112), CHILD(115), BITFIELD(57, 1) /* index 112 */, TILEGX_OPC_ST1, TILEGX_OPC_ST4, BITFIELD(57, 1) /* index 115 */, TILEGX_OPC_ST2, TILEGX_OPC_ST, }; #undef BITFIELD #undef CHILD const unsigned short * const tilegx_bundle_decoder_fsms[TILEGX_NUM_PIPELINE_ENCODINGS] = { decode_X0_fsm, decode_X1_fsm, decode_Y0_fsm, decode_Y1_fsm, decode_Y2_fsm }; const struct tilegx_operand tilegx_operands[35] = { { TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM8_X0), 8, 1, 0, 0, 0, 0, create_Imm8_X0, get_Imm8_X0 }, { TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM8_X1), 8, 1, 0, 0, 0, 0, create_Imm8_X1, get_Imm8_X1 }, { TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM8_Y0), 8, 1, 0, 0, 0, 0, create_Imm8_Y0, get_Imm8_Y0 }, { TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM8_Y1), 8, 1, 0, 0, 0, 0, create_Imm8_Y1, get_Imm8_Y1 }, { TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM16_X0_HW0_LAST), 16, 1, 0, 0, 0, 0, create_Imm16_X0, get_Imm16_X0 }, { TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM16_X1_HW0_LAST), 16, 1, 0, 0, 0, 0, create_Imm16_X1, get_Imm16_X1 }, { TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 0, 1, 0, 0, create_Dest_X0, get_Dest_X0 }, { TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcA_X0, get_SrcA_X0 }, { TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 0, 1, 0, 0, create_Dest_X1, get_Dest_X1 }, { TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcA_X1, get_SrcA_X1 }, { TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 0, 1, 0, 0, create_Dest_Y0, get_Dest_Y0 }, { TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcA_Y0, get_SrcA_Y0 }, { TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 0, 1, 0, 0, create_Dest_Y1, get_Dest_Y1 }, { TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcA_Y1, get_SrcA_Y1 }, { TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcA_Y2, get_SrcA_Y2 }, { TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 1, 0, 0, create_SrcA_X1, get_SrcA_X1 }, { TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcB_X0, get_SrcB_X0 }, { TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcB_X1, get_SrcB_X1 }, { TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcB_Y0, get_SrcB_Y0 }, { TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcB_Y1, get_SrcB_Y1 }, { TILEGX_OP_TYPE_ADDRESS, BFD_RELOC(TILEGX_BROFF_X1), 17, 1, 0, 0, 1, TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES, create_BrOff_X1, get_BrOff_X1 }, { TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(NONE), 6, 0, 0, 0, 0, 0, create_BFStart_X0, get_BFStart_X0 }, { TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(NONE), 6, 0, 0, 0, 0, 0, create_BFEnd_X0, get_BFEnd_X0 }, { TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 1, 0, 0, create_Dest_X0, get_Dest_X0 }, { TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 1, 0, 0, create_Dest_Y0, get_Dest_Y0 }, { TILEGX_OP_TYPE_ADDRESS, BFD_RELOC(TILEGX_JUMPOFF_X1), 27, 1, 0, 0, 1, TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES, create_JumpOff_X1, get_JumpOff_X1 }, { TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 0, 1, 0, 0, create_SrcBDest_Y2, get_SrcBDest_Y2 }, { TILEGX_OP_TYPE_SPR, BFD_RELOC(TILEGX_MF_IMM14_X1), 14, 0, 0, 0, 0, 0, create_MF_Imm14_X1, get_MF_Imm14_X1 }, { TILEGX_OP_TYPE_SPR, BFD_RELOC(TILEGX_MT_IMM14_X1), 14, 0, 0, 0, 0, 0, create_MT_Imm14_X1, 
get_MT_Imm14_X1 }, { TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_SHAMT_X0), 6, 0, 0, 0, 0, 0, create_ShAmt_X0, get_ShAmt_X0 }, { TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_SHAMT_X1), 6, 0, 0, 0, 0, 0, create_ShAmt_X1, get_ShAmt_X1 }, { TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_SHAMT_Y0), 6, 0, 0, 0, 0, 0, create_ShAmt_Y0, get_ShAmt_Y0 }, { TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_SHAMT_Y1), 6, 0, 0, 0, 0, 0, create_ShAmt_Y1, get_ShAmt_Y1 }, { TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcBDest_Y2, get_SrcBDest_Y2 }, { TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_DEST_IMM8_X1), 8, 1, 0, 0, 0, 0, create_Dest_Imm8_X1, get_Dest_Imm8_X1 } }; /* Given a set of bundle bits and the lookup FSM for a specific pipe, * returns which instruction the bundle contains in that pipe. */ static const struct tilegx_opcode * find_opcode(tilegx_bundle_bits bits, const unsigned short *table) { int index = 0; while (1) { unsigned short bitspec = table[index]; unsigned int bitfield = ((unsigned int)(bits >> (bitspec & 63))) & (bitspec >> 6); unsigned short next = table[index + 1 + bitfield]; if (next <= TILEGX_OPC_NONE) return &tilegx_opcodes[next]; index = next - TILEGX_OPC_NONE; } } int parse_insn_tilegx(tilegx_bundle_bits bits, unsigned long long pc, struct tilegx_decoded_instruction decoded[TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE]) { int num_instructions = 0; int pipe; int min_pipe, max_pipe; if ((bits & TILEGX_BUNDLE_MODE_MASK) == 0) { min_pipe = TILEGX_PIPELINE_X0; max_pipe = TILEGX_PIPELINE_X1; } else { min_pipe = TILEGX_PIPELINE_Y0; max_pipe = TILEGX_PIPELINE_Y2; } /* For each pipe, find an instruction that fits. */ for (pipe = min_pipe; pipe <= max_pipe; pipe++) { const struct tilegx_opcode *opc; struct tilegx_decoded_instruction *d; int i; d = &decoded[num_instructions++]; opc = find_opcode (bits, tilegx_bundle_decoder_fsms[pipe]); d->opcode = opc; /* Decode each operand, sign extending, etc. as appropriate. */ for (i = 0; i < opc->num_operands; i++) { const struct tilegx_operand *op = &tilegx_operands[opc->operands[pipe][i]]; int raw_opval = op->extract (bits); long long opval; if (op->is_signed) { /* Sign-extend the operand. */ int shift = (int)((sizeof(int) * 8) - op->num_bits); raw_opval = (raw_opval << shift) >> shift; } /* Adjust PC-relative scaled branch offsets. */ if (op->type == TILEGX_OP_TYPE_ADDRESS) opval = (raw_opval * TILEGX_BUNDLE_SIZE_IN_BYTES) + pc; else opval = raw_opval; /* Record the final value. */ d->operands[i] = op; d->operand_values[i] = opval; } } return num_instructions; }
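The walk in find_opcode() is easier to see on a tiny, self-contained table. The sketch below is illustrative and not part of the original file: the TOY_* names are invented, and the node packing (bit offset in the low 6 bits, field mask in the upper bits, child links encoded as sentinel + index) is reconstructed from the shift-and-mask logic of find_opcode() itself, since the real BITFIELD()/CHILD() macros are defined earlier in the file.

/*
 * Toy decoder using the same packed-node convention as the FSM tables
 * above.  Any table entry <= TOY_OPC_NONE is a leaf opcode; anything
 * larger is a child reference.  Table and names are hypothetical.
 */
#include <stdio.h>

#define TOY_BITFIELD(start, size) ((start) | (((1u << (size)) - 1) << 6))
#define TOY_OPC_A 0
#define TOY_OPC_B 1
#define TOY_OPC_NONE 2                 /* sentinel: highest opcode value */
#define TOY_CHILD(idx) (TOY_OPC_NONE + (idx))

static const unsigned short toy_fsm[] = {
	TOY_BITFIELD(0, 1) /* index 0 */, TOY_OPC_A, TOY_CHILD(3),
	TOY_BITFIELD(1, 1) /* index 3 */, TOY_OPC_B, TOY_OPC_NONE,
};

static int toy_find_opcode(unsigned long long bits)
{
	int index = 0;

	while (1) {
		unsigned short bitspec = toy_fsm[index];
		unsigned int field = (unsigned int)(bits >> (bitspec & 63)) &
				     (bitspec >> 6);
		unsigned short next = toy_fsm[index + 1 + field];

		if (next <= TOY_OPC_NONE)
			return next;          /* leaf: an opcode */
		index = next - TOY_OPC_NONE;  /* interior: follow child */
	}
}

int main(void)
{
	/* bit0=0 -> A; bit0=1,bit1=0 -> B; bit0=1,bit1=1 -> NONE */
	printf("%d %d %d\n", toy_find_opcode(0x0), toy_find_opcode(0x1),
	       toy_find_opcode(0x3));
	return 0;
}

Running it prints "0 1 2": bit 0 either resolves directly to opcode A or dispatches to a second-level node, which then decides on bit 1, exactly the two-step descent the real tables perform per pipe.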
gpl-2.0
jderrick/linux-torvalds
arch/arm/mach-mmp/common.c
2842
1217
/* * linux/arch/arm/mach-mmp/common.c * * Code common to PXA168 processor lines * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <asm/page.h> #include <asm/mach/map.h> #include <asm/system_misc.h> #include <mach/addr-map.h> #include <mach/cputype.h> #include "common.h" #define MMP_CHIPID (AXI_VIRT_BASE + 0x82c00) unsigned int mmp_chip_id; EXPORT_SYMBOL(mmp_chip_id); static struct map_desc standard_io_desc[] __initdata = { { .pfn = __phys_to_pfn(APB_PHYS_BASE), .virtual = (unsigned long)APB_VIRT_BASE, .length = APB_PHYS_SIZE, .type = MT_DEVICE, }, { .pfn = __phys_to_pfn(AXI_PHYS_BASE), .virtual = (unsigned long)AXI_VIRT_BASE, .length = AXI_PHYS_SIZE, .type = MT_DEVICE, }, }; void __init mmp_map_io(void) { iotable_init(standard_io_desc, ARRAY_SIZE(standard_io_desc)); /* this is early, initialize mmp_chip_id here */ mmp_chip_id = __raw_readl(MMP_CHIPID); } void mmp_restart(enum reboot_mode mode, const char *cmd) { soft_restart(0); }
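Because the APB/AXI windows above are fixed-offset mappings, turning a physical register address into the ioremapped virtual address used by __raw_readl(MMP_CHIPID) is plain arithmetic. The standalone sketch below mimics that lookup; the window base addresses and sizes are assumptions copied from what mach/addr-map.h appears to define in this tree, not values confirmed by this file.

/*
 * Minimal sketch (not part of the original file) of the static
 * phys-to-virt translation set up by mmp_map_io() above.
 */
#include <stdio.h>

struct window { unsigned long phys, virt, len; };

/* Assumed to match mach/addr-map.h: the APB and AXI static windows. */
static const struct window io_windows[] = {
	{ 0xd4000000ul, 0xfe000000ul, 0x00200000ul },	/* APB */
	{ 0xd4200000ul, 0xfe200000ul, 0x00200000ul },	/* AXI */
};

static unsigned long phys_to_virt_io(unsigned long pa)
{
	size_t i;

	for (i = 0; i < sizeof(io_windows) / sizeof(io_windows[0]); i++) {
		const struct window *w = &io_windows[i];

		if (pa >= w->phys && pa < w->phys + w->len)
			return w->virt + (pa - w->phys);
	}
	return 0;	/* not covered by a static mapping */
}

int main(void)
{
	/* the chip-ID register sits 0x82c00 into the AXI window */
	printf("CHIPID virt = 0x%lx\n",
	       phys_to_virt_io(0xd4200000ul + 0x82c00));
	return 0;
}

Under those assumed bases this prints 0xfe282c00, which is AXI_VIRT_BASE + 0x82c00, the same constant the file computes as MMP_CHIPID.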
gpl-2.0
gripped/MK808-headless-nand-3.0.8-rk3066
mm/net/ipv4/udplite.c
3098
3293
/* * UDPLITE An implementation of the UDP-Lite protocol (RFC 3828). * * Authors: Gerrit Renker <gerrit@erg.abdn.ac.uk> * * Changes: * Fixes: * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include "udp_impl.h" struct udp_table udplite_table __read_mostly; EXPORT_SYMBOL(udplite_table); static int udplite_rcv(struct sk_buff *skb) { return __udp4_lib_rcv(skb, &udplite_table, IPPROTO_UDPLITE); } static void udplite_err(struct sk_buff *skb, u32 info) { __udp4_lib_err(skb, info, &udplite_table); } static const struct net_protocol udplite_protocol = { .handler = udplite_rcv, .err_handler = udplite_err, .no_policy = 1, .netns_ok = 1, }; struct proto udplite_prot = { .name = "UDP-Lite", .owner = THIS_MODULE, .close = udp_lib_close, .connect = ip4_datagram_connect, .disconnect = udp_disconnect, .ioctl = udp_ioctl, .init = udplite_sk_init, .destroy = udp_destroy_sock, .setsockopt = udp_setsockopt, .getsockopt = udp_getsockopt, .sendmsg = udp_sendmsg, .recvmsg = udp_recvmsg, .sendpage = udp_sendpage, .backlog_rcv = udp_queue_rcv_skb, .hash = udp_lib_hash, .unhash = udp_lib_unhash, .get_port = udp_v4_get_port, .obj_size = sizeof(struct udp_sock), .slab_flags = SLAB_DESTROY_BY_RCU, .h.udp_table = &udplite_table, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_udp_setsockopt, .compat_getsockopt = compat_udp_getsockopt, #endif .clear_sk = sk_prot_clear_portaddr_nulls, }; EXPORT_SYMBOL(udplite_prot); static struct inet_protosw udplite4_protosw = { .type = SOCK_DGRAM, .protocol = IPPROTO_UDPLITE, .prot = &udplite_prot, .ops = &inet_dgram_ops, .no_check = 0, /* must checksum (RFC 3828) */ .flags = INET_PROTOSW_PERMANENT, }; #ifdef CONFIG_PROC_FS static struct udp_seq_afinfo udplite4_seq_afinfo = { .name = "udplite", .family = AF_INET, .udp_table = &udplite_table, .seq_fops = { .owner = THIS_MODULE, }, .seq_ops = { .show = udp4_seq_show, }, }; static int __net_init udplite4_proc_init_net(struct net *net) { return udp_proc_register(net, &udplite4_seq_afinfo); } static void __net_exit udplite4_proc_exit_net(struct net *net) { udp_proc_unregister(net, &udplite4_seq_afinfo); } static struct pernet_operations udplite4_net_ops = { .init = udplite4_proc_init_net, .exit = udplite4_proc_exit_net, }; static __init int udplite4_proc_init(void) { return register_pernet_subsys(&udplite4_net_ops); } #else static inline int udplite4_proc_init(void) { return 0; } #endif void __init udplite4_register(void) { udp_table_init(&udplite_table, "UDP-Lite"); if (proto_register(&udplite_prot, 1)) goto out_register_err; if (inet_add_protocol(&udplite_protocol, IPPROTO_UDPLITE) < 0) goto out_unregister_proto; inet_register_protosw(&udplite4_protosw); if (udplite4_proc_init()) printk(KERN_ERR "%s: Cannot register /proc!\n", __func__); return; out_unregister_proto: proto_unregister(&udplite_prot); out_register_err: printk(KERN_CRIT "%s: Cannot add UDP-Lite protocol.\n", __func__); }
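What this registration buys userspace is the ability to open IPPROTO_UDPLITE sockets and tune the partial checksum coverage that distinguishes UDP-Lite from plain UDP. Below is a minimal, hedged sketch (not part of this file) following Documentation/networking/udplite.txt; the fallback #defines mirror the kernel UAPI values for toolchains whose headers lack them.

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IPPROTO_UDPLITE
#define IPPROTO_UDPLITE		136
#endif
#ifndef UDPLITE_SEND_CSCOV
#define UDPLITE_SEND_CSCOV	10	/* sender checksum coverage */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
	/* coverage counts from the start of the UDP-Lite header, so 20
	 * checksums the 8-byte header plus the first 12 payload bytes */
	int cov = 20;

	if (fd < 0 || setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV,
				 &cov, sizeof(cov)) < 0) {
		perror("udplite");
		return 1;
	}
	/* sendto()/recvfrom() now behave as on a normal UDP socket,
	 * except only the covered prefix is protected by the checksum */
	close(fd);
	return 0;
}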
gpl-2.0
aweosomeabhijeet/android_kernel_sony_nicki
drivers/parport/parport_mfc3.c
4890
10988
/* Low-level parallel port routines for the Multiface 3 card * * Author: Joerg Dorchain <joerg@dorchain.net> * * (C) The elitist m68k Users(TM) * * based on the existing parport_amiga and lp_mfc * * * From the MFC3 documentation: * * Miscellaneous PIA Details * ------------------------- * * The two open-drain interrupt outputs /IRQA and /IRQB are routed to * /INT2 of the Z2 bus. * * The CPU data bus of the PIA (D0-D7) is connected to D8-D15 on the Z2 * bus. This means that any PIA registers are accessed at even addresses. * * Centronics Pin Connections for the PIA * -------------------------------------- * * The following table shows the connections between the PIA and the * Centronics interface connector. These connections implement a single, but * very complete, Centronics type interface. The Pin column gives the pin * numbers of the PIA. The Centronics pin numbers can be found in the section * "Parallel Connectors". * * * Pin | PIA | Dir | Centronics Names * -------+-----+-----+--------------------------------------------------------- * 19 | CB2 | --> | /STROBE (aka /DRDY) * 10-17 | PBx | <-> | DATA0 - DATA7 * 18 | CB1 | <-- | /ACK * 40 | CA1 | <-- | BUSY * 3 | PA1 | <-- | PAPER-OUT (aka POUT) * 4 | PA2 | <-- | SELECTED (aka SEL) * 9 | PA7 | --> | /INIT (aka /RESET or /INPUT-PRIME) * 6 | PA4 | <-- | /ERROR (aka /FAULT) * 7 | PA5 | --> | DIR (aka /SELECT-IN) * 8 | PA6 | --> | /AUTO-FEED-XT * 39 | CA2 | --> | open * 5 | PA3 | <-- | /ACK (same as CB1!) * 2 | PA0 | <-- | BUSY (same as CA1!) * -------+-----+-----+--------------------------------------------------------- * * Should be enough to understand some of the driver. * * Per convention for normal use the port registers are visible. * If you need the data direction registers, restore the value in the * control register. */ #include "multiface.h" #include <linux/module.h> #include <linux/init.h> #include <linux/parport.h> #include <linux/delay.h> #include <linux/mc6821.h> #include <linux/zorro.h> #include <linux/interrupt.h> #include <asm/setup.h> #include <asm/amigahw.h> #include <asm/irq.h> #include <asm/amigaints.h> /* Maximum Number of Cards supported */ #define MAX_MFC 5 #undef DEBUG #ifdef DEBUG #define DPRINTK printk #else static inline int DPRINTK(void *nothing, ...) {return 0;} #endif static struct parport *this_port[MAX_MFC] = {NULL, }; static volatile int dummy; /* for trigger reads */ #define pia(dev) ((struct pia *)(dev->base)) static struct parport_operations pp_mfc3_ops; static void mfc3_write_data(struct parport *p, unsigned char data) { DPRINTK(KERN_DEBUG "write_data %c\n",data); dummy = pia(p)->pprb; /* clears irq bit */ /* Triggers also /STROBE.*/ pia(p)->pprb = data; } static unsigned char mfc3_read_data(struct parport *p) { /* clears interrupt bit. Triggers also /STROBE. */ return pia(p)->pprb; } static unsigned char control_pc_to_mfc3(unsigned char control) { unsigned char ret = 32|64; if (control & PARPORT_CONTROL_SELECT) /* XXX: What is SELECP?
*/ ret &= ~32; /* /SELECT_IN */ if (control & PARPORT_CONTROL_INIT) /* INITP */ ret |= 128; if (control & PARPORT_CONTROL_AUTOFD) /* AUTOLF */ ret &= ~64; if (control & PARPORT_CONTROL_STROBE) /* Strobe */ /* Handled directly by hardware */; return ret; } static unsigned char control_mfc3_to_pc(unsigned char control) { unsigned char ret = PARPORT_CONTROL_STROBE | PARPORT_CONTROL_AUTOFD | PARPORT_CONTROL_SELECT; if (control & 128) /* /INITP */ ret |= PARPORT_CONTROL_INIT; if (control & 64) /* /AUTOLF */ ret &= ~PARPORT_CONTROL_AUTOFD; if (control & 32) /* /SELECT_IN */ ret &= ~PARPORT_CONTROL_SELECT; return ret; } static void mfc3_write_control(struct parport *p, unsigned char control) { DPRINTK(KERN_DEBUG "write_control %02x\n",control); pia(p)->ppra = (pia(p)->ppra & 0x1f) | control_pc_to_mfc3(control); } static unsigned char mfc3_read_control( struct parport *p) { DPRINTK(KERN_DEBUG "read_control \n"); return control_mfc3_to_pc(pia(p)->ppra & 0xe0); } static unsigned char mfc3_frob_control( struct parport *p, unsigned char mask, unsigned char val) { unsigned char old; DPRINTK(KERN_DEBUG "frob_control mask %02x, value %02x\n",mask,val); old = mfc3_read_control(p); mfc3_write_control(p, (old & ~mask) ^ val); return old; } #if 0 /* currently unused */ static unsigned char status_pc_to_mfc3(unsigned char status) { unsigned char ret = 1; if (status & PARPORT_STATUS_BUSY) /* Busy */ ret &= ~1; if (status & PARPORT_STATUS_ACK) /* Ack */ ret |= 8; if (status & PARPORT_STATUS_PAPEROUT) /* PaperOut */ ret |= 2; if (status & PARPORT_STATUS_SELECT) /* select */ ret |= 4; if (status & PARPORT_STATUS_ERROR) /* error */ ret |= 16; return ret; } #endif static unsigned char status_mfc3_to_pc(unsigned char status) { unsigned char ret = PARPORT_STATUS_BUSY; if (status & 1) /* Busy */ ret &= ~PARPORT_STATUS_BUSY; if (status & 2) /* PaperOut */ ret |= PARPORT_STATUS_PAPEROUT; if (status & 4) /* Selected */ ret |= PARPORT_STATUS_SELECT; if (status & 8) /* Ack */ ret |= PARPORT_STATUS_ACK; if (status & 16) /* /ERROR */ ret |= PARPORT_STATUS_ERROR; return ret; } #if 0 /* currently unused */ static void mfc3_write_status( struct parport *p, unsigned char status) { DPRINTK(KERN_DEBUG "write_status %02x\n",status); pia(p)->ppra = (pia(p)->ppra & 0xe0) | status_pc_to_mfc3(status); } #endif static unsigned char mfc3_read_status(struct parport *p) { unsigned char status; status = status_mfc3_to_pc(pia(p)->ppra & 0x1f); DPRINTK(KERN_DEBUG "read_status %02x\n", status); return status; } #if 0 /* currently unused */ static void mfc3_change_mode( struct parport *p, int m) { /* XXX: This port only has one mode, and I am not sure about the corresponding PC-style mode*/ } #endif static int use_cnt = 0; static irqreturn_t mfc3_interrupt(int irq, void *dev_id) { int i; for( i = 0; i < MAX_MFC; i++) if (this_port[i] != NULL) if (pia(this_port[i])->crb & 128) { /* Board caused interrupt */ dummy = pia(this_port[i])->pprb; /* clear irq bit */ parport_generic_irq(this_port[i]); } return IRQ_HANDLED; } static void mfc3_enable_irq(struct parport *p) { pia(p)->crb |= PIA_C1_ENABLE_IRQ; } static void mfc3_disable_irq(struct parport *p) { pia(p)->crb &= ~PIA_C1_ENABLE_IRQ; } static void mfc3_data_forward(struct parport *p) { DPRINTK(KERN_DEBUG "forward\n"); pia(p)->crb &= ~PIA_DDR; /* make data direction register visible */ pia(p)->pddrb = 255; /* all pins output */ pia(p)->crb |= PIA_DDR; /* make data register visible - default */ } static void mfc3_data_reverse(struct parport *p) { DPRINTK(KERN_DEBUG "reverse\n"); pia(p)->crb &= 
~PIA_DDR; /* make data direction register visible */ pia(p)->pddrb = 0; /* all pins input */ pia(p)->crb |= PIA_DDR; /* make data register visible - default */ } static void mfc3_init_state(struct pardevice *dev, struct parport_state *s) { s->u.amiga.data = 0; s->u.amiga.datadir = 255; s->u.amiga.status = 0; s->u.amiga.statusdir = 0xe0; } static void mfc3_save_state(struct parport *p, struct parport_state *s) { s->u.amiga.data = pia(p)->pprb; pia(p)->crb &= ~PIA_DDR; s->u.amiga.datadir = pia(p)->pddrb; pia(p)->crb |= PIA_DDR; s->u.amiga.status = pia(p)->ppra; pia(p)->cra &= ~PIA_DDR; s->u.amiga.statusdir = pia(p)->pddrb; pia(p)->cra |= PIA_DDR; } static void mfc3_restore_state(struct parport *p, struct parport_state *s) { pia(p)->pprb = s->u.amiga.data; pia(p)->crb &= ~PIA_DDR; pia(p)->pddrb = s->u.amiga.datadir; pia(p)->crb |= PIA_DDR; pia(p)->ppra = s->u.amiga.status; pia(p)->cra &= ~PIA_DDR; pia(p)->pddrb = s->u.amiga.statusdir; pia(p)->cra |= PIA_DDR; } static struct parport_operations pp_mfc3_ops = { .write_data = mfc3_write_data, .read_data = mfc3_read_data, .write_control = mfc3_write_control, .read_control = mfc3_read_control, .frob_control = mfc3_frob_control, .read_status = mfc3_read_status, .enable_irq = mfc3_enable_irq, .disable_irq = mfc3_disable_irq, .data_forward = mfc3_data_forward, .data_reverse = mfc3_data_reverse, .init_state = mfc3_init_state, .save_state = mfc3_save_state, .restore_state = mfc3_restore_state, .epp_write_data = parport_ieee1284_epp_write_data, .epp_read_data = parport_ieee1284_epp_read_data, .epp_write_addr = parport_ieee1284_epp_write_addr, .epp_read_addr = parport_ieee1284_epp_read_addr, .ecp_write_data = parport_ieee1284_ecp_write_data, .ecp_read_data = parport_ieee1284_ecp_read_data, .ecp_write_addr = parport_ieee1284_ecp_write_addr, .compat_write_data = parport_ieee1284_write_compat, .nibble_read_data = parport_ieee1284_read_nibble, .byte_read_data = parport_ieee1284_read_byte, .owner = THIS_MODULE, }; /* ----------- Initialisation code --------------------------------- */ static int __init parport_mfc3_init(void) { struct parport *p; int pias = 0; struct pia *pp; struct zorro_dev *z = NULL; if (!MACH_IS_AMIGA) return -ENODEV; while ((z = zorro_find_device(ZORRO_PROD_BSC_MULTIFACE_III, z))) { unsigned long piabase = z->resource.start+PIABASE; if (!request_mem_region(piabase, sizeof(struct pia), "PIA")) continue; pp = (struct pia *)ZTWO_VADDR(piabase); pp->crb = 0; pp->pddrb = 255; /* all data pins output */ pp->crb = PIA_DDR|32|8; dummy = pp->pddrb; /* reading clears interrupt */ pp->cra = 0; pp->pddra = 0xe0; /* /RESET, /DIR ,/AUTO-FEED output */ pp->cra = PIA_DDR; pp->ppra = 0; /* reset printer */ udelay(10); pp->ppra = 128; p = parport_register_port((unsigned long)pp, IRQ_AMIGA_PORTS, PARPORT_DMA_NONE, &pp_mfc3_ops); if (!p) goto out_port; if (p->irq != PARPORT_IRQ_NONE) { if (use_cnt++ == 0) if (request_irq(IRQ_AMIGA_PORTS, mfc3_interrupt, IRQF_SHARED, p->name, &pp_mfc3_ops)) goto out_irq; } p->dev = &z->dev; this_port[pias++] = p; printk(KERN_INFO "%s: Multiface III port using irq\n", p->name); /* XXX: set operating mode */ p->private_data = (void *)piabase; parport_announce_port (p); if (pias >= MAX_MFC) break; continue; out_irq: parport_put_port(p); out_port: release_mem_region(piabase, sizeof(struct pia)); } return pias ? 
0 : -ENODEV; } static void __exit parport_mfc3_exit(void) { int i; for (i = 0; i < MAX_MFC; i++) { if (!this_port[i]) continue; parport_remove_port(this_port[i]); if (this_port[i]->irq != PARPORT_IRQ_NONE) { if (--use_cnt == 0) free_irq(IRQ_AMIGA_PORTS, &pp_mfc3_ops); } release_mem_region(ZTWO_PADDR(this_port[i]->private_data), sizeof(struct pia)); parport_put_port(this_port[i]); } } MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>"); MODULE_DESCRIPTION("Parport Driver for Multiface 3 expansion cards Parallel Port"); MODULE_SUPPORTED_DEVICE("Multiface 3 Parallel Port"); MODULE_LICENSE("GPL"); module_init(parport_mfc3_init) module_exit(parport_mfc3_exit)
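The active-low bit juggling in control_pc_to_mfc3() is easier to verify in isolation. Below is a worked, standalone replica, not part of the driver; the PARPORT_CONTROL_* values are the standard ones from linux/parport.h, and the 32/64/128 masks are the PIA port A bits documented in the pin table at the top of this file.

/*
 * Standalone copy of the PC-control to MFC3 PIA bit mapping, for
 * illustration only.
 */
#include <stdio.h>

#define PARPORT_CONTROL_STROBE	0x1
#define PARPORT_CONTROL_AUTOFD	0x2
#define PARPORT_CONTROL_INIT	0x4
#define PARPORT_CONTROL_SELECT	0x8

static unsigned char pc_to_mfc3(unsigned char control)
{
	unsigned char ret = 32 | 64;	/* /SELECT_IN, /AUTO-FEED idle high */

	if (control & PARPORT_CONTROL_SELECT)
		ret &= ~32;		/* active-low /SELECT_IN (PA5) */
	if (control & PARPORT_CONTROL_INIT)
		ret |= 128;		/* /INIT on PA7 */
	if (control & PARPORT_CONTROL_AUTOFD)
		ret &= ~64;		/* active-low /AUTO-FEED-XT (PA6) */
	/* STROBE is generated by the PIA hardware itself */
	return ret;
}

int main(void)
{
	/* selecting clears bit 5, asserting /INIT sets bit 7 -> 0xc0 */
	printf("0x%02x\n", pc_to_mfc3(PARPORT_CONTROL_SELECT |
				      PARPORT_CONTROL_INIT));
	return 0;
}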
gpl-2.0
FrancescoCG/Crazy-Kernel1-TW-Kernel
drivers/net/wireless/ath/carl9170/usb.c
4890
25632
/* * Atheros CARL9170 driver * * USB - frontend * * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, see * http://www.gnu.org/licenses/. * * This file incorporates work covered by the following copyright and * permission notice: * Copyright (c) 2007-2008 Atheros Communications, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/firmware.h> #include <linux/etherdevice.h> #include <linux/device.h> #include <net/mac80211.h> #include "carl9170.h" #include "cmd.h" #include "hw.h" #include "fwcmd.h" MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>"); MODULE_AUTHOR("Christian Lamparter <chunkeey@googlemail.com>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Atheros AR9170 802.11n USB wireless"); MODULE_FIRMWARE(CARL9170FW_NAME); MODULE_ALIAS("ar9170usb"); MODULE_ALIAS("arusb_lnx"); /* * Note: * * Always update our wiki's device list (located at: * http://wireless.kernel.org/en/users/Drivers/ar9170/devices ), * whenever you add a new device. 
*/ static struct usb_device_id carl9170_usb_ids[] = { /* Atheros 9170 */ { USB_DEVICE(0x0cf3, 0x9170) }, /* Atheros TG121N */ { USB_DEVICE(0x0cf3, 0x1001) }, /* TP-Link TL-WN821N v2 */ { USB_DEVICE(0x0cf3, 0x1002), .driver_info = CARL9170_WPS_BUTTON | CARL9170_ONE_LED }, /* 3Com Dual Band 802.11n USB Adapter */ { USB_DEVICE(0x0cf3, 0x1010) }, /* H3C Dual Band 802.11n USB Adapter */ { USB_DEVICE(0x0cf3, 0x1011) }, /* Cace Airpcap NX */ { USB_DEVICE(0xcace, 0x0300) }, /* D-Link DWA 160 A1 */ { USB_DEVICE(0x07d1, 0x3c10) }, /* D-Link DWA 160 A2 */ { USB_DEVICE(0x07d1, 0x3a09) }, /* D-Link DWA 130 D */ { USB_DEVICE(0x07d1, 0x3a0f) }, /* Netgear WNA1000 */ { USB_DEVICE(0x0846, 0x9040) }, /* Netgear WNDA3100 (v1) */ { USB_DEVICE(0x0846, 0x9010) }, /* Netgear WN111 v2 */ { USB_DEVICE(0x0846, 0x9001), .driver_info = CARL9170_ONE_LED }, /* Zydas ZD1221 */ { USB_DEVICE(0x0ace, 0x1221) }, /* Proxim ORiNOCO 802.11n USB */ { USB_DEVICE(0x1435, 0x0804) }, /* WNC Generic 11n USB Dongle */ { USB_DEVICE(0x1435, 0x0326) }, /* ZyXEL NWD271N */ { USB_DEVICE(0x0586, 0x3417) }, /* Z-Com UB81 BG */ { USB_DEVICE(0x0cde, 0x0023) }, /* Z-Com UB82 ABG */ { USB_DEVICE(0x0cde, 0x0026) }, /* Sphairon Homelink 1202 */ { USB_DEVICE(0x0cde, 0x0027) }, /* Arcadyan WN7512 */ { USB_DEVICE(0x083a, 0xf522) }, /* Planex GWUS300 */ { USB_DEVICE(0x2019, 0x5304) }, /* IO-Data WNGDNUS2 */ { USB_DEVICE(0x04bb, 0x093f) }, /* NEC WL300NU-G */ { USB_DEVICE(0x0409, 0x0249) }, /* NEC WL300NU-AG */ { USB_DEVICE(0x0409, 0x02b4) }, /* AVM FRITZ!WLAN USB Stick N */ { USB_DEVICE(0x057c, 0x8401) }, /* AVM FRITZ!WLAN USB Stick N 2.4 */ { USB_DEVICE(0x057c, 0x8402) }, /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */ { USB_DEVICE(0x1668, 0x1200) }, /* Airlive X.USB a/b/g/n */ { USB_DEVICE(0x1b75, 0x9170) }, /* terminate */ {} }; MODULE_DEVICE_TABLE(usb, carl9170_usb_ids); static void carl9170_usb_submit_data_urb(struct ar9170 *ar) { struct urb *urb; int err; if (atomic_inc_return(&ar->tx_anch_urbs) > AR9170_NUM_TX_URBS) goto err_acc; urb = usb_get_from_anchor(&ar->tx_wait); if (!urb) goto err_acc; usb_anchor_urb(urb, &ar->tx_anch); err = usb_submit_urb(urb, GFP_ATOMIC); if (unlikely(err)) { if (net_ratelimit()) { dev_err(&ar->udev->dev, "tx submit failed (%d)\n", urb->status); } usb_unanchor_urb(urb); usb_anchor_urb(urb, &ar->tx_err); } usb_free_urb(urb); if (likely(err == 0)) return; err_acc: atomic_dec(&ar->tx_anch_urbs); } static void carl9170_usb_tx_data_complete(struct urb *urb) { struct ar9170 *ar = usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); if (WARN_ON_ONCE(!ar)) { dev_kfree_skb_irq(urb->context); return; } atomic_dec(&ar->tx_anch_urbs); switch (urb->status) { /* everything is fine */ case 0: carl9170_tx_callback(ar, (void *)urb->context); break; /* disconnect */ case -ENOENT: case -ECONNRESET: case -ENODEV: case -ESHUTDOWN: /* * Defer the frame clean-up to the tasklet worker. * This is necessary, because carl9170_tx_drop * does not work in an irqsave context. */ usb_anchor_urb(urb, &ar->tx_err); return; /* a random transmission error has occurred? 
*/ default: if (net_ratelimit()) { dev_err(&ar->udev->dev, "tx failed (%d)\n", urb->status); } usb_anchor_urb(urb, &ar->tx_err); break; } if (likely(IS_STARTED(ar))) carl9170_usb_submit_data_urb(ar); } static int carl9170_usb_submit_cmd_urb(struct ar9170 *ar) { struct urb *urb; int err; if (atomic_inc_return(&ar->tx_cmd_urbs) != 1) { atomic_dec(&ar->tx_cmd_urbs); return 0; } urb = usb_get_from_anchor(&ar->tx_cmd); if (!urb) { atomic_dec(&ar->tx_cmd_urbs); return 0; } usb_anchor_urb(urb, &ar->tx_anch); err = usb_submit_urb(urb, GFP_ATOMIC); if (unlikely(err)) { usb_unanchor_urb(urb); atomic_dec(&ar->tx_cmd_urbs); } usb_free_urb(urb); return err; } static void carl9170_usb_cmd_complete(struct urb *urb) { struct ar9170 *ar = urb->context; int err = 0; if (WARN_ON_ONCE(!ar)) return; atomic_dec(&ar->tx_cmd_urbs); switch (urb->status) { /* everything is fine */ case 0: break; /* disconnect */ case -ENOENT: case -ECONNRESET: case -ENODEV: case -ESHUTDOWN: return; default: err = urb->status; break; } if (!IS_INITIALIZED(ar)) return; if (err) dev_err(&ar->udev->dev, "submit cmd cb failed (%d).\n", err); err = carl9170_usb_submit_cmd_urb(ar); if (err) dev_err(&ar->udev->dev, "submit cmd failed (%d).\n", err); } static void carl9170_usb_rx_irq_complete(struct urb *urb) { struct ar9170 *ar = urb->context; if (WARN_ON_ONCE(!ar)) return; switch (urb->status) { /* everything is fine */ case 0: break; /* disconnect */ case -ENOENT: case -ECONNRESET: case -ENODEV: case -ESHUTDOWN: return; default: goto resubmit; } carl9170_handle_command_response(ar, urb->transfer_buffer, urb->actual_length); resubmit: usb_anchor_urb(urb, &ar->rx_anch); if (unlikely(usb_submit_urb(urb, GFP_ATOMIC))) usb_unanchor_urb(urb); } static int carl9170_usb_submit_rx_urb(struct ar9170 *ar, gfp_t gfp) { struct urb *urb; int err = 0, runs = 0; while ((atomic_read(&ar->rx_anch_urbs) < AR9170_NUM_RX_URBS) && (runs++ < AR9170_NUM_RX_URBS)) { err = -ENOSPC; urb = usb_get_from_anchor(&ar->rx_pool); if (urb) { usb_anchor_urb(urb, &ar->rx_anch); err = usb_submit_urb(urb, gfp); if (unlikely(err)) { usb_unanchor_urb(urb); usb_anchor_urb(urb, &ar->rx_pool); } else { atomic_dec(&ar->rx_pool_urbs); atomic_inc(&ar->rx_anch_urbs); } usb_free_urb(urb); } } return err; } static void carl9170_usb_rx_work(struct ar9170 *ar) { struct urb *urb; int i; for (i = 0; i < AR9170_NUM_RX_URBS_POOL; i++) { urb = usb_get_from_anchor(&ar->rx_work); if (!urb) break; atomic_dec(&ar->rx_work_urbs); if (IS_INITIALIZED(ar)) { carl9170_rx(ar, urb->transfer_buffer, urb->actual_length); } usb_anchor_urb(urb, &ar->rx_pool); atomic_inc(&ar->rx_pool_urbs); usb_free_urb(urb); carl9170_usb_submit_rx_urb(ar, GFP_ATOMIC); } } void carl9170_usb_handle_tx_err(struct ar9170 *ar) { struct urb *urb; while ((urb = usb_get_from_anchor(&ar->tx_err))) { struct sk_buff *skb = (void *)urb->context; carl9170_tx_drop(ar, skb); carl9170_tx_callback(ar, skb); usb_free_urb(urb); } } static void carl9170_usb_tasklet(unsigned long data) { struct ar9170 *ar = (struct ar9170 *) data; if (!IS_INITIALIZED(ar)) return; carl9170_usb_rx_work(ar); /* * Strictly speaking: The tx scheduler is not part of the USB system. * But the rx worker returns frames back to the mac80211-stack and * this is the _perfect_ place to generate the next transmissions. 
*/ if (IS_STARTED(ar)) carl9170_tx_scheduler(ar); } static void carl9170_usb_rx_complete(struct urb *urb) { struct ar9170 *ar = (struct ar9170 *)urb->context; int err; if (WARN_ON_ONCE(!ar)) return; atomic_dec(&ar->rx_anch_urbs); switch (urb->status) { case 0: /* rx path */ usb_anchor_urb(urb, &ar->rx_work); atomic_inc(&ar->rx_work_urbs); break; case -ENOENT: case -ECONNRESET: case -ENODEV: case -ESHUTDOWN: /* handle disconnect events*/ return; default: /* handle all other errors */ usb_anchor_urb(urb, &ar->rx_pool); atomic_inc(&ar->rx_pool_urbs); break; } err = carl9170_usb_submit_rx_urb(ar, GFP_ATOMIC); if (unlikely(err)) { /* * usb_submit_rx_urb reported a problem. * In case this is due to a rx buffer shortage, * elevate the tasklet worker priority to * the highest available level. */ tasklet_hi_schedule(&ar->usb_tasklet); if (atomic_read(&ar->rx_anch_urbs) == 0) { /* * The system is too slow to cope with * the enormous workload. We have simply * run out of active rx urbs and this * unfortunately leads to an unpredictable * device. */ ieee80211_queue_work(ar->hw, &ar->ping_work); } } else { /* * Using anything less than _high_ priority absolutely * kills the rx performance my UP-System... */ tasklet_hi_schedule(&ar->usb_tasklet); } } static struct urb *carl9170_usb_alloc_rx_urb(struct ar9170 *ar, gfp_t gfp) { struct urb *urb; void *buf; buf = kmalloc(ar->fw.rx_size, gfp); if (!buf) return NULL; urb = usb_alloc_urb(0, gfp); if (!urb) { kfree(buf); return NULL; } usb_fill_bulk_urb(urb, ar->udev, usb_rcvbulkpipe(ar->udev, AR9170_USB_EP_RX), buf, ar->fw.rx_size, carl9170_usb_rx_complete, ar); urb->transfer_flags |= URB_FREE_BUFFER; return urb; } static int carl9170_usb_send_rx_irq_urb(struct ar9170 *ar) { struct urb *urb = NULL; void *ibuf; int err = -ENOMEM; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) goto out; ibuf = kmalloc(AR9170_USB_EP_CTRL_MAX, GFP_KERNEL); if (!ibuf) goto out; usb_fill_int_urb(urb, ar->udev, usb_rcvintpipe(ar->udev, AR9170_USB_EP_IRQ), ibuf, AR9170_USB_EP_CTRL_MAX, carl9170_usb_rx_irq_complete, ar, 1); urb->transfer_flags |= URB_FREE_BUFFER; usb_anchor_urb(urb, &ar->rx_anch); err = usb_submit_urb(urb, GFP_KERNEL); if (err) usb_unanchor_urb(urb); out: usb_free_urb(urb); return err; } static int carl9170_usb_init_rx_bulk_urbs(struct ar9170 *ar) { struct urb *urb; int i, err = -EINVAL; /* * The driver actively maintains a second shadow * pool for inactive, but fully-prepared rx urbs. * * The pool should help the driver to master huge * workload spikes without running the risk of * undersupplying the hardware or wasting time by * processing rx data (streams) inside the urb * completion (hardirq context). */ for (i = 0; i < AR9170_NUM_RX_URBS_POOL; i++) { urb = carl9170_usb_alloc_rx_urb(ar, GFP_KERNEL); if (!urb) { err = -ENOMEM; goto err_out; } usb_anchor_urb(urb, &ar->rx_pool); atomic_inc(&ar->rx_pool_urbs); usb_free_urb(urb); } err = carl9170_usb_submit_rx_urb(ar, GFP_KERNEL); if (err) goto err_out; /* the device now waiting for the firmware. 
*/ carl9170_set_state_when(ar, CARL9170_STOPPED, CARL9170_IDLE); return 0; err_out: usb_scuttle_anchored_urbs(&ar->rx_pool); usb_scuttle_anchored_urbs(&ar->rx_work); usb_kill_anchored_urbs(&ar->rx_anch); return err; } static int carl9170_usb_flush(struct ar9170 *ar) { struct urb *urb; int ret, err = 0; while ((urb = usb_get_from_anchor(&ar->tx_wait))) { struct sk_buff *skb = (void *)urb->context; carl9170_tx_drop(ar, skb); carl9170_tx_callback(ar, skb); usb_free_urb(urb); } ret = usb_wait_anchor_empty_timeout(&ar->tx_cmd, 1000); if (ret == 0) err = -ETIMEDOUT; /* lets wait a while until the tx - queues are dried out */ ret = usb_wait_anchor_empty_timeout(&ar->tx_anch, 1000); if (ret == 0) err = -ETIMEDOUT; usb_kill_anchored_urbs(&ar->tx_anch); carl9170_usb_handle_tx_err(ar); return err; } static void carl9170_usb_cancel_urbs(struct ar9170 *ar) { int err; carl9170_set_state(ar, CARL9170_UNKNOWN_STATE); err = carl9170_usb_flush(ar); if (err) dev_err(&ar->udev->dev, "stuck tx urbs!\n"); usb_poison_anchored_urbs(&ar->tx_anch); carl9170_usb_handle_tx_err(ar); usb_poison_anchored_urbs(&ar->rx_anch); tasklet_kill(&ar->usb_tasklet); usb_scuttle_anchored_urbs(&ar->rx_work); usb_scuttle_anchored_urbs(&ar->rx_pool); usb_scuttle_anchored_urbs(&ar->tx_cmd); } int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd, const bool free_buf) { struct urb *urb; int err = 0; if (!IS_INITIALIZED(ar)) { err = -EPERM; goto err_free; } if (WARN_ON(cmd->hdr.len > CARL9170_MAX_CMD_LEN - 4)) { err = -EINVAL; goto err_free; } urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { err = -ENOMEM; goto err_free; } usb_fill_int_urb(urb, ar->udev, usb_sndintpipe(ar->udev, AR9170_USB_EP_CMD), cmd, cmd->hdr.len + 4, carl9170_usb_cmd_complete, ar, 1); if (free_buf) urb->transfer_flags |= URB_FREE_BUFFER; usb_anchor_urb(urb, &ar->tx_cmd); usb_free_urb(urb); return carl9170_usb_submit_cmd_urb(ar); err_free: if (free_buf) kfree(cmd); return err; } int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd, unsigned int plen, void *payload, unsigned int outlen, void *out) { int err = -ENOMEM; if (!IS_ACCEPTING_CMD(ar)) return -EIO; if (!(cmd & CARL9170_CMD_ASYNC_FLAG)) might_sleep(); ar->cmd.hdr.len = plen; ar->cmd.hdr.cmd = cmd; /* writing multiple regs fills this buffer already */ if (plen && payload != (u8 *)(ar->cmd.data)) memcpy(ar->cmd.data, payload, plen); spin_lock_bh(&ar->cmd_lock); ar->readbuf = (u8 *)out; ar->readlen = outlen; spin_unlock_bh(&ar->cmd_lock); err = __carl9170_exec_cmd(ar, &ar->cmd, false); if (!(cmd & CARL9170_CMD_ASYNC_FLAG)) { err = wait_for_completion_timeout(&ar->cmd_wait, HZ); if (err == 0) { err = -ETIMEDOUT; goto err_unbuf; } if (ar->readlen != outlen) { err = -EMSGSIZE; goto err_unbuf; } } return 0; err_unbuf: /* Maybe the device was removed in the moment we were waiting? 
*/ if (IS_STARTED(ar)) { dev_err(&ar->udev->dev, "no command feedback " "received (%d).\n", err); /* provide some possibly useful debug information */ print_hex_dump_bytes("carl9170 cmd: ", DUMP_PREFIX_NONE, &ar->cmd, plen + 4); carl9170_restart(ar, CARL9170_RR_COMMAND_TIMEOUT); } /* invalidate to avoid completing the next command prematurely */ spin_lock_bh(&ar->cmd_lock); ar->readbuf = NULL; ar->readlen = 0; spin_unlock_bh(&ar->cmd_lock); return err; } void carl9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb) { struct urb *urb; struct ar9170_stream *tx_stream; void *data; unsigned int len; if (!IS_STARTED(ar)) goto err_drop; urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) goto err_drop; if (ar->fw.tx_stream) { tx_stream = (void *) (skb->data - sizeof(*tx_stream)); len = skb->len + sizeof(*tx_stream); tx_stream->length = cpu_to_le16(len); tx_stream->tag = cpu_to_le16(AR9170_TX_STREAM_TAG); data = tx_stream; } else { data = skb->data; len = skb->len; } usb_fill_bulk_urb(urb, ar->udev, usb_sndbulkpipe(ar->udev, AR9170_USB_EP_TX), data, len, carl9170_usb_tx_data_complete, skb); urb->transfer_flags |= URB_ZERO_PACKET; usb_anchor_urb(urb, &ar->tx_wait); usb_free_urb(urb); carl9170_usb_submit_data_urb(ar); return; err_drop: carl9170_tx_drop(ar, skb); carl9170_tx_callback(ar, skb); } static void carl9170_release_firmware(struct ar9170 *ar) { if (ar->fw.fw) { release_firmware(ar->fw.fw); memset(&ar->fw, 0, sizeof(ar->fw)); } } void carl9170_usb_stop(struct ar9170 *ar) { int ret; carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STOPPED); ret = carl9170_usb_flush(ar); if (ret) dev_err(&ar->udev->dev, "kill pending tx urbs.\n"); usb_poison_anchored_urbs(&ar->tx_anch); carl9170_usb_handle_tx_err(ar); /* kill any pending command */ spin_lock_bh(&ar->cmd_lock); ar->readlen = 0; spin_unlock_bh(&ar->cmd_lock); complete_all(&ar->cmd_wait); /* This is required to prevent an early completion on _start */ INIT_COMPLETION(ar->cmd_wait); /* * Note: * So far we have freed all tx urbs, but we won't dare to touch any rx urbs. * Else we would end up with an unresponsive device... 
*/ } int carl9170_usb_open(struct ar9170 *ar) { usb_unpoison_anchored_urbs(&ar->tx_anch); carl9170_set_state_when(ar, CARL9170_STOPPED, CARL9170_IDLE); return 0; } static int carl9170_usb_load_firmware(struct ar9170 *ar) { const u8 *data; u8 *buf; unsigned int transfer; size_t len; u32 addr; int err = 0; buf = kmalloc(4096, GFP_KERNEL); if (!buf) { err = -ENOMEM; goto err_out; } data = ar->fw.fw->data; len = ar->fw.fw->size; addr = ar->fw.address; /* this removes the miniboot image */ data += ar->fw.offset; len -= ar->fw.offset; while (len) { transfer = min_t(unsigned int, len, 4096u); memcpy(buf, data, transfer); err = usb_control_msg(ar->udev, usb_sndctrlpipe(ar->udev, 0), 0x30 /* FW DL */, 0x40 | USB_DIR_OUT, addr >> 8, 0, buf, transfer, 100); if (err < 0) { kfree(buf); goto err_out; } len -= transfer; data += transfer; addr += transfer; } kfree(buf); err = usb_control_msg(ar->udev, usb_sndctrlpipe(ar->udev, 0), 0x31 /* FW DL COMPLETE */, 0x40 | USB_DIR_OUT, 0, 0, NULL, 0, 200); if (wait_for_completion_timeout(&ar->fw_boot_wait, HZ) == 0) { err = -ETIMEDOUT; goto err_out; } err = carl9170_echo_test(ar, 0x4a110123); if (err) goto err_out; /* now, start the command response counter */ ar->cmd_seq = -1; return 0; err_out: dev_err(&ar->udev->dev, "firmware upload failed (%d).\n", err); return err; } int carl9170_usb_restart(struct ar9170 *ar) { int err = 0; if (ar->intf->condition != USB_INTERFACE_BOUND) return 0; /* * Disable the command response sequence counter check. * We already know that the device/firmware is in a bad state. * So, no extra points are awarded to anyone who reminds the * driver about that. */ ar->cmd_seq = -2; err = carl9170_reboot(ar); carl9170_usb_stop(ar); if (err) goto err_out; tasklet_schedule(&ar->usb_tasklet); /* The reboot procedure can take quite a while to complete. */ msleep(1100); err = carl9170_usb_open(ar); if (err) goto err_out; err = carl9170_usb_load_firmware(ar); if (err) goto err_out; return 0; err_out: carl9170_usb_cancel_urbs(ar); return err; } void carl9170_usb_reset(struct ar9170 *ar) { /* * This is the last resort to get the device going again * without any *user replugging action*. * * But there is a catch: usb_reset really is like a physical * *reconnect*. The mac80211 state will be lost in the process. * Therefore a userspace application, which is monitoring * the link, must step in. */ carl9170_usb_cancel_urbs(ar); carl9170_usb_stop(ar); usb_queue_reset_device(ar->intf); } static int carl9170_usb_init_device(struct ar9170 *ar) { int err; /* * The carl9170 firmware lets the driver know when it's * ready for action. But we have to be prepared to gracefully * handle all spurious [flushed] messages after each (re-)boot. * Thus the command response counter remains disabled until it * can be safely synchronized. */ ar->cmd_seq = -2; err = carl9170_usb_send_rx_irq_urb(ar); if (err) goto err_out; err = carl9170_usb_init_rx_bulk_urbs(ar); if (err) goto err_unrx; err = carl9170_usb_open(ar); if (err) goto err_unrx; mutex_lock(&ar->mutex); err = carl9170_usb_load_firmware(ar); mutex_unlock(&ar->mutex); if (err) goto err_stop; return 0; err_stop: carl9170_usb_stop(ar); err_unrx: carl9170_usb_cancel_urbs(ar); err_out: return err; } static void carl9170_usb_firmware_failed(struct ar9170 *ar) { struct device *parent = ar->udev->dev.parent; struct usb_device *udev; /* * Store a copy of the usb_device pointer locally. * This is because device_release_driver initiates * carl9170_usb_disconnect, which in turn frees our * driver context (ar). 
udev = ar->udev; complete(&ar->fw_load_wait); /* unbind anything that failed */ if (parent) device_lock(parent); device_release_driver(&udev->dev); if (parent) device_unlock(parent); usb_put_dev(udev); } static void carl9170_usb_firmware_finish(struct ar9170 *ar) { int err; err = carl9170_parse_firmware(ar); if (err) goto err_freefw; err = carl9170_usb_init_device(ar); if (err) goto err_freefw; err = carl9170_register(ar); carl9170_usb_stop(ar); if (err) goto err_unrx; complete(&ar->fw_load_wait); usb_put_dev(ar->udev); return; err_unrx: carl9170_usb_cancel_urbs(ar); err_freefw: carl9170_release_firmware(ar); carl9170_usb_firmware_failed(ar); } static void carl9170_usb_firmware_step2(const struct firmware *fw, void *context) { struct ar9170 *ar = context; if (fw) { ar->fw.fw = fw; carl9170_usb_firmware_finish(ar); return; } dev_err(&ar->udev->dev, "firmware not found.\n"); carl9170_usb_firmware_failed(ar); } static int carl9170_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct ar9170 *ar; struct usb_device *udev; int err; err = usb_reset_device(interface_to_usbdev(intf)); if (err) return err; ar = carl9170_alloc(sizeof(*ar)); if (IS_ERR(ar)) return PTR_ERR(ar); udev = interface_to_usbdev(intf); usb_get_dev(udev); ar->udev = udev; ar->intf = intf; ar->features = id->driver_info; usb_set_intfdata(intf, ar); SET_IEEE80211_DEV(ar->hw, &intf->dev); init_usb_anchor(&ar->rx_anch); init_usb_anchor(&ar->rx_pool); init_usb_anchor(&ar->rx_work); init_usb_anchor(&ar->tx_wait); init_usb_anchor(&ar->tx_anch); init_usb_anchor(&ar->tx_cmd); init_usb_anchor(&ar->tx_err); init_completion(&ar->cmd_wait); init_completion(&ar->fw_boot_wait); init_completion(&ar->fw_load_wait); tasklet_init(&ar->usb_tasklet, carl9170_usb_tasklet, (unsigned long)ar); atomic_set(&ar->tx_cmd_urbs, 0); atomic_set(&ar->tx_anch_urbs, 0); atomic_set(&ar->rx_work_urbs, 0); atomic_set(&ar->rx_anch_urbs, 0); atomic_set(&ar->rx_pool_urbs, 0); usb_get_dev(ar->udev); carl9170_set_state(ar, CARL9170_STOPPED); return request_firmware_nowait(THIS_MODULE, 1, CARL9170FW_NAME, &ar->udev->dev, GFP_KERNEL, ar, carl9170_usb_firmware_step2); } static void carl9170_usb_disconnect(struct usb_interface *intf) { struct ar9170 *ar = usb_get_intfdata(intf); struct usb_device *udev; if (WARN_ON(!ar)) return; udev = ar->udev; wait_for_completion(&ar->fw_load_wait); if (IS_INITIALIZED(ar)) { carl9170_reboot(ar); carl9170_usb_stop(ar); } carl9170_usb_cancel_urbs(ar); carl9170_unregister(ar); usb_set_intfdata(intf, NULL); carl9170_release_firmware(ar); carl9170_free(ar); usb_put_dev(udev); } #ifdef CONFIG_PM static int carl9170_usb_suspend(struct usb_interface *intf, pm_message_t message) { struct ar9170 *ar = usb_get_intfdata(intf); if (!ar) return -ENODEV; carl9170_usb_cancel_urbs(ar); return 0; } static int carl9170_usb_resume(struct usb_interface *intf) { struct ar9170 *ar = usb_get_intfdata(intf); int err; if (!ar) return -ENODEV; usb_unpoison_anchored_urbs(&ar->rx_anch); carl9170_set_state(ar, CARL9170_STOPPED); /* * The USB documentation demands that [for suspend] all traffic * to and from the device has to stop. This would be fine, but * there's a catch: the device [usb phy] does not come back. * * Upon resume the firmware will "kill" itself and the * boot-code sorts out the magic voodoo. * Not very nice, but there's not much that could go wrong. 
*/ msleep(1100); err = carl9170_usb_init_device(ar); if (err) goto err_unrx; return 0; err_unrx: carl9170_usb_cancel_urbs(ar); return err; } #endif /* CONFIG_PM */ static struct usb_driver carl9170_driver = { .name = KBUILD_MODNAME, .probe = carl9170_usb_probe, .disconnect = carl9170_usb_disconnect, .id_table = carl9170_usb_ids, .soft_unbind = 1, #ifdef CONFIG_PM .suspend = carl9170_usb_suspend, .resume = carl9170_usb_resume, .reset_resume = carl9170_usb_resume, #endif /* CONFIG_PM */ }; module_usb_driver(carl9170_driver);
gpl-2.0
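The rx completion path above juggles three USB anchors: rx_anch for urbs live on the hardware, rx_work for completed urbs awaiting the tasklet, and rx_pool as a shadow pool of fully prepared spares that can be swapped in from atomic context. Below is a minimal user-space sketch of that accounting; the counters and function names (spare_count, active_count, refill, complete_one) are invented for illustration and exist nowhere in the driver.

#include <stdio.h>

/* Invented stand-ins for the driver's per-anchor urb counters. */
static int active_count; /* urbs submitted to the device (rx_anch) */
static int spare_count;  /* fully prepared spares (rx_pool) */

/* On completion the urb leaves the active set; on error it is
 * returned to the spare pool instead of being processed. */
static void complete_one(int error)
{
	active_count--;
	if (error)
		spare_count++;
	/* else: the urb would be queued for the rx worker (rx_work) */
}

/* Refill the active set from the spare pool; no allocation happens
 * here, which is the whole point of keeping prepared spares. */
static int refill(void)
{
	if (!spare_count)
		return -1; /* shortage: the driver escalates (tasklet/ping) */
	spare_count--;
	active_count++;
	return 0;
}

int main(void)
{
	spare_count = 8; /* stand-in for AR9170_NUM_RX_URBS_POOL */
	while (refill() == 0)
		; /* prime the active set */
	complete_one(0);
	if (refill() < 0)
		puts("out of rx urbs - device would become unpredictable");
	printf("active=%d spare=%d\n", active_count, spare_count);
	return 0;
}

The design choice this models: the completion handler never allocates memory, it only moves already-built urbs between anchors, so a workload spike costs pointer moves rather than GFP_ATOMIC allocations.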
savoca/furnace-g3
arch/sh/boards/mach-se/7722/irq.c
7450
2092
/* * linux/arch/sh/boards/se/7722/irq.c * * Copyright (C) 2007 Nobuhiro Iwamatsu * * Hitachi UL SolutionEngine 7722 Support. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <asm/irq.h> #include <asm/io.h> #include <mach-se/mach/se7722.h> unsigned int se7722_fpga_irq[SE7722_FPGA_IRQ_NR] = { 0, }; static void disable_se7722_irq(struct irq_data *data) { unsigned int bit = (unsigned int)irq_data_get_irq_chip_data(data); __raw_writew(__raw_readw(IRQ01_MASK) | 1 << bit, IRQ01_MASK); } static void enable_se7722_irq(struct irq_data *data) { unsigned int bit = (unsigned int)irq_data_get_irq_chip_data(data); __raw_writew(__raw_readw(IRQ01_MASK) & ~(1 << bit), IRQ01_MASK); } static struct irq_chip se7722_irq_chip __read_mostly = { .name = "SE7722-FPGA", .irq_mask = disable_se7722_irq, .irq_unmask = enable_se7722_irq, }; static void se7722_irq_demux(unsigned int irq, struct irq_desc *desc) { unsigned short intv = __raw_readw(IRQ01_STS); unsigned int ext_irq = 0; intv &= (1 << SE7722_FPGA_IRQ_NR) - 1; for (; intv; intv >>= 1, ext_irq++) { if (!(intv & 1)) continue; generic_handle_irq(se7722_fpga_irq[ext_irq]); } } /* * Initialize IRQ setting */ void __init init_se7722_IRQ(void) { int i, irq; __raw_writew(0, IRQ01_MASK); /* disable all irqs */ __raw_writew(0x2000, 0xb03fffec); /* mrshpc irq enable */ for (i = 0; i < SE7722_FPGA_IRQ_NR; i++) { irq = create_irq(); if (irq < 0) return; se7722_fpga_irq[i] = irq; irq_set_chip_and_handler_name(se7722_fpga_irq[i], &se7722_irq_chip, handle_level_irq, "level"); irq_set_chip_data(se7722_fpga_irq[i], (void *)i); } irq_set_chained_handler(IRQ0_IRQ, se7722_irq_demux); irq_set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW); irq_set_chained_handler(IRQ1_IRQ, se7722_irq_demux); irq_set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW); }
gpl-2.0
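se7722_irq_demux() above follows the standard FPGA cascade pattern: latch one status word, then walk its set bits and re-dispatch each one to the virtual irq registered for that bit. A stand-alone sketch of the bit-walk, with a hypothetical handler() in place of generic_handle_irq():

#include <stdio.h>

#define NR_FPGA_IRQS 4 /* stand-in for SE7722_FPGA_IRQ_NR */

static void handler(unsigned int n) { printf("irq %u\n", n); }

/* Walk a latched status word and dispatch every set bit,
 * mirroring the shift-and-test loop in se7722_irq_demux(). */
static void demux(unsigned short status)
{
	unsigned int ext_irq = 0;

	status &= (1 << NR_FPGA_IRQS) - 1; /* mask out unused bits */
	for (; status; status >>= 1, ext_irq++) {
		if (!(status & 1))
			continue;
		handler(ext_irq);
	}
}

int main(void)
{
	demux(0x5); /* bits 0 and 2 pending -> two dispatches */
	return 0;
}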
sakuraba001/android_kernel_samsung_hlte
drivers/video/offb.c
7450
19913
/* * linux/drivers/video/offb.c -- Open Firmware based frame buffer device * * Copyright (C) 1997 Geert Uytterhoeven * * This driver is partly based on the PowerMac console driver: * * Copyright (C) 1996 Paul Mackerras * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/delay.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/interrupt.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/pci.h> #include <asm/io.h> #ifdef CONFIG_PPC64 #include <asm/pci-bridge.h> #endif #ifdef CONFIG_PPC32 #include <asm/bootx.h> #endif #include "macmodes.h" /* Supported palette hacks */ enum { cmap_unknown, cmap_simple, /* ATI Mach64 */ cmap_r128, /* ATI Rage128 */ cmap_M3A, /* ATI Rage Mobility M3 Head A */ cmap_M3B, /* ATI Rage Mobility M3 Head B */ cmap_radeon, /* ATI Radeon */ cmap_gxt2000, /* IBM GXT2000 */ cmap_avivo, /* ATI R5xx */ cmap_qemu, /* qemu vga */ }; struct offb_par { volatile void __iomem *cmap_adr; volatile void __iomem *cmap_data; int cmap_type; int blanked; }; struct offb_par default_par; #ifdef CONFIG_PPC32 extern boot_infos_t *boot_infos; #endif /* Definitions used by the Avivo palette hack */ #define AVIVO_DC_LUT_RW_SELECT 0x6480 #define AVIVO_DC_LUT_RW_MODE 0x6484 #define AVIVO_DC_LUT_RW_INDEX 0x6488 #define AVIVO_DC_LUT_SEQ_COLOR 0x648c #define AVIVO_DC_LUT_PWL_DATA 0x6490 #define AVIVO_DC_LUT_30_COLOR 0x6494 #define AVIVO_DC_LUT_READ_PIPE_SELECT 0x6498 #define AVIVO_DC_LUT_WRITE_EN_MASK 0x649c #define AVIVO_DC_LUT_AUTOFILL 0x64a0 #define AVIVO_DC_LUTA_CONTROL 0x64c0 #define AVIVO_DC_LUTA_BLACK_OFFSET_BLUE 0x64c4 #define AVIVO_DC_LUTA_BLACK_OFFSET_GREEN 0x64c8 #define AVIVO_DC_LUTA_BLACK_OFFSET_RED 0x64cc #define AVIVO_DC_LUTA_WHITE_OFFSET_BLUE 0x64d0 #define AVIVO_DC_LUTA_WHITE_OFFSET_GREEN 0x64d4 #define AVIVO_DC_LUTA_WHITE_OFFSET_RED 0x64d8 #define AVIVO_DC_LUTB_CONTROL 0x6cc0 #define AVIVO_DC_LUTB_BLACK_OFFSET_BLUE 0x6cc4 #define AVIVO_DC_LUTB_BLACK_OFFSET_GREEN 0x6cc8 #define AVIVO_DC_LUTB_BLACK_OFFSET_RED 0x6ccc #define AVIVO_DC_LUTB_WHITE_OFFSET_BLUE 0x6cd0 #define AVIVO_DC_LUTB_WHITE_OFFSET_GREEN 0x6cd4 #define AVIVO_DC_LUTB_WHITE_OFFSET_RED 0x6cd8 /* * Set a single color register. The values supplied are already * rounded down to the hardware's capabilities (according to the * entries in the var structure). Return != 0 for invalid regno. 
*/ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info) { struct offb_par *par = (struct offb_par *) info->par; if (info->fix.visual == FB_VISUAL_TRUECOLOR) { u32 *pal = info->pseudo_palette; u32 cr = red >> (16 - info->var.red.length); u32 cg = green >> (16 - info->var.green.length); u32 cb = blue >> (16 - info->var.blue.length); u32 value; if (regno >= 16) return -EINVAL; value = (cr << info->var.red.offset) | (cg << info->var.green.offset) | (cb << info->var.blue.offset); if (info->var.transp.length > 0) { u32 mask = (1 << info->var.transp.length) - 1; mask <<= info->var.transp.offset; value |= mask; } pal[regno] = value; return 0; } if (regno > 255) return -EINVAL; red >>= 8; green >>= 8; blue >>= 8; if (!par->cmap_adr) return 0; switch (par->cmap_type) { case cmap_simple: writeb(regno, par->cmap_adr); writeb(red, par->cmap_data); writeb(green, par->cmap_data); writeb(blue, par->cmap_data); break; case cmap_M3A: /* Clear PALETTE_ACCESS_CNTL in DAC_CNTL */ out_le32(par->cmap_adr + 0x58, in_le32(par->cmap_adr + 0x58) & ~0x20); case cmap_r128: /* Set palette index & data */ out_8(par->cmap_adr + 0xb0, regno); out_le32(par->cmap_adr + 0xb4, (red << 16 | green << 8 | blue)); break; case cmap_M3B: /* Set PALETTE_ACCESS_CNTL in DAC_CNTL */ out_le32(par->cmap_adr + 0x58, in_le32(par->cmap_adr + 0x58) | 0x20); /* Set palette index & data */ out_8(par->cmap_adr + 0xb0, regno); out_le32(par->cmap_adr + 0xb4, (red << 16 | green << 8 | blue)); break; case cmap_radeon: /* Set palette index & data (could be smarter) */ out_8(par->cmap_adr + 0xb0, regno); out_le32(par->cmap_adr + 0xb4, (red << 16 | green << 8 | blue)); break; case cmap_gxt2000: out_le32(((unsigned __iomem *) par->cmap_adr) + regno, (red << 16 | green << 8 | blue)); break; case cmap_avivo: /* Write to both LUTs for now */ writel(1, par->cmap_adr + AVIVO_DC_LUT_RW_SELECT); writeb(regno, par->cmap_adr + AVIVO_DC_LUT_RW_INDEX); writel(((red) << 22) | ((green) << 12) | ((blue) << 2), par->cmap_adr + AVIVO_DC_LUT_30_COLOR); writel(0, par->cmap_adr + AVIVO_DC_LUT_RW_SELECT); writeb(regno, par->cmap_adr + AVIVO_DC_LUT_RW_INDEX); writel(((red) << 22) | ((green) << 12) | ((blue) << 2), par->cmap_adr + AVIVO_DC_LUT_30_COLOR); break; } return 0; } /* * Blank the display. 
*/ static int offb_blank(int blank, struct fb_info *info) { struct offb_par *par = (struct offb_par *) info->par; int i, j; if (!par->cmap_adr) return 0; if (!par->blanked) if (!blank) return 0; par->blanked = blank; if (blank) for (i = 0; i < 256; i++) { switch (par->cmap_type) { case cmap_simple: writeb(i, par->cmap_adr); for (j = 0; j < 3; j++) writeb(0, par->cmap_data); break; case cmap_M3A: /* Clear PALETTE_ACCESS_CNTL in DAC_CNTL */ out_le32(par->cmap_adr + 0x58, in_le32(par->cmap_adr + 0x58) & ~0x20); case cmap_r128: /* Set palette index & data */ out_8(par->cmap_adr + 0xb0, i); out_le32(par->cmap_adr + 0xb4, 0); break; case cmap_M3B: /* Set PALETTE_ACCESS_CNTL in DAC_CNTL */ out_le32(par->cmap_adr + 0x58, in_le32(par->cmap_adr + 0x58) | 0x20); /* Set palette index & data */ out_8(par->cmap_adr + 0xb0, i); out_le32(par->cmap_adr + 0xb4, 0); break; case cmap_radeon: out_8(par->cmap_adr + 0xb0, i); out_le32(par->cmap_adr + 0xb4, 0); break; case cmap_gxt2000: out_le32(((unsigned __iomem *) par->cmap_adr) + i, 0); break; case cmap_avivo: writel(1, par->cmap_adr + AVIVO_DC_LUT_RW_SELECT); writeb(i, par->cmap_adr + AVIVO_DC_LUT_RW_INDEX); writel(0, par->cmap_adr + AVIVO_DC_LUT_30_COLOR); writel(0, par->cmap_adr + AVIVO_DC_LUT_RW_SELECT); writeb(i, par->cmap_adr + AVIVO_DC_LUT_RW_INDEX); writel(0, par->cmap_adr + AVIVO_DC_LUT_30_COLOR); break; } } else fb_set_cmap(&info->cmap, info); return 0; } static int offb_set_par(struct fb_info *info) { struct offb_par *par = (struct offb_par *) info->par; /* On avivo, initialize palette control */ if (par->cmap_type == cmap_avivo) { writel(0, par->cmap_adr + AVIVO_DC_LUTA_CONTROL); writel(0, par->cmap_adr + AVIVO_DC_LUTA_BLACK_OFFSET_BLUE); writel(0, par->cmap_adr + AVIVO_DC_LUTA_BLACK_OFFSET_GREEN); writel(0, par->cmap_adr + AVIVO_DC_LUTA_BLACK_OFFSET_RED); writel(0x0000ffff, par->cmap_adr + AVIVO_DC_LUTA_WHITE_OFFSET_BLUE); writel(0x0000ffff, par->cmap_adr + AVIVO_DC_LUTA_WHITE_OFFSET_GREEN); writel(0x0000ffff, par->cmap_adr + AVIVO_DC_LUTA_WHITE_OFFSET_RED); writel(0, par->cmap_adr + AVIVO_DC_LUTB_CONTROL); writel(0, par->cmap_adr + AVIVO_DC_LUTB_BLACK_OFFSET_BLUE); writel(0, par->cmap_adr + AVIVO_DC_LUTB_BLACK_OFFSET_GREEN); writel(0, par->cmap_adr + AVIVO_DC_LUTB_BLACK_OFFSET_RED); writel(0x0000ffff, par->cmap_adr + AVIVO_DC_LUTB_WHITE_OFFSET_BLUE); writel(0x0000ffff, par->cmap_adr + AVIVO_DC_LUTB_WHITE_OFFSET_GREEN); writel(0x0000ffff, par->cmap_adr + AVIVO_DC_LUTB_WHITE_OFFSET_RED); writel(1, par->cmap_adr + AVIVO_DC_LUT_RW_SELECT); writel(0, par->cmap_adr + AVIVO_DC_LUT_RW_MODE); writel(0x0000003f, par->cmap_adr + AVIVO_DC_LUT_WRITE_EN_MASK); writel(0, par->cmap_adr + AVIVO_DC_LUT_RW_SELECT); writel(0, par->cmap_adr + AVIVO_DC_LUT_RW_MODE); writel(0x0000003f, par->cmap_adr + AVIVO_DC_LUT_WRITE_EN_MASK); } return 0; } static void offb_destroy(struct fb_info *info) { if (info->screen_base) iounmap(info->screen_base); release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size); framebuffer_release(info); } static struct fb_ops offb_ops = { .owner = THIS_MODULE, .fb_destroy = offb_destroy, .fb_setcolreg = offb_setcolreg, .fb_set_par = offb_set_par, .fb_blank = offb_blank, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, }; static void __iomem *offb_map_reg(struct device_node *np, int index, unsigned long offset, unsigned long size) { const u32 *addrp; u64 asize, taddr; unsigned int flags; addrp = of_get_pci_address(np, index, &asize, &flags); if (addrp == NULL) addrp = 
of_get_address(np, index, &asize, &flags); if (addrp == NULL) return NULL; if ((flags & (IORESOURCE_IO | IORESOURCE_MEM)) == 0) return NULL; if ((offset + size) > asize) return NULL; taddr = of_translate_address(np, addrp); if (taddr == OF_BAD_ADDR) return NULL; return ioremap(taddr + offset, size); } static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp, const char *name, unsigned long address) { struct offb_par *par = (struct offb_par *) info->par; if (dp && !strncmp(name, "ATY,Rage128", 11)) { par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); if (par->cmap_adr) par->cmap_type = cmap_r128; } else if (dp && (!strncmp(name, "ATY,RageM3pA", 12) || !strncmp(name, "ATY,RageM3p12A", 14))) { par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); if (par->cmap_adr) par->cmap_type = cmap_M3A; } else if (dp && !strncmp(name, "ATY,RageM3pB", 12)) { par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); if (par->cmap_adr) par->cmap_type = cmap_M3B; } else if (dp && !strncmp(name, "ATY,Rage6", 9)) { par->cmap_adr = offb_map_reg(dp, 1, 0, 0x1fff); if (par->cmap_adr) par->cmap_type = cmap_radeon; } else if (!strncmp(name, "ATY,", 4)) { unsigned long base = address & 0xff000000UL; par->cmap_adr = ioremap(base + 0x7ff000, 0x1000) + 0xcc0; par->cmap_data = par->cmap_adr + 1; par->cmap_type = cmap_simple; } else if (dp && (of_device_is_compatible(dp, "pci1014,b7") || of_device_is_compatible(dp, "pci1014,21c"))) { par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000); if (par->cmap_adr) par->cmap_type = cmap_gxt2000; } else if (dp && !strncmp(name, "vga,Display-", 12)) { /* Look for AVIVO initialized by SLOF */ struct device_node *pciparent = of_get_parent(dp); const u32 *vid, *did; vid = of_get_property(pciparent, "vendor-id", NULL); did = of_get_property(pciparent, "device-id", NULL); /* This will match most R5xx */ if (vid && did && *vid == 0x1002 && ((*did >= 0x7100 && *did < 0x7800) || (*did >= 0x9400))) { par->cmap_adr = offb_map_reg(pciparent, 2, 0, 0x10000); if (par->cmap_adr) par->cmap_type = cmap_avivo; } of_node_put(pciparent); } else if (dp && of_device_is_compatible(dp, "qemu,std-vga")) { const u32 io_of_addr[3] = { 0x01000000, 0x0, 0x0 }; u64 io_addr = of_translate_address(dp, io_of_addr); if (io_addr != OF_BAD_ADDR) { par->cmap_adr = ioremap(io_addr + 0x3c8, 2); if (par->cmap_adr) { par->cmap_type = cmap_simple; par->cmap_data = par->cmap_adr + 1; } } } info->fix.visual = (par->cmap_type != cmap_unknown) ? 
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_STATIC_PSEUDOCOLOR; } static void __init offb_init_fb(const char *name, const char *full_name, int width, int height, int depth, int pitch, unsigned long address, int foreign_endian, struct device_node *dp) { unsigned long res_size = pitch * height; struct offb_par *par = &default_par; unsigned long res_start = address; struct fb_fix_screeninfo *fix; struct fb_var_screeninfo *var; struct fb_info *info; if (!request_mem_region(res_start, res_size, "offb")) return; printk(KERN_INFO "Using unsupported %dx%d %s at %lx, depth=%d, pitch=%d\n", width, height, name, address, depth, pitch); if (depth != 8 && depth != 15 && depth != 16 && depth != 32) { printk(KERN_ERR "%s: can't use depth = %d\n", full_name, depth); release_mem_region(res_start, res_size); return; } info = framebuffer_alloc(sizeof(u32) * 16, NULL); if (info == 0) { release_mem_region(res_start, res_size); return; } fix = &info->fix; var = &info->var; info->par = par; strcpy(fix->id, "OFfb "); strncat(fix->id, name, sizeof(fix->id) - sizeof("OFfb ")); fix->id[sizeof(fix->id) - 1] = '\0'; var->xres = var->xres_virtual = width; var->yres = var->yres_virtual = height; fix->line_length = pitch; fix->smem_start = address; fix->smem_len = pitch * height; fix->type = FB_TYPE_PACKED_PIXELS; fix->type_aux = 0; par->cmap_type = cmap_unknown; if (depth == 8) offb_init_palette_hacks(info, dp, name, address); else fix->visual = FB_VISUAL_TRUECOLOR; var->xoffset = var->yoffset = 0; switch (depth) { case 8: var->bits_per_pixel = 8; var->red.offset = 0; var->red.length = 8; var->green.offset = 0; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; var->transp.offset = 0; var->transp.length = 0; break; case 15: /* RGB 555 */ var->bits_per_pixel = 16; var->red.offset = 10; var->red.length = 5; var->green.offset = 5; var->green.length = 5; var->blue.offset = 0; var->blue.length = 5; var->transp.offset = 0; var->transp.length = 0; break; case 16: /* RGB 565 */ var->bits_per_pixel = 16; var->red.offset = 11; var->red.length = 5; var->green.offset = 5; var->green.length = 6; var->blue.offset = 0; var->blue.length = 5; var->transp.offset = 0; var->transp.length = 0; break; case 32: /* RGB 888 */ var->bits_per_pixel = 32; var->red.offset = 16; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; var->transp.offset = 24; var->transp.length = 8; break; } var->red.msb_right = var->green.msb_right = var->blue.msb_right = var->transp.msb_right = 0; var->grayscale = 0; var->nonstd = 0; var->activate = 0; var->height = var->width = -1; var->pixclock = 10000; var->left_margin = var->right_margin = 16; var->upper_margin = var->lower_margin = 16; var->hsync_len = var->vsync_len = 8; var->sync = 0; var->vmode = FB_VMODE_NONINTERLACED; /* set offb aperture size for generic probing */ info->apertures = alloc_apertures(1); if (!info->apertures) goto out_aper; info->apertures->ranges[0].base = address; info->apertures->ranges[0].size = fix->smem_len; info->fbops = &offb_ops; info->screen_base = ioremap(address, fix->smem_len); info->pseudo_palette = (void *) (info + 1); info->flags = FBINFO_DEFAULT | FBINFO_MISC_FIRMWARE | foreign_endian; fb_alloc_cmap(&info->cmap, 256, 0); if (register_framebuffer(info) < 0) goto out_err; printk(KERN_INFO "fb%d: Open Firmware frame buffer device on %s\n", info->node, full_name); return; out_err: iounmap(info->screen_base); out_aper: iounmap(par->cmap_adr); par->cmap_adr = NULL; framebuffer_release(info); 
release_mem_region(res_start, res_size); } static void __init offb_init_nodriver(struct device_node *dp, int no_real_node) { unsigned int len; int i, width = 640, height = 480, depth = 8, pitch = 640; unsigned int flags, rsize, addr_prop = 0; unsigned long max_size = 0; u64 rstart, address = OF_BAD_ADDR; const u32 *pp, *addrp, *up; u64 asize; int foreign_endian = 0; #ifdef __BIG_ENDIAN if (of_get_property(dp, "little-endian", NULL)) foreign_endian = FBINFO_FOREIGN_ENDIAN; #else if (of_get_property(dp, "big-endian", NULL)) foreign_endian = FBINFO_FOREIGN_ENDIAN; #endif pp = of_get_property(dp, "linux,bootx-depth", &len); if (pp == NULL) pp = of_get_property(dp, "depth", &len); if (pp && len == sizeof(u32)) depth = *pp; pp = of_get_property(dp, "linux,bootx-width", &len); if (pp == NULL) pp = of_get_property(dp, "width", &len); if (pp && len == sizeof(u32)) width = *pp; pp = of_get_property(dp, "linux,bootx-height", &len); if (pp == NULL) pp = of_get_property(dp, "height", &len); if (pp && len == sizeof(u32)) height = *pp; pp = of_get_property(dp, "linux,bootx-linebytes", &len); if (pp == NULL) pp = of_get_property(dp, "linebytes", &len); if (pp && len == sizeof(u32) && (*pp != 0xffffffffu)) pitch = *pp; else pitch = width * ((depth + 7) / 8); rsize = (unsigned long)pitch * (unsigned long)height; /* Ok, now we try to figure out the address of the framebuffer. * * Unfortunately, Open Firmware doesn't provide a standard way to do * so. All we can do is a dodgy heuristic that happens to work in * practice. On most machines, the "address" property contains what * we need, though not on Matrox cards found in IBM machines. What I've * found that appears to give good results is to go through the PCI * ranges and pick one that is both big enough and if possible encloses * the "address" property. If none match, we pick the biggest */ up = of_get_property(dp, "linux,bootx-addr", &len); if (up == NULL) up = of_get_property(dp, "address", &len); if (up && len == sizeof(u32)) addr_prop = *up; /* Hack for when BootX is passing us */ if (no_real_node) goto skip_addr; for (i = 0; (addrp = of_get_address(dp, i, &asize, &flags)) != NULL; i++) { int match_addrp = 0; if (!(flags & IORESOURCE_MEM)) continue; if (asize < rsize) continue; rstart = of_translate_address(dp, addrp); if (rstart == OF_BAD_ADDR) continue; if (addr_prop && (rstart <= addr_prop) && ((rstart + asize) >= (addr_prop + rsize))) match_addrp = 1; if (match_addrp) { address = addr_prop; break; } if (rsize > max_size) { max_size = rsize; address = OF_BAD_ADDR; } if (address == OF_BAD_ADDR) address = rstart; } skip_addr: if (address == OF_BAD_ADDR && addr_prop) address = (u64)addr_prop; if (address != OF_BAD_ADDR) { /* kludge for valkyrie */ if (strcmp(dp->name, "valkyrie") == 0) address += 0x1000; offb_init_fb(no_real_node ? "bootx" : dp->name, no_real_node ? "display" : dp->full_name, width, height, depth, pitch, address, foreign_endian, no_real_node ? NULL : dp); } } static int __init offb_init(void) { struct device_node *dp = NULL, *boot_disp = NULL; if (fb_get_options("offb", NULL)) return -ENODEV; /* Check if we have a MacOS display without a node spec */ if (of_get_property(of_chosen, "linux,bootx-noscreen", NULL) != NULL) { /* The old code tried to work out which node was the MacOS * display based on the address. I'm dropping that since the * lack of a node spec only happens with old BootX versions * (users can update) and with this code, they'll still get * a display (just not the palette hacks). 
*/ offb_init_nodriver(of_chosen, 1); } for (dp = NULL; (dp = of_find_node_by_type(dp, "display"));) { if (of_get_property(dp, "linux,opened", NULL) && of_get_property(dp, "linux,boot-display", NULL)) { boot_disp = dp; offb_init_nodriver(dp, 0); } } for (dp = NULL; (dp = of_find_node_by_type(dp, "display"));) { if (of_get_property(dp, "linux,opened", NULL) && dp != boot_disp) offb_init_nodriver(dp, 0); } return 0; } module_init(offb_init); MODULE_LICENSE("GPL");
gpl-2.0
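In truecolor modes, offb_setcolreg() above never touches the hardware: each 16-bit colour component is shifted down to the channel width advertised in fb_var_screeninfo and packed into one of the 16 pseudo-palette slots. A self-contained sketch of that packing; the struct field type is invented here, and the RGB565 offsets are just one example layout:

#include <stdio.h>
#include <stdint.h>

struct field { unsigned offset, length; };

/* Pack 16-bit-per-channel colour into a pixel the way the
 * pseudo-palette branch does: drop precision, shift into place. */
static uint32_t pack(uint16_t r, uint16_t g, uint16_t b,
		     struct field fr, struct field fg, struct field fb)
{
	return ((uint32_t)(r >> (16 - fr.length)) << fr.offset) |
	       ((uint32_t)(g >> (16 - fg.length)) << fg.offset) |
	       ((uint32_t)(b >> (16 - fb.length)) << fb.offset);
}

int main(void)
{
	/* RGB565 layout as an example */
	struct field r = { 11, 5 }, g = { 5, 6 }, b = { 0, 5 };

	printf("white = 0x%04x\n", pack(0xffff, 0xffff, 0xffff, r, g, b));
	return 0;
}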
bsmitty83/KaPoW_vivow
arch/mips/jz4740/clock-debugfs.c
7962
2746
/* * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de> * JZ4740 SoC clock support debugfs entries * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/debugfs.h> #include <linux/uaccess.h> #include <asm/mach-jz4740/clock.h> #include "clock.h" static struct dentry *jz4740_clock_debugfs; static int jz4740_clock_debugfs_show_enabled(void *data, uint64_t *value) { struct clk *clk = data; *value = clk_is_enabled(clk); return 0; } static int jz4740_clock_debugfs_set_enabled(void *data, uint64_t value) { struct clk *clk = data; if (value) return clk_enable(clk); else clk_disable(clk); return 0; } DEFINE_SIMPLE_ATTRIBUTE(jz4740_clock_debugfs_ops_enabled, jz4740_clock_debugfs_show_enabled, jz4740_clock_debugfs_set_enabled, "%llu\n"); static int jz4740_clock_debugfs_show_rate(void *data, uint64_t *value) { struct clk *clk = data; *value = clk_get_rate(clk); return 0; } DEFINE_SIMPLE_ATTRIBUTE(jz4740_clock_debugfs_ops_rate, jz4740_clock_debugfs_show_rate, NULL, "%llu\n"); void jz4740_clock_debugfs_add_clk(struct clk *clk) { if (!jz4740_clock_debugfs) return; clk->debugfs_entry = debugfs_create_dir(clk->name, jz4740_clock_debugfs); debugfs_create_file("rate", S_IRUGO, clk->debugfs_entry, clk, &jz4740_clock_debugfs_ops_rate); debugfs_create_file("enabled", S_IWUGO | S_IRUGO, clk->debugfs_entry, clk, &jz4740_clock_debugfs_ops_enabled); if (clk->parent) { char parent_path[100]; snprintf(parent_path, 100, "../%s", clk->parent->name); clk->debugfs_parent_entry = debugfs_create_symlink("parent", clk->debugfs_entry, parent_path); } } /* TODO: Locking */ void jz4740_clock_debugfs_update_parent(struct clk *clk) { if (clk->debugfs_parent_entry) debugfs_remove(clk->debugfs_parent_entry); if (clk->parent) { char parent_path[100]; snprintf(parent_path, 100, "../%s", clk->parent->name); clk->debugfs_parent_entry = debugfs_create_symlink("parent", clk->debugfs_entry, parent_path); } else { clk->debugfs_parent_entry = NULL; } } void jz4740_clock_debugfs_init(void) { jz4740_clock_debugfs = debugfs_create_dir("jz4740-clock", NULL); if (IS_ERR(jz4740_clock_debugfs)) jz4740_clock_debugfs = NULL; }
gpl-2.0
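The DEFINE_SIMPLE_ATTRIBUTE() macro used above pairs a get callback with an optional set callback to form the file operations; passing NULL for the setter, as the rate attribute does, makes the file read-only regardless of its mode bits, which is why the permissions on the two files must match their callbacks. A plain-C model of that dispatch, with invented names; the real kernel macro instead generates fops around simple_attr_open():

#include <stdio.h>
#include <stdint.h>

struct simple_attr {
	int (*get)(void *data, uint64_t *val);
	int (*set)(void *data, uint64_t val);
};

static int attr_read(struct simple_attr *a, void *data, uint64_t *val)
{
	return a->get ? a->get(data, val) : -1;
}

static int attr_write(struct simple_attr *a, void *data, uint64_t val)
{
	return a->set ? a->set(data, val) : -1; /* -EACCES in the kernel */
}

static int get_rate(void *data, uint64_t *val)
{
	*val = *(unsigned *)data;
	return 0;
}

int main(void)
{
	unsigned rate = 12000000;
	struct simple_attr rate_attr = { get_rate, NULL }; /* no setter */
	uint64_t v;

	if (!attr_read(&rate_attr, &rate, &v))
		printf("rate: %llu\n", (unsigned long long)v);
	if (attr_write(&rate_attr, &rate, 0) < 0)
		puts("rate is read-only");
	return 0;
}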
agat63/AGAT_GS3_kernel
virt/kvm/async_pf.c
8218
5284
/* * kvm asynchronous fault support * * Copyright 2010 Red Hat, Inc. * * Author: * Gleb Natapov <gleb@redhat.com> * * This file is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/kvm_host.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/mmu_context.h> #include "async_pf.h" #include <trace/events/kvm.h> static struct kmem_cache *async_pf_cache; int kvm_async_pf_init(void) { async_pf_cache = KMEM_CACHE(kvm_async_pf, 0); if (!async_pf_cache) return -ENOMEM; return 0; } void kvm_async_pf_deinit(void) { if (async_pf_cache) kmem_cache_destroy(async_pf_cache); async_pf_cache = NULL; } void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu) { INIT_LIST_HEAD(&vcpu->async_pf.done); INIT_LIST_HEAD(&vcpu->async_pf.queue); spin_lock_init(&vcpu->async_pf.lock); } static void async_pf_execute(struct work_struct *work) { struct page *page = NULL; struct kvm_async_pf *apf = container_of(work, struct kvm_async_pf, work); struct mm_struct *mm = apf->mm; struct kvm_vcpu *vcpu = apf->vcpu; unsigned long addr = apf->addr; gva_t gva = apf->gva; might_sleep(); use_mm(mm); down_read(&mm->mmap_sem); get_user_pages(current, mm, addr, 1, 1, 0, &page, NULL); up_read(&mm->mmap_sem); unuse_mm(mm); spin_lock(&vcpu->async_pf.lock); list_add_tail(&apf->link, &vcpu->async_pf.done); apf->page = page; apf->done = true; spin_unlock(&vcpu->async_pf.lock); /* * apf may be freed by kvm_check_async_pf_completion() after * this point */ trace_kvm_async_pf_completed(addr, page, gva); if (waitqueue_active(&vcpu->wq)) wake_up_interruptible(&vcpu->wq); mmdrop(mm); kvm_put_kvm(vcpu->kvm); } void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) { /* cancel outstanding work queue item */ while (!list_empty(&vcpu->async_pf.queue)) { struct kvm_async_pf *work = list_entry(vcpu->async_pf.queue.next, typeof(*work), queue); cancel_work_sync(&work->work); list_del(&work->queue); if (!work->done) /* work was canceled */ kmem_cache_free(async_pf_cache, work); } spin_lock(&vcpu->async_pf.lock); while (!list_empty(&vcpu->async_pf.done)) { struct kvm_async_pf *work = list_entry(vcpu->async_pf.done.next, typeof(*work), link); list_del(&work->link); if (work->page) put_page(work->page); kmem_cache_free(async_pf_cache, work); } spin_unlock(&vcpu->async_pf.lock); vcpu->async_pf.queued = 0; } void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu) { struct kvm_async_pf *work; while (!list_empty_careful(&vcpu->async_pf.done) && kvm_arch_can_inject_async_page_present(vcpu)) { spin_lock(&vcpu->async_pf.lock); work = list_first_entry(&vcpu->async_pf.done, typeof(*work), link); list_del(&work->link); spin_unlock(&vcpu->async_pf.lock); if (work->page) kvm_arch_async_page_ready(vcpu, work); kvm_arch_async_page_present(vcpu, work); list_del(&work->queue); vcpu->async_pf.queued--; if (work->page) put_page(work->page); kmem_cache_free(async_pf_cache, work); } } int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, struct kvm_arch_async_pf 
*arch) { struct kvm_async_pf *work; if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU) return 0; /* setup delayed work */ /* * allocate with GFP_NOWAIT; if we are going to sleep anyway, we * may as well sleep while faulting the page in */ work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT); if (!work) return 0; work->page = NULL; work->done = false; work->vcpu = vcpu; work->gva = gva; work->addr = gfn_to_hva(vcpu->kvm, gfn); work->arch = *arch; work->mm = current->mm; atomic_inc(&work->mm->mm_count); kvm_get_kvm(work->vcpu->kvm); /* this can't really happen, otherwise gfn_to_pfn_async would have succeeded */ if (unlikely(kvm_is_error_hva(work->addr))) goto retry_sync; INIT_WORK(&work->work, async_pf_execute); if (!schedule_work(&work->work)) goto retry_sync; list_add_tail(&work->queue, &vcpu->async_pf.queue); vcpu->async_pf.queued++; kvm_arch_async_page_not_present(vcpu, work); return 1; retry_sync: kvm_put_kvm(work->vcpu->kvm); mmdrop(work->mm); kmem_cache_free(async_pf_cache, work); return 0; } int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu) { struct kvm_async_pf *work; if (!list_empty_careful(&vcpu->async_pf.done)) return 0; work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC); if (!work) return -ENOMEM; work->page = bad_page; get_page(bad_page); INIT_LIST_HEAD(&work->queue); /* for list_del to work */ spin_lock(&vcpu->async_pf.lock); list_add_tail(&work->link, &vcpu->async_pf.done); spin_unlock(&vcpu->async_pf.lock); vcpu->async_pf.queued++; return 0; }
gpl-2.0
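The async page fault code above splits the work across three actors: kvm_setup_async_pf() queues an item, async_pf_execute() faults the page in from a workqueue and publishes it on the spinlock-protected done list, and kvm_check_async_pf_completion() later drains that list on the vcpu side. A compressed pthread model of that hand-off; the item layout and names are invented for illustration (build with -pthread):

#include <pthread.h>
#include <stdio.h>

struct item { struct item *next; int page; };

static struct item *done_list;  /* stands in for vcpu->async_pf.done */
static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;

/* Worker: "fault the page in", then publish the result under the lock.
 * After unlocking, the consumer is free to reclaim the item. */
static void *worker(void *arg)
{
	struct item *it = arg;

	it->page = 42; /* stands in for get_user_pages() */
	pthread_mutex_lock(&done_lock);
	it->next = done_list;
	done_list = it;
	pthread_mutex_unlock(&done_lock);
	return NULL;
}

int main(void)
{
	struct item it = { 0, 0 };
	pthread_t t;

	pthread_create(&t, NULL, worker, &it);
	pthread_join(t, NULL);

	pthread_mutex_lock(&done_lock); /* drain, as the vcpu side does */
	while (done_list) {
		printf("completed, page=%d\n", done_list->page);
		done_list = done_list->next;
	}
	pthread_mutex_unlock(&done_lock);
	return 0;
}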
multipath-tcp/mptcp_3.1.x
samples/kprobes/kretprobe_example.c
13850
2997
/* * kretprobe_example.c * * Here's a sample kernel module showing the use of return probes to * report the return value and total time taken for the probed function * to run. * * usage: insmod kretprobe_example.ko func=<func_name> * * If no func_name is specified, do_fork is instrumented * * For more information on theory of operation of kretprobes, see * Documentation/kprobes.txt * * Build and insert the kernel module as done in the kprobe example. * You will see the trace data in /var/log/messages and on the console * whenever the probed function returns. (Some messages may be suppressed * if syslogd is configured to eliminate duplicate messages.) */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/kprobes.h> #include <linux/ktime.h> #include <linux/limits.h> #include <linux/sched.h> static char func_name[NAME_MAX] = "do_fork"; module_param_string(func, func_name, NAME_MAX, S_IRUGO); MODULE_PARM_DESC(func, "Function to kretprobe; this module will report the" " function's execution time"); /* per-instance private data */ struct my_data { ktime_t entry_stamp; }; /* Here we use the entry_handler to timestamp function entry */ static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs) { struct my_data *data; if (!current->mm) return 1; /* Skip kernel threads */ data = (struct my_data *)ri->data; data->entry_stamp = ktime_get(); return 0; } /* * Return-probe handler: Log the return value and duration. Duration may turn * out to be zero consistently, depending upon the granularity of time * accounting on the platform. */ static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs) { int retval = regs_return_value(regs); struct my_data *data = (struct my_data *)ri->data; s64 delta; ktime_t now; now = ktime_get(); delta = ktime_to_ns(ktime_sub(now, data->entry_stamp)); printk(KERN_INFO "%s returned %d and took %lld ns to execute\n", func_name, retval, (long long)delta); return 0; } static struct kretprobe my_kretprobe = { .handler = ret_handler, .entry_handler = entry_handler, .data_size = sizeof(struct my_data), /* Probe up to 20 instances concurrently. */ .maxactive = 20, }; static int __init kretprobe_init(void) { int ret; my_kretprobe.kp.symbol_name = func_name; ret = register_kretprobe(&my_kretprobe); if (ret < 0) { printk(KERN_INFO "register_kretprobe failed, returned %d\n", ret); return -1; } printk(KERN_INFO "Planted return probe at %s: %p\n", my_kretprobe.kp.symbol_name, my_kretprobe.kp.addr); return 0; } static void __exit kretprobe_exit(void) { unregister_kretprobe(&my_kretprobe); printk(KERN_INFO "kretprobe at %p unregistered\n", my_kretprobe.kp.addr); /* nmissed > 0 suggests that maxactive was set too low. */ printk(KERN_INFO "Missed probing %d instances of %s\n", my_kretprobe.nmissed, my_kretprobe.kp.symbol_name); } module_init(kretprobe_init) module_exit(kretprobe_exit) MODULE_LICENSE("GPL");
gpl-2.0
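The module above brackets the probed function with two stamps: the entry handler records ktime_get() into per-instance data, and the return handler subtracts that stamp from the current time. The same bracketing can be sketched in ordinary user-space C with clock_gettime(); probed() below is a made-up stand-in for the instrumented function:

#include <stdio.h>
#include <time.h>

static long long ns_since(const struct timespec *t0)
{
	struct timespec t1;

	clock_gettime(CLOCK_MONOTONIC, &t1);
	return (t1.tv_sec - t0->tv_sec) * 1000000000LL +
	       (t1.tv_nsec - t0->tv_nsec);
}

static int probed(void) /* hypothetical function under test */
{
	volatile int i, s = 0;

	for (i = 0; i < 1000000; i++)
		s += i;
	return s;
}

int main(void)
{
	struct timespec entry_stamp;
	int ret;

	clock_gettime(CLOCK_MONOTONIC, &entry_stamp); /* entry_handler */
	ret = probed();
	printf("probed returned %d and took %lld ns\n",  /* ret_handler */
	       ret, ns_since(&entry_stamp));
	return 0;
}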
azureplus/linux
drivers/target/iscsi/iscsi_target_erl0.c
27
29408
/****************************************************************************** * This file contains error recovery level zero functions used by * the iSCSI Target driver. * * (c) Copyright 2007-2013 Datera, Inc. * * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. ******************************************************************************/ #include <scsi/iscsi_proto.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> #include <target/iscsi/iscsi_target_core.h> #include "iscsi_target_seq_pdu_list.h" #include "iscsi_target_tq.h" #include "iscsi_target_erl0.h" #include "iscsi_target_erl1.h" #include "iscsi_target_erl2.h" #include "iscsi_target_util.h" #include "iscsi_target.h" /* * Used to set values in struct iscsi_cmd that iscsit_dataout_check_sequence() * checks against to determine a PDU's Offset+Length is within the current * DataOUT Sequence. Used for DataSequenceInOrder=Yes only. */ void iscsit_set_dataout_sequence_values( struct iscsi_cmd *cmd) { struct iscsi_conn *conn = cmd->conn; /* * Still set seq_start_offset and seq_end_offset for Unsolicited * DataOUT, even if DataSequenceInOrder=No. */ if (cmd->unsolicited_data) { cmd->seq_start_offset = cmd->write_data_done; cmd->seq_end_offset = (cmd->write_data_done + ((cmd->se_cmd.data_length > conn->sess->sess_ops->FirstBurstLength) ? conn->sess->sess_ops->FirstBurstLength : cmd->se_cmd.data_length)); return; } if (!conn->sess->sess_ops->DataSequenceInOrder) return; if (!cmd->seq_start_offset && !cmd->seq_end_offset) { cmd->seq_start_offset = cmd->write_data_done; cmd->seq_end_offset = (cmd->se_cmd.data_length > conn->sess->sess_ops->MaxBurstLength) ? (cmd->write_data_done + conn->sess->sess_ops->MaxBurstLength) : cmd->se_cmd.data_length; } else { cmd->seq_start_offset = cmd->seq_end_offset; cmd->seq_end_offset = ((cmd->seq_end_offset + conn->sess->sess_ops->MaxBurstLength) >= cmd->se_cmd.data_length) ? cmd->se_cmd.data_length : (cmd->seq_end_offset + conn->sess->sess_ops->MaxBurstLength); } } static int iscsit_dataout_within_command_recovery_check( struct iscsi_cmd *cmd, unsigned char *buf) { struct iscsi_conn *conn = cmd->conn; struct iscsi_data *hdr = (struct iscsi_data *) buf; u32 payload_length = ntoh24(hdr->dlength); /* * We do the within-command recovery checks here as it is * the first function called in iscsi_check_pre_dataout(). * Basically, if we are in within-command recovery and * the PDU does not contain the offset the sequence needs, * dump the payload. * * This only applies to DataPDUInOrder=Yes, for * DataPDUInOrder=No we only re-request the failed PDU * and check that all PDUs in a sequence are received * upon end of sequence. 
*/ if (conn->sess->sess_ops->DataSequenceInOrder) { if ((cmd->cmd_flags & ICF_WITHIN_COMMAND_RECOVERY) && cmd->write_data_done != be32_to_cpu(hdr->offset)) goto dump; cmd->cmd_flags &= ~ICF_WITHIN_COMMAND_RECOVERY; } else { struct iscsi_seq *seq; seq = iscsit_get_seq_holder(cmd, be32_to_cpu(hdr->offset), payload_length); if (!seq) return DATAOUT_CANNOT_RECOVER; /* * Set the struct iscsi_seq pointer to reuse later. */ cmd->seq_ptr = seq; if (conn->sess->sess_ops->DataPDUInOrder) { if (seq->status == DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY && (seq->offset != be32_to_cpu(hdr->offset) || seq->data_sn != be32_to_cpu(hdr->datasn))) goto dump; } else { if (seq->status == DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY && seq->data_sn != be32_to_cpu(hdr->datasn)) goto dump; } if (seq->status == DATAOUT_SEQUENCE_COMPLETE) goto dump; if (seq->status != DATAOUT_SEQUENCE_COMPLETE) seq->status = 0; } return DATAOUT_NORMAL; dump: pr_err("Dumping DataOUT PDU Offset: %u Length: %d DataSN:" " 0x%08x\n", hdr->offset, payload_length, hdr->datasn); return iscsit_dump_data_payload(conn, payload_length, 1); } static int iscsit_dataout_check_unsolicited_sequence( struct iscsi_cmd *cmd, unsigned char *buf) { u32 first_burst_len; struct iscsi_conn *conn = cmd->conn; struct iscsi_data *hdr = (struct iscsi_data *) buf; u32 payload_length = ntoh24(hdr->dlength); if ((be32_to_cpu(hdr->offset) < cmd->seq_start_offset) || ((be32_to_cpu(hdr->offset) + payload_length) > cmd->seq_end_offset)) { pr_err("Command ITT: 0x%08x with Offset: %u," " Length: %u outside of Unsolicited Sequence %u:%u while" " DataSequenceInOrder=Yes.\n", cmd->init_task_tag, be32_to_cpu(hdr->offset), payload_length, cmd->seq_start_offset, cmd->seq_end_offset); return DATAOUT_CANNOT_RECOVER; } first_burst_len = (cmd->first_burst_len + payload_length); if (first_burst_len > conn->sess->sess_ops->FirstBurstLength) { pr_err("Total %u bytes exceeds FirstBurstLength: %u" " for this Unsolicited DataOut Burst.\n", first_burst_len, conn->sess->sess_ops->FirstBurstLength); transport_send_check_condition_and_sense(&cmd->se_cmd, TCM_INCORRECT_AMOUNT_OF_DATA, 0); return DATAOUT_CANNOT_RECOVER; } /* * Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity * checks for the current Unsolicited DataOUT Sequence. */ if (hdr->flags & ISCSI_FLAG_CMD_FINAL) { /* * Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No, end of * sequence checks are handled in * iscsit_dataout_datapduinorder_no_fbit(). */ if (!conn->sess->sess_ops->DataPDUInOrder) goto out; if ((first_burst_len != cmd->se_cmd.data_length) && (first_burst_len != conn->sess->sess_ops->FirstBurstLength)) { pr_err("Unsolicited non-immediate data" " received %u does not equal FirstBurstLength: %u, and" " does not equal ExpXferLen %u.\n", first_burst_len, conn->sess->sess_ops->FirstBurstLength, cmd->se_cmd.data_length); transport_send_check_condition_and_sense(&cmd->se_cmd, TCM_INCORRECT_AMOUNT_OF_DATA, 0); return DATAOUT_CANNOT_RECOVER; } } else { if (first_burst_len == conn->sess->sess_ops->FirstBurstLength) { pr_err("Command ITT: 0x%08x reached" " FirstBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol" " error.\n", cmd->init_task_tag, conn->sess->sess_ops->FirstBurstLength); return DATAOUT_CANNOT_RECOVER; } if (first_burst_len == cmd->se_cmd.data_length) { pr_err("Command ITT: 0x%08x reached" " ExpXferLen: %u, but ISCSI_FLAG_CMD_FINAL is not set. 
protocol" " error.\n", cmd->init_task_tag, cmd->se_cmd.data_length); return DATAOUT_CANNOT_RECOVER; } } out: return DATAOUT_NORMAL; } static int iscsit_dataout_check_sequence( struct iscsi_cmd *cmd, unsigned char *buf) { u32 next_burst_len; struct iscsi_conn *conn = cmd->conn; struct iscsi_seq *seq = NULL; struct iscsi_data *hdr = (struct iscsi_data *) buf; u32 payload_length = ntoh24(hdr->dlength); /* * For DataSequenceInOrder=Yes: Check that the offset and offset+length * is within range as defined by iscsi_set_dataout_sequence_values(). * * For DataSequenceInOrder=No: Check that an struct iscsi_seq exists for * offset+length tuple. */ if (conn->sess->sess_ops->DataSequenceInOrder) { /* * Due to possibility of recovery DataOUT sent by the initiator * fullfilling an Recovery R2T, it's best to just dump the * payload here, instead of erroring out. */ if ((be32_to_cpu(hdr->offset) < cmd->seq_start_offset) || ((be32_to_cpu(hdr->offset) + payload_length) > cmd->seq_end_offset)) { pr_err("Command ITT: 0x%08x with Offset: %u," " Length: %u outside of Sequence %u:%u while" " DataSequenceInOrder=Yes.\n", cmd->init_task_tag, be32_to_cpu(hdr->offset), payload_length, cmd->seq_start_offset, cmd->seq_end_offset); if (iscsit_dump_data_payload(conn, payload_length, 1) < 0) return DATAOUT_CANNOT_RECOVER; return DATAOUT_WITHIN_COMMAND_RECOVERY; } next_burst_len = (cmd->next_burst_len + payload_length); } else { seq = iscsit_get_seq_holder(cmd, be32_to_cpu(hdr->offset), payload_length); if (!seq) return DATAOUT_CANNOT_RECOVER; /* * Set the struct iscsi_seq pointer to reuse later. */ cmd->seq_ptr = seq; if (seq->status == DATAOUT_SEQUENCE_COMPLETE) { if (iscsit_dump_data_payload(conn, payload_length, 1) < 0) return DATAOUT_CANNOT_RECOVER; return DATAOUT_WITHIN_COMMAND_RECOVERY; } next_burst_len = (seq->next_burst_len + payload_length); } if (next_burst_len > conn->sess->sess_ops->MaxBurstLength) { pr_err("Command ITT: 0x%08x, NextBurstLength: %u and" " Length: %u exceeds MaxBurstLength: %u. protocol" " error.\n", cmd->init_task_tag, (next_burst_len - payload_length), payload_length, conn->sess->sess_ops->MaxBurstLength); return DATAOUT_CANNOT_RECOVER; } /* * Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity * checks for the current DataOUT Sequence. */ if (hdr->flags & ISCSI_FLAG_CMD_FINAL) { /* * Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No, end of * sequence checks are handled in * iscsit_dataout_datapduinorder_no_fbit(). 
*/ if (!conn->sess->sess_ops->DataPDUInOrder) goto out; if (conn->sess->sess_ops->DataSequenceInOrder) { if ((next_burst_len < conn->sess->sess_ops->MaxBurstLength) && ((cmd->write_data_done + payload_length) < cmd->se_cmd.data_length)) { pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL" " before end of DataOUT sequence, protocol" " error.\n", cmd->init_task_tag); return DATAOUT_CANNOT_RECOVER; } } else { if (next_burst_len < seq->xfer_len) { pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL" " before end of DataOUT sequence, protocol" " error.\n", cmd->init_task_tag); return DATAOUT_CANNOT_RECOVER; } } } else { if (conn->sess->sess_ops->DataSequenceInOrder) { if (next_burst_len == conn->sess->sess_ops->MaxBurstLength) { pr_err("Command ITT: 0x%08x reached" " MaxBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is" " not set, protocol error.", cmd->init_task_tag, conn->sess->sess_ops->MaxBurstLength); return DATAOUT_CANNOT_RECOVER; } if ((cmd->write_data_done + payload_length) == cmd->se_cmd.data_length) { pr_err("Command ITT: 0x%08x reached" " last DataOUT PDU in sequence but ISCSI_FLAG_" "CMD_FINAL is not set, protocol error.\n", cmd->init_task_tag); return DATAOUT_CANNOT_RECOVER; } } else { if (next_burst_len == seq->xfer_len) { pr_err("Command ITT: 0x%08x reached" " last DataOUT PDU in sequence but ISCSI_FLAG_" "CMD_FINAL is not set, protocol error.\n", cmd->init_task_tag); return DATAOUT_CANNOT_RECOVER; } } } out: return DATAOUT_NORMAL; } static int iscsit_dataout_check_datasn( struct iscsi_cmd *cmd, unsigned char *buf) { u32 data_sn = 0; struct iscsi_conn *conn = cmd->conn; struct iscsi_data *hdr = (struct iscsi_data *) buf; u32 payload_length = ntoh24(hdr->dlength); /* * Considering the target has no method of re-requesting DataOUT * by DataSN, if we receive a greater DataSN than expected we * assume the functions for DataPDUInOrder=[Yes,No] below will * handle it. * * If the DataSN is less than expected, dump the payload. */ if (conn->sess->sess_ops->DataSequenceInOrder) data_sn = cmd->data_sn; else { struct iscsi_seq *seq = cmd->seq_ptr; data_sn = seq->data_sn; } if (be32_to_cpu(hdr->datasn) > data_sn) { pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x" " higher than expected 0x%08x.\n", cmd->init_task_tag, be32_to_cpu(hdr->datasn), data_sn); goto recover; } else if (be32_to_cpu(hdr->datasn) < data_sn) { pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x" " lower than expected 0x%08x, discarding payload.\n", cmd->init_task_tag, be32_to_cpu(hdr->datasn), data_sn); goto dump; } return DATAOUT_NORMAL; recover: if (!conn->sess->sess_ops->ErrorRecoveryLevel) { pr_err("Unable to perform within-command recovery" " while ERL=0.\n"); return DATAOUT_CANNOT_RECOVER; } dump: if (iscsit_dump_data_payload(conn, payload_length, 1) < 0) return DATAOUT_CANNOT_RECOVER; return DATAOUT_WITHIN_COMMAND_RECOVERY; } static int iscsit_dataout_pre_datapduinorder_yes( struct iscsi_cmd *cmd, unsigned char *buf) { int dump = 0, recovery = 0; struct iscsi_conn *conn = cmd->conn; struct iscsi_data *hdr = (struct iscsi_data *) buf; u32 payload_length = ntoh24(hdr->dlength); /* * For DataSequenceInOrder=Yes: If the offset is greater than the global * DataPDUInOrder=Yes offset counter in struct iscsi_cmd a protocol error has * occurred, so fail the connection. * * For DataSequenceInOrder=No: If the offset is greater than the per * sequence DataPDUInOrder=Yes offset counter in struct iscsi_seq a protocol * error has occurred, so fail the connection. 
*/ if (conn->sess->sess_ops->DataSequenceInOrder) { if (be32_to_cpu(hdr->offset) != cmd->write_data_done) { pr_err("Command ITT: 0x%08x, received offset" " %u different than expected %u.\n", cmd->init_task_tag, be32_to_cpu(hdr->offset), cmd->write_data_done); recovery = 1; goto recover; } } else { struct iscsi_seq *seq = cmd->seq_ptr; if (be32_to_cpu(hdr->offset) > seq->offset) { pr_err("Command ITT: 0x%08x, received offset" " %u greater than expected %u.\n", cmd->init_task_tag, be32_to_cpu(hdr->offset), seq->offset); recovery = 1; goto recover; } else if (be32_to_cpu(hdr->offset) < seq->offset) { pr_err("Command ITT: 0x%08x, received offset" " %u less than expected %u, discarding payload.\n", cmd->init_task_tag, be32_to_cpu(hdr->offset), seq->offset); dump = 1; goto dump; } } return DATAOUT_NORMAL; recover: if (!conn->sess->sess_ops->ErrorRecoveryLevel) { pr_err("Unable to perform within-command recovery" " while ERL=0.\n"); return DATAOUT_CANNOT_RECOVER; } dump: if (iscsit_dump_data_payload(conn, payload_length, 1) < 0) return DATAOUT_CANNOT_RECOVER; return (recovery) ? iscsit_recover_dataout_sequence(cmd, be32_to_cpu(hdr->offset), payload_length) : (dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY : DATAOUT_NORMAL; } static int iscsit_dataout_pre_datapduinorder_no( struct iscsi_cmd *cmd, unsigned char *buf) { struct iscsi_pdu *pdu; struct iscsi_data *hdr = (struct iscsi_data *) buf; u32 payload_length = ntoh24(hdr->dlength); pdu = iscsit_get_pdu_holder(cmd, be32_to_cpu(hdr->offset), payload_length); if (!pdu) return DATAOUT_CANNOT_RECOVER; cmd->pdu_ptr = pdu; switch (pdu->status) { case ISCSI_PDU_NOT_RECEIVED: case ISCSI_PDU_CRC_FAILED: case ISCSI_PDU_TIMED_OUT: break; case ISCSI_PDU_RECEIVED_OK: pr_err("Command ITT: 0x%08x received already gotten" " Offset: %u, Length: %u\n", cmd->init_task_tag, be32_to_cpu(hdr->offset), payload_length); return iscsit_dump_data_payload(cmd->conn, payload_length, 1); default: return DATAOUT_CANNOT_RECOVER; } return DATAOUT_NORMAL; } static int iscsit_dataout_update_r2t(struct iscsi_cmd *cmd, u32 offset, u32 length) { struct iscsi_r2t *r2t; if (cmd->unsolicited_data) return 0; r2t = iscsit_get_r2t_for_eos(cmd, offset, length); if (!r2t) return -1; spin_lock_bh(&cmd->r2t_lock); r2t->seq_complete = 1; cmd->outstanding_r2ts--; spin_unlock_bh(&cmd->r2t_lock); return 0; } static int iscsit_dataout_update_datapduinorder_no( struct iscsi_cmd *cmd, u32 data_sn, int f_bit) { int ret = 0; struct iscsi_pdu *pdu = cmd->pdu_ptr; pdu->data_sn = data_sn; switch (pdu->status) { case ISCSI_PDU_NOT_RECEIVED: pdu->status = ISCSI_PDU_RECEIVED_OK; break; case ISCSI_PDU_CRC_FAILED: pdu->status = ISCSI_PDU_RECEIVED_OK; break; case ISCSI_PDU_TIMED_OUT: pdu->status = ISCSI_PDU_RECEIVED_OK; break; default: return DATAOUT_CANNOT_RECOVER; } if (f_bit) { ret = iscsit_dataout_datapduinorder_no_fbit(cmd, pdu); if (ret == DATAOUT_CANNOT_RECOVER) return ret; } return DATAOUT_NORMAL; } static int iscsit_dataout_post_crc_passed( struct iscsi_cmd *cmd, unsigned char *buf) { int ret, send_r2t = 0; struct iscsi_conn *conn = cmd->conn; struct iscsi_seq *seq = NULL; struct iscsi_data *hdr = (struct iscsi_data *) buf; u32 payload_length = ntoh24(hdr->dlength); if (cmd->unsolicited_data) { if ((cmd->first_burst_len + payload_length) == conn->sess->sess_ops->FirstBurstLength) { if (iscsit_dataout_update_r2t(cmd, be32_to_cpu(hdr->offset), payload_length) < 0) return DATAOUT_CANNOT_RECOVER; send_r2t = 1; } if (!conn->sess->sess_ops->DataPDUInOrder) { ret = iscsit_dataout_update_datapduinorder_no(cmd, 
static int iscsit_dataout_pre_datapduinorder_no(
	struct iscsi_cmd *cmd,
	unsigned char *buf)
{
	struct iscsi_pdu *pdu;
	struct iscsi_data *hdr = (struct iscsi_data *) buf;
	u32 payload_length = ntoh24(hdr->dlength);

	pdu = iscsit_get_pdu_holder(cmd, be32_to_cpu(hdr->offset),
				    payload_length);
	if (!pdu)
		return DATAOUT_CANNOT_RECOVER;

	cmd->pdu_ptr = pdu;

	switch (pdu->status) {
	case ISCSI_PDU_NOT_RECEIVED:
	case ISCSI_PDU_CRC_FAILED:
	case ISCSI_PDU_TIMED_OUT:
		break;
	case ISCSI_PDU_RECEIVED_OK:
		pr_err("Command ITT: 0x%08x received already gotten"
			" Offset: %u, Length: %u\n", cmd->init_task_tag,
			be32_to_cpu(hdr->offset), payload_length);
		return iscsit_dump_data_payload(cmd->conn, payload_length, 1);
	default:
		return DATAOUT_CANNOT_RECOVER;
	}

	return DATAOUT_NORMAL;
}

static int iscsit_dataout_update_r2t(struct iscsi_cmd *cmd, u32 offset,
				     u32 length)
{
	struct iscsi_r2t *r2t;

	if (cmd->unsolicited_data)
		return 0;

	r2t = iscsit_get_r2t_for_eos(cmd, offset, length);
	if (!r2t)
		return -1;

	spin_lock_bh(&cmd->r2t_lock);
	r2t->seq_complete = 1;
	cmd->outstanding_r2ts--;
	spin_unlock_bh(&cmd->r2t_lock);

	return 0;
}

static int iscsit_dataout_update_datapduinorder_no(
	struct iscsi_cmd *cmd,
	u32 data_sn,
	int f_bit)
{
	int ret = 0;
	struct iscsi_pdu *pdu = cmd->pdu_ptr;

	pdu->data_sn = data_sn;

	switch (pdu->status) {
	case ISCSI_PDU_NOT_RECEIVED:
		pdu->status = ISCSI_PDU_RECEIVED_OK;
		break;
	case ISCSI_PDU_CRC_FAILED:
		pdu->status = ISCSI_PDU_RECEIVED_OK;
		break;
	case ISCSI_PDU_TIMED_OUT:
		pdu->status = ISCSI_PDU_RECEIVED_OK;
		break;
	default:
		return DATAOUT_CANNOT_RECOVER;
	}

	if (f_bit) {
		ret = iscsit_dataout_datapduinorder_no_fbit(cmd, pdu);
		if (ret == DATAOUT_CANNOT_RECOVER)
			return ret;
	}

	return DATAOUT_NORMAL;
}

static int iscsit_dataout_post_crc_passed(
	struct iscsi_cmd *cmd,
	unsigned char *buf)
{
	int ret, send_r2t = 0;
	struct iscsi_conn *conn = cmd->conn;
	struct iscsi_seq *seq = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *) buf;
	u32 payload_length = ntoh24(hdr->dlength);

	if (cmd->unsolicited_data) {
		if ((cmd->first_burst_len + payload_length) ==
		     conn->sess->sess_ops->FirstBurstLength) {
			if (iscsit_dataout_update_r2t(cmd, be32_to_cpu(hdr->offset),
					payload_length) < 0)
				return DATAOUT_CANNOT_RECOVER;
			send_r2t = 1;
		}

		if (!conn->sess->sess_ops->DataPDUInOrder) {
			ret = iscsit_dataout_update_datapduinorder_no(cmd,
				be32_to_cpu(hdr->datasn),
				(hdr->flags & ISCSI_FLAG_CMD_FINAL));
			if (ret == DATAOUT_CANNOT_RECOVER)
				return ret;
		}

		cmd->first_burst_len += payload_length;

		if (conn->sess->sess_ops->DataSequenceInOrder)
			cmd->data_sn++;
		else {
			seq = cmd->seq_ptr;
			seq->data_sn++;
			seq->offset += payload_length;
		}

		if (send_r2t) {
			if (seq)
				seq->status = DATAOUT_SEQUENCE_COMPLETE;
			cmd->first_burst_len = 0;
			cmd->unsolicited_data = 0;
		}
	} else {
		if (conn->sess->sess_ops->DataSequenceInOrder) {
			if ((cmd->next_burst_len + payload_length) ==
			     conn->sess->sess_ops->MaxBurstLength) {
				if (iscsit_dataout_update_r2t(cmd,
						be32_to_cpu(hdr->offset),
						payload_length) < 0)
					return DATAOUT_CANNOT_RECOVER;
				send_r2t = 1;
			}

			if (!conn->sess->sess_ops->DataPDUInOrder) {
				ret = iscsit_dataout_update_datapduinorder_no(
						cmd, be32_to_cpu(hdr->datasn),
						(hdr->flags & ISCSI_FLAG_CMD_FINAL));
				if (ret == DATAOUT_CANNOT_RECOVER)
					return ret;
			}

			cmd->next_burst_len += payload_length;
			cmd->data_sn++;

			if (send_r2t)
				cmd->next_burst_len = 0;
		} else {
			seq = cmd->seq_ptr;

			if ((seq->next_burst_len + payload_length) ==
			     seq->xfer_len) {
				if (iscsit_dataout_update_r2t(cmd,
						be32_to_cpu(hdr->offset),
						payload_length) < 0)
					return DATAOUT_CANNOT_RECOVER;
				send_r2t = 1;
			}

			if (!conn->sess->sess_ops->DataPDUInOrder) {
				ret = iscsit_dataout_update_datapduinorder_no(
						cmd, be32_to_cpu(hdr->datasn),
						(hdr->flags & ISCSI_FLAG_CMD_FINAL));
				if (ret == DATAOUT_CANNOT_RECOVER)
					return ret;
			}

			seq->data_sn++;
			seq->offset += payload_length;
			seq->next_burst_len += payload_length;

			if (send_r2t) {
				seq->next_burst_len = 0;
				seq->status = DATAOUT_SEQUENCE_COMPLETE;
			}
		}
	}

	if (send_r2t && conn->sess->sess_ops->DataSequenceInOrder)
		cmd->data_sn = 0;

	cmd->write_data_done += payload_length;

	if (cmd->write_data_done == cmd->se_cmd.data_length)
		return DATAOUT_SEND_TO_TRANSPORT;
	else if (send_r2t)
		return DATAOUT_SEND_R2T;
	else
		return DATAOUT_NORMAL;
}

static int iscsit_dataout_post_crc_failed(
	struct iscsi_cmd *cmd,
	unsigned char *buf)
{
	struct iscsi_conn *conn = cmd->conn;
	struct iscsi_pdu *pdu;
	struct iscsi_data *hdr = (struct iscsi_data *) buf;
	u32 payload_length = ntoh24(hdr->dlength);

	if (conn->sess->sess_ops->DataPDUInOrder)
		goto recover;
	/*
	 * The rest of this function is only called when DataPDUInOrder=No.
	 */
	pdu = cmd->pdu_ptr;

	switch (pdu->status) {
	case ISCSI_PDU_NOT_RECEIVED:
		pdu->status = ISCSI_PDU_CRC_FAILED;
		break;
	case ISCSI_PDU_CRC_FAILED:
		break;
	case ISCSI_PDU_TIMED_OUT:
		pdu->status = ISCSI_PDU_CRC_FAILED;
		break;
	default:
		return DATAOUT_CANNOT_RECOVER;
	}

recover:
	return iscsit_recover_dataout_sequence(cmd,
			be32_to_cpu(hdr->offset), payload_length);
}
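
/*
 * Illustrative sketch only: the burst-completion arithmetic used by
 * iscsit_dataout_post_crc_passed() above.  Whether the limit is
 * FirstBurstLength (unsolicited data), MaxBurstLength (in-order
 * sequences) or seq->xfer_len (out-of-order sequences), a burst closes
 * exactly when the running byte count plus this payload hits the limit,
 * at which point the R2T bookkeeping runs and the counter resets.
 * Hypothetical helper, not driver code.
 */
static int example_burst_completes(unsigned int burst_len,
				   unsigned int payload_length,
				   unsigned int burst_limit)
{
	return (burst_len + payload_length) == burst_limit;
}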
/*
 * Called from iscsit_handle_data_out() before DataOUT Payload is received
 * and CRC computed.
 */
int iscsit_check_pre_dataout(
	struct iscsi_cmd *cmd,
	unsigned char *buf)
{
	int ret;
	struct iscsi_conn *conn = cmd->conn;

	ret = iscsit_dataout_within_command_recovery_check(cmd, buf);
	if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
	    (ret == DATAOUT_CANNOT_RECOVER))
		return ret;

	ret = iscsit_dataout_check_datasn(cmd, buf);
	if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
	    (ret == DATAOUT_CANNOT_RECOVER))
		return ret;

	if (cmd->unsolicited_data) {
		ret = iscsit_dataout_check_unsolicited_sequence(cmd, buf);
		if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
		    (ret == DATAOUT_CANNOT_RECOVER))
			return ret;
	} else {
		ret = iscsit_dataout_check_sequence(cmd, buf);
		if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
		    (ret == DATAOUT_CANNOT_RECOVER))
			return ret;
	}

	return (conn->sess->sess_ops->DataPDUInOrder) ?
		iscsit_dataout_pre_datapduinorder_yes(cmd, buf) :
		iscsit_dataout_pre_datapduinorder_no(cmd, buf);
}

/*
 * Called from iscsit_handle_data_out() after DataOUT Payload is received
 * and CRC computed.
 */
int iscsit_check_post_dataout(
	struct iscsi_cmd *cmd,
	unsigned char *buf,
	u8 data_crc_failed)
{
	struct iscsi_conn *conn = cmd->conn;

	cmd->dataout_timeout_retries = 0;

	if (!data_crc_failed)
		return iscsit_dataout_post_crc_passed(cmd, buf);
	else {
		if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
			pr_err("Unable to recover from DataOUT CRC"
				" failure while ERL=0, closing session.\n");
			iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
					  buf);
			return DATAOUT_CANNOT_RECOVER;
		}

		iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR, buf);
		return iscsit_dataout_post_crc_failed(cmd, buf);
	}
}
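
/*
 * Illustrative sketch only: the short-circuit chaining pattern used by
 * iscsit_check_pre_dataout() above, generalized to an array of checks.
 * Each check returns a DATAOUT_* style verdict; the first non-normal
 * verdict wins and aborts the pipeline.  Hypothetical names throughout.
 */
typedef int (*example_check_fn)(void *ctx);

static int example_run_checks(example_check_fn *checks, int nr, void *ctx)
{
	int i, ret;

	for (i = 0; i < nr; i++) {
		ret = checks[i](ctx);
		if (ret)	/* any non-zero verdict short-circuits */
			return ret;
	}
	return 0;	/* all checks passed: treat as DATAOUT_NORMAL */
}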
static void iscsit_handle_time2retain_timeout(unsigned long data)
{
	struct iscsi_session *sess = (struct iscsi_session *) data;
	struct iscsi_portal_group *tpg = sess->tpg;
	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;

	spin_lock_bh(&se_tpg->session_lock);
	if (sess->time2retain_timer_flags & ISCSI_TF_STOP) {
		spin_unlock_bh(&se_tpg->session_lock);
		return;
	}
	if (atomic_read(&sess->session_reinstatement)) {
		pr_err("Exiting Time2Retain handler because"
				" session_reinstatement=1\n");
		spin_unlock_bh(&se_tpg->session_lock);
		return;
	}
	sess->time2retain_timer_flags |= ISCSI_TF_EXPIRED;

	pr_err("Time2Retain timer expired for SID: %u, cleaning up"
			" iSCSI session.\n", sess->sid);
	{
		struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;

		if (tiqn) {
			spin_lock(&tiqn->sess_err_stats.lock);
			strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
				(void *)sess->sess_ops->InitiatorName);
			tiqn->sess_err_stats.last_sess_failure_type =
					ISCSI_SESS_ERR_CXN_TIMEOUT;
			tiqn->sess_err_stats.cxn_timeout_errors++;
			atomic_long_inc(&sess->conn_timeout_errors);
			spin_unlock(&tiqn->sess_err_stats.lock);
		}
	}

	spin_unlock_bh(&se_tpg->session_lock);
	target_put_session(sess->se_sess);
}

void iscsit_start_time2retain_handler(struct iscsi_session *sess)
{
	int tpg_active;
	/*
	 * Only start Time2Retain timer when the associated TPG is still in
	 * an ACTIVE (eg: not disabled or shutdown) state.
	 */
	spin_lock(&sess->tpg->tpg_state_lock);
	tpg_active = (sess->tpg->tpg_state == TPG_STATE_ACTIVE);
	spin_unlock(&sess->tpg->tpg_state_lock);

	if (!tpg_active)
		return;

	if (sess->time2retain_timer_flags & ISCSI_TF_RUNNING)
		return;

	pr_debug("Starting Time2Retain timer for %u seconds on"
		" SID: %u\n", sess->sess_ops->DefaultTime2Retain, sess->sid);

	init_timer(&sess->time2retain_timer);
	sess->time2retain_timer.expires =
		(get_jiffies_64() + sess->sess_ops->DefaultTime2Retain * HZ);
	sess->time2retain_timer.data = (unsigned long)sess;
	sess->time2retain_timer.function = iscsit_handle_time2retain_timeout;
	sess->time2retain_timer_flags &= ~ISCSI_TF_STOP;
	sess->time2retain_timer_flags |= ISCSI_TF_RUNNING;
	add_timer(&sess->time2retain_timer);
}

/*
 * Called with spin_lock_bh(&struct se_portal_group->session_lock) held
 */
int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
{
	struct iscsi_portal_group *tpg = sess->tpg;
	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;

	if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)
		return -1;

	if (!(sess->time2retain_timer_flags & ISCSI_TF_RUNNING))
		return 0;

	sess->time2retain_timer_flags |= ISCSI_TF_STOP;
	spin_unlock(&se_tpg->session_lock);

	del_timer_sync(&sess->time2retain_timer);

	spin_lock(&se_tpg->session_lock);
	sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING;
	pr_debug("Stopped Time2Retain Timer for SID: %u\n", sess->sid);
	return 0;
}

void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->state_lock);
	if (atomic_read(&conn->connection_exit)) {
		spin_unlock_bh(&conn->state_lock);
		goto sleep;
	}

	if (atomic_read(&conn->transport_failed)) {
		spin_unlock_bh(&conn->state_lock);
		goto sleep;
	}
	spin_unlock_bh(&conn->state_lock);

	if (conn->tx_thread && conn->tx_thread_active)
		send_sig(SIGINT, conn->tx_thread, 1);
	if (conn->rx_thread && conn->rx_thread_active)
		send_sig(SIGINT, conn->rx_thread, 1);

sleep:
	wait_for_completion(&conn->conn_wait_rcfr_comp);
	complete(&conn->conn_post_wait_comp);
}

void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
{
	spin_lock_bh(&conn->state_lock);
	if (atomic_read(&conn->connection_exit)) {
		spin_unlock_bh(&conn->state_lock);
		return;
	}

	if (atomic_read(&conn->transport_failed)) {
		spin_unlock_bh(&conn->state_lock);
		return;
	}

	if (atomic_read(&conn->connection_reinstatement)) {
		spin_unlock_bh(&conn->state_lock);
		return;
	}

	if (conn->tx_thread && conn->tx_thread_active)
		send_sig(SIGINT, conn->tx_thread, 1);
	if (conn->rx_thread && conn->rx_thread_active)
		send_sig(SIGINT, conn->rx_thread, 1);

	atomic_set(&conn->connection_reinstatement, 1);
	if (!sleep) {
		spin_unlock_bh(&conn->state_lock);
		return;
	}

	atomic_set(&conn->sleep_on_conn_wait_comp, 1);
	spin_unlock_bh(&conn->state_lock);

	wait_for_completion(&conn->conn_wait_comp);
	complete(&conn->conn_post_wait_comp);
}
EXPORT_SYMBOL(iscsit_cause_connection_reinstatement);

void iscsit_fall_back_to_erl0(struct iscsi_session *sess)
{
	pr_debug("Falling back to ErrorRecoveryLevel=0 for SID:"
			" %u\n", sess->sid);

	atomic_set(&sess->session_fall_back_to_erl0, 1);
}

static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;

	if ((sess->sess_ops->ErrorRecoveryLevel == 2) &&
	    !atomic_read(&sess->session_reinstatement) &&
	    !atomic_read(&sess->session_fall_back_to_erl0))
		iscsit_connection_recovery_transport_reset(conn);
	else {
		pr_debug("Performing cleanup for failed iSCSI"
			" Connection ID: %hu from %s\n", conn->cid,
			sess->sess_ops->InitiatorName);
		iscsit_close_connection(conn);
	}
}
void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->state_lock);
	if (atomic_read(&conn->connection_exit)) {
		spin_unlock_bh(&conn->state_lock);
		return;
	}
	atomic_set(&conn->connection_exit, 1);

	if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
		spin_unlock_bh(&conn->state_lock);
		iscsit_close_connection(conn);
		return;
	}

	if (conn->conn_state == TARG_CONN_STATE_CLEANUP_WAIT) {
		spin_unlock_bh(&conn->state_lock);
		return;
	}

	pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
	conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
	spin_unlock_bh(&conn->state_lock);

	iscsit_handle_connection_cleanup(conn);
}

/*
 * This is the simple function that makes the magic of
 * sync and steering happen in the following paradoxical order:
 *
 * 0) Receive conn->of_marker (bytes left until next OFMarker)
 *    bytes into an offload buffer.  When we pass the exact number
 *    of bytes in conn->of_marker, iscsit_dump_data_payload() and hence
 *    rx_data() will automatically receive the identical u32 marker
 *    values and store them in conn->of_marker_offset;
 * 1) Now conn->of_marker_offset will contain the offset to the start
 *    of the next iSCSI PDU.  Dump these remaining bytes into another
 *    offload buffer.
 * 2) We are done!
 *    Next byte in the TCP stream will contain the next iSCSI PDU!
 *    Cool Huh?!
 */
int iscsit_recover_from_unknown_opcode(struct iscsi_conn *conn)
{
	/*
	 * Make sure the remaining byte count to the next marker is a sane
	 * value.
	 */
	if (conn->of_marker > (conn->conn_ops->OFMarkInt * 4)) {
		pr_err("Remaining bytes to OFMarker: %u exceeds"
			" OFMarkInt bytes: %u.\n", conn->of_marker,
				conn->conn_ops->OFMarkInt * 4);
		return -1;
	}

	pr_debug("Advancing %u bytes in TCP stream to get to the"
			" next OFMarker.\n", conn->of_marker);

	if (iscsit_dump_data_payload(conn, conn->of_marker, 0) < 0)
		return -1;

	/*
	 * Make sure the offset marker we retrieved is a valid value.
	 */
	if (conn->of_marker_offset > (ISCSI_HDR_LEN + (ISCSI_CRC_LEN * 2) +
	    conn->conn_ops->MaxRecvDataSegmentLength)) {
		pr_err("OfMarker offset value: %u exceeds limit.\n",
			conn->of_marker_offset);
		return -1;
	}

	pr_debug("Discarding %u bytes of TCP stream to get to the"
			" next iSCSI Opcode.\n", conn->of_marker_offset);

	if (iscsit_dump_data_payload(conn, conn->of_marker_offset, 0) < 0)
		return -1;

	return 0;
}
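
/*
 * Illustrative sketch only: the two sanity bounds enforced by
 * iscsit_recover_from_unknown_opcode() above, as a standalone check.
 * The constants are stand-ins for the driver's ISCSI_HDR_LEN and
 * ISCSI_CRC_LEN; the other inputs model the negotiated OFMarkInt and
 * MaxRecvDataSegmentLength values.
 */
#define EXAMPLE_HDR_LEN		48	/* stand-in for ISCSI_HDR_LEN */
#define EXAMPLE_CRC_LEN		4	/* stand-in for ISCSI_CRC_LEN */

static int example_marker_values_sane(unsigned int of_marker,
				      unsigned int of_marker_offset,
				      unsigned int ofmark_int,
				      unsigned int max_recv_dsl)
{
	/* Bytes remaining to the next marker can never exceed the interval. */
	if (of_marker > ofmark_int * 4)
		return 0;
	/* The recovered offset must land within one PDU's worst-case size. */
	if (of_marker_offset >
	    EXAMPLE_HDR_LEN + (EXAMPLE_CRC_LEN * 2) + max_recv_dsl)
		return 0;
	return 1;
}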
gpl-2.0
Blackwolf1337/JaVisionPlus
tools/Assimilate/AssimilateDoc.cpp
27
86209
// AssimilateDoc.cpp : implementation of the CAssimilateDoc class // #include "stdafx.h" #include "Includes.h" #include "BuildAll.h" #include "sourcesafe.h" #include "gla.h" // just for string stuff #include <set> using namespace std; #define sSAVEINFOSTRINGCHECK "(SaveInfo):" // so comment reader can stop these damn things accumulating #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif int giLODLevelOverride = 0; // MUST default to 0 void SS_DisposingOfCurrent(LPCSTR psFileName, bool bDirty); static bool FileUsesGLAReference(LPCSTR psFilename, LPCSTR psGLAReference); keywordArray_t CAssimilateDoc::s_Symbols[] = { "\\", TK_BACKSLASH, "/", TK_SLASH, ".", TK_DOT, "_", TK_UNDERSCORE, "-", TK_DASH, "$", TK_DOLLAR, NULL, TK_EOF, }; keywordArray_t CAssimilateDoc::s_Keywords[] = { "aseanimgrabinit", TK_AS_GRABINIT, "scale", TK_AS_SCALE, "keepmotion", TK_AS_KEEPMOTION, "pcj", TK_AS_PCJ, "aseanimgrab", TK_AS_GRAB, "aseanimgrab_gla", TK_AS_GRAB_GLA, "aseanimgrabfinalize", TK_AS_GRABFINALIZE, "aseanimconvert", TK_AS_CONVERT, "aseanimconvertmdx", TK_AS_CONVERTMDX, "aseanimconvertmdx_noask", TK_AS_CONVERTMDX_NOASK, NULL, TK_EOF, }; keywordArray_t CAssimilateDoc::s_grabKeywords[] = { "frames", TK_AS_FRAMES, "fill", TK_AS_FILL, "sound", TK_AS_SOUND, "action", TK_AS_ACTION, "enum", TK_AS_ENUM, "loop", TK_AS_LOOP, "qdskipstart", TK_AS_QDSKIPSTART, // useful so qdata can quickly skip extra stuff without having to know the syntax of what to skip "qdskipstop", TK_AS_QDSKIPSTOP, "additional", TK_AS_ADDITIONAL, "prequat", TK_AS_PREQUAT, "framespeed", TK_AS_FRAMESPEED, // retro hack because original format only supported frame speeds on additional sequences, not masters "genloopframe", TK_AS_GENLOOPFRAME, NULL, TK_EOF, }; keywordArray_t CAssimilateDoc::s_convertKeywords[] = { "playerparms", TK_AS_PLAYERPARMS, "origin", TK_AS_ORIGIN, "smooth", TK_AS_SMOOTH, "losedupverts", TK_AS_LOSEDUPVERTS, "makeskin", TK_AS_MAKESKIN, "ignorebasedeviations", TK_AS_IGNOREBASEDEVIATIONS, // temporary! 
"skew90", TK_AS_SKEW90, "noskew90", TK_AS_NOSKEW90, "skel", TK_AS_SKEL, "makeskel", TK_AS_MAKESKEL, NULL, TK_EOF, }; LPCTSTR CAssimilateDoc::GetKeyword(int token, int table) { if ((table == TABLE_ANY) || (table == TABLE_QDT)) { int i = 0; while(s_Keywords[i].m_tokenvalue != TK_EOF) { if (s_Keywords[i].m_tokenvalue == token) { return s_Keywords[i].m_keyword; } i++; } } if ((table == TABLE_ANY) || (table == TABLE_GRAB)) { int i = 0; while(s_grabKeywords[i].m_tokenvalue != TK_EOF) { if (s_grabKeywords[i].m_tokenvalue == token) { return s_grabKeywords[i].m_keyword; } i++; } } if ((table == TABLE_ANY) || (table == TABLE_CONVERT)) { int i = 0; while(s_convertKeywords[i].m_tokenvalue != TK_EOF) { if (s_convertKeywords[i].m_tokenvalue == token) { return s_convertKeywords[i].m_keyword; } i++; } } return NULL; } ///////////////////////////////////////////////////////////////////////////// // CAssimilateDoc IMPLEMENT_DYNCREATE(CAssimilateDoc, CDocument) BEGIN_MESSAGE_MAP(CAssimilateDoc, CDocument) //{{AFX_MSG_MAP(CAssimilateDoc) ON_COMMAND(IDM_ADDFILES, OnAddfiles) ON_COMMAND(IDM_EXTERNAL, OnExternal) ON_COMMAND(IDM_RESEQUENCE, OnResequence) ON_COMMAND(IDM_BUILD, OnBuild) ON_COMMAND(IDM_BUILD_MULTILOD, OnBuildMultiLOD) ON_COMMAND(IDM_VALIDATE, OnValidate) ON_COMMAND(IDM_CARWASH, OnCarWash) ON_COMMAND(IDM_VALIDATE_MULTILOD, OnValidateMultiLOD) ON_COMMAND(ID_VIEW_ANIMENUMS, OnViewAnimEnums) ON_UPDATE_COMMAND_UI(ID_VIEW_ANIMENUMS, OnUpdateViewAnimEnums) ON_COMMAND(ID_VIEW_FRAMEDETAILS, OnViewFrameDetails) ON_UPDATE_COMMAND_UI(ID_VIEW_FRAMEDETAILS, OnUpdateViewFrameDetails) ON_UPDATE_COMMAND_UI(IDM_RESEQUENCE, OnUpdateResequence) ON_UPDATE_COMMAND_UI(ID_FILE_SAVE, OnUpdateFileSave) ON_UPDATE_COMMAND_UI(ID_FILE_SAVE_AS, OnUpdateFileSaveAs) ON_UPDATE_COMMAND_UI(IDM_EXTERNAL, OnUpdateExternal) ON_UPDATE_COMMAND_UI(IDM_VALIDATE, OnUpdateValidate) ON_UPDATE_COMMAND_UI(IDM_BUILD, OnUpdateBuild) ON_COMMAND(ID_EDIT_BUILDALL, OnEditBuildall) ON_COMMAND(IDM_EDIT_BUILDDEPENDANT, OnEditBuildDependant) ON_COMMAND(ID_VIEW_FRAMEDETAILSONADDITIONALSEQUENCES, OnViewFramedetailsonadditionalsequences) ON_UPDATE_COMMAND_UI(ID_VIEW_FRAMEDETAILSONADDITIONALSEQUENCES, OnUpdateViewFramedetailsonadditionalsequences) ON_UPDATE_COMMAND_UI(IDM_EDIT_BUILDDEPENDANT, OnUpdateEditBuilddependant) ON_COMMAND(ID_EDIT_LAUNCHMODVIEWONCURRENT, OnEditLaunchmodviewoncurrent) ON_UPDATE_COMMAND_UI(ID_EDIT_LAUNCHMODVIEWONCURRENT, OnUpdateEditLaunchmodviewoncurrent) //}}AFX_MSG_MAP END_MESSAGE_MAP() ///////////////////////////////////////////////////////////////////////////// // CAssimilateDoc construction/destruction CAssimilateDoc::CAssimilateDoc() { // TODO: add one-time construction code here Init(); } CAssimilateDoc::~CAssimilateDoc() { } BOOL CAssimilateDoc::OnNewDocument() { SS_DisposingOfCurrent(m_strPathName, !!IsModified()); if (!CDocument::OnNewDocument()) return FALSE; // TODO: add reinitialization code here // (SDI documents will reuse this document) SetTitle("Untitled"); // for some reason MFC doesn't do this from time to time return TRUE; } void CAssimilateDoc::Init() { m_comments = NULL; m_modelList = NULL; m_curModel = NULL; m_lastModel = NULL; } ///////////////////////////////////////////////////////////////////////////// // CAssimilateDoc serialization CModel* CAssimilateDoc::AddModel() { CModel* thisModel = CModel::Create(m_comments); m_comments = NULL; if (m_modelList == NULL) { m_modelList = thisModel; } else { CModel* curModel = m_modelList; while (curModel->GetNext() != NULL) { curModel = curModel->GetNext(); } 
curModel->SetNext(thisModel); } m_curModel = thisModel; return m_curModel; } // remember to account for Mike's m_lastModel and move it down if necessary (until I can throw it away)... // void CAssimilateDoc::DeleteModel(CModel *deleteModel) { // linklist is only 1-way, so we need to find the stage previous to this (if any)... // CModel* prevModel = NULL; CModel* scanModel = GetFirstModel(); while (scanModel && scanModel != deleteModel) { prevModel = scanModel; scanModel = scanModel->GetNext(); } if (scanModel == deleteModel) { // we found it, so was this the first model in the list? // if (prevModel) { prevModel->SetNext(scanModel->GetNext()); // ...no } else { m_modelList = scanModel->GetNext(); // ...yes } scanModel->Delete(); } // fixme: ditch this whenever possible // keep Mike's var up to date... // scanModel = GetFirstModel(); while(scanModel && scanModel->GetNext()) { scanModel = scanModel->GetNext(); } m_lastModel = scanModel; } void CAssimilateDoc::EndModel() { m_lastModel = m_curModel; m_curModel = NULL; } // XSI or GLA anim grab... // void CAssimilateDoc::ParseGrab(CTokenizer* tokenizer, int iGrabType) { if (m_curModel == NULL) { tokenizer->Error("Grab without an active model"); tokenizer->GetToEndOfLine()->Delete(); return; } CToken* curToken = tokenizer->GetToken(NULL, TKF_NUMERICIDENTIFIERSTART | TKF_USES_EOL, 0); if (curToken->GetType() != TK_IDENTIFIER) { tokenizer->Error(TKERR_EXPECTED_IDENTIFIER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } CString path = curToken->GetStringValue(); curToken->Delete(); while(curToken != NULL) { curToken = tokenizer->GetToken(NULL, TKF_NUMERICIDENTIFIERSTART | TKF_USES_EOL | TKF_SPACETOKENS, 0); switch(curToken->GetType()) { case TK_SLASH: case TK_BACKSLASH: path += "/"; curToken->Delete(); curToken = tokenizer->GetToken(NULL, TKF_NUMERICIDENTIFIERSTART | TKF_USES_EOL | TKF_SPACETOKENS, 0); // hack for "8472" as in models/players/8472/blah.car. Arrggh!!!!!!!!!!!!!!!!!! 
// if (curToken->GetType() == TK_INT) { path += curToken->GetStringValue(); curToken->Delete(); break; } if (curToken->GetType() != TK_IDENTIFIER) { tokenizer->Error(TKERR_EXPECTED_IDENTIFIER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } path += curToken->GetStringValue(); curToken->Delete(); break; case TK_UNDERSCORE: case TK_DASH: path += curToken->GetStringValue(); curToken->Delete(); curToken = tokenizer->GetToken(NULL, TKF_NUMERICIDENTIFIERSTART | TKF_USES_EOL | TKF_SPACETOKENS, 0); if (curToken->GetType() != TK_IDENTIFIER) { tokenizer->Error(TKERR_EXPECTED_IDENTIFIER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } path += curToken->GetStringValue(); curToken->Delete(); break; case TK_DOT: path += curToken->GetStringValue(); curToken->Delete(); curToken = tokenizer->GetToken(NULL, TKF_NUMERICIDENTIFIERSTART | TKF_USES_EOL | TKF_SPACETOKENS, 0); if (curToken->GetType() != TK_IDENTIFIER) { tokenizer->Error(TKERR_EXPECTED_IDENTIFIER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } path += curToken->GetStringValue(); curToken->Delete(); curToken = NULL; break; case TK_SPACE: curToken->Delete(); curToken = NULL; break; case TK_EOL: tokenizer->PutBackToken(curToken); curToken = NULL; break; default: tokenizer->PutBackToken(curToken); curToken = NULL; break; } } CString enumname; int fill = -1; int loop = 0; CString sound; CString action; int startFrame = 0; int targetFrame = 0; int framecount = -1; int framespeed = iDEFAULTSEQFRAMESPEED; bool bFrameSpeedFound = false; int iFrameSpeedFromHeader; // int iStartFrames[MAX_ADDITIONAL_SEQUENCES]={0}; int iFrameCounts[MAX_ADDITIONAL_SEQUENCES]={0}; int iLoopFrames [MAX_ADDITIONAL_SEQUENCES]={0}; int iFrameSpeeds[MAX_ADDITIONAL_SEQUENCES]={0}; CString csEnums [MAX_ADDITIONAL_SEQUENCES]; int iAdditionalSeqNum = 0; bool bGenLoopFrame = false; bool bSomeParamsFound = false; curToken = tokenizer->GetToken(TKF_USES_EOL); switch (iGrabType) { case TK_AS_GRAB: { while (curToken->GetType() == TK_DASH) { bSomeParamsFound = true; curToken->Delete(); curToken = tokenizer->GetToken(s_grabKeywords, TKF_USES_EOL, 0); switch (curToken->GetType()) { case TK_AS_FRAMES: curToken->Delete(); curToken = tokenizer->GetToken(NULL, TKF_USES_EOL, 0); if (curToken->GetType() != TK_INTEGER) { tokenizer->Error(TKERR_EXPECTED_INTEGER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } startFrame = curToken->GetIntValue(); curToken->Delete(); curToken = tokenizer->GetToken(NULL, TKF_USES_EOL, 0); if (curToken->GetType() != TK_INTEGER) { tokenizer->Error(TKERR_EXPECTED_INTEGER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } targetFrame = curToken->GetIntValue(); curToken->Delete(); curToken = tokenizer->GetToken(NULL, TKF_USES_EOL, 0); if (curToken->GetType() != TK_INTEGER) { tokenizer->Error(TKERR_EXPECTED_INTEGER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } framecount = curToken->GetIntValue(); break; case TK_AS_FILL: curToken->Delete(); curToken = tokenizer->GetToken(NULL, TKF_USES_EOL, 0); if (curToken->GetType() != TK_INTEGER) { tokenizer->Error(TKERR_EXPECTED_INTEGER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } fill = curToken->GetIntValue(); break; case TK_AS_ENUM: curToken->Delete(); curToken = 
tokenizer->GetToken(NULL, TKF_USES_EOL, 0); if (curToken->GetType() != TK_IDENTIFIER) { tokenizer->Error(TKERR_EXPECTED_IDENTIFIER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } enumname = curToken->GetStringValue(); break; case TK_AS_SOUND: curToken->Delete(); curToken = tokenizer->GetToken(NULL, TKF_USES_EOL, 0); if (curToken->GetType() != TK_IDENTIFIER) { tokenizer->Error(TKERR_EXPECTED_IDENTIFIER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } sound = curToken->GetStringValue(); break; case TK_AS_ACTION: curToken->Delete(); curToken = tokenizer->GetToken(NULL, TKF_USES_EOL, 0); if (curToken->GetType() != TK_IDENTIFIER) { tokenizer->Error(TKERR_EXPECTED_IDENTIFIER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } action = curToken->GetStringValue(); break; case TK_AS_LOOP: curToken->Delete(); curToken = tokenizer->GetToken(NULL, TKF_USES_EOL, 0); if (curToken->GetType() != TK_INTEGER) { tokenizer->Error(TKERR_EXPECTED_INTEGER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } loop = curToken->GetIntValue(); break; case TK_AS_QDSKIPSTART: case TK_AS_QDSKIPSTOP: //curToken->Delete(); // don't do this, the whole thing relies on case statements leaving one current token for the outside loop to delete break; case TK_AS_GENLOOPFRAME: bGenLoopFrame = true; break; case TK_AS_FRAMESPEED: // this is still read in for compatibility, but gets overwritten lower down curToken->Delete(); curToken = tokenizer->GetToken(NULL, TKF_USES_EOL, 0); if (curToken->GetType() != TK_INTEGER) { tokenizer->Error(TKERR_EXPECTED_INTEGER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } framespeed = curToken->GetIntValue(); bFrameSpeedFound = true; break; case TK_AS_PREQUAT: m_curModel->SetPreQuat(true); break; case TK_AS_ADDITIONAL: curToken->Delete(); if (iAdditionalSeqNum == MAX_ADDITIONAL_SEQUENCES) { tokenizer->Error(TKERR_USERERROR, va("Trying to define > %d additional sequences for this master",MAX_ADDITIONAL_SEQUENCES)); tokenizer->GetToEndOfLine()->Delete(); return; } // startframe... // curToken = tokenizer->GetToken(NULL, TKF_USES_EOL, 0); if (curToken->GetType() != TK_INTEGER) { tokenizer->Error(TKERR_EXPECTED_INTEGER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } iStartFrames[iAdditionalSeqNum] = curToken->GetIntValue(); // framecount... // curToken->Delete(); curToken = tokenizer->GetToken(NULL, TKF_USES_EOL, 0); if (curToken->GetType() != TK_INTEGER) { tokenizer->Error(TKERR_EXPECTED_INTEGER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } iFrameCounts[iAdditionalSeqNum] = curToken->GetIntValue(); // loopframe... // curToken->Delete(); curToken = tokenizer->GetToken(NULL, TKF_USES_EOL, 0); if (curToken->GetType() != TK_INTEGER) { tokenizer->Error(TKERR_EXPECTED_INTEGER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } iLoopFrames[iAdditionalSeqNum] = curToken->GetIntValue(); // framespeed... 
// curToken->Delete(); curToken = tokenizer->GetToken(NULL, TKF_USES_EOL, 0); if (curToken->GetType() != TK_INTEGER) { tokenizer->Error(TKERR_EXPECTED_INTEGER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } iFrameSpeeds[iAdditionalSeqNum] = curToken->GetIntValue(); // enum... // curToken->Delete(); curToken = tokenizer->GetToken(NULL, TKF_USES_EOL, 0); if (curToken->GetType() != TK_IDENTIFIER) { tokenizer->Error(TKERR_EXPECTED_IDENTIFIER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } csEnums[iAdditionalSeqNum] = curToken->GetStringValue(); iAdditionalSeqNum++; break; default: tokenizer->Error(TKERR_UNEXPECTED_TOKEN, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } curToken->Delete(); curToken = tokenizer->GetToken(s_grabKeywords, TKF_USES_EOL, 0); } } break; case TK_AS_GRAB_GLA: { // no additional params permitted for this type currently... // } break; } path.MakeLower(); // // if no extension, assume ".xsi"... (or ".gla" now) // if (!(path.GetAt(path.GetLength()-4) == '.')) { path += (iGrabType == TK_AS_GRAB)?".xsi":".gla"; } if (curToken->GetType() != TK_EOL) { tokenizer->Error(TKERR_UNEXPECTED_TOKEN, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } // ignore any user params about speed and frame counts, and just re-grab them from the XSI file... // // if (bSomeParamsFound) // { // } // else { // at this point, it must be one of the paramless entries in a .CAR file, so we need to // provide the values for: startFrame, targetFrame, framecount // // read in values from the actual file, in case we need to use them... // CString nameASE = ((CAssimilateApp*)AfxGetApp())->GetQuakeDir(); nameASE+= path; int iStartFrame, iFrameCount; ReadASEHeader( nameASE, iStartFrame, iFrameCount, iFrameSpeedFromHeader, (iGrabType == TK_AS_GRAB_GLA) ); // if (strstr(nameASE,"death16")) // { // int z=1; // } startFrame = 0; // always targetFrame= 0; // any old shite value framecount = iFrameCount; if (iGrabType != TK_AS_GRAB_GLA) { if (!bFrameSpeedFound) { framespeed = iFrameSpeedFromHeader; } } } curToken->Delete(); CSequence* sequence = CSequence::_Create(bGenLoopFrame,(iGrabType == TK_AS_GRAB_GLA), path, startFrame, targetFrame, framecount, framespeed, iFrameSpeedFromHeader); m_curModel->AddSequence(sequence); sequence->AddComment(m_curModel->ExtractComments()); sequence->DeriveName(); if (enumname.IsEmpty()) { sequence->SetEnum(sequence->GetName()); } else { sequence->SetEnum(enumname); } sequence->SetFill(fill); sequence->SetSound(sound); sequence->SetAction(action); sequence->SetValidEnum(((CAssimilateApp*)AfxGetApp())->ValidEnum(sequence->GetEnum())); sequence->SetLoopFrame(loop); for (int i=0; i<MAX_ADDITIONAL_SEQUENCES; i++) { sequence->AdditionalSeqs[i]->SetStartFrame(iStartFrames[i]); sequence->AdditionalSeqs[i]->SetFrameCount(iFrameCounts[i]); sequence->AdditionalSeqs[i]->SetFrameSpeed(iFrameSpeeds[i]); sequence->AdditionalSeqs[i]->SetLoopFrame (iLoopFrames [i]); sequence->AdditionalSeqs[i]->SetEnum (csEnums [i]); sequence->AdditionalSeqs[i]->SetValidEnum (((CAssimilateApp*)AfxGetApp())->ValidEnum(sequence->AdditionalSeqs[i]->GetEnum())); } } // return = success. 
if false ret, return from caller because of error // bool Tokenizer_ReadPath(CString& path, CTokenizer* &tokenizer, CToken* &curToken ) { curToken = tokenizer->GetToken(NULL, TKF_NUMERICIDENTIFIERSTART | TKF_USES_EOL, 0); if (curToken->GetType() != TK_IDENTIFIER) { tokenizer->Error(TKERR_EXPECTED_IDENTIFIER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return false; } path += curToken->GetStringValue(); curToken->Delete(); while(curToken != NULL) { curToken = tokenizer->GetToken(NULL, TKF_NUMERICIDENTIFIERSTART | TKF_USES_EOL | (path.IsEmpty()?0:TKF_SPACETOKENS), 0); switch(curToken->GetType()) { case TK_SLASH: case TK_BACKSLASH: path += "/"; curToken->Delete(); curToken = tokenizer->GetToken(NULL, TKF_NUMERICIDENTIFIERSTART | TKF_USES_EOL | TKF_SPACETOKENS, 0); // hack for "8472" as in models/players/8472/blah.car. Arrggh!!!!!!!!!!!!!!!!!! // if (curToken->GetType() == TK_INT) { path += curToken->GetStringValue(); curToken->Delete(); break; } if (curToken->GetType() != TK_IDENTIFIER) { tokenizer->Error(TKERR_EXPECTED_IDENTIFIER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return false; } path += curToken->GetStringValue(); curToken->Delete(); break; case TK_UNDERSCORE: case TK_DASH: path += curToken->GetStringValue(); curToken->Delete(); curToken = tokenizer->GetToken(NULL, TKF_NUMERICIDENTIFIERSTART | TKF_USES_EOL | TKF_SPACETOKENS, 0); if (curToken->GetType() != TK_IDENTIFIER) { tokenizer->Error(TKERR_EXPECTED_IDENTIFIER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return false; } path += curToken->GetStringValue(); curToken->Delete(); break; case TK_DOT: path += curToken->GetStringValue(); curToken->Delete(); curToken = tokenizer->GetToken(NULL, TKF_NUMERICIDENTIFIERSTART | TKF_USES_EOL | TKF_SPACETOKENS, 0); if (curToken->GetType() != TK_IDENTIFIER) { tokenizer->Error(TKERR_EXPECTED_IDENTIFIER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return false; } path += curToken->GetStringValue(); curToken->Delete(); curToken = NULL; break; case TK_SPACE: case TK_EOL: curToken->Delete(); curToken = NULL; break; default: tokenizer->PutBackToken(curToken); curToken = NULL; break; } } return true; } void CAssimilateDoc::ParseConvert(CTokenizer* tokenizer, int iTokenType) { if (m_lastModel == NULL) { tokenizer->Error("Convert without an internal model"); tokenizer->GetToEndOfLine()->Delete(); return; } CToken* curToken = NULL; CString path; if (!Tokenizer_ReadPath(path, tokenizer, curToken )) return; /* while(curToken != NULL) { curToken = tokenizer->GetToken(NULL, TKF_NUMERICIDENTIFIERSTART | TKF_USES_EOL | TKF_SPACETOKENS, 0); switch(curToken->GetType()) { case TK_SLASH: case TK_BACKSLASH: path += "/"; curToken->Delete(); curToken = tokenizer->GetToken(NULL, TKF_NUMERICIDENTIFIERSTART | TKF_USES_EOL | TKF_SPACETOKENS, 0); // hack for "8472" as in models/players/8472/blah.car. Arrggh!!!!!!!!!!!!!!!!!! 
// if (curToken->GetType() == TK_INT) { path += curToken->GetStringValue(); curToken->Delete(); break; } if (curToken->GetType() != TK_IDENTIFIER) { tokenizer->Error(TKERR_EXPECTED_IDENTIFIER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } path += curToken->GetStringValue(); curToken->Delete(); break; case TK_UNDERSCORE: case TK_DASH: path += curToken->GetStringValue(); curToken->Delete(); curToken = tokenizer->GetToken(NULL, TKF_NUMERICIDENTIFIERSTART | TKF_USES_EOL | TKF_SPACETOKENS, 0); if (curToken->GetType() != TK_IDENTIFIER) { tokenizer->Error(TKERR_EXPECTED_IDENTIFIER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } path += curToken->GetStringValue(); curToken->Delete(); break; case TK_DOT: path += curToken->GetStringValue(); curToken->Delete(); curToken = tokenizer->GetToken(NULL, TKF_NUMERICIDENTIFIERSTART | TKF_USES_EOL | TKF_SPACETOKENS, 0); if (curToken->GetType() != TK_IDENTIFIER) { tokenizer->Error(TKERR_EXPECTED_IDENTIFIER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } path += curToken->GetStringValue(); curToken->Delete(); curToken = NULL; break; case TK_SPACE: case TK_EOL: curToken->Delete(); curToken = NULL; break; default: tokenizer->PutBackToken(curToken); curToken = NULL; break; } } */ int originx = 0; // important to default to 0! int originy = 0; // int originz = 0; // int parm1 = 0; int parm2 = 0; int parm3 = 0; int parm4 = 0; curToken = tokenizer->GetToken(); while(curToken->GetType() == TK_DASH) { curToken->Delete(); curToken = tokenizer->GetToken(s_convertKeywords, 0, 0); switch(curToken->GetType()) { case TK_AS_ORIGIN: curToken->Delete(); curToken = tokenizer->GetToken(); if (curToken->GetType() != TK_INTEGER) { tokenizer->Error(TKERR_EXPECTED_INTEGER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } originx = curToken->GetIntValue(); curToken->Delete(); curToken = tokenizer->GetToken(); if (curToken->GetType() != TK_INTEGER) { tokenizer->Error(TKERR_EXPECTED_INTEGER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } originy = curToken->GetIntValue(); curToken->Delete(); curToken = tokenizer->GetToken(); if (curToken->GetType() != TK_INTEGER) { tokenizer->Error(TKERR_EXPECTED_INTEGER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } originz = curToken->GetIntValue(); curToken->Delete(); curToken = tokenizer->GetToken(); break; case TK_AS_PLAYERPARMS: curToken->Delete(); curToken = tokenizer->GetToken(); if (curToken->GetType() != TK_INTEGER) { tokenizer->Error(TKERR_EXPECTED_INTEGER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } parm1 = curToken->GetIntValue(); curToken->Delete(); /* this param no longer exists... curToken = tokenizer->GetToken(); if (curToken->GetType() != TK_INTEGER) { tokenizer->Error(TKERR_EXPECTED_INTEGER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } parm2 = curToken->GetIntValue(); curToken->Delete(); */ curToken = tokenizer->GetToken(); if (curToken->GetType() != TK_INTEGER) { tokenizer->Error(TKERR_EXPECTED_INTEGER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } parm3 = curToken->GetIntValue(); curToken->Delete(); /* this param no longer exists... 
curToken = tokenizer->GetToken(); if (curToken->GetType() != TK_INTEGER) { tokenizer->Error(TKERR_EXPECTED_INTEGER, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } parm4 = curToken->GetIntValue(); curToken->Delete(); */ parm4 = 1; curToken = tokenizer->GetToken(); break; case TK_AS_SMOOTH: curToken->Delete(); m_lastModel->SetSmooth(true); curToken = tokenizer->GetToken(); break; case TK_AS_LOSEDUPVERTS: curToken->Delete(); m_lastModel->SetLoseDupVerts(true); curToken = tokenizer->GetToken(); break; case TK_AS_MAKESKIN: curToken->Delete(); m_lastModel->SetMakeSkin(true); curToken = tokenizer->GetToken(); break; case TK_AS_IGNOREBASEDEVIATIONS: curToken->Delete(); m_lastModel->SetIgnoreBaseDeviations(true); curToken = tokenizer->GetToken(); break; case TK_AS_SKEW90: curToken->Delete(); m_lastModel->SetSkew90(true); curToken = tokenizer->GetToken(); break; case TK_AS_NOSKEW90: curToken->Delete(); m_lastModel->SetNoSkew90(true); curToken = tokenizer->GetToken(); break; /* case TK_AS_SKEL: { CString strSkelPath; curToken->Delete(); if (!Tokenizer_ReadPath(strSkelPath, tokenizer, curToken )) { return; } m_lastModel->SetSkelPath(strSkelPath); m_lastModel->SetMakeSkelPath(""); curToken = tokenizer->GetToken(); } break; */ case TK_AS_MAKESKEL: { CString strMakeSkelPath; curToken->Delete(); if (!Tokenizer_ReadPath(strMakeSkelPath, tokenizer, curToken)) { return; } m_lastModel->SetMakeSkelPath(strMakeSkelPath); // m_lastModel->SetSkelPath(""); curToken = tokenizer->GetToken(); } break; default: tokenizer->Error(TKERR_UNEXPECTED_TOKEN, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } } tokenizer->PutBackToken(curToken); path.MakeLower(); m_lastModel->DeriveName(path); m_lastModel->SetParms(parm1, parm2, parm3, parm4); m_lastModel->SetOrigin(originx, originy, originz); m_lastModel->SetConvertType(iTokenType); } void CAssimilateDoc::AddComment(LPCTSTR comment) { // some code to stop those damn timestamps accumulating... 
// if (!strnicmp(comment,sSAVEINFOSTRINGCHECK,strlen(sSAVEINFOSTRINGCHECK))) { return; } CComment* thisComment = CComment::Create(comment); if (m_curModel != NULL) { m_curModel->AddComment(thisComment); return; } if (m_comments == NULL) { m_comments = thisComment; } else { CComment* curComment = m_comments; while (curComment->GetNext() != NULL) { curComment = curComment->GetNext(); } curComment->SetNext(thisComment); } } /////////////////////////////////////////////// #define MAX_FOUND_FILES 0x1000 #define MAX_OSPATH MAX_PATH #include <stdio.h> #include <io.h> char **Sys_ListFiles( const char *directory, const char *extension, int *numfiles ) { char search[MAX_OSPATH]; int nfiles; char **listCopy; char *list[MAX_FOUND_FILES]; struct _finddata_t findinfo; int findhandle; int flag; int i; if ( !extension) { extension = ""; } if ( extension[0] == '/' && extension[1] == 0 ) { extension = ""; flag = 0; } else { flag = _A_SUBDIR; } sprintf( search, "%s\\*%s", directory, extension ); // search nfiles = 0; findhandle = _findfirst (search, &findinfo); if (findhandle == -1) { *numfiles = 0; return NULL; } do { if ( flag ^ ( findinfo.attrib & _A_SUBDIR ) ) { if ( nfiles == MAX_FOUND_FILES - 1 ) { break; } list[ nfiles ] = strdup( strlwr(findinfo.name) ); nfiles++; } } while ( _findnext (findhandle, &findinfo) != -1 ); list[ nfiles ] = 0; _findclose (findhandle); // return a copy of the list *numfiles = nfiles; if ( !nfiles ) { return NULL; } listCopy = (char **) malloc( ( nfiles + 1 ) * sizeof( *listCopy ) ); for ( i = 0 ; i < nfiles ; i++ ) { listCopy[i] = list[i]; } listCopy[i] = NULL; return listCopy; } void Sys_FreeFileList( char **_list ) { int i; if ( !_list ) { return; } for ( i = 0 ; _list[i] ; i++ ) { free( _list[i] ); } free( _list ); } ////////////////////////////////////////// CString strSkippedFiles; CString strSkippedDirs; CString strCARsFound; int iCARsFound = 0; void AlphaSortCARs(void) { typedef set <string> SortedStrings_t; SortedStrings_t SortedStrings; for (int i=0; i<iCARsFound; i++) { CString strThisFile = strCARsFound; int iLoc = strThisFile.Find("\n"); if (iLoc>=0) { SortedStrings.insert(SortedStrings.end(), (LPCSTR)(strThisFile.Left(iLoc))); strCARsFound= strCARsFound.Mid(iLoc+1); } } // clear files-found string out, and re-enter from sorted set... // strCARsFound = ""; for (SortedStrings_t::iterator it = SortedStrings.begin(); it != SortedStrings.end(); ++it) { strCARsFound += (*it).c_str(); strCARsFound += "\n"; } } void R_CheckCARs( LPCSTR psDir, int iScanDepth, LPCSTR psGLAReferenceItShouldInclude ) { ((CMainFrame*)AfxGetMainWnd())->StatusMessage(va("(%d .CAR files found so far) Scanning Dir: %s",iCARsFound,psDir)); // ignore any dir with "test" in it... 
// if (//strstr(psDir,"\\test") //|| strstr(psDir,"\\backup") || strstr(psDir,"\\ignore_") ) { strSkippedDirs += psDir; strSkippedDirs += "\n"; return; } char **sysFiles, **dirFiles;//, *args[5]; int numSysFiles, i, /*len,*/ numdirs; char altname[MAX_OSPATH]; // char command1[MAX_OSPATH]; // char command2[MAX_OSPATH]; dirFiles = Sys_ListFiles(psDir, "/", &numdirs); if (numdirs > 2) { // if (!iScanDepth) // recursion limiter, to avoid scanning backup subdirs within model subdirs { for (i=2;i<numdirs;i++) { sprintf(altname, "%s\\%s", psDir, dirFiles[i]); //if (stricmp(altname,"q:\\send\\quake\\baseq3\\models\\players")) // dont recurse this dir { R_CheckCARs(altname, iScanDepth+1, psGLAReferenceItShouldInclude ); } } } } // sprintf(command1, "-targa"); // sprintf(command2, "-outfile"); sysFiles = Sys_ListFiles( psDir, ".car", &numSysFiles ); for ( i=0; i<numSysFiles; i++ ) { CString strThisFile = va("%s\\%s", psDir, sysFiles[i]); if (strstr((LPCSTR) strThisFile,"copy of ")) { strSkippedFiles += strThisFile; strSkippedFiles += "\n"; continue; } if (psGLAReferenceItShouldInclude && psGLAReferenceItShouldInclude [0] && !FileUsesGLAReference(strThisFile, psGLAReferenceItShouldInclude) ) { strSkippedFiles += strThisFile; strSkippedFiles += "\n"; continue; } strCARsFound += strThisFile + "\n"; iCARsFound++; /* char tgain[MAX_OSPATH]; sprintf(tgain,"%s\\%s", psDir, sysFiles[i]); strcpy( altname, tgain ); len = strlen( altname ); altname[len-3] = 'j'; altname[len-2] = 'p'; altname[len-1] = 'g'; args[0] = "cjpeg"; args[1] = command2; args[2] = altname; args[3] = command1; args[4] = tgain; //printf("%s", tgain); */ /* len = qmain(5, args); if (!len) { iNumberOf_FilesConverted++; iSizeOf_JPGsWritten += scGetFileLen(altname); iSizeOf_TGAsDeleted += scGetFileLen(tgain); printf(" nuked!(NOT)"); // remove(tgain); } printf("\n"); */ /* byte * pPixels = NULL; int iWidth; int iHeight; bool bRedundant = ScanTGA(tgain, &pPixels, &iWidth, &iHeight); if (pPixels) { free(pPixels); } if (bRedundant) { strTGAsWithRedundantAlpha += va("%s\n",tgain); iRedundantFilesFound++; } */ } Sys_FreeFileList( sysFiles ); Sys_FreeFileList( dirFiles ); } void CAssimilateDoc::Parse(CFile* file) { Parse(file->GetFilePath()); } void CAssimilateDoc::Parse(LPCSTR psFilename) { gbParseError = false; CAlertErrHandler errhandler; CTokenizer* tokenizer = CTokenizer::Create(TKF_NOCASEKEYWORDS | TKF_COMMENTTOKENS); tokenizer->SetErrHandler(&errhandler); tokenizer->SetSymbols(s_Symbols); tokenizer->SetKeywords(s_Keywords); tokenizer->AddParseFile(psFilename); extern bool gbSkipXSIRead_QuestionAsked; extern bool gbSkipXSIRead; gbSkipXSIRead_QuestionAsked = false; // opening a new file so reset our question gbSkipXSIRead = false; int tokType = TK_UNDEFINED; while(tokType != TK_EOF) { CToken* curToken = tokenizer->GetToken(); tokType = curToken->GetType(); switch(tokType) { case TK_EOF: curToken->Delete(); break; case TK_DOLLAR: curToken->Delete(); curToken = tokenizer->GetToken(); switch(curToken->GetType()) { case TK_AS_GRABINIT: curToken->Delete(); AddModel(); break; case TK_AS_SCALE: curToken->Delete(); curToken = tokenizer->GetToken(); if (curToken->GetType() != TK_FLOAT && curToken->GetType() != TK_INT) { tokenizer->Error(TKERR_EXPECTED_FLOAT, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } m_curModel->SetScale((curToken->GetType() == TK_FLOAT)?curToken->GetFloatValue():curToken->GetIntValue()); curToken->Delete(); break; case TK_AS_KEEPMOTION: curToken->Delete(); 
m_curModel->SetKeepMotion(true); break; case TK_AS_PCJ: curToken->Delete(); curToken = tokenizer->GetToken(); if (curToken->GetType() != TK_IDENTIFIER && curToken->GetType() != TK_DOLLAR) // eg: '$pcj pelvis' or '$pcj $flatten' { tokenizer->Error(TKERR_EXPECTED_STRING, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } if (curToken->GetType() == TK_DOLLAR) // read string after '$' char { curToken->Delete(); curToken = tokenizer->GetToken(); if (curToken->GetType() != TK_IDENTIFIER) { tokenizer->Error(TKERR_EXPECTED_STRING, curToken->GetStringValue()); curToken->Delete(); tokenizer->GetToEndOfLine()->Delete(); return; } m_curModel->PCJList_AddEntry(va("$%s",curToken->GetStringValue())); } else { m_curModel->PCJList_AddEntry(curToken->GetStringValue()); } curToken->Delete(); break; case TK_AS_GRAB: curToken->Delete(); ParseGrab(tokenizer,TK_AS_GRAB); break; case TK_AS_GRAB_GLA: curToken->Delete(); ParseGrab(tokenizer,TK_AS_GRAB_GLA); break; case TK_AS_GRABFINALIZE: curToken->Delete(); EndModel(); break; case TK_AS_CONVERT: curToken->Delete(); ParseConvert(tokenizer,TK_AS_CONVERT); break; case TK_AS_CONVERTMDX: curToken->Delete(); ParseConvert(tokenizer,TK_AS_CONVERTMDX); break; case TK_AS_CONVERTMDX_NOASK: curToken->Delete(); ParseConvert(tokenizer,TK_AS_CONVERTMDX_NOASK); break; case TK_COMMENT: AddComment(curToken->GetStringValue()); curToken->Delete(); break; default: tokenizer->Error(TKERR_UNEXPECTED_TOKEN); curToken->Delete(); break; } break; case TK_COMMENT: AddComment(curToken->GetStringValue()); curToken->Delete(); break; default: tokenizer->Error(TKERR_UNEXPECTED_TOKEN); curToken->Delete(); break; } } tokenizer->Delete(); UpdateAllViews(NULL, AS_NEWFILE, NULL); Resequence(); } void CAssimilateDoc::Write(CFile* file) { CTxtFile* outfile = CTxtFile::Create(file); /* // write out time/date stamp... 
//
	CString commentLine;
	CTime time = CTime::GetCurrentTime();

	commentLine.Format("// %s %s updated %s", sSAVEINFOSTRINGCHECK, file->GetFileName(), time.Format("%H:%M %A, %B %d, %Y"));
	outfile->Writeln(commentLine);
*/
	CModel* curModel = m_modelList;
	while(curModel != NULL)
	{
		curModel->Write(outfile);
		curModel = curModel->GetNext();
	}

	CComment* curComment = m_comments;
	while(curComment != NULL)
	{
		curComment->Write(outfile);
		curComment = curComment->GetNext();
	}

	outfile->Delete();
}

void CAssimilateDoc::Serialize(CArchive& ar)
{
	if (ar.IsStoring())
	{
		// TODO: add storing code here
		Write(ar.GetFile());
	}
	else
	{
		// TODO: add loading code here
		Parse(ar.GetFile());
	}
}

/////////////////////////////////////////////////////////////////////////////
// CAssimilateDoc diagnostics

#ifdef _DEBUG
void CAssimilateDoc::AssertValid() const
{
	CDocument::AssertValid();
}

void CAssimilateDoc::Dump(CDumpContext& dc) const
{
	CDocument::Dump(dc);
}
#endif //_DEBUG

/////////////////////////////////////////////////////////////////////////////
// CAssimilateDoc commands

void CAssimilateDoc::DeleteContents()
{
	// TODO: Add your specialized code here and/or call the base class
	UpdateAllViews(NULL, AS_DELETECONTENTS, NULL);

	while(m_comments != NULL)
	{
		CComment* curComment = m_comments;
		m_comments = curComment->GetNext();
		curComment->Delete();
	}
	while(m_modelList != NULL)
	{
		CModel* curModel = m_modelList;
		m_modelList = curModel->GetNext();
		curModel->Delete();
	}
	m_curModel = NULL;
	m_lastModel = NULL;

	gbReportMissingASEs = true;
	giFixUpdatedASEFrameCounts = YES;

	CDocument::DeleteContents();
}

CModel* CAssimilateDoc::GetFirstModel()
{
	return m_modelList;
}

int CAssimilateDoc::GetNumModels()
{
	int iCount = 0;

	CModel *theModel = m_modelList;
	while (theModel)
	{
		iCount++;
		theModel = theModel->GetNext();
	}

	return iCount;
}

void CAssimilateDoc::OnAddfiles()
{
	// TODO: Add your command handler code here
	CFileDialog theDialog(true, ".xsi", NULL,
		OFN_EXPLORER | OFN_ALLOWMULTISELECT | OFN_FILEMUSTEXIST | OFN_PATHMUSTEXIST,
		_T("Anim Files (*.ase)(*.xsi)(*.gla)|*.ase;*.xsi;*.gla|All Files|*.*||"), NULL);
	////////////
	// Model files (*.MDR)(*.MD3)|*.md?|
	////////////

	char filenamebuffer[16384];
	filenamebuffer[0] = '\0';
	theDialog.m_ofn.lpstrFile = filenamebuffer;
	theDialog.m_ofn.nMaxFile = sizeof(filenamebuffer);

	CString strInitialDir = ((CAssimilateApp*)AfxGetApp())->GetQuakeDir();
	strInitialDir+= "models/players";
	strInitialDir.Replace("/","\\");
	theDialog.m_ofn.lpstrInitialDir = strInitialDir;

	int result = theDialog.DoModal();
	if (result != IDOK)
	{
		return;
	}

	CWaitCursor waitcursor;

	POSITION pos = theDialog.GetStartPosition();
	while(pos != NULL)
	{
		CString thisfile = theDialog.GetNextPathName(pos);

/*
		int loc = thisfile.Find(':');
		if (loc > 0)
		{
			thisfile = thisfile.Right(thisfile.GetLength() - loc - 1);
		}
*/
		Filename_RemoveBASEQ(thisfile);
		AddFile(thisfile);
	}
	SetModifiedFlag();
	UpdateAllViews(NULL, AS_NEWFILE, NULL);
	Resequence();	// must be AFTER UpdateAllViews
}

void CAssimilateDoc::AddFile(LPCTSTR name)
{
	CString strTemp(name);
	strTemp.MakeLower();
	if (!strstr(strTemp,"root.xsi") || GetYesNo("You're trying to add \"root.xsi\", which is inherent, you should only do this if you're making a model that has no separate anim files\n\nProceed?"))
	{
		// update, ignore any files with a "_1" (2,3, etc) just before the suffix (this skips LOD files)...
// // int iNameLen = strlen(name); // (also, only check files of at least namelen "_?.ase") // // if ( iNameLen>6 && name[iNameLen-6]=='_' && isdigit(name[iNameLen-5]) ) { // this is a LOD filename, so ignore it... } // else { CModel *curModel = GetCurrentUserSelectedModel(); if (!curModel) { curModel = AddModel(); CString path = name; path.MakeLower(); path.Replace('\\', '/'); int loc = path.ReverseFind('.'); if (loc > -1) { path = path.Left(loc); } loc = path.ReverseFind('/'); path = path.Left(loc); path = path + "/root"; curModel->DeriveName(path); } // check that we don't already have this file... // if (!curModel->ContainsFile(name)) { curModel->AddSequence(CSequence::CreateFromFile(name, curModel->ExtractComments())); } } } } void CAssimilateDoc::OnExternal() { bool bCFGWritten = false; if (WriteCFGFiles(true,bCFGWritten)) { CString strReport; if (bCFGWritten) { if (((CAssimilateApp*)AfxGetApp())->GetMultiPlayerMode()) { strReport = "\n\n( CFG file written for MULTI-PLAYER format )"; } else { strReport = "\n\n( CFG file written for SINGLE-PLAYER format )"; } } else { strReport = "\n\n( CFG not needed, not written )"; } InfoBox(strReport); } } // called both from the menu, and now from the Build() member... // bool CAssimilateDoc::WriteCFGFiles(bool bPromptForNames, bool &bCFGWritten) { bCFGWritten = false; CModel* curModel = m_modelList; while(curModel != NULL) { bool bThisCFGWritten = false; if (!curModel->WriteExternal(bPromptForNames,bThisCFGWritten)) { return false; } if (bThisCFGWritten) bCFGWritten = true; curModel = curModel->GetNext(); } return true; } void CAssimilateDoc::Resequence() { CModel* curModel = m_modelList; while(curModel != NULL) { curModel->Resequence(true); curModel = curModel->GetNext(); } SetModifiedFlag(); UpdateAllViews(NULL, AS_FILESUPDATED, NULL); // needed } void CAssimilateDoc::OnResequence() { extern bool gbSkipXSIRead_QuestionAsked; extern bool gbSkipXSIRead; gbSkipXSIRead_QuestionAsked = false; // make it happen again gbSkipXSIRead = false; Resequence(); } void CAssimilateDoc::OnViewAnimEnums() { gbViewAnimEnums = !gbViewAnimEnums; UpdateAllViews(NULL, AS_FILESUPDATED, NULL); } void CAssimilateDoc::OnUpdateViewAnimEnums(CCmdUI* pCmdUI) { pCmdUI->SetCheck(gbViewAnimEnums); } void CAssimilateDoc::OnViewFrameDetails() { gbViewFrameDetails = !gbViewFrameDetails; UpdateAllViews(NULL, AS_FILESUPDATED, NULL); } void CAssimilateDoc::OnUpdateViewFrameDetails(CCmdUI* pCmdUI) { pCmdUI->SetCheck(gbViewFrameDetails); } // 1) save qdt, 2) run qdata on it, 3) if success, save .cfg file... // bool CAssimilateDoc::Build(bool bAllowedToShowSuccessBox, int iLODLevel, bool bSkipSave) // damn this stupid Serialize() crap { bool bSuccess = false; if (Validate()) // notepad will have been launched with a textfile of errors at this point if faulty { // seems valid, so save the QDT... // giLODLevelOverride = iLODLevel; if (!bSkipSave) { OnFileSave(); } CString csQDataLocation = ((CAssimilateApp*)AfxGetApp())->GetQDataFilename(); // hack-city!!!!!!!!!! 
// CModel* curModel = ghAssimilateView->GetDocument()->GetCurrentUserSelectedModel(); if (curModel->GetPreQuat()) { csQDataLocation.MakeLower(); csQDataLocation.Replace("carcass","carcass_prequat"); } CString params = csQDataLocation; if (!bSkipSave) { params += " -keypress"; } params += " -silent"; params += " "; params += m_strPathName; // += (eg) {"Q:\quake\baseq3\models\players\ste_assimilate_test\ste_testaa.qdt"} Filename_AccountForLOD(params, giLODLevelOverride); PROCESS_INFORMATION pi; STARTUPINFO startupinfo; startupinfo.cb = sizeof(startupinfo); startupinfo.lpReserved = NULL; startupinfo.lpDesktop = NULL; startupinfo.lpTitle = NULL; startupinfo.dwFlags = 0; startupinfo.cbReserved2 = 0; startupinfo.lpReserved2 = NULL; params.Replace("/","\\"); csQDataLocation.Replace("/","\\"); LPTSTR paramsPass = params.GetBuffer(params.GetLength() + 1); StartWait(); // ++++++++++++++++++++++++ if (CreateProcess(csQDataLocation, paramsPass, NULL, NULL, FALSE, CREATE_NEW_CONSOLE, NULL, NULL, &startupinfo, &pi)) { WaitForSingleObject(pi.hProcess, INFINITE); EndWait(); // ------------------------ DWORD result; GetExitCodeProcess(pi.hProcess,&result); if (result) { char error[64]; sprintf(error,"Process returned error: %d",result); MessageBox(NULL,error,"Build Failed",MB_OK|MB_ICONERROR); } CloseHandle(pi.hProcess); if (result==0) { // QData was run successfully at this point, so write the CFG file(s)... // if (iLODLevel) // only LOD 0 writes out the CFG file { bSuccess = true; } else { bool bCFGWritten = false; if (WriteCFGFiles(false,bCFGWritten)) // false = no name prompt, derive automatically from model name { // success (on a plate)... // if (bAllowedToShowSuccessBox) { CString strReport("Everything seemed to go ok\n\nCAR"); if (bCFGWritten) { strReport += " and CFG files written"; if (((CAssimilateApp*)AfxGetApp())->GetMultiPlayerMode()) { strReport += "\n\n\n\n( CFG file written for MULTI-PLAYER format )"; } else { strReport += "\n\n\n\n( CFG file written for SINGLE-PLAYER format )"; } } else { strReport += " file written"; strReport += "\n\n\n\n( CFG file not written for GLA-referencing model )"; } InfoBox(strReport); } bSuccess = true; } } } } else { EndWait(); // ------------------------ MessageBox(NULL,"Could not spawn process.","Build Failed",MB_OK|MB_ICONERROR); } params.ReleaseBuffer(); } giLODLevelOverride = 0; return bSuccess; } void CAssimilateDoc::OnBuildMultiLOD() { int iErrors = 0; // to save time, I'll run all the validates first, and only if they're all ok will I go on to the build // (which incidentally does a harmless re-validate again) // for (int i=0; i< 1+EXTRA_LOD_LEVELS; i++) { iErrors += Validate(false, i)?0:1; } // go ahead if all clear... // // (I'll write them in reverse-LOD order so the last one written is the standard one. This should hopefully avoid // any problems with the current document potentially becoming "(name)_3.qdt" from then on to MFC) // if (!iErrors) { for (int i=EXTRA_LOD_LEVELS; i>=0; i--) { Build( !i, // bool bAllowedToShowSuccessBox (ie only on last one) i, // int iLODLevel false // bool bSkipSave ); } } } // 1) save qdt, 2) run qdata on it, 3) if success, save .cfg file... 
//
void CAssimilateDoc::OnBuild()
{
	Build(	true,	// bool bAllowedToShowSuccessBox,
			0,		// int iLODLevel
			false	// bool bSkipSave
			);
}

void CAssimilateDoc::ClearModelUserSelectionBools()
{
	CModel* curModel = m_modelList;
	while (curModel)
	{
		curModel->SetUserSelectionBool(false);
		curModel = curModel->GetNext();
	}
}

// if there's only one model loaded, return that, if none, return NULL, else if >1, return selected one, else NULL
//
CModel* CAssimilateDoc::GetCurrentUserSelectedModel()
{
	CModel *curModel = m_modelList;

	if (!curModel || !curModel->GetNext())
	{
		return curModel;	// 0 or 1 models total loaded
	}

	// more than one loaded, so find the selected one and return that...
	//
	while (curModel)
	{
		if (curModel->GetUserSelectionBool())
		{
			return curModel;
		}
		curModel = curModel->GetNext();
	}

	return NULL;	// multiple loaded, but none selected
}

static bool FileUsesGLAReference(LPCSTR psFilename, LPCSTR psGLAReference)
{
	bool bReturn = false;

	FILE *fHandle = fopen(psFilename,"rt");
	if (fHandle)
	{
		int iLen = filesize(fHandle);
		if (iLen>0)
		{
			char *psText = (char*) malloc(iLen+1);
			if (psText)
			{
				fread(psText,1,iLen,fHandle);
				psText[iLen]='\0';
				strlwr(psText);

				// this is a simple test that could be made more precise, but for now...
				//
				if ( (strstr(psText,"aseanimgrab_gla") || strstr(psText,"makeskel"))
					&& strstr(psText,psGLAReference)
					)
				{
					bReturn = true;
				}

				free(psText);
			}
		}
		fclose(fHandle);
	}

	return bReturn;
}

#define sASSUMEPATH "w:\\game\\base"

bool	gbCarWash_YesToXSIScan;
bool	gbCarWash_DoingScan = false;	// MUST default to this
bool	gbQueryGoAhead = true;			// MUST default to this
LPCSTR	gpsCARWashDirOverride = NULL;	// MUST default to this
CString strCarWashErrors;
bool	gbCarwashErrorsOccured;
CString strMustContainThisGLA;			// MUST be blank, else name of GLA to be present to be considered

void CAssimilateDoc::OnCarWashActual()
{
	gbCarwashErrorsOccured = false;

	CString strStartDir = gpsCARWashDirOverride?gpsCARWashDirOverride:((CAssimilateApp*)AfxGetApp())->GetQuakeDir();
	strStartDir.Replace("/","\\");

	if (!strStartDir.GetLength())
	{
		ErrorBox("Quake path not known at this point. Prefs not set up?");
		gbCarwashErrorsOccured = true;
		return;

//		if (!GetYesNo("Quake path not known at this point because you've not loaded anything yet\n\nShould I assume " "\"" sASSUMEPATH "\"" "?"))
//			return;
//
//		strStartDir = sASSUMEPATH;
	}

	// (this app was written so that GetQuakeDir() returns a path with a trailing slash, not nice normally, but here...)
	//
//	if (strStartDir.GetAt(strStartDir.GetLength()-1)=='\\')
//		strStartDir = strStartDir.Left(strStartDir.GetLength()-1);

	if (gpsCARWashDirOverride == NULL)
	{
		strStartDir += "models";//\\players";
	}

	if (gbQueryGoAhead)
	{
		if (!GetYesNo(va("About to scan: \"%s\\*.CAR /s\"\n\n"/*"This can take a LONG time, "*/"Proceed?",strStartDir)))
			return;
	}

	gbCarWash_YesToXSIScan = GetYesNo("Full XSI scan? ( \"NO\" will skip XSI reads, but v3.0 files are quick to read anyway now )");

	CWaitCursor wait;

	strCARsFound.Empty();
	iCARsFound = 0;
	strSkippedDirs.Empty();
	strSkippedFiles.Empty();

	R_CheckCARs( strStartDir, 0, strMustContainThisGLA);	//bool bBuildListOnly
	AlphaSortCARs();	// not important to alpha-sort here (during car-wash), just looks nicer

	((CMainFrame*)AfxGetMainWnd())->StatusMessage("Ready");

	// ok, now ready to begin pass 2...
// CString strReport; if (!iCARsFound) { ASSERT(0); strReport = "No suitable .CAR files found for processing!\n\n"; if (!strSkippedDirs.IsEmpty()) { strReport+= "Skipped Dirs:\n\n"; strReport+= strSkippedDirs; } if (!strSkippedFiles.IsEmpty()) { strReport+= "\n\nSkipped Files:\n\n"; strReport+= strSkippedFiles; } ErrorBox(strReport); gbCarwashErrorsOccured = true; } else { //---------------- gbCarWash_DoingScan = true; strCarWashErrors.Empty(); //---------------- CString strTotalErrors; strReport = "Processed files:\n\n"; for (int i=0; i<iCARsFound; i++) { CString strThisFile = strCARsFound; int iLoc = strThisFile.Find("\n"); if (iLoc>=0) { strThisFile = strThisFile.Left(iLoc); strCARsFound= strCARsFound.Mid(iLoc+1); strReport += strThisFile + "\n"; ((CMainFrame*)AfxGetMainWnd())->StatusMessage(va("Scanning File %d/%d: %s",i+1,iCARsFound,(LPCSTR)strThisFile)); if (1)//strMustContainThisGLA.IsEmpty() || FileUsesGLAReference(strThisFile, strMustContainThisGLA)) { OnNewDocument(); Parse(strThisFile); if (gbParseError) { strTotalErrors += va("\nParse error in CAR file \"%s\"\n",(LPCSTR)strThisFile); } else { OnValidate(); if (!strCarWashErrors.IsEmpty()) { // "something is wrong..." :-) // strTotalErrors += va("\nError in file \"%s\":\n\n%s\n",(LPCSTR)strThisFile,(LPCSTR)strCarWashErrors); } strCarWashErrors.Empty(); } } } else { ASSERT(0); strThisFile.Insert(0,"I fucked up, the following line didn't seem to have a CR: (tell me! -Ste)\n\n"); ErrorBox(strThisFile); } } //---------------- gbCarWash_DoingScan = false; //---------------- OnNewDocument(); // trash whatever was loaded last // strReport = "Processed files:\n\n"; // strReport+= strCARsFound; strReport+= "\n\nSkipped Dirs:\n\n"; strReport+= strSkippedDirs; strReport+= "\n\nSkipped Files:\n\n"; strReport+= strSkippedFiles; if (strTotalErrors.IsEmpty()) { strReport.Insert(0,"(No additional errors found)\n\n"); } else { strReport+= "\n\nAdditional errors will now be sent to Notepad!..."; gbCarwashErrorsOccured = true; } if (gbQueryGoAhead || gbCarwashErrorsOccured) { InfoBox(strReport); } if (!strTotalErrors.IsEmpty()) { strTotalErrors.Insert(0, "The following errors occurred during CARWash...\n\n"); SendToNotePad(strTotalErrors, "carwash_errors.txt"); } } //#define sASSUMEPATH "q:\\quake\\baseq3" // strTGAsWithRedundantAlpha.Insert(0,"The following files are defined as 32-bit (ie with alpha), but the alpha channel is blank (ie all 255)...\n\n"); // SendToNotePad(strTGAsWithRedundantAlpha, "TGAs_With_Redundant_Alpha.txt"); ((CMainFrame*)AfxGetMainWnd())->StatusMessage("Ready"); } void CAssimilateDoc::OnCarWash() { strMustContainThisGLA.Empty(); OnCarWashActual(); } // creates a temp file, then spawns notepad with it... // bool SendToNotePad(LPCSTR psWhatever, LPCSTR psLocalFileName) { bool bReturn = false; LPCSTR psOutputFileName = va("%s\\%s",scGetTempPath(),psLocalFileName); FILE *handle = fopen(psOutputFileName,"wt"); if (handle) { fprintf(handle,psWhatever); fclose(handle); char sExecString[MAX_PATH]; sprintf(sExecString,"notepad %s",psOutputFileName); if (WinExec(sExecString, // LPCSTR lpCmdLine, // address of command line SW_SHOWNORMAL // UINT uCmdShow // window style for new application ) >31 // don't ask me, Windoze just uses >31 as OK in this call. ) { // ok...
// bReturn = true; } else { ErrorBox("Unable to locate/run NOTEPAD on this machine!\n\n(let me know about this -Ste)"); } } else { ErrorBox(va("Unable to create file \"%s\" for notepad to use!",psOutputFileName)); } return bReturn; } // AFX OnXxxx calls need to be void return, but the validate needs to ret a bool for elsewhere, so... // void CAssimilateDoc::OnValidate() { Validate(!gbCarWash_DoingScan); } void CAssimilateDoc::OnValidateMultiLOD() { int iErrors = 0; for (int i=0; i< 1+EXTRA_LOD_LEVELS; i++) { iErrors += Validate(false, i)?0:1; } if (!iErrors) { InfoBox(va("Everything seems OK\n\n(Original + %d LOD levels checked)",EXTRA_LOD_LEVELS)); } } // checks for things that would stop the build process, such as missing ASE files, invalid loopframes, bad anim enums, etc, // and writes all the faults to a text file that it displays via launching notepad // bool CAssimilateDoc::Validate(bool bInfoBoxAllowed, int iLODLevel) { OnResequence(); int iNumModels = ghAssimilateView->GetDocument()->GetNumModels(); CModel* curModel = ghAssimilateView->GetDocument()->GetCurrentUserSelectedModel(); bool bValidateAll = false; int iFaults = 0; StartWait(); if (iNumModels) { CString sOutputTextFile; sOutputTextFile.Format("%s\\validation_faults%s.txt",scGetTempPath(),iLODLevel?va("_LOD%d",iLODLevel):""); FILE *hFile = fopen(sOutputTextFile,"wt"); if (hFile) { if (iNumModels>1) { if (!curModel) // >1 models, but none selected, so validate all { bValidateAll = true; } else { // >1 models, 1 selected, ask if we should do all... // bValidateAll = GetYesNo(va("Validate ALL models?\n\n( NO = model \"%s\" only )",curModel->GetName())); } } if (bValidateAll) { curModel = ghAssimilateView->GetDocument()->GetFirstModel(); } if (iLODLevel) { fprintf(hFile,"(LOD Level %d)\n",iLODLevel); } while (curModel) { fprintf(hFile,"\nModel: \"%s\"\n\n",curModel->GetName()); int iThisModelFaults = iFaults; if ( ( curModel->GetMakeSkelPath() && strlen(curModel->GetMakeSkelPath()) ) // || // ( curModel->GetSkelPath() && strlen(curModel->GetSkelPath()) ) ) { // ... then all is fine } else { // this is an error, UNLESS you have a GLA sequence... // if (!curModel->HasGLA()) { //fprintf(hFile, "Model must have either a 'skel' or 'makeskel' path\n ( Double-click on the model name in the treeview to edit )\n"); fprintf(hFile, "Model must have a 'makeskel' path\n ( Double-click on the top tree item's name (should be a folder), then click \"Makes it's own skeleton\" in the dialog )\n"); iFaults++; } } // // validate all sequences within this model... // int iGLACount = 0; int iXSICount = 0; CSequence* curSequence = curModel->GetFirstSequence(); while (curSequence) { // we'll need to check these counts after checking all sequences... // if (curSequence->IsGLA()) { iGLACount++; } else { iXSICount++; } #define SEQREPORT CString temp;temp.Format("Sequence \"%s\":",curSequence->GetName());while (temp.GetLength()<35)temp+=" "; // check 1, does the ASE file exist? (actually this is talking about XSIs/GLAs, but WTF... // CString nameASE = ((CAssimilateApp*)AfxGetApp())->GetQuakeDir(); nameASE+= curSequence->GetPath(); Filename_AccountForLOD(nameASE, iLODLevel); const int iNameSpacing=35; if (!FileExists(nameASE)) { if (!gbCarWash_DoingScan) // because this will only duplicate reports otherwise { SEQREPORT; temp += va(" ASE/XSI/GLA file not found: \"%s\"\n",nameASE); fprintf(hFile,temp); iFaults++; } } // new check, if this is an LOD ASE it must have the same framecount as the base (non-LOD) version... 
// if (iLODLevel) { // read this file's framecount... // int iStartFrame, iFrameCount, iFrameSpeed; curSequence->ReadASEHeader( nameASE, iStartFrame, iFrameCount, iFrameSpeed); // read basefile's framecount... // CString baseASEname = ((CAssimilateApp*)AfxGetApp())->GetQuakeDir(); baseASEname+= curSequence->GetPath(); int iStartFrameBASE, iFrameCountBASE, iFrameSpeedBASE; curSequence->ReadASEHeader( baseASEname, iStartFrameBASE, iFrameCountBASE, iFrameSpeedBASE); // same?... // if (iFrameCount != iFrameCountBASE) { SEQREPORT; temp += va(" (SERIOUS ERROR) base ASE has %d frames, but this LOD version has %d frames!\n",iFrameCountBASE,iFrameCount); fprintf(hFile,temp); iFaults++; } } // check 2, is the loopframe higher than the framecount of that sequence?... // if (curSequence->GetLoopFrame() >= curSequence->GetFrameCount()) { SEQREPORT; temp += va(" loopframe %d is illegal, max = %d\n",curSequence->GetLoopFrame(),curSequence->GetFrameCount()-1); fprintf(hFile,temp); iFaults++; } if (!curModel->IsGhoul2()) { // check 3, is the enum valid?... // if (curSequence->GetEnumType() == ET_INVALID) { SEQREPORT; temp += va(" invalid animation enum \"%s\"\n",curSequence->GetEnum()); fprintf(hFile,temp); iFaults++; } } int iEnumUsageCount = curModel->AnimEnumInUse(curSequence->GetEnum()); if (iEnumUsageCount>1) { SEQREPORT; temp += va(" animation enum \"%s\" is used %d times\n",curSequence->GetEnum(),iEnumUsageCount); fprintf(hFile,temp); iFaults++; } // a whole bunch of checks for the additional sequences... // if (!curSequence->IsGLA()) { for (int i=0; i<MAX_ADDITIONAL_SEQUENCES; i++) { CSequence *additionalSeq = curSequence->AdditionalSeqs[i]; #define ADDITIONALSEQREPORT CString temp;temp.Format("Sequence \"%s\": (Additional: \"%s\"):",curSequence->GetName(),additionalSeq->GetEnum());while (temp.GetLength()<60)temp+=" "; if (additionalSeq->AdditionalSequenceIsValid()) { // check for duplicate enum names... // int iEnumUsageCount = curModel->AnimEnumInUse(additionalSeq->GetEnum()); if (iEnumUsageCount>1) { ADDITIONALSEQREPORT; temp += va(" animation enum \"%s\" is used %d times\n",additionalSeq->GetEnum(),iEnumUsageCount); fprintf(hFile,temp); iFaults++; } // additional sequences must actually have some frames... // if (additionalSeq->GetFrameCount()<=0) { ADDITIONALSEQREPORT; temp += va(" a frame count of %d is illegal (min = 1)\n",additionalSeq->GetFrameCount()); fprintf(hFile,temp); iFaults++; } else { // the start/count range of this additional seq can't exceed its master... // if (additionalSeq->GetStartFrame() + additionalSeq->GetFrameCount() > curSequence->GetFrameCount()) { ADDITIONALSEQREPORT; temp += va(" illegal start/count range of %d..%d exceeds master range of %d..%d\n", additionalSeq->GetStartFrame(), additionalSeq->GetStartFrame() + additionalSeq->GetFrameCount(), curSequence->GetStartFrame(), curSequence->GetStartFrame() + curSequence->GetFrameCount() ); fprintf(hFile,temp); iFaults++; } else { // loopframe of an additional seq must be within its own seq framecount... // if (additionalSeq->GetLoopFrame() >= additionalSeq->GetFrameCount()) { ADDITIONALSEQREPORT; temp += va(" loopframe %d is illegal, max is %d\n",additionalSeq->GetLoopFrame(),additionalSeq->GetFrameCount()-1); fprintf(hFile,temp); iFaults++; } } } } else { // is this additional sequence invalid because of being just empty or being bad?...
// if (strlen(additionalSeq->GetEnum())) { // it's a bad sequence (probably because of its enum being deleted from anims.h since it was saved) // ADDITIONALSEQREPORT; temp += va(" this animation enum no longer exists in \"%s\"\n",sDEFAULT_ENUM_FILENAME); fprintf(hFile,temp); iFaults++; } } } } curSequence = curSequence->GetNext(); } // special GLA/XSI checks... // { if (iGLACount>1) { fprintf(hFile, "Model has more than one GLA file specified\n"); iFaults++; } if (iGLACount && iXSICount) { fprintf(hFile, "Model has both GLA and XSI files specified. Pick one method or the other\n"); iFaults++; } if (iGLACount && (curModel->GetMakeSkelPath() && strlen(curModel->GetMakeSkelPath())) ) { fprintf(hFile, "Model has both a GLA sequence and a '-makeskel' path, this is meaningless\n"); iFaults++; } /* if (iGLACount && (curModel->GetSkelPath() && strlen(curModel->GetSkelPath())) ) { fprintf(hFile, "Model has both a GLA sequence and a '-skel' path, you should probably blank out the 'skel' path\n"); iFaults++; } */ } if (iThisModelFaults == iFaults) { fprintf(hFile,"(ok)\n"); // just to be nice if reporting on >1 model... } else { fprintf(hFile,"\n(%d faults)\n",iFaults-iThisModelFaults); } curModel = curModel->GetNext(); if (!bValidateAll) break; }// while (curModel) fclose(hFile); if (iFaults) { // now run notepad.exe on the file we've just created... // CString sExecString; sExecString.Format("notepad %s",sOutputTextFile); if (WinExec(sExecString, // LPCSTR lpCmdLine, // address of command line SW_SHOWNORMAL // UINT uCmdShow // window style for new application ) >31 // don't ask me, Windoze just uses >31 as OK in this call. ) { // ok. } else { ErrorBox("Unable to locate/run NOTEPAD on this machine!\n\n(let me know about this -Ste)"); } } else { if (bInfoBoxAllowed) { InfoBox("Everything ok\n\n( All files exist, enums exist, and frames seem to be valid ranges )"); } } }// if (hFile) else { ErrorBox(va("Arrgh! Unable to create file '%s'!\n\n(let me know about this -Ste)",sOutputTextFile)); } }// if (iNumModels) EndWait(); return !iFaults; } void CAssimilateDoc::OnUpdateResequence(CCmdUI* pCmdUI) { pCmdUI->Enable(!!GetNumModels()); } void CAssimilateDoc::OnUpdateFileSave(CCmdUI* pCmdUI) { pCmdUI->Enable(!!GetNumModels()); } void CAssimilateDoc::OnUpdateFileSaveAs(CCmdUI* pCmdUI) { pCmdUI->Enable(!!GetNumModels()); } void CAssimilateDoc::OnUpdateExternal(CCmdUI* pCmdUI) { pCmdUI->Enable(!!GetNumModels()); } void CAssimilateDoc::OnUpdateValidate(CCmdUI* pCmdUI) { pCmdUI->Enable(!!GetNumModels()); } void CAssimilateDoc::OnUpdateBuild(CCmdUI* pCmdUI) { pCmdUI->Enable(!!GetNumModels()); } BOOL CAssimilateDoc::DoFileSave() { // sourcesafe stuff { /////////////////////////////////////////// // // some stuff so I can leave the code below untouched... // LPCSTR filename = (LPCSTR) m_strPathName; #define Sys_Printf(blah) StatusText(blah) // /////////////////////////////////////////// // // check it out first, if necessary... // if ( SS_FunctionsAvailable() ) { if ( SS_IsUnderSourceControl( filename ) ) { if ( SS_IsCheckedOut( filename )) { if ( !SS_IsCheckedOutByMe( filename )) { CString strCheckOuts; int iCount; if (SS_ListCheckOuts( filename, strCheckOuts, iCount )) { ErrorBox( va("File \"%s\" is checked out by:\n\n%s\n... so you can't save over it...\n\n... 
so you can't compile...\n\nTough luck matey!....(bwahahahaha!!!!!)",filename,(LPCSTR) strCheckOuts)); return false; } } else { Sys_Printf ("(You own this file under SourceSafe)\n"); } } else { if ( GetYesNo( va("The file \"%s\"\n\n...needs to be checked out so I can save over it\n\nProceed? ('No' will abort the save)",filename) )) { if (SS_CheckOut( filename )) { Sys_Printf ("(File checked out ok)\n"); } else { ASSERT(0); // I want to know if this ever happens Sys_Printf ("(Error during file checkout, aborting save)\n"); return false; } } else { Sys_Printf ("(Checkout cancelled, aborting save)\n"); return false; } } } else { Sys_Printf ("(This file is not under SourceSafe control)\n"); } } // now do separate check for files that are still write-protected... // DWORD dw = GetFileAttributes( filename ); if (dw != 0xFFFFFFFF && ( dw & FILE_ATTRIBUTE_READONLY )) { // hmmm, still write protected... // if (SS_SetupOk()) { if (GetYesNo( va("The file \"%s\" is write-protected, but probably not because of SourceSafe, just as a safety thing.\n\n(Tell me if you believe this is wrong -Ste)\n\nDo you want me to un-writeprotect it so you can save over it? ('No' will abort the save)",filename ))) { if ( !SetFileAttributes( filename, dw&~FILE_ATTRIBUTE_READONLY) ) { ErrorBox("Failed to remove write protect, aborting..."); return false; } } else { Sys_Printf ("(File was not write-enabled, aborting save)"); return false; } } else { ErrorBox( va("The file \"%s\" is write-protected, but you don't appear to have SourceSafe set up properly on this machine, so I can't tell if the file is protected or just not checked out to you.\n\nIf you really want to edit this you'll have to write-enable it yourself (which I'm deliberately not offering to do for you here <g>)",filename)); } } } BOOL b = CDocument::DoFileSave(); if (b == TRUE) { // sourcesafe LPCSTR filename = (LPCSTR) m_strPathName; #define Sys_Printf(blah) StatusText(blah) if ( SS_FunctionsAvailable() ) { if ( SS_IsUnderSourceControl( filename )) { if ( SS_IsCheckedOutByMe( filename )) { // if ( SS_CheckIn( filename )) // { // Sys_Printf("(Checked in ok)\n"); // } // else // { // Sys_Printf("Error during CheckIn\n"); // } } else { ErrorBox( va("You do not have file \"%s\" checked out",filename)); } } else { // new bit, if it wasn't under SourceSafe, then ask if they want to add it... // if (GetYesNo(va("File \"%s\" is not under SourceSafe control, add to database?",filename))) { if ( SS_Add( filename )) { Sys_Printf("(File was added to SourceSafe Ok)\n"); // check it out as well... // if (SS_CheckOut( filename )) { Sys_Printf ("(File checked out ok)\n"); } else { ASSERT(0); // I want to know if this ever happens Sys_Printf ("( Error during file checkout! )\n"); } } else { ErrorBox( va("Error adding file \"%s\" to SourceSafe",filename)); } } } } } StatusText(NULL); return b; } BOOL CAssimilateDoc::OnSaveDocument(LPCTSTR lpszPathName) { CString strFileName = lpszPathName; Filename_AccountForLOD(strFileName, giLODLevelOverride); // this is actually junk now, should lose all this LODoverride stuff return CDocument::OnSaveDocument(strFileName); } // remember these two from session to session, maybe write to registry sometime?... // bool gbPreValidate = true; CString strInitialBuildPath = "models/players"; void CAssimilateDoc::OnEditBuildDependant() { CModel* curModel = m_modelList; if (curModel) { CString strStartDir = ((CAssimilateApp*)AfxGetApp())->GetQuakeDir(); if (!strStartDir.GetLength()) { // should never happen...
// ErrorBox("Base path not known at this point. Prefs not setup?"); return; } LPCSTR psCurrentGLAName = curModel->GetMakeSkelPath(); if (psCurrentGLAName) { char sCurrentGLAName[1024]; strcpy(sCurrentGLAName,psCurrentGLAName); strMustContainThisGLA = sCurrentGLAName; CBuildAll dlgBuildAll(strInitialBuildPath, gbPreValidate); if (dlgBuildAll.DoModal() == IDOK) { dlgBuildAll.GetData(strInitialBuildPath, gbPreValidate); strStartDir += strInitialBuildPath; strStartDir.Replace("/","\\"); while (strStartDir.Replace("\\\\","\\")){} if (gbPreValidate) { gbQueryGoAhead = false; gpsCARWashDirOverride = (LPCSTR) strStartDir; OnCarWashActual(); gbQueryGoAhead = true; gpsCARWashDirOverride = NULL; if (gbCarwashErrorsOccured) { InfoBox("Build-All aborted because of errors"); return; } } ((CMainFrame*)AfxGetMainWnd())->StatusMessage("Validated Ok, building..."); //////////////////////// largely block-pasted from CarWash.... :-) CWaitCursor wait; strCARsFound.Empty(); iCARsFound = 0; strSkippedDirs.Empty(); strSkippedFiles.Empty(); // build up a list... // R_CheckCARs( strStartDir, 0, strMustContainThisGLA); //bool bBuildListOnly AlphaSortCARs(); // important to do them in alpha-order during build, because of "_humanoid" - type dirs. ((CMainFrame*)AfxGetMainWnd())->StatusMessage("Ready"); // ok, now ready to begin pass 2... // CString strReport; if (!iCARsFound) { ASSERT(0); strReport = "No suitable .CAR files found for processing!\n\n"; if (!strSkippedDirs.IsEmpty()) { strReport+= "Skipped Dirs:\n\n"; strReport+= strSkippedDirs; } if (!strSkippedFiles.IsEmpty()) { strReport+= "\n\nSkipped Files:\n\n"; strReport+= strSkippedFiles; } ErrorBox(strReport); } else { //---------------- gbCarWash_DoingScan = true; strCarWashErrors.Empty(); //---------------- CString strTotalErrors; strReport = "Processed files:\n\n"; for (int i=0; i<iCARsFound; i++) { CString strThisFile = strCARsFound; int iLoc = strThisFile.Find("\n"); if (iLoc>=0) { strThisFile = strThisFile.Left(iLoc); strCARsFound= strCARsFound.Mid(iLoc+1); strReport += strThisFile + "\n"; ((CMainFrame*)AfxGetMainWnd())->StatusMessage(va("Scanning File %d/%d: %s",i+1,iCARsFound,(LPCSTR)strThisFile)); if (1)//FileUsesGLAReference(strThisFile, sCurrentGLAName)) { OnNewDocument(); if (OnOpenDocument_Actual(strThisFile, false)) { if (gbParseError) { strTotalErrors += va("\nParse error in file \"%s\"\n",(LPCSTR)strThisFile); break; } else { m_strPathName = strThisFile; // fucking stupid MFC doesn't set this!!!!!!!!!!!!! bool bSuccess = Build( false, // bool bAllowedToShowSuccessBox, 0, // int iLODLevel true // bool bSkipSave ); if (!strCarWashErrors.IsEmpty()) { // "something is wrong..." :-) // strTotalErrors += va("\nError in file \"%s\":\n\n%s\n",strThisFile,strCarWashErrors); } strCarWashErrors.Empty(); if (!bSuccess) break; } } else { strTotalErrors += va("\nUnable to open file \"%s\"\n",(LPCSTR)strThisFile); break; } } else { // this CAR file doesn't use the current GLA name, so ignore it } } else { ASSERT(0); strThisFile.Insert(0,"I fucked up, the following line didn't seem to have a CR: (tell me! 
-Ste)\n\n"); ErrorBox(strThisFile); } } //---------------- gbCarWash_DoingScan = false; //---------------- OnNewDocument(); // trash whatever was loaded last // strReport = "Processed files:\n\n"; // strReport+= strCARsFound; strReport+= "\n\nSkipped Dirs:\n\n"; strReport+= strSkippedDirs; strReport+= "\n\nSkipped Files:\n\n"; strReport+= strSkippedFiles; if (strTotalErrors.IsEmpty()) { strReport.Insert(0,"(No additional errors found)\n\n"); } else { strReport+= "\n\nAdditional errors will now be sent to Notepad!..."; } InfoBox(strReport); if (!strTotalErrors.IsEmpty()) { strTotalErrors.Insert(0, "The following errors occured during build...\n\n"); SendToNotePad(strTotalErrors, "build_errors.txt"); } } } } else { ErrorBox("Currently loaded model does not make a skeleton, so there are no dependants\n\n\nDUH!!"); } } else { ErrorBox("No model loaded to build dependants of!\n\n\nDUH!!"); } ((CMainFrame*)AfxGetMainWnd())->StatusMessage("Ready"); } void CAssimilateDoc::OnEditBuildall() { // if (!GetYesNo("Safety Feature: Do you really want to rebuild every CAR file in the whole game?")) // return; // validity-check... // CString strStartDir = ((CAssimilateApp*)AfxGetApp())->GetQuakeDir(); if (!strStartDir.GetLength()) { ErrorBox("Base path not known at this point. Prefs not setup?"); return; } CBuildAll dlgBuildAll(strInitialBuildPath, gbPreValidate); if (dlgBuildAll.DoModal() == IDOK) { dlgBuildAll.GetData(strInitialBuildPath, gbPreValidate); strStartDir += strInitialBuildPath; strStartDir.Replace("/","\\"); while (strStartDir.Replace("\\\\","\\")){} if (gbPreValidate) { gbQueryGoAhead = false; gpsCARWashDirOverride = (LPCSTR) strStartDir; OnCarWash(); gbQueryGoAhead = true; gpsCARWashDirOverride = NULL; if (gbCarwashErrorsOccured) { InfoBox("Build-All aborted because of errors"); return; } } ((CMainFrame*)AfxGetMainWnd())->StatusMessage("Validated Ok, building..."); //////////////////////// largely block-pasted from CarWash.... :-) CWaitCursor wait; strCARsFound.Empty(); iCARsFound = 0; strSkippedDirs.Empty(); strSkippedFiles.Empty(); // build up a list... // R_CheckCARs( strStartDir, 0, "" ); //bool bBuildListOnly AlphaSortCARs(); // important to do them in alpha-order during build, because of "_humanoid" - type dirs. ((CMainFrame*)AfxGetMainWnd())->StatusMessage("Ready"); // ok, now ready to begin pass 2... // CString strReport; if (!iCARsFound) { ASSERT(0); strReport = "No suitable .CAR files found for processing!\n\n"; if (!strSkippedDirs.IsEmpty()) { strReport+= "Skipped Dirs:\n\n"; strReport+= strSkippedDirs; } if (!strSkippedFiles.IsEmpty()) { strReport+= "\n\nSkipped Files:\n\n"; strReport+= strSkippedFiles; } ErrorBox(strReport); } else { //---------------- gbCarWash_DoingScan = true; strCarWashErrors.Empty(); //---------------- CString strTotalErrors; strReport = "Processed files:\n\n"; for (int i=0; i<iCARsFound; i++) { CString strThisFile = strCARsFound; int iLoc = strThisFile.Find("\n"); if (iLoc>=0) { strThisFile = strThisFile.Left(iLoc); strCARsFound= strCARsFound.Mid(iLoc+1); strReport += strThisFile + "\n"; ((CMainFrame*)AfxGetMainWnd())->StatusMessage(va("Scanning File %d/%d: %s",i+1,iCARsFound,(LPCSTR)strThisFile)); OnNewDocument(); if (OnOpenDocument_Actual(strThisFile, false)) { if (gbParseError) { strTotalErrors += va("\nParse error in file \"%s\"\n",(LPCSTR)strThisFile); break; } else { m_strPathName = strThisFile; // fucking stupid MFC doesn't set this!!!!!!!!!!!!! 
bool bSuccess = Build( false, // bool bAllowedToShowSuccessBox, 0, // int iLODLevel true // bool bSkipSave ); if (!strCarWashErrors.IsEmpty()) { // "something is wrong..." :-) // strTotalErrors += va("\nError in file \"%s\":\n\n%s\n",strThisFile,strCarWashErrors); } strCarWashErrors.Empty(); if (!bSuccess) break; } } else { strTotalErrors += va("\nUnable to open file \"%s\"\n",(LPCSTR)strThisFile); break; } } else { ASSERT(0); strThisFile.Insert(0,"I fucked up, the following line didn't seem to have a CR: (tell me! -Ste)\n\n"); ErrorBox(strThisFile); } } //---------------- gbCarWash_DoingScan = false; //---------------- OnNewDocument(); // trash whatever was loaded last // strReport = "Processed files:\n\n"; // strReport+= strCARsFound; strReport+= "\n\nSkipped Dirs:\n\n"; strReport+= strSkippedDirs; strReport+= "\n\nSkipped Files:\n\n"; strReport+= strSkippedFiles; if (strTotalErrors.IsEmpty()) { strReport.Insert(0,"(No additional errors found)\n\n"); } else { strReport+= "\n\nAdditional errors will now be sent to Notepad!..."; } InfoBox(strReport); if (!strTotalErrors.IsEmpty()) { strTotalErrors.Insert(0, "The following errors occurred during build...\n\n"); SendToNotePad(strTotalErrors, "build_errors.txt"); } } } ((CMainFrame*)AfxGetMainWnd())->StatusMessage("Ready"); } void SS_DisposingOfCurrent(LPCSTR psFileName, bool bDirty) { if (psFileName[0]) { LPCSTR filename = psFileName; // compile laziness #undef Sys_Printf #define Sys_Printf(blah) if ( SS_FunctionsAvailable() ) { if ( SS_IsUnderSourceControl( filename ) ) { if ( SS_IsCheckedOutByMe( filename )) { if (bDirty) { // if 'need_save' then the user has clicked ok-to-lose-changes, so... // if ( GetYesNo( va("Since you've decided to lose changes on the file:\n\n\"%s\"\n\n...do you want to Undo Checkout as well?",filename))) { if (SS_UndoCheckOut( filename )) { Sys_Printf ("(Undo Checkout performed on map)\n"); } else { ErrorBox("Undo Checkout failed!\n"); } } } else { // if !'need_save' here then the user has saved this out already, so prompt for check in... // if ( GetYesNo( va("Since you've finished with the file:\n\n\"%s\"\n\n...do you want to do a Check In?",filename))) { if ( SS_CheckIn( filename )) { //Sys_Printf ("(CheckIn performed on map)\n"); } else { ErrorBox("CheckIn failed!\n"); } } } } } } } } BOOL CAssimilateDoc::OnOpenDocument_Actual(LPCTSTR lpszPathName, bool bCheckOut) { SS_DisposingOfCurrent(m_strPathName, !!IsModified()); if (bCheckOut) { // checkout the new file? LPCSTR filename = lpszPathName; // compile-laziness :-) if ( SS_FunctionsAvailable() ) { if ( SS_IsUnderSourceControl( filename ) ) { if ( SS_IsCheckedOut( filename )) { if ( !SS_IsCheckedOutByMe( filename )) { CString strCheckOuts; int iCount; if (SS_ListCheckOuts( filename, strCheckOuts, iCount )) { if (!GetYesNo( va("Warning: File \"%s\" is checked out by:\n\n%s\n... Continue loading?
",filename,(LPCSTR) strCheckOuts))) { return FALSE; } } } else { //InfoBox ("(You own this file under SourceSafe)\n"); } } else { if ( GetYesNo( va("The file \"%s\"\n\n...is under SourceSafe control, check it out now?",filename) )) { if (SS_CheckOut( filename )) { //InfoBox ("(File checked out ok)\n"); } else { WarningBox( va("( Problem encountered during check out of file \"%s\" )",filename) ); } } } } else { //InfoBox ("(This file is not under SourceSafe control)\n"); } } } if (!CDocument::OnOpenDocument(lpszPathName)) return FALSE; return TRUE; } BOOL CAssimilateDoc::OnOpenDocument(LPCTSTR lpszPathName) { return OnOpenDocument_Actual(lpszPathName, true) ; } void CAssimilateDoc::OnCloseDocument() { SS_DisposingOfCurrent(m_strPathName, !!IsModified()); CDocument::OnCloseDocument(); } void CAssimilateDoc::OnViewFramedetailsonadditionalsequences() { gbViewFrameDetails_Additional = !gbViewFrameDetails_Additional; UpdateAllViews(NULL, AS_FILESUPDATED, NULL); } void CAssimilateDoc::OnUpdateViewFramedetailsonadditionalsequences(CCmdUI* pCmdUI) { pCmdUI->SetCheck(gbViewFrameDetails_Additional); } void CAssimilateDoc::OnUpdateEditBuilddependant(CCmdUI* pCmdUI) { pCmdUI->Enable(!!GetNumModels()); } bool RunApp(LPCSTR psAppCommand) { CString strExec = psAppCommand; // eg "start q:\\bin_nt\\behaved.exe"; strExec.Replace("/","\\"); // otherwise it only works under NT... char sBatchFilename[512]; GetTempPath(sizeof(sBatchFilename), sBatchFilename); strcat(sBatchFilename,"~temp.bat"); FILE *handle = fopen(sBatchFilename,"wt"); fprintf(handle,strExec); fprintf(handle,"\n"); fclose(handle); STARTUPINFO startupinfo; PROCESS_INFORMATION ProcessInformation; GetStartupInfo (&startupinfo); BOOL ret = CreateProcess(sBatchFilename, //batpath, // pointer to name of executable module NULL, // pointer to command line string NULL, // pointer to process security attributes NULL, // pointer to thread security attributes FALSE, // handle inheritance flag 0 /*DETACHED_PROCESS*/, // creation flags NULL, // pointer to new environment block NULL, // pointer to current directory name &startupinfo, // pointer to STARTUPINFO &ProcessInformation // pointer to PROCESS_INFORMATION ); // remove(sBatchFilename); // if you do this, the CreateProcess fails, presumably it needs it for a few seconds return !!ret; } void CAssimilateDoc::OnEditLaunchmodviewoncurrent() { char sExecString[MAX_PATH]; sprintf(sExecString,"start %s.glm",Filename_WithoutExt(m_strPathName)); if (RunApp(sExecString)) { // ok... // } else { ErrorBox(va("CreateProcess() call \"%s\" failed!\n\n(let me know about this -Ste)",sExecString)); } } void CAssimilateDoc::OnUpdateEditLaunchmodviewoncurrent(CCmdUI* pCmdUI) { pCmdUI->Enable(!!GetNumModels()); }
gpl-2.0
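The Build() routine in the file above drives QData through the classic Win32 spawn-and-wait sequence: CreateProcess with a writable command-line buffer, WaitForSingleObject on the process handle, then GetExitCodeProcess to decide whether the build failed. A minimal self-contained sketch of that pattern follows; the command line is a hypothetical placeholder rather than the tool's real invocation, and unlike the original it also closes pi.hThread, which Build() leaves open (a small handle leak).

#include <windows.h>
#include <stdio.h>

/* Sketch only: spawn a child process, block until it exits, and return its
   exit code, mirroring the CreateProcess / WaitForSingleObject /
   GetExitCodeProcess flow in Build(). */
static DWORD SpawnAndWait(char *pszCmdLine)  /* buffer must be writable for CreateProcess */
{
    STARTUPINFO si;
    PROCESS_INFORMATION pi;
    DWORD dwExit = (DWORD)-1;

    ZeroMemory(&si, sizeof(si));
    si.cb = sizeof(si);

    if (CreateProcess(NULL, pszCmdLine, NULL, NULL, FALSE,
                      CREATE_NEW_CONSOLE, NULL, NULL, &si, &pi))
    {
        WaitForSingleObject(pi.hProcess, INFINITE);  /* wait for the child */
        GetExitCodeProcess(pi.hProcess, &dwExit);    /* 0 == success by convention */
        CloseHandle(pi.hThread);                     /* Build() omits this one */
        CloseHandle(pi.hProcess);
    }
    return dwExit;
}

int main(void)
{
    char szCmd[] = "qdata.exe -silent example.qdt";  /* hypothetical command line */
    printf("child returned %lu\n", (unsigned long)SpawnAndWait(szCmd));
    return 0;
}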
AndreyPopovNew/asuswrt-merlin-rt-n
release/src/router/gdb/bfd/pe-mcore.c
27
1276
/* BFD back-end for MCore PECOFF files. Copyright 1999, 2002, 2007 Free Software Foundation, Inc. This file is part of BFD, the Binary File Descriptor library. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ #include "sysdep.h" #include "bfd.h" #ifndef TARGET_BIG_SYM #define TARGET_BIG_SYM mcore_pe_big_vec #define TARGET_BIG_NAME "pe-mcore-big" #define TARGET_LITTLE_SYM mcore_pe_little_vec #define TARGET_LITTLE_NAME "pe-mcore-little" #endif #define COFF_WITH_PE #define PCRELOFFSET TRUE #define COFF_LONG_SECTION_NAMES #define MCORE_PE #include "coff-mcore.c"
gpl-2.0
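pe-mcore.c above is a complete example of BFD's thin target-wrapper convention: the file contains no logic of its own, only the target vector names plus the PE/COFF feature macros, and then #includes the shared machine back-end, which expands differently depending on those macros. A hypothetical wrapper for some other COFF target would follow the same shape; every name below is invented for illustration and is not a real BFD symbol:

/* Hypothetical thin BFD wrapper, illustrating the macro-then-include
   pattern used by pe-mcore.c above. */
#define TARGET_BIG_SYM     example_pe_big_vec     /* invented vector names */
#define TARGET_BIG_NAME    "pe-example-big"
#define TARGET_LITTLE_SYM  example_pe_little_vec
#define TARGET_LITTLE_NAME "pe-example-little"

#define COFF_WITH_PE              /* emit PE-flavoured COFF */
#define PCRELOFFSET TRUE          /* pc-relative relocs carry the offset */
#define COFF_LONG_SECTION_NAMES   /* permit section names longer than 8 chars */

#include "coff-example.c"         /* hypothetical shared machine back-end */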
axDev-toolchain/gcc-4.8
libgfortran/generated/rrspacing_r4.c
27
1645
/* Implementation of the RRSPACING intrinsic Copyright (C) 2006-2013 Free Software Foundation, Inc. Contributed by Steven G. Kargl <kargl@gcc.gnu.org> This file is part of the GNU Fortran 95 runtime library (libgfortran). Libgfortran is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. Libgfortran is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ #include "libgfortran.h" #define MATHFUNC(funcname) funcname ## f #if defined (HAVE_GFC_REAL_4) && defined (HAVE_FABSF) && defined (HAVE_FREXPF) extern GFC_REAL_4 rrspacing_r4 (GFC_REAL_4 s, int p); export_proto(rrspacing_r4); GFC_REAL_4 rrspacing_r4 (GFC_REAL_4 s, int p) { int e; GFC_REAL_4 x; x = MATHFUNC(fabs) (s); if (x == 0.) return 0.; MATHFUNC(frexp) (s, &e); #if defined (HAVE_LDEXPF) return MATHFUNC(ldexp) (x, p - e); #else return MATHFUNC(scalbn) (x, p - e); #endif } #endif
gpl-2.0
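rrspacing_r4() above implements Fortran's RRSPACING(X) for REAL(4): decompose X as m * 2**e with frexpf, then scale |X| by 2**(p-e), where p = 24 is the single-precision model precision, giving |m| * 2**p. A small stand-alone C check of that identity (the test value and the precision constant are example inputs only):

#include <math.h>
#include <stdio.h>

/* Reference computation matching rrspacing_r4 above:
   ldexpf(fabsf(s), p - e), with e taken from frexpf. */
static float rrspacing_ref(float s, int p)
{
    int e;
    float x = fabsf(s);
    if (x == 0.0f)
        return 0.0f;
    frexpf(s, &e);            /* s == m * 2^e, with 0.5 <= |m| < 1 */
    return ldexpf(x, p - e);  /* == |m| * 2^p */
}

int main(void)
{
    /* For s = 1.0: m = 0.5, e = 1, so RRSPACING = 0.5 * 2^24 = 8388608 */
    printf("%.1f\n", rrspacing_ref(1.0f, 24));
    return 0;
}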
emceethemouth/kernel_android
mm/mempolicy.c
27
73053
/* * Simple NUMA memory policy for the Linux kernel. * * Copyright 2003,2004 Andi Kleen, SuSE Labs. * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc. * Subject to the GNU Public License, version 2. * * NUMA policy allows the user to give hints in which node(s) memory should * be allocated. * * Support four policies per VMA and per process: * * The VMA policy has priority over the process policy for a page fault. * * interleave Allocate memory interleaved over a set of nodes, * with normal fallback if it fails. * For VMA based allocations this interleaves based on the * offset into the backing object or offset into the mapping * for anonymous memory. For process policy a process counter * is used. * * bind Only allocate memory on a specific set of nodes, * no fallback. * FIXME: memory is allocated starting with the first node * to the last. It would be better if bind would truly restrict * the allocation to memory nodes instead * * preferred Try a specific node first before normal fallback. * As a special case NUMA_NO_NODE here means do the allocation * on the local CPU. This is normally identical to default, * but useful to set in a VMA when you have a non default * process policy. * * default Allocate on the local node first, or when on a VMA * use the process policy. This is what Linux always did * in a NUMA aware kernel and still does by, ahem, default. * * The process policy is applied for most non interrupt memory allocations * in that process' context. Interrupts ignore the policies and always * try to allocate on the local CPU. The VMA policy is only applied for memory * allocations for a VMA in the VM. * * Currently there are a few corner cases in swapping where the policy * is not applied, but the majority should be handled. When process policy * is used it is not remembered over swap outs/swap ins. * * Only the highest zone in the zone hierarchy gets policied. Allocations * requesting a lower zone just use default policy. This implies that * on systems with highmem kernel lowmem allocation don't get policied. * Same with GFP_DMA allocations. * * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between * all users and remembered even when nobody has memory mapped. */ /* Notebook: fix mmap readahead to honour policy and enable policy for any page cache object statistics for bigpages global policy for page cache? currently it uses process policy. Requires first item above. handle mremap for shared memory (currently ignored for the policy) grows down? make bind policy root only? It can trigger oom much faster and the kernel is not always grateful with that.
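* * (Illustrative aside, added for clarity and not part of the original source: userspace reaches the policies described above through the set_mempolicy(2) and mbind(2) syscalls, e.g. via libnuma's <numaif.h>: unsigned long mask = (1UL << 0) | (1UL << 1); set_mempolicy(MPOL_INTERLEAVE, &mask, 8 * sizeof(mask) + 1); would interleave the calling task's future allocations across nodes 0 and 1; the node mask and maxnode value here are example inputs only.)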
*/ #include <linux/mempolicy.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/hugetlb.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/nodemask.h> #include <linux/cpuset.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/export.h> #include <linux/nsproxy.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/compat.h> #include <linux/swap.h> #include <linux/seq_file.h> #include <linux/proc_fs.h> #include <linux/migrate.h> #include <linux/ksm.h> #include <linux/rmap.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/ctype.h> #include <linux/mm_inline.h> #include <linux/mmu_notifier.h> #include <asm/tlbflush.h> #include <asm/uaccess.h> #include <linux/random.h> #include "internal.h" /* Internal flags */ #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */ #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */ static struct kmem_cache *policy_cache; static struct kmem_cache *sn_cache; /* Highest zone. A specific allocation for a zone below that is not policied. */ enum zone_type policy_zone = 0; /* * run-time system-wide default policy => local allocation */ static struct mempolicy default_policy = { .refcnt = ATOMIC_INIT(1), /* never free it */ .mode = MPOL_PREFERRED, .flags = MPOL_F_LOCAL, }; static struct mempolicy preferred_node_policy[MAX_NUMNODES]; static struct mempolicy *get_task_policy(struct task_struct *p) { struct mempolicy *pol = p->mempolicy; int node; if (!pol) { node = numa_node_id(); if (node != NUMA_NO_NODE) pol = &preferred_node_policy[node]; /* preferred_node_policy is not initialised early in boot */ if (!pol->mode) pol = NULL; } return pol; } static const struct mempolicy_operations { int (*create)(struct mempolicy *pol, const nodemask_t *nodes); /* * If read-side task has no lock to protect task->mempolicy, write-side * task will rebind the task->mempolicy by two step. The first step is * setting all the newly nodes, and the second step is cleaning all the * disallowed nodes. In this way, we can avoid finding no node to alloc * page. * If we have a lock to protect task->mempolicy in read-side, we do * rebind directly.
* * step: * MPOL_REBIND_ONCE - do rebind work at once * MPOL_REBIND_STEP1 - set all the newly nodes * MPOL_REBIND_STEP2 - clean all the disallowed nodes */ void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes, enum mpol_rebind_step step); } mpol_ops[MPOL_MAX]; /* Check that the nodemask contains at least one populated zone */ static int is_valid_nodemask(const nodemask_t *nodemask) { return nodes_intersects(*nodemask, node_states[N_MEMORY]); } static inline int mpol_store_user_nodemask(const struct mempolicy *pol) { return pol->flags & MPOL_MODE_FLAGS; } static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig, const nodemask_t *rel) { nodemask_t tmp; nodes_fold(tmp, *orig, nodes_weight(*rel)); nodes_onto(*ret, tmp, *rel); } static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes) { if (nodes_empty(*nodes)) return -EINVAL; pol->v.nodes = *nodes; return 0; } static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes) { if (!nodes) pol->flags |= MPOL_F_LOCAL; /* local allocation */ else if (nodes_empty(*nodes)) return -EINVAL; /* no allowed nodes */ else pol->v.preferred_node = first_node(*nodes); return 0; } static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes) { if (!is_valid_nodemask(nodes)) return -EINVAL; pol->v.nodes = *nodes; return 0; } /* * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if * any, for the new policy. mpol_new() has already validated the nodes * parameter with respect to the policy mode and flags. But, we need to * handle an empty nodemask with MPOL_PREFERRED here. * * Must be called holding task's alloc_lock to protect task's mems_allowed * and mempolicy. May also be called holding the mmap_semaphore for write. */ static int mpol_set_nodemask(struct mempolicy *pol, const nodemask_t *nodes, struct nodemask_scratch *nsc) { int ret; /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */ if (pol == NULL) return 0; /* Check N_MEMORY */ nodes_and(nsc->mask1, cpuset_current_mems_allowed, node_states[N_MEMORY]); VM_BUG_ON(!nodes); if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes)) nodes = NULL; /* explicit local allocation */ else { if (pol->flags & MPOL_F_RELATIVE_NODES) mpol_relative_nodemask(&nsc->mask2, nodes,&nsc->mask1); else nodes_and(nsc->mask2, *nodes, nsc->mask1); if (mpol_store_user_nodemask(pol)) pol->w.user_nodemask = *nodes; else pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed; } if (nodes) ret = mpol_ops[pol->mode].create(pol, &nsc->mask2); else ret = mpol_ops[pol->mode].create(pol, NULL); return ret; } /* * This function just creates a new policy, does some check and simple * initialization. You must invoke mpol_set_nodemask() to set nodes. */ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags, nodemask_t *nodes) { struct mempolicy *policy; pr_debug("setting mode %d flags %d nodes[0] %lx\n", mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE); if (mode == MPOL_DEFAULT) { if (nodes && !nodes_empty(*nodes)) return ERR_PTR(-EINVAL); return NULL; } VM_BUG_ON(!nodes); /* * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation). * All other modes require a valid pointer to a non-empty nodemask. 
*/ if (mode == MPOL_PREFERRED) { if (nodes_empty(*nodes)) { if (((flags & MPOL_F_STATIC_NODES) || (flags & MPOL_F_RELATIVE_NODES))) return ERR_PTR(-EINVAL); } } else if (mode == MPOL_LOCAL) { if (!nodes_empty(*nodes)) return ERR_PTR(-EINVAL); mode = MPOL_PREFERRED; } else if (nodes_empty(*nodes)) return ERR_PTR(-EINVAL); policy = kmem_cache_alloc(policy_cache, GFP_KERNEL); if (!policy) return ERR_PTR(-ENOMEM); atomic_set(&policy->refcnt, 1); policy->mode = mode; policy->flags = flags; return policy; } /* Slow path of a mpol destructor. */ void __mpol_put(struct mempolicy *p) { if (!atomic_dec_and_test(&p->refcnt)) return; kmem_cache_free(policy_cache, p); } static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes, enum mpol_rebind_step step) { } /* * step: * MPOL_REBIND_ONCE - do rebind work at once * MPOL_REBIND_STEP1 - set all the newly nodes * MPOL_REBIND_STEP2 - clean all the disallowed nodes */ static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes, enum mpol_rebind_step step) { nodemask_t tmp; if (pol->flags & MPOL_F_STATIC_NODES) nodes_and(tmp, pol->w.user_nodemask, *nodes); else if (pol->flags & MPOL_F_RELATIVE_NODES) mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); else { /* * if step == 1, we use ->w.cpuset_mems_allowed to cache the * result */ if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) { nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed, *nodes); pol->w.cpuset_mems_allowed = step ? tmp : *nodes; } else if (step == MPOL_REBIND_STEP2) { tmp = pol->w.cpuset_mems_allowed; pol->w.cpuset_mems_allowed = *nodes; } else BUG(); } if (nodes_empty(tmp)) tmp = *nodes; if (step == MPOL_REBIND_STEP1) nodes_or(pol->v.nodes, pol->v.nodes, tmp); else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2) pol->v.nodes = tmp; else BUG(); if (!node_isset(current->il_next, tmp)) { current->il_next = next_node(current->il_next, tmp); if (current->il_next >= MAX_NUMNODES) current->il_next = first_node(tmp); if (current->il_next >= MAX_NUMNODES) current->il_next = numa_node_id(); } } static void mpol_rebind_preferred(struct mempolicy *pol, const nodemask_t *nodes, enum mpol_rebind_step step) { nodemask_t tmp; if (pol->flags & MPOL_F_STATIC_NODES) { int node = first_node(pol->w.user_nodemask); if (node_isset(node, *nodes)) { pol->v.preferred_node = node; pol->flags &= ~MPOL_F_LOCAL; } else pol->flags |= MPOL_F_LOCAL; } else if (pol->flags & MPOL_F_RELATIVE_NODES) { mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); pol->v.preferred_node = first_node(tmp); } else if (!(pol->flags & MPOL_F_LOCAL)) { pol->v.preferred_node = node_remap(pol->v.preferred_node, pol->w.cpuset_mems_allowed, *nodes); pol->w.cpuset_mems_allowed = *nodes; } } /* * mpol_rebind_policy - Migrate a policy to a different set of nodes * * If read-side task has no lock to protect task->mempolicy, write-side * task will rebind the task->mempolicy by two step. The first step is * setting all the newly nodes, and the second step is cleaning all the * disallowed nodes. In this way, we can avoid finding no node to alloc * page. * If we have a lock to protect task->mempolicy in read-side, we do * rebind directly. 
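* * (Worked illustration of the lockless two-step described above, added for clarity: rebinding a policy whose nodes are {0,1} to a new allowed set {2,3} first applies MPOL_REBIND_STEP1, which ORs in the remapped nodes so pol->v.nodes becomes {0,1,2,3} and a concurrent reader never sees an empty mask, then MPOL_REBIND_STEP2 drops the now-disallowed old nodes, leaving {2,3}.)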
* * step: * MPOL_REBIND_ONCE - do rebind work at once * MPOL_REBIND_STEP1 - set all the newly nodes * MPOL_REBIND_STEP2 - clean all the disallowed nodes */ static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask, enum mpol_rebind_step step) { if (!pol) return; if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE && nodes_equal(pol->w.cpuset_mems_allowed, *newmask)) return; if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING)) return; if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING)) BUG(); if (step == MPOL_REBIND_STEP1) pol->flags |= MPOL_F_REBINDING; else if (step == MPOL_REBIND_STEP2) pol->flags &= ~MPOL_F_REBINDING; else if (step >= MPOL_REBIND_NSTEP) BUG(); mpol_ops[pol->mode].rebind(pol, newmask, step); } /* * Wrapper for mpol_rebind_policy() that just requires task * pointer, and updates task mempolicy. * * Called with task's alloc_lock held. */ void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new, enum mpol_rebind_step step) { mpol_rebind_policy(tsk->mempolicy, new, step); } /* * Rebind each vma in mm to new nodemask. * * Call holding a reference to mm. Takes mm->mmap_sem during call. */ void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) { struct vm_area_struct *vma; down_write(&mm->mmap_sem); for (vma = mm->mmap; vma; vma = vma->vm_next) mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE); up_write(&mm->mmap_sem); } static const struct mempolicy_operations mpol_ops[MPOL_MAX] = { [MPOL_DEFAULT] = { .rebind = mpol_rebind_default, }, [MPOL_INTERLEAVE] = { .create = mpol_new_interleave, .rebind = mpol_rebind_nodemask, }, [MPOL_PREFERRED] = { .create = mpol_new_preferred, .rebind = mpol_rebind_preferred, }, [MPOL_BIND] = { .create = mpol_new_bind, .rebind = mpol_rebind_nodemask, }, }; static void migrate_page_add(struct page *page, struct list_head *pagelist, unsigned long flags); /* Scan through pages checking if pages follow certain conditions. */ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) { pte_t *orig_pte; pte_t *pte; spinlock_t *ptl; orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); do { struct page *page; int nid; if (!pte_present(*pte)) continue; page = vm_normal_page(vma, addr, *pte); if (!page) continue; /* * vm_normal_page() filters out zero pages, but there might * still be PageReserved pages to skip, perhaps in a VDSO. 
*/ if (PageReserved(page)) continue; nid = page_to_nid(page); if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT)) continue; if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) migrate_page_add(page, private, flags); else break; } while (pte++, addr += PAGE_SIZE, addr != end); pte_unmap_unlock(orig_pte, ptl); return addr != end; } static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) { pmd_t *pmd; unsigned long next; pmd = pmd_offset(pud, addr); do { next = pmd_addr_end(addr, end); split_huge_page_pmd(vma, addr, pmd); if (pmd_none_or_trans_huge_or_clear_bad(pmd)) continue; if (check_pte_range(vma, pmd, addr, next, nodes, flags, private)) return -EIO; } while (pmd++, addr = next, addr != end); return 0; } static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) { pud_t *pud; unsigned long next; pud = pud_offset(pgd, addr); do { next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) continue; if (check_pmd_range(vma, pud, addr, next, nodes, flags, private)) return -EIO; } while (pud++, addr = next, addr != end); return 0; } static inline int check_pgd_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) { pgd_t *pgd; unsigned long next; pgd = pgd_offset(vma->vm_mm, addr); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) continue; if (check_pud_range(vma, pgd, addr, next, nodes, flags, private)) return -EIO; } while (pgd++, addr = next, addr != end); return 0; } #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE /* * This is used to mark a range of virtual addresses to be inaccessible. * These are later cleared by a NUMA hinting fault. Depending on these * faults, pages may be migrated for better NUMA placement. * * This is assuming that NUMA faults are handled using PROT_NONE. If * an architecture makes a different choice, it will need further * changes to the core. */ unsigned long change_prot_numa(struct vm_area_struct *vma, unsigned long addr, unsigned long end) { int nr_updated; BUILD_BUG_ON(_PAGE_NUMA != _PAGE_PROTNONE); nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1); if (nr_updated) count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated); return nr_updated; } #else static unsigned long change_prot_numa(struct vm_area_struct *vma, unsigned long addr, unsigned long end) { return 0; } #endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */ /* * Check if all pages in a range are on a set of nodes. * If pagelist != NULL then isolate pages from the LRU and * put them on the pagelist. 
*/ static struct vm_area_struct * check_range(struct mm_struct *mm, unsigned long start, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) { int err; struct vm_area_struct *first, *vma, *prev; first = find_vma(mm, start); if (!first) return ERR_PTR(-EFAULT); prev = NULL; for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) { unsigned long endvma = vma->vm_end; if (endvma > end) endvma = end; if (vma->vm_start > start) start = vma->vm_start; if (!(flags & MPOL_MF_DISCONTIG_OK)) { if (!vma->vm_next && vma->vm_end < end) return ERR_PTR(-EFAULT); if (prev && prev->vm_end < vma->vm_start) return ERR_PTR(-EFAULT); } if (is_vm_hugetlb_page(vma)) goto next; if (flags & MPOL_MF_LAZY) { change_prot_numa(vma, start, endvma); goto next; } if ((flags & MPOL_MF_STRICT) || ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) && vma_migratable(vma))) { err = check_pgd_range(vma, start, endvma, nodes, flags, private); if (err) { first = ERR_PTR(err); break; } } next: prev = vma; } return first; } /* * Apply policy to a single VMA * This must be called with the mmap_sem held for writing. */ static int vma_replace_policy(struct vm_area_struct *vma, struct mempolicy *pol) { int err; struct mempolicy *old; struct mempolicy *new; pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n", vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_ops, vma->vm_file, vma->vm_ops ? vma->vm_ops->set_policy : NULL); new = mpol_dup(pol); if (IS_ERR(new)) return PTR_ERR(new); if (vma->vm_ops && vma->vm_ops->set_policy) { err = vma->vm_ops->set_policy(vma, new); if (err) goto err_out; } old = vma->vm_policy; vma->vm_policy = new; /* protected by mmap_sem */ mpol_put(old); return 0; err_out: mpol_put(new); return err; } /* Step 2: apply policy to a range and do splits. */ static int mbind_range(struct mm_struct *mm, unsigned long start, unsigned long end, struct mempolicy *new_pol) { struct vm_area_struct *next; struct vm_area_struct *prev; struct vm_area_struct *vma; int err = 0; pgoff_t pgoff; unsigned long vmstart; unsigned long vmend; vma = find_vma(mm, start); if (!vma || vma->vm_start > start) return -EFAULT; prev = vma->vm_prev; if (start > vma->vm_start) prev = vma; for (; vma && vma->vm_start < end; prev = vma, vma = next) { next = vma->vm_next; vmstart = max(start, vma->vm_start); vmend = min(end, vma->vm_end); if (mpol_equal(vma_policy(vma), new_pol)) continue; pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT); prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, vma->anon_vma, vma->vm_file, pgoff, new_pol, vma_get_anon_name(vma)); if (prev) { vma = prev; next = vma->vm_next; continue; } if (vma->vm_start != vmstart) { err = split_vma(vma->vm_mm, vma, vmstart, 1); if (err) goto out; } if (vma->vm_end != vmend) { err = split_vma(vma->vm_mm, vma, vmend, 0); if (err) goto out; } err = vma_replace_policy(vma, new_pol); if (err) goto out; } out: return err; } /* * Update task->flags PF_MEMPOLICY bit: set iff non-default * mempolicy. Allows more rapid checking of this (combined perhaps * with other PF_* flag bits) on memory allocation hot code paths. * * If called from outside this file, the task 'p' should -only- be * a newly forked child not yet visible on the task list, because * manipulating the task flags of a visible task is not safe. * * The above limitation is why this routine has the funny name * mpol_fix_fork_child_flag(). 
* * It is also safe to call this with a task pointer of current, * which the static wrapper mpol_set_task_struct_flag() does, * for use within this file. */ void mpol_fix_fork_child_flag(struct task_struct *p) { if (p->mempolicy) p->flags |= PF_MEMPOLICY; else p->flags &= ~PF_MEMPOLICY; } static void mpol_set_task_struct_flag(void) { mpol_fix_fork_child_flag(current); } /* Set the process memory policy */ static long do_set_mempolicy(unsigned short mode, unsigned short flags, nodemask_t *nodes) { struct mempolicy *new, *old; struct mm_struct *mm = current->mm; NODEMASK_SCRATCH(scratch); int ret; if (!scratch) return -ENOMEM; new = mpol_new(mode, flags, nodes); if (IS_ERR(new)) { ret = PTR_ERR(new); goto out; } /* * prevent changing our mempolicy while show_numa_maps() * is using it. * Note: do_set_mempolicy() can be called at init time * with no 'mm'. */ if (mm) down_write(&mm->mmap_sem); task_lock(current); ret = mpol_set_nodemask(new, nodes, scratch); if (ret) { task_unlock(current); if (mm) up_write(&mm->mmap_sem); mpol_put(new); goto out; } old = current->mempolicy; current->mempolicy = new; mpol_set_task_struct_flag(); if (new && new->mode == MPOL_INTERLEAVE && nodes_weight(new->v.nodes)) current->il_next = first_node(new->v.nodes); task_unlock(current); if (mm) up_write(&mm->mmap_sem); mpol_put(old); ret = 0; out: NODEMASK_SCRATCH_FREE(scratch); return ret; } /* * Return nodemask for policy for get_mempolicy() query * * Called with task's alloc_lock held */ static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes) { nodes_clear(*nodes); if (p == &default_policy) return; switch (p->mode) { case MPOL_BIND: /* Fall through */ case MPOL_INTERLEAVE: *nodes = p->v.nodes; break; case MPOL_PREFERRED: if (!(p->flags & MPOL_F_LOCAL)) node_set(p->v.preferred_node, *nodes); /* else return empty node mask for local allocation */ break; default: BUG(); } } static int lookup_node(struct mm_struct *mm, unsigned long addr) { struct page *p; int err; err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL); if (err >= 0) { err = page_to_nid(p); put_page(p); } return err; } /* Retrieve NUMA policy */ static long do_get_mempolicy(int *policy, nodemask_t *nmask, unsigned long addr, unsigned long flags) { int err; struct mm_struct *mm = current->mm; struct vm_area_struct *vma = NULL; struct mempolicy *pol = current->mempolicy; if (flags & ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED)) return -EINVAL; if (flags & MPOL_F_MEMS_ALLOWED) { if (flags & (MPOL_F_NODE|MPOL_F_ADDR)) return -EINVAL; *policy = 0; /* just so it's initialized */ task_lock(current); *nmask = cpuset_current_mems_allowed; task_unlock(current); return 0; } if (flags & MPOL_F_ADDR) { /* * Do NOT fall back to task policy if the * vma/shared policy at addr is NULL. We * want to return MPOL_DEFAULT in this case. */ down_read(&mm->mmap_sem); vma = find_vma_intersection(mm, addr, addr+1); if (!vma) { up_read(&mm->mmap_sem); return -EFAULT; } if (vma->vm_ops && vma->vm_ops->get_policy) pol = vma->vm_ops->get_policy(vma, addr); else pol = vma->vm_policy; } else if (addr) return -EINVAL; if (!pol) pol = &default_policy; /* indicates default behavior */ if (flags & MPOL_F_NODE) { if (flags & MPOL_F_ADDR) { err = lookup_node(mm, addr); if (err < 0) goto out; *policy = err; } else if (pol == current->mempolicy && pol->mode == MPOL_INTERLEAVE) { *policy = current->il_next; } else { err = -EINVAL; goto out; } } else { *policy = pol == &default_policy ? 
MPOL_DEFAULT : pol->mode; /* * Internal mempolicy flags must be masked off before exposing * the policy to userspace. */ *policy |= (pol->flags & MPOL_MODE_FLAGS); } if (vma) { up_read(&current->mm->mmap_sem); vma = NULL; } err = 0; if (nmask) { if (mpol_store_user_nodemask(pol)) { *nmask = pol->w.user_nodemask; } else { task_lock(current); get_policy_nodemask(pol, nmask); task_unlock(current); } } out: mpol_cond_put(pol); if (vma) up_read(&current->mm->mmap_sem); return err; } #ifdef CONFIG_MIGRATION /* * page migration */ static void migrate_page_add(struct page *page, struct list_head *pagelist, unsigned long flags) { /* * Avoid migrating a page that is shared with others. */ if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) { if (!isolate_lru_page(page)) { list_add_tail(&page->lru, pagelist); inc_zone_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page)); } } } static struct page *new_node_page(struct page *page, unsigned long node, int **x) { return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0); } /* * Migrate pages from one node to a target node. * Returns error or the number of pages not migrated. */ static int migrate_to_node(struct mm_struct *mm, int source, int dest, int flags) { nodemask_t nmask; LIST_HEAD(pagelist); int err = 0; nodes_clear(nmask); node_set(source, nmask); /* * This does not "check" the range but isolates all pages that * need migration. Between passing in the full user address * space range and MPOL_MF_DISCONTIG_OK, this call can not fail. */ VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))); check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask, flags | MPOL_MF_DISCONTIG_OK, &pagelist); if (!list_empty(&pagelist)) { err = migrate_pages(&pagelist, new_node_page, dest, MIGRATE_SYNC, MR_SYSCALL); if (err) putback_lru_pages(&pagelist); } return err; } /* * Move pages between the two nodesets so as to preserve the physical * layout as much as possible. * * Returns the number of pages that could not be moved. */ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, const nodemask_t *to, int flags) { int busy = 0; int err; nodemask_t tmp; err = migrate_prep(); if (err) return err; down_read(&mm->mmap_sem); err = migrate_vmas(mm, from, to, flags); if (err) goto out; /* * Find a 'source' bit set in 'tmp' whose corresponding 'dest' * bit in 'to' is not also set in 'tmp'. Clear the found 'source' * bit in 'tmp', and return that <source, dest> pair for migration. * The pair of nodemasks 'to' and 'from' define the map. * * If no pair of bits is found that way, fallback to picking some * pair of 'source' and 'dest' bits that are not the same. If the * 'source' and 'dest' bits are the same, this represents a node * that will be migrating to itself, so no pages need move. * * If no bits are left in 'tmp', or if all remaining bits left * in 'tmp' correspond to the same bit in 'to', return false * (nothing left to migrate). * * This lets us pick a pair of nodes to migrate between, such that * if possible the dest node is not already occupied by some other * source node, minimizing the risk of overloading the memory on a * node that would happen if we migrated incoming memory to a node * before migrating outgoing memory source that same node. * * A single scan of tmp is sufficient. As we go, we remember the * most recent <s, d> pair that moved (s != d). If we find a pair * that not only moved, but what's better, moved to an empty slot * (d is not set in tmp), then we break out then, with that pair.
* Otherwise when we finish scanning tmp, we at least have the * most recent <s, d> pair that moved. If we get all the way through * the scan of tmp without finding any node that moved, much less * moved to an empty node, then there is nothing left worth migrating. */ tmp = *from; while (!nodes_empty(tmp)) { int s, d; int source = -1; int dest = 0; for_each_node_mask(s, tmp) { /* * do_migrate_pages() tries to maintain the relative * node relationship of the pages established between * threads and memory areas. * * However, if the number of source nodes is not equal to * the number of destination nodes we cannot preserve * this node relative relationship. In that case, skip * copying memory from a node that is in the destination * mask. * * Example: [2,3,4] -> [3,4,5] moves everything. * [0-7] -> [3,4,5] moves only 0,1,2,6,7. */ if ((nodes_weight(*from) != nodes_weight(*to)) && (node_isset(s, *to))) continue; d = node_remap(s, *from, *to); if (s == d) continue; source = s; /* Node moved. Memorize */ dest = d; /* dest not in remaining from nodes? */ if (!node_isset(dest, tmp)) break; } if (source == -1) break; node_clear(source, tmp); err = migrate_to_node(mm, source, dest, flags); if (err > 0) busy += err; if (err < 0) break; } out: up_read(&mm->mmap_sem); if (err < 0) return err; return busy; } /* * Allocate a new page for page migration based on vma policy. * Start assuming that page is mapped by vma pointed to by @private. * Search forward from there, if not. N.B., this assumes that the * list of pages handed to migrate_pages()--which is how we get here-- * is in virtual address order. */ static struct page *new_vma_page(struct page *page, unsigned long private, int **x) { struct vm_area_struct *vma = (struct vm_area_struct *)private; unsigned long uninitialized_var(address); while (vma) { address = page_address_in_vma(page, vma); if (address != -EFAULT) break; vma = vma->vm_next; } /* * if !vma, alloc_page_vma() will use task or system default policy */ return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); } #else static void migrate_page_add(struct page *page, struct list_head *pagelist, unsigned long flags) { } int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, const nodemask_t *to, int flags) { return -ENOSYS; } static struct page *new_vma_page(struct page *page, unsigned long private, int **x) { return NULL; } #endif static long do_mbind(unsigned long start, unsigned long len, unsigned short mode, unsigned short mode_flags, nodemask_t *nmask, unsigned long flags) { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; struct mempolicy *new; unsigned long end; int err; LIST_HEAD(pagelist); if (flags & ~(unsigned long)MPOL_MF_VALID) return -EINVAL; if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) return -EPERM; if (start & ~PAGE_MASK) return -EINVAL; if (mode == MPOL_DEFAULT) flags &= ~MPOL_MF_STRICT; len = (len + PAGE_SIZE - 1) & PAGE_MASK; end = start + len; if (end < start) return -EINVAL; if (end == start) return 0; new = mpol_new(mode, mode_flags, nmask); if (IS_ERR(new)) return PTR_ERR(new); if (flags & MPOL_MF_LAZY) new->flags |= MPOL_F_MOF; /* * If we are using the default policy then operation * on discontinuous address spaces is okay after all */ if (!new) flags |= MPOL_MF_DISCONTIG_OK; pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", start, start + len, mode, mode_flags, nmask ?
nodes_addr(*nmask)[0] : NUMA_NO_NODE); if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { err = migrate_prep(); if (err) goto mpol_out; } { NODEMASK_SCRATCH(scratch); if (scratch) { down_write(&mm->mmap_sem); task_lock(current); err = mpol_set_nodemask(new, nmask, scratch); task_unlock(current); if (err) up_write(&mm->mmap_sem); } else err = -ENOMEM; NODEMASK_SCRATCH_FREE(scratch); } if (err) goto mpol_out; vma = check_range(mm, start, end, nmask, flags | MPOL_MF_INVERT, &pagelist); err = PTR_ERR(vma); /* maybe ... */ if (!IS_ERR(vma)) err = mbind_range(mm, start, end, new); if (!err) { int nr_failed = 0; if (!list_empty(&pagelist)) { WARN_ON_ONCE(flags & MPOL_MF_LAZY); nr_failed = migrate_pages(&pagelist, new_vma_page, (unsigned long)vma, MIGRATE_SYNC, MR_MEMPOLICY_MBIND); if (nr_failed) putback_lru_pages(&pagelist); } if (nr_failed && (flags & MPOL_MF_STRICT)) err = -EIO; } else putback_lru_pages(&pagelist); up_write(&mm->mmap_sem); mpol_out: mpol_put(new); return err; } /* * User space interface with variable sized bitmaps for nodelists. */ /* Copy a node mask from user space. */ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, unsigned long maxnode) { unsigned long k; unsigned long nlongs; unsigned long endmask; --maxnode; nodes_clear(*nodes); if (maxnode == 0 || !nmask) return 0; if (maxnode > PAGE_SIZE*BITS_PER_BYTE) return -EINVAL; nlongs = BITS_TO_LONGS(maxnode); if ((maxnode % BITS_PER_LONG) == 0) endmask = ~0UL; else endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1; /* When the user specified more nodes than supported, just check if the non-supported part is all zero. */ if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) { if (nlongs > PAGE_SIZE/sizeof(long)) return -EINVAL; for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) { unsigned long t; if (get_user(t, nmask + k)) return -EFAULT; if (k == nlongs - 1) { if (t & endmask) return -EINVAL; } else if (t) return -EINVAL; } nlongs = BITS_TO_LONGS(MAX_NUMNODES); endmask = ~0UL; } if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long))) return -EFAULT; nodes_addr(*nodes)[nlongs-1] &= endmask; return 0; } /* Copy a kernel node mask to user space */ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, nodemask_t *nodes) { unsigned long copy = ALIGN(maxnode-1, 64) / 8; const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long); if (copy > nbytes) { if (copy > PAGE_SIZE) return -EINVAL; if (clear_user((char __user *)mask + nbytes, copy - nbytes)) return -EFAULT; copy = nbytes; } return copy_to_user(mask, nodes_addr(*nodes), copy) ?
-EFAULT : 0; } SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, unsigned long, mode, unsigned long __user *, nmask, unsigned long, maxnode, unsigned, flags) { nodemask_t nodes; int err; unsigned short mode_flags; mode_flags = mode & MPOL_MODE_FLAGS; mode &= ~MPOL_MODE_FLAGS; if (mode >= MPOL_MAX) return -EINVAL; if ((mode_flags & MPOL_F_STATIC_NODES) && (mode_flags & MPOL_F_RELATIVE_NODES)) return -EINVAL; err = get_nodes(&nodes, nmask, maxnode); if (err) return err; return do_mbind(start, len, mode, mode_flags, &nodes, flags); } /* Set the process memory policy */ SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask, unsigned long, maxnode) { int err; nodemask_t nodes; unsigned short flags; flags = mode & MPOL_MODE_FLAGS; mode &= ~MPOL_MODE_FLAGS; if ((unsigned int)mode >= MPOL_MAX) return -EINVAL; if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES)) return -EINVAL; err = get_nodes(&nodes, nmask, maxnode); if (err) return err; return do_set_mempolicy(mode, flags, &nodes); } SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, const unsigned long __user *, old_nodes, const unsigned long __user *, new_nodes) { const struct cred *cred = current_cred(), *tcred; struct mm_struct *mm = NULL; struct task_struct *task; nodemask_t task_nodes; int err; nodemask_t *old; nodemask_t *new; NODEMASK_SCRATCH(scratch); if (!scratch) return -ENOMEM; old = &scratch->mask1; new = &scratch->mask2; err = get_nodes(old, old_nodes, maxnode); if (err) goto out; err = get_nodes(new, new_nodes, maxnode); if (err) goto out; /* Find the mm_struct */ rcu_read_lock(); task = pid ? find_task_by_vpid(pid) : current; if (!task) { rcu_read_unlock(); err = -ESRCH; goto out; } get_task_struct(task); err = -EINVAL; /* * Check if this process has the right to modify the specified * process. The right exists if the process has administrative * capabilities, superuser privileges or the same * userid as the target process. */ tcred = __task_cred(task); if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) && !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) && !capable(CAP_SYS_NICE)) { rcu_read_unlock(); err = -EPERM; goto out_put; } rcu_read_unlock(); task_nodes = cpuset_mems_allowed(task); /* Is the user allowed to access the target nodes? */ if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { err = -EPERM; goto out_put; } if (!nodes_subset(*new, node_states[N_MEMORY])) { err = -EINVAL; goto out_put; } err = security_task_movememory(task); if (err) goto out_put; mm = get_task_mm(task); put_task_struct(task); if (!mm) { err = -EINVAL; goto out; } err = do_migrate_pages(mm, old, new, capable(CAP_SYS_NICE) ? 
MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); mmput(mm); out: NODEMASK_SCRATCH_FREE(scratch); return err; out_put: put_task_struct(task); goto out; } /* Retrieve NUMA policy */ SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, unsigned long __user *, nmask, unsigned long, maxnode, unsigned long, addr, unsigned long, flags) { int err; int uninitialized_var(pval); nodemask_t nodes; if (nmask != NULL && maxnode < MAX_NUMNODES) return -EINVAL; err = do_get_mempolicy(&pval, &nodes, addr, flags); if (err) return err; if (policy && put_user(pval, policy)) return -EFAULT; if (nmask) err = copy_nodes_to_user(nmask, maxnode, &nodes); return err; } #ifdef CONFIG_COMPAT asmlinkage long compat_sys_get_mempolicy(int __user *policy, compat_ulong_t __user *nmask, compat_ulong_t maxnode, compat_ulong_t addr, compat_ulong_t flags) { long err; unsigned long __user *nm = NULL; unsigned long nr_bits, alloc_size; DECLARE_BITMAP(bm, MAX_NUMNODES); nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; if (nmask) nm = compat_alloc_user_space(alloc_size); err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags); if (!err && nmask) { unsigned long copy_size; copy_size = min_t(unsigned long, sizeof(bm), alloc_size); err = copy_from_user(bm, nm, copy_size); /* ensure entire bitmap is zeroed */ err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); err |= compat_put_bitmap(nmask, bm, nr_bits); } return err; } asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask, compat_ulong_t maxnode) { long err = 0; unsigned long __user *nm = NULL; unsigned long nr_bits, alloc_size; DECLARE_BITMAP(bm, MAX_NUMNODES); nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; if (nmask) { err = compat_get_bitmap(bm, nmask, nr_bits); nm = compat_alloc_user_space(alloc_size); err |= copy_to_user(nm, bm, alloc_size); } if (err) return -EFAULT; return sys_set_mempolicy(mode, nm, nr_bits+1); } asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len, compat_ulong_t mode, compat_ulong_t __user *nmask, compat_ulong_t maxnode, compat_ulong_t flags) { long err = 0; unsigned long __user *nm = NULL; unsigned long nr_bits, alloc_size; nodemask_t bm; nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; if (nmask) { err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits); nm = compat_alloc_user_space(alloc_size); err |= copy_to_user(nm, nodes_addr(bm), alloc_size); } if (err) return -EFAULT; return sys_mbind(start, len, mode, nm, nr_bits+1, flags); } #endif /* * get_vma_policy(@task, @vma, @addr) * @task - task for fallback if vma policy == default * @vma - virtual memory area whose policy is sought * @addr - address in @vma for shared policy lookup * * Returns effective policy for a VMA at specified address. * Falls back to @task or system default policy, as necessary. * Current or other task's task mempolicy and non-shared vma policies must be * protected by task_lock(task) by the caller. * Shared policies [those marked as MPOL_F_SHARED] require an extra reference * count--added by the get_policy() vm_op, as appropriate--to protect against * freeing by another task. It is the caller's responsibility to free the * extra reference for shared policies. 
*/ struct mempolicy *get_vma_policy(struct task_struct *task, struct vm_area_struct *vma, unsigned long addr) { struct mempolicy *pol = get_task_policy(task); if (vma) { if (vma->vm_ops && vma->vm_ops->get_policy) { struct mempolicy *vpol = vma->vm_ops->get_policy(vma, addr); if (vpol) pol = vpol; } else if (vma->vm_policy) { pol = vma->vm_policy; /* * shmem_alloc_page() passes MPOL_F_SHARED policy with * a pseudo vma whose vma->vm_ops=NULL. Take a reference * count on these policies which will be dropped by * mpol_cond_put() later */ if (mpol_needs_cond_ref(pol)) mpol_get(pol); } } if (!pol) pol = &default_policy; return pol; } static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone) { enum zone_type dynamic_policy_zone = policy_zone; BUG_ON(dynamic_policy_zone == ZONE_MOVABLE); /* * If policy->v.nodes has movable memory only, * we apply the policy only when gfp_zone(gfp) = ZONE_MOVABLE. * * policy->v.nodes is intersected with node_states[N_MEMORY], * so if the following test fails, it implies * policy->v.nodes has movable memory only. */ if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY])) dynamic_policy_zone = ZONE_MOVABLE; return zone >= dynamic_policy_zone; } /* * Return a nodemask representing a mempolicy for filtering nodes for * page allocation */ static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) { /* Lower zones don't get a nodemask applied for MPOL_BIND */ if (unlikely(policy->mode == MPOL_BIND) && apply_policy_zone(policy, gfp_zone(gfp)) && cpuset_nodemask_valid_mems_allowed(&policy->v.nodes)) return &policy->v.nodes; return NULL; } /* Return a zonelist indicated by gfp for node representing a mempolicy */ static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy, int nd) { switch (policy->mode) { case MPOL_PREFERRED: if (!(policy->flags & MPOL_F_LOCAL)) nd = policy->v.preferred_node; break; case MPOL_BIND: /* * Normally, MPOL_BIND allocations are node-local within the * allowed nodemask. However, if __GFP_THISNODE is set and the * current node isn't part of the mask, we use the zonelist for * the first node in the mask instead. */ if (unlikely(gfp & __GFP_THISNODE) && unlikely(!node_isset(nd, policy->v.nodes))) nd = first_node(policy->v.nodes); break; default: BUG(); } return node_zonelist(nd, gfp); } /* Do dynamic interleaving for a process */ static unsigned interleave_nodes(struct mempolicy *policy) { unsigned nid, next; struct task_struct *me = current; nid = me->il_next; next = next_node(nid, policy->v.nodes); if (next >= MAX_NUMNODES) next = first_node(policy->v.nodes); if (next < MAX_NUMNODES) me->il_next = next; return nid; } /* * Depending on the memory policy provide a node from which to allocate the * next slab entry. * @policy must be protected from freeing by the caller. If @policy is * the current task's mempolicy, this protection is implicit, as only the * task can change its policy. The system default policy requires no * such protection. */ unsigned slab_node(void) { struct mempolicy *policy; if (in_interrupt()) return numa_node_id(); policy = current->mempolicy; if (!policy || policy->flags & MPOL_F_LOCAL) return numa_node_id(); switch (policy->mode) { case MPOL_PREFERRED: /* * handled MPOL_F_LOCAL above */ return policy->v.preferred_node; case MPOL_INTERLEAVE: return interleave_nodes(policy); case MPOL_BIND: { /* * Follow bind policy behavior and start allocation at the * first node.
*/ struct zonelist *zonelist; struct zone *zone; enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0]; (void)first_zones_zonelist(zonelist, highest_zoneidx, &policy->v.nodes, &zone); return zone ? zone->node : numa_node_id(); } default: BUG(); } } /* Do static interleaving for a VMA with known offset. */ static unsigned offset_il_node(struct mempolicy *pol, struct vm_area_struct *vma, unsigned long off) { unsigned nnodes = nodes_weight(pol->v.nodes); unsigned target; int c; int nid = -1; if (!nnodes) return numa_node_id(); target = (unsigned int)off % nnodes; c = 0; do { nid = next_node(nid, pol->v.nodes); c++; } while (c <= target); return nid; } /* Determine a node number for interleave */ static inline unsigned interleave_nid(struct mempolicy *pol, struct vm_area_struct *vma, unsigned long addr, int shift) { if (vma) { unsigned long off; /* * for small pages, there is no difference between * shift and PAGE_SHIFT, so the bit-shift is safe. * for huge pages, since vm_pgoff is in units of small * pages, we need to shift off the always 0 bits to get * a useful offset. */ BUG_ON(shift < PAGE_SHIFT); off = vma->vm_pgoff >> (shift - PAGE_SHIFT); off += (addr - vma->vm_start) >> shift; return offset_il_node(pol, vma, off); } else return interleave_nodes(pol); } /* * Return the bit number of a random bit set in the nodemask. * (returns -1 if nodemask is empty) */ int node_random(const nodemask_t *maskp) { int w, bit = -1; w = nodes_weight(*maskp); if (w) bit = bitmap_ord_to_pos(maskp->bits, get_random_int() % w, MAX_NUMNODES); return bit; } #ifdef CONFIG_HUGETLBFS /* * huge_zonelist(@vma, @addr, @gfp_flags, @mpol) * @vma = virtual memory area whose policy is sought * @addr = address in @vma for shared policy lookup and interleave policy * @gfp_flags = for requested zone * @mpol = pointer to mempolicy pointer for reference counted mempolicy * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask * * Returns a zonelist suitable for a huge page allocation and a pointer * to the struct mempolicy for conditional unref after allocation. * If the effective policy is 'BIND', returns a pointer to the mempolicy's * @nodemask for filtering the zonelist. * * Must be protected by get_mems_allowed() */ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol, nodemask_t **nodemask) { struct zonelist *zl; *mpol = get_vma_policy(current, vma, addr); *nodemask = NULL; /* assume !MPOL_BIND */ if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) { zl = node_zonelist(interleave_nid(*mpol, vma, addr, huge_page_shift(hstate_vma(vma))), gfp_flags); } else { zl = policy_zonelist(gfp_flags, *mpol, numa_node_id()); if ((*mpol)->mode == MPOL_BIND) *nodemask = &(*mpol)->v.nodes; } return zl; } /* * init_nodemask_of_mempolicy * * If the current task's mempolicy is "default" [NULL], return 'false' * to indicate default policy. Otherwise, extract the policy nodemask * for 'bind' or 'interleave' policy into the argument nodemask, or * initialize the argument nodemask to contain the single node for * 'preferred' or 'local' policy and return 'true' to indicate presence * of non-default mempolicy. * * We don't bother with reference counting the mempolicy [mpol_get/put] * because the current task is examining its own mempolicy and a task's * mempolicy is only ever changed by the task itself. * * N.B., it is the caller's responsibility to free a returned nodemask.
*/ bool init_nodemask_of_mempolicy(nodemask_t *mask) { struct mempolicy *mempolicy; int nid; if (!(mask && current->mempolicy)) return false; task_lock(current); mempolicy = current->mempolicy; switch (mempolicy->mode) { case MPOL_PREFERRED: if (mempolicy->flags & MPOL_F_LOCAL) nid = numa_node_id(); else nid = mempolicy->v.preferred_node; init_nodemask_of_node(mask, nid); break; case MPOL_BIND: /* Fall through */ case MPOL_INTERLEAVE: *mask = mempolicy->v.nodes; break; default: BUG(); } task_unlock(current); return true; } #endif /* * mempolicy_nodemask_intersects * * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default * policy. Otherwise, check for intersection between mask and the policy * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local' * policy, always return true since it may allocate elsewhere on fallback. * * Takes task_lock(tsk) to prevent freeing of its mempolicy. */ bool mempolicy_nodemask_intersects(struct task_struct *tsk, const nodemask_t *mask) { struct mempolicy *mempolicy; bool ret = true; if (!mask) return ret; task_lock(tsk); mempolicy = tsk->mempolicy; if (!mempolicy) goto out; switch (mempolicy->mode) { case MPOL_PREFERRED: /* * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to * allocate from, they may fall back to other nodes when OOM. * Thus, it's possible for tsk to have allocated memory from * nodes in mask. */ break; case MPOL_BIND: case MPOL_INTERLEAVE: ret = nodes_intersects(mempolicy->v.nodes, *mask); break; default: BUG(); } out: task_unlock(tsk); return ret; } /* Allocate a page in interleaved policy. Own path because it needs to do special accounting. */ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, unsigned nid) { struct zonelist *zl; struct page *page; zl = node_zonelist(nid, gfp); page = __alloc_pages(gfp, order, zl); if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0])) inc_zone_page_state(page, NUMA_INTERLEAVE_HIT); return page; } /** * alloc_pages_vma - Allocate a page for a VMA. * * @gfp: * %GFP_USER user allocation. * %GFP_KERNEL kernel allocations, * %GFP_HIGHMEM highmem/user allocations, * %GFP_FS allocation should not call back into a file system. * %GFP_ATOMIC don't sleep. * * @order: Order of the GFP allocation. * @vma: Pointer to VMA or NULL if not available. * @addr: Virtual Address of the allocation. Must be inside the VMA. * * This function allocates a page from the kernel page pool and applies * a NUMA policy associated with the VMA or the current process. * When VMA is not NULL caller must hold down_read on the mmap_sem of the * mm_struct of the VMA to prevent it from going away. Should be used for * all allocations for pages that will be mapped into * user space. Returns NULL when no page can be allocated. * * Should be called with the mmap_sem of the vma held.
*/ struct page * alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, unsigned long addr, int node) { struct mempolicy *pol; struct page *page; unsigned int cpuset_mems_cookie; retry_cpuset: pol = get_vma_policy(current, vma, addr); cpuset_mems_cookie = get_mems_allowed(); if (unlikely(pol->mode == MPOL_INTERLEAVE)) { unsigned nid; nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); mpol_cond_put(pol); page = alloc_page_interleave(gfp, order, nid); if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page)) goto retry_cpuset; return page; } page = __alloc_pages_nodemask(gfp, order, policy_zonelist(gfp, pol, node), policy_nodemask(gfp, pol)); if (unlikely(mpol_needs_cond_ref(pol))) __mpol_put(pol); if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page)) goto retry_cpuset; return page; } /** * alloc_pages_current - Allocate pages. * * @gfp: * %GFP_USER user allocation, * %GFP_KERNEL kernel allocation, * %GFP_HIGHMEM highmem allocation, * %GFP_FS don't call back into a file system. * %GFP_ATOMIC don't sleep. * @order: Power of two of allocation size in pages. 0 is a single page. * * Allocate a page from the kernel page pool. When not in * interrupt context, apply the current process' NUMA policy. * Returns NULL when no page can be allocated. * * Don't call cpuset_update_task_memory_state() unless * 1) it's ok to take cpuset_sem (can WAIT), and * 2) allocating for current task (not interrupt). */ struct page *alloc_pages_current(gfp_t gfp, unsigned order) { struct mempolicy *pol = get_task_policy(current); struct page *page; unsigned int cpuset_mems_cookie; if (!pol || in_interrupt() || (gfp & __GFP_THISNODE)) pol = &default_policy; retry_cpuset: cpuset_mems_cookie = get_mems_allowed(); /* * No reference counting needed for current->mempolicy * nor system default_policy */ if (pol->mode == MPOL_INTERLEAVE) page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); else page = __alloc_pages_nodemask(gfp, order, policy_zonelist(gfp, pol, numa_node_id()), policy_nodemask(gfp, pol)); if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page)) goto retry_cpuset; return page; } EXPORT_SYMBOL(alloc_pages_current); /* * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it * rebinds the mempolicy it is copying by calling mpol_rebind_policy() * with the mems_allowed returned by cpuset_mems_allowed(). This * keeps mempolicies cpuset relative after its cpuset moves. See * further kernel/cpuset.c update_nodemask(). * * current's mempolicy may be rebound by another task (the task that changes * the cpuset's mems), so we needn't do rebind work for the current task.
*/ /* Slow path of a mempolicy duplicate */ struct mempolicy *__mpol_dup(struct mempolicy *old) { struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); if (!new) return ERR_PTR(-ENOMEM); /* task's mempolicy is protected by alloc_lock */ if (old == current->mempolicy) { task_lock(current); *new = *old; task_unlock(current); } else *new = *old; rcu_read_lock(); if (current_cpuset_is_being_rebound()) { nodemask_t mems = cpuset_mems_allowed(current); if (new->flags & MPOL_F_REBINDING) mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2); else mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE); } rcu_read_unlock(); atomic_set(&new->refcnt, 1); return new; } /* Slow path of a mempolicy comparison */ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) { if (!a || !b) return false; if (a->mode != b->mode) return false; if (a->flags != b->flags) return false; if (mpol_store_user_nodemask(a)) if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) return false; switch (a->mode) { case MPOL_BIND: /* Fall through */ case MPOL_INTERLEAVE: return !!nodes_equal(a->v.nodes, b->v.nodes); case MPOL_PREFERRED: return a->v.preferred_node == b->v.preferred_node; default: BUG(); return false; } } /* * Shared memory backing store policy support. * * Remember policies even when nobody has shared memory mapped. * The policies are kept in Red-Black tree linked from the inode. * They are protected by the sp->lock spinlock, which should be held * for any accesses to the tree. */ /* lookup first element intersecting start-end */ /* Caller holds sp->lock */ static struct sp_node * sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) { struct rb_node *n = sp->root.rb_node; while (n) { struct sp_node *p = rb_entry(n, struct sp_node, nd); if (start >= p->end) n = n->rb_right; else if (end <= p->start) n = n->rb_left; else break; } if (!n) return NULL; for (;;) { struct sp_node *w = NULL; struct rb_node *prev = rb_prev(n); if (!prev) break; w = rb_entry(prev, struct sp_node, nd); if (w->end <= start) break; n = prev; } return rb_entry(n, struct sp_node, nd); } /* Insert a new shared policy into the list. */ /* Caller holds sp->lock */ static void sp_insert(struct shared_policy *sp, struct sp_node *new) { struct rb_node **p = &sp->root.rb_node; struct rb_node *parent = NULL; struct sp_node *nd; while (*p) { parent = *p; nd = rb_entry(parent, struct sp_node, nd); if (new->start < nd->start) p = &(*p)->rb_left; else if (new->end > nd->end) p = &(*p)->rb_right; else BUG(); } rb_link_node(&new->nd, parent, p); rb_insert_color(&new->nd, &sp->root); pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, new->policy ? new->policy->mode : 0); } /* Find shared policy intersecting idx */ struct mempolicy * mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) { struct mempolicy *pol = NULL; struct sp_node *sn; if (!sp->root.rb_node) return NULL; spin_lock(&sp->lock); sn = sp_lookup(sp, idx, idx+1); if (sn) { mpol_get(sn->policy); pol = sn->policy; } spin_unlock(&sp->lock); return pol; } static void sp_free(struct sp_node *n) { mpol_put(n->policy); kmem_cache_free(sn_cache, n); } /** * mpol_misplaced - check whether current page node is valid in policy * * @page - page to be checked * @vma - vm area where page mapped * @addr - virtual address where page mapped * * Lookup current policy node id for vma,addr and "compare to" page's * node id. 
* * Returns: * -1 - not misplaced, page is in the right node * node - node id where the page should be * * Policy determination "mimics" alloc_page_vma(). * Called from fault path where we know the vma and faulting address. */ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) { struct mempolicy *pol; struct zone *zone; int curnid = page_to_nid(page); unsigned long pgoff; int polnid = -1; int ret = -1; BUG_ON(!vma); pol = get_vma_policy(current, vma, addr); if (!(pol->flags & MPOL_F_MOF)) goto out; switch (pol->mode) { case MPOL_INTERLEAVE: BUG_ON(addr >= vma->vm_end); BUG_ON(addr < vma->vm_start); pgoff = vma->vm_pgoff; pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; polnid = offset_il_node(pol, vma, pgoff); break; case MPOL_PREFERRED: if (pol->flags & MPOL_F_LOCAL) polnid = numa_node_id(); else polnid = pol->v.preferred_node; break; case MPOL_BIND: /* * allows binding to multiple nodes. * use current page if in policy nodemask, * else select nearest allowed node, if any. * If no allowed nodes, use current [!misplaced]. */ if (node_isset(curnid, pol->v.nodes)) goto out; (void)first_zones_zonelist( node_zonelist(numa_node_id(), GFP_HIGHUSER), gfp_zone(GFP_HIGHUSER), &pol->v.nodes, &zone); polnid = zone->node; break; default: BUG(); } /* Migrate the page towards the node whose CPU is referencing it */ if (pol->flags & MPOL_F_MORON) { int last_nid; polnid = numa_node_id(); /* * Multi-stage node selection is used in conjunction * with a periodic migration fault to build a temporal * task<->page relation. By using a two-stage filter we * remove short/unlikely relations. * * Using P(p) ~ n_p / n_t as per frequentist * probability, we can equate a task's usage of a * particular page (n_p) per total usage of this * page (n_t) (in a given time-span) to a probability. * * Our periodic faults will sample this probability and * getting the same result twice in a row, given these * samples are fully independent, is then given by * P(n)^2, provided our sample period is sufficiently * short compared to the usage pattern. * * This quadratic squishes small probabilities, making * it less likely we act on an unlikely task<->page * relation. */ last_nid = page_nid_xchg_last(page, polnid); if (last_nid != polnid) goto out; } if (curnid != polnid) ret = polnid; out: mpol_cond_put(pol); return ret; } static void sp_delete(struct shared_policy *sp, struct sp_node *n) { pr_debug("deleting %lx-%lx\n", n->start, n->end); rb_erase(&n->nd, &sp->root); sp_free(n); } static void sp_node_init(struct sp_node *node, unsigned long start, unsigned long end, struct mempolicy *pol) { node->start = start; node->end = end; node->policy = pol; } static struct sp_node *sp_alloc(unsigned long start, unsigned long end, struct mempolicy *pol) { struct sp_node *n; struct mempolicy *newpol; n = kmem_cache_alloc(sn_cache, GFP_KERNEL); if (!n) return NULL; newpol = mpol_dup(pol); if (IS_ERR(newpol)) { kmem_cache_free(sn_cache, n); return NULL; } newpol->flags |= MPOL_F_SHARED; sp_node_init(n, start, end, newpol); return n; } /* Replace a policy range. */ static int shared_policy_replace(struct shared_policy *sp, unsigned long start, unsigned long end, struct sp_node *new) { struct sp_node *n; struct sp_node *n_new = NULL; struct mempolicy *mpol_new = NULL; int ret = 0; restart: spin_lock(&sp->lock); n = sp_lookup(sp, start, end); /* Take care of old policies in the same range.
*/ while (n && n->start < end) { struct rb_node *next = rb_next(&n->nd); if (n->start >= start) { if (n->end <= end) sp_delete(sp, n); else n->start = end; } else { /* Old policy spanning whole new range. */ if (n->end > end) { if (!n_new) goto alloc_new; *mpol_new = *n->policy; atomic_set(&mpol_new->refcnt, 1); sp_node_init(n_new, end, n->end, mpol_new); n->end = start; sp_insert(sp, n_new); n_new = NULL; mpol_new = NULL; break; } else n->end = start; } if (!next) break; n = rb_entry(next, struct sp_node, nd); } if (new) sp_insert(sp, new); spin_unlock(&sp->lock); ret = 0; err_out: if (mpol_new) mpol_put(mpol_new); if (n_new) kmem_cache_free(sn_cache, n_new); return ret; alloc_new: spin_unlock(&sp->lock); ret = -ENOMEM; n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); if (!n_new) goto err_out; mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); if (!mpol_new) goto err_out; goto restart; } /** * mpol_shared_policy_init - initialize shared policy for inode * @sp: pointer to inode shared policy * @mpol: struct mempolicy to install * * Install non-NULL @mpol in inode's shared policy rb-tree. * On entry, the current task has a reference on a non-NULL @mpol. * This must be released on exit. * This is called at get_inode() calls and we can use GFP_KERNEL. */ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) { int ret; sp->root = RB_ROOT; /* empty tree == default mempolicy */ spin_lock_init(&sp->lock); if (mpol) { struct vm_area_struct pvma; struct mempolicy *new; NODEMASK_SCRATCH(scratch); if (!scratch) goto put_mpol; /* contextualize the tmpfs mount point mempolicy */ new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); if (IS_ERR(new)) goto free_scratch; /* no valid nodemask intersection */ task_lock(current); ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); task_unlock(current); if (ret) goto put_new; /* Create pseudo-vma that contains just the policy */ memset(&pvma, 0, sizeof(struct vm_area_struct)); pvma.vm_end = TASK_SIZE; /* policy covers entire file */ mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ put_new: mpol_put(new); /* drop initial ref */ free_scratch: NODEMASK_SCRATCH_FREE(scratch); put_mpol: mpol_put(mpol); /* drop our incoming ref on sb mpol */ } } int mpol_set_shared_policy(struct shared_policy *info, struct vm_area_struct *vma, struct mempolicy *npol) { int err; struct sp_node *new = NULL; unsigned long sz = vma_pages(vma); pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", vma->vm_pgoff, sz, npol ? npol->mode : -1, npol ? npol->flags : -1, npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE); if (npol) { new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); if (!new) return -ENOMEM; } err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); if (err && new) sp_free(new); return err; } /* Free a backing policy store on inode delete. */ void mpol_free_shared_policy(struct shared_policy *p) { struct sp_node *n; struct rb_node *next; if (!p->root.rb_node) return; spin_lock(&p->lock); next = rb_first(&p->root); while (next) { n = rb_entry(next, struct sp_node, nd); next = rb_next(&n->nd); sp_delete(p, n); } spin_unlock(&p->lock); } #ifdef CONFIG_NUMA_BALANCING static bool __initdata numabalancing_override; static void __init check_numabalancing_enable(void) { bool numabalancing_default = false; if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) numabalancing_default = true; if (nr_node_ids > 1 && !numabalancing_override) { printk(KERN_INFO "Enabling automatic NUMA balancing. 
" "Configure with numa_balancing= or sysctl"); set_numabalancing_state(numabalancing_default); } } static int __init setup_numabalancing(char *str) { int ret = 0; if (!str) goto out; numabalancing_override = true; if (!strcmp(str, "enable")) { set_numabalancing_state(true); ret = 1; } else if (!strcmp(str, "disable")) { set_numabalancing_state(false); ret = 1; } out: if (!ret) printk(KERN_WARNING "Unable to parse numa_balancing=\n"); return ret; } __setup("numa_balancing=", setup_numabalancing); #else static inline void __init check_numabalancing_enable(void) { } #endif /* CONFIG_NUMA_BALANCING */ /* assumes fs == KERNEL_DS */ void __init numa_policy_init(void) { nodemask_t interleave_nodes; unsigned long largest = 0; int nid, prefer = 0; policy_cache = kmem_cache_create("numa_policy", sizeof(struct mempolicy), 0, SLAB_PANIC, NULL); sn_cache = kmem_cache_create("shared_policy_node", sizeof(struct sp_node), 0, SLAB_PANIC, NULL); for_each_node(nid) { preferred_node_policy[nid] = (struct mempolicy) { .refcnt = ATOMIC_INIT(1), .mode = MPOL_PREFERRED, .flags = MPOL_F_MOF | MPOL_F_MORON, .v = { .preferred_node = nid, }, }; } /* * Set interleaving policy for system init. Interleaving is only * enabled across suitably sized nodes (default is >= 16MB), or * fall back to the largest node if they're all smaller. */ nodes_clear(interleave_nodes); for_each_node_state(nid, N_MEMORY) { unsigned long total_pages = node_present_pages(nid); /* Preserve the largest node */ if (largest < total_pages) { largest = total_pages; prefer = nid; } /* Interleave this node? */ if ((total_pages << PAGE_SHIFT) >= (16 << 20)) node_set(nid, interleave_nodes); } /* All too small, use the largest */ if (unlikely(nodes_empty(interleave_nodes))) node_set(prefer, interleave_nodes); if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) printk("numa_policy_init: interleaving failed\n"); check_numabalancing_enable(); } /* Reset policy of current process to default */ void numa_default_policy(void) { do_set_mempolicy(MPOL_DEFAULT, 0, NULL); } /* * Parse and format mempolicy from/to strings */ /* * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag. */ static const char * const policy_modes[] = { [MPOL_DEFAULT] = "default", [MPOL_PREFERRED] = "prefer", [MPOL_BIND] = "bind", [MPOL_INTERLEAVE] = "interleave", [MPOL_LOCAL] = "local", }; #ifdef CONFIG_TMPFS /** * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. * @str: string containing mempolicy to parse * @mpol: pointer to struct mempolicy pointer, returned on success. 
* * Format of input: * <mode>[=<flags>][:<nodelist>] * * On success, returns 0, else 1 */ int mpol_parse_str(char *str, struct mempolicy **mpol) { struct mempolicy *new = NULL; unsigned short mode; unsigned short mode_flags; nodemask_t nodes; char *nodelist = strchr(str, ':'); char *flags = strchr(str, '='); int err = 1; if (nodelist) { /* NUL-terminate mode or flags string */ *nodelist++ = '\0'; if (nodelist_parse(nodelist, nodes)) goto out; if (!nodes_subset(nodes, node_states[N_MEMORY])) goto out; } else nodes_clear(nodes); if (flags) *flags++ = '\0'; /* terminate mode string */ for (mode = 0; mode < MPOL_MAX; mode++) { if (!strcmp(str, policy_modes[mode])) { break; } } if (mode >= MPOL_MAX) goto out; switch (mode) { case MPOL_PREFERRED: /* * Insist on a nodelist of one node only */ if (nodelist) { char *rest = nodelist; while (isdigit(*rest)) rest++; if (*rest) goto out; } break; case MPOL_INTERLEAVE: /* * Default to online nodes with memory if no nodelist */ if (!nodelist) nodes = node_states[N_MEMORY]; break; case MPOL_LOCAL: /* * Don't allow a nodelist; mpol_new() checks flags */ if (nodelist) goto out; mode = MPOL_PREFERRED; break; case MPOL_DEFAULT: /* * Insist on an empty nodelist */ if (!nodelist) err = 0; goto out; case MPOL_BIND: /* * Insist on a nodelist */ if (!nodelist) goto out; } mode_flags = 0; if (flags) { /* * Currently, we only support two mutually exclusive * mode flags. */ if (!strcmp(flags, "static")) mode_flags |= MPOL_F_STATIC_NODES; else if (!strcmp(flags, "relative")) mode_flags |= MPOL_F_RELATIVE_NODES; else goto out; } new = mpol_new(mode, mode_flags, &nodes); if (IS_ERR(new)) goto out; /* * Save nodes for mpol_to_str() to show the tmpfs mount options * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. */ if (mode != MPOL_PREFERRED) new->v.nodes = nodes; else if (nodelist) new->v.preferred_node = first_node(nodes); else new->flags |= MPOL_F_LOCAL; /* * Save nodes for contextualization: this will be used to "clone" * the mempolicy in a specific context [cpuset] at a later time. */ new->w.user_nodemask = nodes; err = 0; out: /* Restore string for error message */ if (nodelist) *--nodelist = ':'; if (flags) *--flags = '='; if (!err) *mpol = new; return err; } #endif /* CONFIG_TMPFS */ /** * mpol_to_str - format a mempolicy structure for printing * @buffer: to contain formatted mempolicy string * @maxlen: length of @buffer * @pol: pointer to mempolicy to be formatted * * Convert a mempolicy into a string. * Returns the number of characters in buffer (if positive) * or an error (negative) */ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) { char *p = buffer; int l; nodemask_t nodes; unsigned short mode; unsigned short flags = pol ?
pol->flags : 0; /* * Sanity check: room for longest mode, flag and some nodes */ VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16); if (!pol || pol == &default_policy) mode = MPOL_DEFAULT; else mode = pol->mode; switch (mode) { case MPOL_DEFAULT: nodes_clear(nodes); break; case MPOL_PREFERRED: nodes_clear(nodes); if (flags & MPOL_F_LOCAL) mode = MPOL_LOCAL; else node_set(pol->v.preferred_node, nodes); break; case MPOL_BIND: /* Fall through */ case MPOL_INTERLEAVE: nodes = pol->v.nodes; break; default: return -EINVAL; } l = strlen(policy_modes[mode]); if (buffer + maxlen < p + l + 1) return -ENOSPC; strcpy(p, policy_modes[mode]); p += l; if (flags & MPOL_MODE_FLAGS) { if (buffer + maxlen < p + 2) return -ENOSPC; *p++ = '='; /* * Currently, the only defined flags are mutually exclusive */ if (flags & MPOL_F_STATIC_NODES) p += snprintf(p, buffer + maxlen - p, "static"); else if (flags & MPOL_F_RELATIVE_NODES) p += snprintf(p, buffer + maxlen - p, "relative"); } if (!nodes_empty(nodes)) { if (buffer + maxlen < p + 2) return -ENOSPC; *p++ = ':'; p += nodelist_scnprintf(p, buffer + maxlen - p, nodes); } return p - buffer; }
gpl-2.0
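The mm/mempolicy.c record above implements the mbind(), set_mempolicy() and get_mempolicy() syscalls. A minimal userspace sketch of that interface (not part of the record): it binds an anonymous mapping to node 0 and reads the policy back. It assumes libnuma's <numaif.h> is installed, that node 0 exists, and that you link with -lnuma; error handling is deliberately terse.

#include <stdio.h>
#include <sys/mman.h>
#include <numaif.h>

int main(void)
{
	size_t len = 16 * 4096;
	unsigned long nodemask = 1UL << 0;	/* node 0 only */
	int mode = -1;
	void *buf;

	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	/* Kernel path: sys_mbind() -> do_mbind() above. */
	if (mbind(buf, len, MPOL_BIND, &nodemask,
		  sizeof(nodemask) * 8, MPOL_MF_STRICT))
		perror("mbind");

	/* Kernel path: sys_get_mempolicy() -> do_get_mempolicy(). */
	if (get_mempolicy(&mode, NULL, 0, buf, MPOL_F_ADDR))
		perror("get_mempolicy");
	else
		printf("policy at %p: mode %d (MPOL_BIND is %d)\n",
		       buf, mode, MPOL_BIND);

	munmap(buf, len);
	return 0;
}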
pichina/linux-bcache
arch/arm/plat-s3c24xx/pm.c
27
9026
/* linux/arch/arm/plat-s3c24xx/pm.c * * Copyright (c) 2004,2006 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * S3C24XX Power Manager (Suspend-To-RAM) support * * See Documentation/arm/Samsung-S3C24XX/Suspend.txt for more information * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Parts based on arch/arm/mach-pxa/pm.c * * Thanks to Dimitry Andric for debugging */ #include <linux/init.h> #include <linux/suspend.h> #include <linux/errno.h> #include <linux/time.h> #include <linux/interrupt.h> #include <linux/serial_core.h> #include <linux/io.h> #include <plat/regs-serial.h> #include <mach/regs-clock.h> #include <mach/regs-gpio.h> #include <mach/regs-mem.h> #include <mach/regs-irq.h> #include <asm/mach/time.h> #include <plat/pm.h> #define PFX "s3c24xx-pm: " static struct sleep_save core_save[] = { SAVE_ITEM(S3C2410_LOCKTIME), SAVE_ITEM(S3C2410_CLKCON), /* we restore the timings here, with the proviso that the board * brings the system up in a slower, or equal, frequency setting * to the original system. * * if we cannot guarantee this, then things are going to go very * wrong here, as we modify the refresh and both pll settings.
*/ SAVE_ITEM(S3C2410_BWSCON), SAVE_ITEM(S3C2410_BANKCON0), SAVE_ITEM(S3C2410_BANKCON1), SAVE_ITEM(S3C2410_BANKCON2), SAVE_ITEM(S3C2410_BANKCON3), SAVE_ITEM(S3C2410_BANKCON4), SAVE_ITEM(S3C2410_BANKCON5), #ifndef CONFIG_CPU_FREQ SAVE_ITEM(S3C2410_CLKDIVN), SAVE_ITEM(S3C2410_MPLLCON), SAVE_ITEM(S3C2410_REFRESH), #endif SAVE_ITEM(S3C2410_UPLLCON), SAVE_ITEM(S3C2410_CLKSLOW), }; static struct gpio_sleep { void __iomem *base; unsigned int gpcon; unsigned int gpdat; unsigned int gpup; } gpio_save[] = { [0] = { .base = S3C2410_GPACON, }, [1] = { .base = S3C2410_GPBCON, }, [2] = { .base = S3C2410_GPCCON, }, [3] = { .base = S3C2410_GPDCON, }, [4] = { .base = S3C2410_GPECON, }, [5] = { .base = S3C2410_GPFCON, }, [6] = { .base = S3C2410_GPGCON, }, [7] = { .base = S3C2410_GPHCON, }, }; static struct sleep_save misc_save[] = { SAVE_ITEM(S3C2410_DCLKCON), }; /* s3c_pm_check_resume_pin * * check to see if the pin is configured correctly for sleep mode, and * make any necessary adjustments if it is not */ static void s3c_pm_check_resume_pin(unsigned int pin, unsigned int irqoffs) { unsigned long irqstate; unsigned long pinstate; int irq = s3c2410_gpio_getirq(pin); if (irqoffs < 4) irqstate = s3c_irqwake_intmask & (1L<<irqoffs); else irqstate = s3c_irqwake_eintmask & (1L<<irqoffs); pinstate = s3c2410_gpio_getcfg(pin); if (!irqstate) { if (pinstate == S3C2410_GPIO_IRQ) S3C_PMDBG("Leaving IRQ %d (pin %d) enabled\n", irq, pin); } else { if (pinstate == S3C2410_GPIO_IRQ) { S3C_PMDBG("Disabling IRQ %d (pin %d)\n", irq, pin); s3c2410_gpio_cfgpin(pin, S3C2410_GPIO_INPUT); } } } /* s3c_pm_configure_extint * * configure all external interrupt pins */ void s3c_pm_configure_extint(void) { int pin; /* for each of the external interrupts (EINT0..EINT15) we * need to check whether it is an external interrupt source, * and then configure it as an input if it is not */ for (pin = S3C2410_GPF0; pin <= S3C2410_GPF7; pin++) { s3c_pm_check_resume_pin(pin, pin - S3C2410_GPF0); } for (pin = S3C2410_GPG0; pin <= S3C2410_GPG7; pin++) { s3c_pm_check_resume_pin(pin, (pin - S3C2410_GPG0)+8); } } /* offsets for CON/DAT/UP registers */ #define OFFS_CON (S3C2410_GPACON - S3C2410_GPACON) #define OFFS_DAT (S3C2410_GPADAT - S3C2410_GPACON) #define OFFS_UP (S3C2410_GPBUP - S3C2410_GPBCON) /* s3c_pm_save_gpios() * * Save the state of the GPIOs */ void s3c_pm_save_gpios(void) { struct gpio_sleep *gps = gpio_save; unsigned int gpio; for (gpio = 0; gpio < ARRAY_SIZE(gpio_save); gpio++, gps++) { void __iomem *base = gps->base; gps->gpcon = __raw_readl(base + OFFS_CON); gps->gpdat = __raw_readl(base + OFFS_DAT); if (gpio > 0) gps->gpup = __raw_readl(base + OFFS_UP); } } /* Test whether the given masked+shifted bits of a GPIO configuration * are one of the SFN (special function) modes. */ static inline int is_sfn(unsigned long con) { return (con == 2 || con == 3); } /* Test if the given masked+shifted GPIO configuration is an input */ static inline int is_in(unsigned long con) { return con == 0; } /* Test if the given masked+shifted GPIO configuration is an output */ static inline int is_out(unsigned long con) { return con == 1; } /** * s3c2410_pm_restore_gpio() - restore the given GPIO bank * @index: The number of the GPIO bank being resumed. * @gps: The sleep configuration for the bank. * * Restore one of the GPIO banks that was saved during suspend. This is * not as simple as once thought, due to the possibility of glitches * from the order that the CON and DAT registers are set in.
* * The three states the pin can be are {IN,OUT,SFN} which gives us 9 * combinations of changes to check. Three of these, if the pin stays * in the same configuration can be discounted. This leaves us with * the following: * * { IN => OUT } Change DAT first * { IN => SFN } Change CON first * { OUT => SFN } Change CON first, so new data will not glitch * { OUT => IN } Change CON first, so new data will not glitch * { SFN => IN } Change CON first * { SFN => OUT } Change DAT first, so new data will not glitch [1] * * We do not currently deal with the UP registers as these control * weak resistors, so a small delay in change should not need to bring * these into the calculations. * * [1] this assumes that writing to a pin DAT whilst in SFN will set the * state for when it is next output. */ static void s3c2410_pm_restore_gpio(int index, struct gpio_sleep *gps) { void __iomem *base = gps->base; unsigned long gps_gpcon = gps->gpcon; unsigned long gps_gpdat = gps->gpdat; unsigned long old_gpcon; unsigned long old_gpdat; unsigned long old_gpup = 0x0; unsigned long gpcon; int nr; old_gpcon = __raw_readl(base + OFFS_CON); old_gpdat = __raw_readl(base + OFFS_DAT); if (base == S3C2410_GPACON) { /* GPACON only has one bit per control / data and no PULLUPs. * GPACON[x] = 0 => Output, 1 => SFN */ /* first set all SFN bits to SFN */ gpcon = old_gpcon | gps->gpcon; __raw_writel(gpcon, base + OFFS_CON); /* now set all the other bits */ __raw_writel(gps_gpdat, base + OFFS_DAT); __raw_writel(gps_gpcon, base + OFFS_CON); } else { unsigned long old, new, mask; unsigned long change_mask = 0x0; old_gpup = __raw_readl(base + OFFS_UP); /* Create a change_mask of all the items that need to have * their CON value changed before their DAT value, so that * we minimise the work between the two settings. */ for (nr = 0, mask = 0x03; nr < 32; nr += 2, mask <<= 2) { old = (old_gpcon & mask) >> nr; new = (gps_gpcon & mask) >> nr; /* If there is no change, then skip */ if (old == new) continue; /* If both are special function, then skip */ if (is_sfn(old) && is_sfn(new)) continue; /* Change is IN => OUT, do not change now */ if (is_in(old) && is_out(new)) continue; /* Change is SFN => OUT, do not change now */ if (is_sfn(old) && is_out(new)) continue; /* We should now be at the case of IN=>SFN, * OUT=>SFN, OUT=>IN, SFN=>IN. */ change_mask |= mask; } /* Write the new CON settings */ gpcon = old_gpcon & ~change_mask; gpcon |= gps_gpcon & change_mask; __raw_writel(gpcon, base + OFFS_CON); /* Now change any items that require DAT,CON */ __raw_writel(gps_gpdat, base + OFFS_DAT); __raw_writel(gps_gpcon, base + OFFS_CON); __raw_writel(gps->gpup, base + OFFS_UP); } S3C_PMDBG("GPIO[%d] CON %08lx => %08lx, DAT %08lx => %08lx\n", index, old_gpcon, gps_gpcon, old_gpdat, gps_gpdat); } /** s3c_pm_restore_gpios() * * Restore the state of the GPIOs */ void s3c_pm_restore_gpios(void) { struct gpio_sleep *gps = gpio_save; int gpio; for (gpio = 0; gpio < ARRAY_SIZE(gpio_save); gpio++, gps++) { s3c2410_pm_restore_gpio(gpio, gps); } } void s3c_pm_restore_core(void) { s3c_pm_do_restore_core(core_save, ARRAY_SIZE(core_save)); s3c_pm_do_restore(misc_save, ARRAY_SIZE(misc_save)); } void s3c_pm_save_core(void) { s3c_pm_do_save(misc_save, ARRAY_SIZE(misc_save)); s3c_pm_do_save(core_save, ARRAY_SIZE(core_save)); }
gpl-2.0
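The s3c2410_pm_restore_gpio() routine in the record above decides, per 2-bit pin field, whether the CON bits can be rewritten before the DAT bits without glitching the pin. A standalone sketch of just that decision loop follows (plain C; the register values are invented for the demo; encodings 0=IN, 1=OUT, 2/3=SFN as in the driver):

#include <stdio.h>

static int is_sfn(unsigned long c) { return c == 2 || c == 3; }
static int is_in(unsigned long c)  { return c == 0; }
static int is_out(unsigned long c) { return c == 1; }

int main(void)
{
	unsigned long old_gpcon = 0x00000001; /* pin0 OUT, rest IN */
	unsigned long new_gpcon = 0x0000000e; /* pin0 SFN, pin1 SFN */
	unsigned long change_mask = 0, mask;
	int nr;

	for (nr = 0, mask = 0x3; nr < 32; nr += 2, mask <<= 2) {
		unsigned long old = (old_gpcon & mask) >> nr;
		unsigned long new = (new_gpcon & mask) >> nr;

		if (old == new)
			continue;	/* no change at all */
		if (is_sfn(old) && is_sfn(new))
			continue;	/* SFN -> SFN, nothing to order */
		if (is_in(old) && is_out(new))
			continue;	/* IN -> OUT: DAT must go first */
		if (is_sfn(old) && is_out(new))
			continue;	/* SFN -> OUT: DAT must go first */
		change_mask |= mask;	/* safe to write CON early */
	}
	/* Expect 0xf here: both changed pins want CON first. */
	printf("change_mask = %08lx\n", change_mask);
	return 0;
}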
RWTH-OS/linux
drivers/mtd/nand/nand_micron.c
27
8102
/* * Copyright (C) 2017 Free Electrons * Copyright (C) 2017 NextThing Co * * Author: Boris Brezillon <boris.brezillon@free-electrons.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/mtd/nand.h> /* * Special Micron status bit that indicates when the block has been * corrected by on-die ECC and should be rewritten */ #define NAND_STATUS_WRITE_RECOMMENDED BIT(3) struct nand_onfi_vendor_micron { u8 two_plane_read; u8 read_cache; u8 read_unique_id; u8 dq_imped; u8 dq_imped_num_settings; u8 dq_imped_feat_addr; u8 rb_pulldown_strength; u8 rb_pulldown_strength_feat_addr; u8 rb_pulldown_strength_num_settings; u8 otp_mode; u8 otp_page_start; u8 otp_data_prot_addr; u8 otp_num_pages; u8 otp_feat_addr; u8 read_retry_options; u8 reserved[72]; u8 param_revision; } __packed; static int micron_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode) { struct nand_chip *chip = mtd_to_nand(mtd); u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode}; return chip->onfi_set_features(mtd, chip, ONFI_FEATURE_ADDR_READ_RETRY, feature); } /* * Configure chip properties from Micron vendor-specific ONFI table */ static int micron_nand_onfi_init(struct nand_chip *chip) { struct nand_onfi_params *p = &chip->onfi_params; struct nand_onfi_vendor_micron *micron = (void *)p->vendor; if (!chip->onfi_version) return 0; if (le16_to_cpu(p->vendor_revision) < 1) return 0; chip->read_retries = micron->read_retry_options; chip->setup_read_retry = micron_nand_setup_read_retry; return 0; } static int micron_nand_on_die_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *oobregion) { if (section >= 4) return -ERANGE; oobregion->offset = (section * 16) + 8; oobregion->length = 8; return 0; } static int micron_nand_on_die_ooblayout_free(struct mtd_info *mtd, int section, struct mtd_oob_region *oobregion) { if (section >= 4) return -ERANGE; oobregion->offset = (section * 16) + 2; oobregion->length = 6; return 0; } static const struct mtd_ooblayout_ops micron_nand_on_die_ooblayout_ops = { .ecc = micron_nand_on_die_ooblayout_ecc, .free = micron_nand_on_die_ooblayout_free, }; static int micron_nand_on_die_ecc_setup(struct nand_chip *chip, bool enable) { u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = { 0, }; if (enable) feature[0] |= ONFI_FEATURE_ON_DIE_ECC_EN; return chip->onfi_set_features(nand_to_mtd(chip), chip, ONFI_FEATURE_ON_DIE_ECC, feature); } static int micron_nand_read_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf, int oob_required, int page) { int status; int max_bitflips = 0; micron_nand_on_die_ecc_setup(chip, true); chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page); chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); status = chip->read_byte(mtd); if (status & NAND_STATUS_FAIL) mtd->ecc_stats.failed++; /* * The internal ECC doesn't tell us the number of bitflips * that have been corrected, but tells us if it recommends * rewriting the block. If that is the case, then we pretend we had * a number of bitflips equal to the ECC strength, which will * hint the NAND core to rewrite the block.
*/ else if (status & NAND_STATUS_WRITE_RECOMMENDED) max_bitflips = chip->ecc.strength; chip->cmdfunc(mtd, NAND_CMD_READ0, -1, -1); nand_read_page_raw(mtd, chip, buf, oob_required, page); micron_nand_on_die_ecc_setup(chip, false); return max_bitflips; } static int micron_nand_write_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf, int oob_required, int page) { int status; micron_nand_on_die_ecc_setup(chip, true); chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); nand_write_page_raw(mtd, chip, buf, oob_required, page); chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); status = chip->waitfunc(mtd, chip); micron_nand_on_die_ecc_setup(chip, false); return status & NAND_STATUS_FAIL ? -EIO : 0; } static int micron_nand_read_page_raw_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf, int oob_required, int page) { chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page); nand_read_page_raw(mtd, chip, buf, oob_required, page); return 0; } static int micron_nand_write_page_raw_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf, int oob_required, int page) { int status; chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); nand_write_page_raw(mtd, chip, buf, oob_required, page); chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); status = chip->waitfunc(mtd, chip); return status & NAND_STATUS_FAIL ? -EIO : 0; } enum { /* The NAND flash doesn't support on-die ECC */ MICRON_ON_DIE_UNSUPPORTED, /* * The NAND flash supports on-die ECC and it can be * enabled/disabled by a set features command. */ MICRON_ON_DIE_SUPPORTED, /* * The NAND flash supports on-die ECC, and it cannot be * disabled. */ MICRON_ON_DIE_MANDATORY, }; /* * Try to detect if the NAND supports on-die ECC. To do this, we enable * the feature, and read back if it has been enabled as expected. We * also check if it can be disabled, because some Micron NANDs do not * allow disabling the on-die ECC and we don't support such NANDs for * now. * * This function also has the side effect of disabling on-die ECC if * it had been left enabled by the firmware/bootloader. */ static int micron_supports_on_die_ecc(struct nand_chip *chip) { u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = { 0, }; int ret; if (chip->onfi_version == 0) return MICRON_ON_DIE_UNSUPPORTED; if (chip->bits_per_cell != 1) return MICRON_ON_DIE_UNSUPPORTED; ret = micron_nand_on_die_ecc_setup(chip, true); if (ret) return MICRON_ON_DIE_UNSUPPORTED; chip->onfi_get_features(nand_to_mtd(chip), chip, ONFI_FEATURE_ON_DIE_ECC, feature); if ((feature[0] & ONFI_FEATURE_ON_DIE_ECC_EN) == 0) return MICRON_ON_DIE_UNSUPPORTED; ret = micron_nand_on_die_ecc_setup(chip, false); if (ret) return MICRON_ON_DIE_UNSUPPORTED; chip->onfi_get_features(nand_to_mtd(chip), chip, ONFI_FEATURE_ON_DIE_ECC, feature); if (feature[0] & ONFI_FEATURE_ON_DIE_ECC_EN) return MICRON_ON_DIE_MANDATORY; /* * Some Micron NANDs have an on-die ECC of 4/512, some others * 8/512. We only support the former.
*/ if (chip->onfi_params.ecc_bits != 4) return MICRON_ON_DIE_UNSUPPORTED; return MICRON_ON_DIE_SUPPORTED; } static int micron_nand_init(struct nand_chip *chip) { struct mtd_info *mtd = nand_to_mtd(chip); int ondie; int ret; ret = micron_nand_onfi_init(chip); if (ret) return ret; if (mtd->writesize == 2048) chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; ondie = micron_supports_on_die_ecc(chip); if (ondie == MICRON_ON_DIE_MANDATORY) { pr_err("On-die ECC forcefully enabled, not supported\n"); return -EINVAL; } if (chip->ecc.mode == NAND_ECC_ON_DIE) { if (ondie == MICRON_ON_DIE_UNSUPPORTED) { pr_err("On-die ECC selected but not supported\n"); return -EINVAL; } chip->ecc.options = NAND_ECC_CUSTOM_PAGE_ACCESS; chip->ecc.bytes = 8; chip->ecc.size = 512; chip->ecc.strength = 4; chip->ecc.algo = NAND_ECC_BCH; chip->ecc.read_page = micron_nand_read_page_on_die_ecc; chip->ecc.write_page = micron_nand_write_page_on_die_ecc; chip->ecc.read_page_raw = micron_nand_read_page_raw_on_die_ecc; chip->ecc.write_page_raw = micron_nand_write_page_raw_on_die_ecc; mtd_set_ooblayout(mtd, &micron_nand_on_die_ooblayout_ops); } return 0; } const struct nand_manufacturer_ops micron_nand_manuf_ops = { .init = micron_nand_init, };
gpl-2.0
loongson-community/linux_loongson
mm/thrash.c
283
2098
/* * mm/thrash.c * * Copyright (C) 2004, Red Hat, Inc. * Copyright (C) 2004, Rik van Riel <riel@redhat.com> * Released under the GPL, see the file COPYING for details. * * Simple token based thrashing protection, using the algorithm * described in: http://www.cs.wm.edu/~sjiang/token.pdf * * Sep 2006, Ashwin Chaugule <ashwin.chaugule@celunite.com> * Improved algorithm to pass token: * Each task has a priority which is incremented if it contended * for the token in an interval less than its previous attempt. * If the token is acquired, that task's priority is boosted to prevent * the token from bouncing around too often and to let the task make * some progress in its execution. */ #include <linux/jiffies.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/swap.h> static DEFINE_SPINLOCK(swap_token_lock); struct mm_struct *swap_token_mm; static unsigned int global_faults; void grab_swap_token(void) { int current_interval; global_faults++; current_interval = global_faults - current->mm->faultstamp; if (!spin_trylock(&swap_token_lock)) return; /* First come first served */ if (swap_token_mm == NULL) { current->mm->token_priority = current->mm->token_priority + 2; swap_token_mm = current->mm; goto out; } if (current->mm != swap_token_mm) { if (current_interval < current->mm->last_interval) current->mm->token_priority++; else { if (likely(current->mm->token_priority > 0)) current->mm->token_priority--; } /* Check if we deserve the token */ if (current->mm->token_priority > swap_token_mm->token_priority) { current->mm->token_priority += 2; swap_token_mm = current->mm; } } else { /* Token holder came in again! */ current->mm->token_priority += 2; } out: current->mm->faultstamp = global_faults; current->mm->last_interval = current_interval; spin_unlock(&swap_token_lock); return; } /* Called on process exit. */ void __put_swap_token(struct mm_struct *mm) { spin_lock(&swap_token_lock); if (likely(mm == swap_token_mm)) swap_token_mm = NULL; spin_unlock(&swap_token_lock); }
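The token-passing rule above can be exercised outside the kernel: a contender gains priority when it faults again within a shorter interval than its previous attempt, decays otherwise, and whoever takes the token gets a +2 boost so it does not bounce around. A rough stand-alone model, with fake_mm and fault() as hypothetical stand-ins for mm_struct and the page-fault path:

/* Hypothetical user-space model of the grab_swap_token() priority rule. */
#include <stdio.h>

struct fake_mm {
	unsigned int faultstamp, last_interval;
	int token_priority;
	const char *name;
};

static struct fake_mm *token_holder;
static unsigned int global_faults;

static void fault(struct fake_mm *mm)
{
	unsigned int interval = ++global_faults - mm->faultstamp;

	if (!token_holder) {
		mm->token_priority += 2;           /* first come first served */
		token_holder = mm;
	} else if (mm != token_holder) {
		/* Contending sooner than last time earns priority. */
		if (interval < mm->last_interval)
			mm->token_priority++;
		else if (mm->token_priority > 0)
			mm->token_priority--;
		if (mm->token_priority > token_holder->token_priority) {
			mm->token_priority += 2;   /* boost the new holder */
			token_holder = mm;
		}
	} else {
		mm->token_priority += 2;           /* holder faulted again */
	}
	mm->faultstamp = global_faults;
	mm->last_interval = interval;
}

int main(void)
{
	struct fake_mm a = { .name = "a" }, b = { .name = "b" };

	fault(&a); fault(&b); fault(&b); fault(&b);
	printf("token holder: %s (a=%d, b=%d)\n",
	       token_holder->name, a.token_priority, b.token_priority);
	return 0;
}

Note that, as in the kernel version, the holder's priority never decays here; the token is only surrendered on process exit via __put_swap_token(), which is what keeps it from trading hands on every fault.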
gpl-2.0
dssmex/dssmex-GB-ICS
arch/x86/mm/kmemcheck/shadow.c
539
3558
#include <linux/kmemcheck.h> #include <linux/module.h> #include <linux/mm.h> #include <asm/page.h> #include <asm/pgtable.h> #include "pte.h" #include "shadow.h" /* * Return the shadow address for the given address. Returns NULL if the * address is not tracked. * * We need to be extremely careful not to follow any invalid pointers, * because this function can be called for *any* possible address. */ void *kmemcheck_shadow_lookup(unsigned long address) { pte_t *pte; struct page *page; if (!virt_addr_valid(address)) return NULL; pte = kmemcheck_pte_lookup(address); if (!pte) return NULL; page = virt_to_page(address); if (!page->shadow) return NULL; return page->shadow + (address & (PAGE_SIZE - 1)); } static void mark_shadow(void *address, unsigned int n, enum kmemcheck_shadow status) { unsigned long addr = (unsigned long) address; unsigned long last_addr = addr + n - 1; unsigned long page = addr & PAGE_MASK; unsigned long last_page = last_addr & PAGE_MASK; unsigned int first_n; void *shadow; /* If the memory range crosses a page boundary, stop there. */ if (page == last_page) first_n = n; else first_n = page + PAGE_SIZE - addr; shadow = kmemcheck_shadow_lookup(addr); if (shadow) memset(shadow, status, first_n); addr += first_n; n -= first_n; /* Do full-page memset()s. */ while (n >= PAGE_SIZE) { shadow = kmemcheck_shadow_lookup(addr); if (shadow) memset(shadow, status, PAGE_SIZE); addr += PAGE_SIZE; n -= PAGE_SIZE; } /* Do the remaining page, if any. */ if (n > 0) { shadow = kmemcheck_shadow_lookup(addr); if (shadow) memset(shadow, status, n); } } void kmemcheck_mark_unallocated(void *address, unsigned int n) { mark_shadow(address, n, KMEMCHECK_SHADOW_UNALLOCATED); } void kmemcheck_mark_uninitialized(void *address, unsigned int n) { mark_shadow(address, n, KMEMCHECK_SHADOW_UNINITIALIZED); } /* * Fill the shadow memory of the given address such that the memory at that * address is marked as being initialized. */ void kmemcheck_mark_initialized(void *address, unsigned int n) { mark_shadow(address, n, KMEMCHECK_SHADOW_INITIALIZED); } EXPORT_SYMBOL_GPL(kmemcheck_mark_initialized); void kmemcheck_mark_freed(void *address, unsigned int n) { mark_shadow(address, n, KMEMCHECK_SHADOW_FREED); } void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n) { unsigned int i; for (i = 0; i < n; ++i) kmemcheck_mark_unallocated(page_address(&p[i]), PAGE_SIZE); } void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n) { unsigned int i; for (i = 0; i < n; ++i) kmemcheck_mark_uninitialized(page_address(&p[i]), PAGE_SIZE); } void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n) { unsigned int i; for (i = 0; i < n; ++i) kmemcheck_mark_initialized(page_address(&p[i]), PAGE_SIZE); } enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size) { uint8_t *x; unsigned int i; x = shadow; #ifdef CONFIG_KMEMCHECK_PARTIAL_OK /* * Make sure _some_ bytes are initialized. Gcc frequently generates * code to access neighboring bytes. */ for (i = 0; i < size; ++i) { if (x[i] == KMEMCHECK_SHADOW_INITIALIZED) return x[i]; } #else /* All bytes must be initialized. */ for (i = 0; i < size; ++i) { if (x[i] != KMEMCHECK_SHADOW_INITIALIZED) return x[i]; } #endif return x[0]; } void kmemcheck_shadow_set(void *shadow, unsigned int size) { uint8_t *x; unsigned int i; x = shadow; for (i = 0; i < size; ++i) x[i] = KMEMCHECK_SHADOW_INITIALIZED; }
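mark_shadow() above visits an arbitrary byte range in three phases: a head chunk up to the first page boundary, whole-page chunks, and a tail remainder, looking up the shadow separately for each page since shadow pages need not be contiguous. A small stand-alone sketch of just that splitting arithmetic; for_each_page_chunk is a hypothetical helper, not a kernel function:

/* Hypothetical stand-alone model of mark_shadow()'s page-splitting loop. */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Visit [addr, addr + n) in page-aligned chunks, like mark_shadow(). */
static void for_each_page_chunk(unsigned long addr, unsigned long n,
				void (*fn)(unsigned long addr, unsigned long len))
{
	unsigned long last = addr + n - 1;
	unsigned long first_n;

	/* Head: stop at the first page boundary if the range crosses one. */
	if ((addr & PAGE_MASK) == (last & PAGE_MASK))
		first_n = n;
	else
		first_n = (addr & PAGE_MASK) + PAGE_SIZE - addr;
	fn(addr, first_n);
	addr += first_n;
	n -= first_n;

	while (n >= PAGE_SIZE) {	/* whole pages */
		fn(addr, PAGE_SIZE);
		addr += PAGE_SIZE;
		n -= PAGE_SIZE;
	}
	if (n > 0)			/* tail */
		fn(addr, n);
}

static void show(unsigned long addr, unsigned long len)
{
	printf("chunk at 0x%lx, %lu bytes\n", addr, len);
}

int main(void)
{
	for_each_page_chunk(0x1ff0, 0x2020, show);  /* crosses two boundaries */
	return 0;
}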
gpl-2.0
zekezang/linux-2.6.32.2-2440
drivers/gpu/drm/i915/intel_dvo.c
539
14532
/* * Copyright 2006 Dave Airlie <airlied@linux.ie> * Copyright © 2006-2007 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Eric Anholt <eric@anholt.net> */ #include <linux/i2c.h> #include "drmP.h" #include "drm.h" #include "drm_crtc.h" #include "intel_drv.h" #include "i915_drm.h" #include "i915_drv.h" #include "dvo.h" #define SIL164_ADDR 0x38 #define CH7xxx_ADDR 0x76 #define TFP410_ADDR 0x38 static struct intel_dvo_device intel_dvo_devices[] = { { .type = INTEL_DVO_CHIP_TMDS, .name = "sil164", .dvo_reg = DVOC, .slave_addr = SIL164_ADDR, .dev_ops = &sil164_ops, }, { .type = INTEL_DVO_CHIP_TMDS, .name = "ch7xxx", .dvo_reg = DVOC, .slave_addr = CH7xxx_ADDR, .dev_ops = &ch7xxx_ops, }, { .type = INTEL_DVO_CHIP_LVDS, .name = "ivch", .dvo_reg = DVOA, .slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */ .dev_ops = &ivch_ops, }, { .type = INTEL_DVO_CHIP_TMDS, .name = "tfp410", .dvo_reg = DVOC, .slave_addr = TFP410_ADDR, .dev_ops = &tfp410_ops, }, { .type = INTEL_DVO_CHIP_LVDS, .name = "ch7017", .dvo_reg = DVOC, .slave_addr = 0x75, .gpio = GPIOE, .dev_ops = &ch7017_ops, } }; static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) { struct drm_i915_private *dev_priv = encoder->dev->dev_private; struct intel_output *intel_output = enc_to_intel_output(encoder); struct intel_dvo_device *dvo = intel_output->dev_priv; u32 dvo_reg = dvo->dvo_reg; u32 temp = I915_READ(dvo_reg); if (mode == DRM_MODE_DPMS_ON) { I915_WRITE(dvo_reg, temp | DVO_ENABLE); I915_READ(dvo_reg); dvo->dev_ops->dpms(dvo, mode); } else { dvo->dev_ops->dpms(dvo, mode); I915_WRITE(dvo_reg, temp & ~DVO_ENABLE); I915_READ(dvo_reg); } } static void intel_dvo_save(struct drm_connector *connector) { struct drm_i915_private *dev_priv = connector->dev->dev_private; struct intel_output *intel_output = to_intel_output(connector); struct intel_dvo_device *dvo = intel_output->dev_priv; /* Each output should probably just save the registers it touches, * but for now, use more overkill. 
*/ dev_priv->saveDVOA = I915_READ(DVOA); dev_priv->saveDVOB = I915_READ(DVOB); dev_priv->saveDVOC = I915_READ(DVOC); dvo->dev_ops->save(dvo); } static void intel_dvo_restore(struct drm_connector *connector) { struct drm_i915_private *dev_priv = connector->dev->dev_private; struct intel_output *intel_output = to_intel_output(connector); struct intel_dvo_device *dvo = intel_output->dev_priv; dvo->dev_ops->restore(dvo); I915_WRITE(DVOA, dev_priv->saveDVOA); I915_WRITE(DVOB, dev_priv->saveDVOB); I915_WRITE(DVOC, dev_priv->saveDVOC); } static int intel_dvo_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct intel_output *intel_output = to_intel_output(connector); struct intel_dvo_device *dvo = intel_output->dev_priv; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; /* XXX: Validate clock range */ if (dvo->panel_fixed_mode) { if (mode->hdisplay > dvo->panel_fixed_mode->hdisplay) return MODE_PANEL; if (mode->vdisplay > dvo->panel_fixed_mode->vdisplay) return MODE_PANEL; } return dvo->dev_ops->mode_valid(dvo, mode); } static bool intel_dvo_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct intel_output *intel_output = enc_to_intel_output(encoder); struct intel_dvo_device *dvo = intel_output->dev_priv; /* If we have timings from the BIOS for the panel, put them in * to the adjusted mode. The CRTC will be set up for this mode, * with the panel scaling set up to source from the H/VDisplay * of the original mode. */ if (dvo->panel_fixed_mode != NULL) { #define C(x) adjusted_mode->x = dvo->panel_fixed_mode->x C(hdisplay); C(hsync_start); C(hsync_end); C(htotal); C(vdisplay); C(vsync_start); C(vsync_end); C(vtotal); C(clock); drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); #undef C } if (dvo->dev_ops->mode_fixup) return dvo->dev_ops->mode_fixup(dvo, mode, adjusted_mode); return true; } static void intel_dvo_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_output *intel_output = enc_to_intel_output(encoder); struct intel_dvo_device *dvo = intel_output->dev_priv; int pipe = intel_crtc->pipe; u32 dvo_val; u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg; int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; switch (dvo_reg) { case DVOA: default: dvo_srcdim_reg = DVOA_SRCDIM; break; case DVOB: dvo_srcdim_reg = DVOB_SRCDIM; break; case DVOC: dvo_srcdim_reg = DVOC_SRCDIM; break; } dvo->dev_ops->mode_set(dvo, mode, adjusted_mode); /* Save the data order, since I don't know what it should be set to. 
*/ dvo_val = I915_READ(dvo_reg) & (DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG); dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE | DVO_BLANK_ACTIVE_HIGH; if (pipe == 1) dvo_val |= DVO_PIPE_B_SELECT; dvo_val |= DVO_PIPE_STALL; if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) dvo_val |= DVO_HSYNC_ACTIVE_HIGH; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) dvo_val |= DVO_VSYNC_ACTIVE_HIGH; I915_WRITE(dpll_reg, I915_READ(dpll_reg) | DPLL_DVO_HIGH_SPEED); /*I915_WRITE(DVOB_SRCDIM, (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | (adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/ I915_WRITE(dvo_srcdim_reg, (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | (adjusted_mode->vdisplay << DVO_SRCDIM_VERTICAL_SHIFT)); /*I915_WRITE(DVOB, dvo_val);*/ I915_WRITE(dvo_reg, dvo_val); } /** * Detect the output connection on our DVO device. * * Unimplemented. */ static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) { struct intel_output *intel_output = to_intel_output(connector); struct intel_dvo_device *dvo = intel_output->dev_priv; return dvo->dev_ops->detect(dvo); } static int intel_dvo_get_modes(struct drm_connector *connector) { struct intel_output *intel_output = to_intel_output(connector); struct intel_dvo_device *dvo = intel_output->dev_priv; /* We should probably have an i2c driver get_modes function for those * devices which will have a fixed set of modes determined by the chip * (TV-out, for example), but for now with just TMDS and LVDS, * that's not the case. */ intel_ddc_get_modes(intel_output); if (!list_empty(&connector->probed_modes)) return 1; if (dvo->panel_fixed_mode != NULL) { struct drm_display_mode *mode; mode = drm_mode_duplicate(connector->dev, dvo->panel_fixed_mode); if (mode) { drm_mode_probed_add(connector, mode); return 1; } } return 0; } static void intel_dvo_destroy (struct drm_connector *connector) { struct intel_output *intel_output = to_intel_output(connector); struct intel_dvo_device *dvo = intel_output->dev_priv; if (dvo) { if (dvo->dev_ops->destroy) dvo->dev_ops->destroy(dvo); if (dvo->panel_fixed_mode) kfree(dvo->panel_fixed_mode); /* no need, in i830_dvoices[] now */ //kfree(dvo); } if (intel_output->i2c_bus) intel_i2c_destroy(intel_output->i2c_bus); if (intel_output->ddc_bus) intel_i2c_destroy(intel_output->ddc_bus); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(intel_output); } #ifdef RANDR_GET_CRTC_INTERFACE static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_output *intel_output = to_intel_output(connector); struct intel_dvo_device *dvo = intel_output->dev_priv; int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT); return intel_pipe_to_crtc(pScrn, pipe); } #endif static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { .dpms = intel_dvo_dpms, .mode_fixup = intel_dvo_mode_fixup, .prepare = intel_encoder_prepare, .mode_set = intel_dvo_mode_set, .commit = intel_encoder_commit, }; static const struct drm_connector_funcs intel_dvo_connector_funcs = { .dpms = drm_helper_connector_dpms, .save = intel_dvo_save, .restore = intel_dvo_restore, .detect = intel_dvo_detect, .destroy = intel_dvo_destroy, .fill_modes = drm_helper_probe_single_connector_modes, }; static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = { .mode_valid = intel_dvo_mode_valid, .get_modes = intel_dvo_get_modes, .best_encoder = 
intel_best_encoder, }; static void intel_dvo_enc_destroy(struct drm_encoder *encoder) { drm_encoder_cleanup(encoder); } static const struct drm_encoder_funcs intel_dvo_enc_funcs = { .destroy = intel_dvo_enc_destroy, }; /** * Attempts to get a fixed panel timing for LVDS (currently only the i830). * * Other chips with DVO LVDS will need to extend this to deal with the LVDS * chip being on DVOB/C and having multiple pipes. */ static struct drm_display_mode * intel_dvo_get_current_mode (struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_output *intel_output = to_intel_output(connector); struct intel_dvo_device *dvo = intel_output->dev_priv; uint32_t dvo_reg = dvo->dvo_reg; uint32_t dvo_val = I915_READ(dvo_reg); struct drm_display_mode *mode = NULL; /* If the DVO port is active, that'll be the LVDS, so we can pull out * its timings to get how the BIOS set up the panel. */ if (dvo_val & DVO_ENABLE) { struct drm_crtc *crtc; int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0; crtc = intel_get_crtc_from_pipe(dev, pipe); if (crtc) { mode = intel_crtc_mode_get(dev, crtc); if (mode) { mode->type |= DRM_MODE_TYPE_PREFERRED; if (dvo_val & DVO_HSYNC_ACTIVE_HIGH) mode->flags |= DRM_MODE_FLAG_PHSYNC; if (dvo_val & DVO_VSYNC_ACTIVE_HIGH) mode->flags |= DRM_MODE_FLAG_PVSYNC; } } } return mode; } void intel_dvo_init(struct drm_device *dev) { struct intel_output *intel_output; struct intel_dvo_device *dvo; struct i2c_adapter *i2cbus = NULL; int ret = 0; int i; int encoder_type = DRM_MODE_ENCODER_NONE; intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL); if (!intel_output) return; /* Set up the DDC bus */ intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); if (!intel_output->ddc_bus) goto free_intel; /* Now, try to find a controller */ for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { struct drm_connector *connector = &intel_output->base; int gpio; dvo = &intel_dvo_devices[i]; /* Allow the I2C driver info to specify the GPIO to be used in * special cases, but otherwise default to what's defined * in the spec. */ if (dvo->gpio != 0) gpio = dvo->gpio; else if (dvo->type == INTEL_DVO_CHIP_LVDS) gpio = GPIOB; else gpio = GPIOE; /* Set up the I2C bus necessary for the chip we're probing. * It appears that everything is on GPIOE except for panels * on i830 laptops, which are on GPIOB (DVOA). */ if (i2cbus != NULL) intel_i2c_destroy(i2cbus); if (!(i2cbus = intel_i2c_create(dev, gpio, gpio == GPIOB ? 
"DVOI2C_B" : "DVOI2C_E"))) { continue; } if (dvo->dev_ops!= NULL) ret = dvo->dev_ops->init(dvo, i2cbus); else ret = false; if (!ret) continue; intel_output->type = INTEL_OUTPUT_DVO; intel_output->crtc_mask = (1 << 0) | (1 << 1); switch (dvo->type) { case INTEL_DVO_CHIP_TMDS: intel_output->clone_mask = (1 << INTEL_DVO_TMDS_CLONE_BIT) | (1 << INTEL_ANALOG_CLONE_BIT); drm_connector_init(dev, connector, &intel_dvo_connector_funcs, DRM_MODE_CONNECTOR_DVII); encoder_type = DRM_MODE_ENCODER_TMDS; break; case INTEL_DVO_CHIP_LVDS: intel_output->clone_mask = (1 << INTEL_DVO_LVDS_CLONE_BIT); drm_connector_init(dev, connector, &intel_dvo_connector_funcs, DRM_MODE_CONNECTOR_LVDS); encoder_type = DRM_MODE_ENCODER_LVDS; break; } drm_connector_helper_add(connector, &intel_dvo_connector_helper_funcs); connector->display_info.subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = false; connector->doublescan_allowed = false; intel_output->dev_priv = dvo; intel_output->i2c_bus = i2cbus; drm_encoder_init(dev, &intel_output->enc, &intel_dvo_enc_funcs, encoder_type); drm_encoder_helper_add(&intel_output->enc, &intel_dvo_helper_funcs); drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); if (dvo->type == INTEL_DVO_CHIP_LVDS) { /* For our LVDS chipsets, we should hopefully be able * to dig the fixed panel mode out of the BIOS data. * However, it's in a different format from the BIOS * data on chipsets with integrated LVDS (stored in AIM * headers, likely), so for now, just get the current * mode being output through DVO. */ dvo->panel_fixed_mode = intel_dvo_get_current_mode(connector); dvo->panel_wants_dither = true; } drm_sysfs_connector_add(connector); return; } intel_i2c_destroy(intel_output->ddc_bus); /* Didn't find a chip, so tear down. */ if (i2cbus != NULL) intel_i2c_destroy(i2cbus); free_intel: kfree(intel_output); }
gpl-2.0
andreya108/bindu-kernel-base
drivers/usb/musb/musb_gadget.c
795
60636
/* * MUSB OTG driver peripheral support * * Copyright 2005 Mentor Graphics Corporation * Copyright (C) 2005-2006 by Texas Instruments * Copyright (C) 2006-2007 Nokia Corporation * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include <linux/kernel.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/module.h> #include <linux/smp.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include "musb_core.h" /* MUSB PERIPHERAL status 3-mar-2006: * * - EP0 seems solid. It passes both USBCV and usbtest control cases. * Minor glitches: * * + remote wakeup to Linux hosts work, but saw USBCV failures; * in one test run (operator error?) * + endpoint halt tests -- in both usbtest and usbcv -- seem * to break when dma is enabled ... is something wrongly * clearing SENDSTALL? * * - Mass storage behaved ok when last tested. Network traffic patterns * (with lots of short transfers etc) need retesting; they turn up the * worst cases of the DMA, since short packets are typical but are not * required. * * - TX/IN * + both pio and dma behave in with network and g_zero tests * + no cppi throughput issues other than no-hw-queueing * + failed with FLAT_REG (DaVinci) * + seems to behave with double buffering, PIO -and- CPPI * + with gadgetfs + AIO, requests got lost? * * - RX/OUT * + both pio and dma behave in with network and g_zero tests * + dma is slow in typical case (short_not_ok is clear) * + double buffering ok with PIO * + double buffering *FAILS* with CPPI, wrong data bytes sometimes * + request lossage observed with gadgetfs * * - ISO not tested ... might work, but only weakly isochronous * * - Gadget driver disabling of softconnect during bind() is ignored; so * drivers can't hold off host requests until userspace is ready. * (Workaround: they can turn it off later.) 
* * - PORTABILITY (assumes PIO works): * + DaVinci, basically works with cppi dma * + OMAP 2430, ditto with mentor dma * + TUSB 6010, platform-specific dma in the works */ /* ----------------------------------------------------------------------- */ #define is_buffer_mapped(req) (is_dma_capable() && \ (req->map_state != UN_MAPPED)) /* Maps the buffer to dma */ static inline void map_dma_buffer(struct musb_request *request, struct musb *musb, struct musb_ep *musb_ep) { int compatible = true; struct dma_controller *dma = musb->dma_controller; request->map_state = UN_MAPPED; if (!is_dma_capable() || !musb_ep->dma) return; /* Check if DMA engine can handle this request. * DMA code must reject the USB request explicitly. * Default behaviour is to map the request. */ if (dma->is_compatible) compatible = dma->is_compatible(musb_ep->dma, musb_ep->packet_sz, request->request.buf, request->request.length); if (!compatible) return; if (request->request.dma == DMA_ADDR_INVALID) { request->request.dma = dma_map_single( musb->controller, request->request.buf, request->request.length, request->tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE); request->map_state = MUSB_MAPPED; } else { dma_sync_single_for_device(musb->controller, request->request.dma, request->request.length, request->tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE); request->map_state = PRE_MAPPED; } } /* Unmap the buffer from dma and maps it back to cpu */ static inline void unmap_dma_buffer(struct musb_request *request, struct musb *musb) { if (!is_buffer_mapped(request)) return; if (request->request.dma == DMA_ADDR_INVALID) { dev_vdbg(musb->controller, "not unmapping a never mapped buffer\n"); return; } if (request->map_state == MUSB_MAPPED) { dma_unmap_single(musb->controller, request->request.dma, request->request.length, request->tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE); request->request.dma = DMA_ADDR_INVALID; } else { /* PRE_MAPPED */ dma_sync_single_for_cpu(musb->controller, request->request.dma, request->request.length, request->tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE); } request->map_state = UN_MAPPED; } /* * Immediately complete a request. * * @param request the request to complete * @param status the status to complete the request with * Context: controller locked, IRQs blocked. */ void musb_g_giveback( struct musb_ep *ep, struct usb_request *request, int status) __releases(ep->musb->lock) __acquires(ep->musb->lock) { struct musb_request *req; struct musb *musb; int busy = ep->busy; req = to_musb_request(request); list_del(&req->list); if (req->request.status == -EINPROGRESS) req->request.status = status; musb = req->musb; ep->busy = 1; spin_unlock(&musb->lock); unmap_dma_buffer(req, musb); if (request->status == 0) dev_dbg(musb->controller, "%s done request %p, %d/%d\n", ep->end_point.name, request, req->request.actual, req->request.length); else dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n", ep->end_point.name, request, req->request.actual, req->request.length, request->status); req->request.complete(&req->ep->end_point, &req->request); spin_lock(&musb->lock); ep->busy = busy; } /* ----------------------------------------------------------------------- */ /* * Abort requests queued to an endpoint using the status. Synchronous. * caller locked controller and blocked irqs, and selected this ep. 
*/ static void nuke(struct musb_ep *ep, const int status) { struct musb *musb = ep->musb; struct musb_request *req = NULL; void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs; ep->busy = 1; if (is_dma_capable() && ep->dma) { struct dma_controller *c = ep->musb->dma_controller; int value; if (ep->is_in) { /* * The programming guide says that we must not clear * the DMAMODE bit before DMAENAB, so we only * clear it in the second write... */ musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO); musb_writew(epio, MUSB_TXCSR, 0 | MUSB_TXCSR_FLUSHFIFO); } else { musb_writew(epio, MUSB_RXCSR, 0 | MUSB_RXCSR_FLUSHFIFO); musb_writew(epio, MUSB_RXCSR, 0 | MUSB_RXCSR_FLUSHFIFO); } value = c->channel_abort(ep->dma); dev_dbg(musb->controller, "%s: abort DMA --> %d\n", ep->name, value); c->channel_release(ep->dma); ep->dma = NULL; } while (!list_empty(&ep->req_list)) { req = list_first_entry(&ep->req_list, struct musb_request, list); musb_g_giveback(ep, &req->request, status); } } /* ----------------------------------------------------------------------- */ /* Data transfers - pure PIO, pure DMA, or mixed mode */ /* * This assumes the separate CPPI engine is responding to DMA requests * from the usb core ... sequenced a bit differently from mentor dma. */ static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep) { if (can_bulk_split(musb, ep->type)) return ep->hw_ep->max_packet_sz_tx; else return ep->packet_sz; } #ifdef CONFIG_USB_INVENTRA_DMA /* Peripheral tx (IN) using Mentor DMA works as follows: Only mode 0 is used for transfers <= wPktSize, mode 1 is used for larger transfers, One of the following happens: - Host sends IN token which causes an endpoint interrupt -> TxAvail -> if DMA is currently busy, exit. -> if queue is non-empty, txstate(). - Request is queued by the gadget driver. -> if queue was previously empty, txstate() txstate() -> start /\ -> setup DMA | (data is transferred to the FIFO, then sent out when | IN token(s) are recd from Host. | -> DMA interrupt on completion | calls TxAvail. | -> stop DMA, ~DMAENAB, | -> set TxPktRdy for last short pkt or zlp | -> Complete Request | -> Continue next request (call txstate) |___________________________________| * Non-Mentor DMA engines can of course work differently, such as by * upleveling from irq-per-packet to irq-per-buffer. */ #endif /* * An endpoint is transmitting data. This can be called either from * the IRQ routine or from ep.queue() to kickstart a request on an * endpoint. * * Context: controller locked, IRQs blocked, endpoint selected */ static void txstate(struct musb *musb, struct musb_request *req) { u8 epnum = req->epnum; struct musb_ep *musb_ep; void __iomem *epio = musb->endpoints[epnum].regs; struct usb_request *request; u16 fifo_count = 0, csr; int use_dma = 0; musb_ep = req->ep; /* we shouldn't get here while DMA is active ... but we do ... 
*/ if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { dev_dbg(musb->controller, "dma pending...\n"); return; } /* read TXCSR before */ csr = musb_readw(epio, MUSB_TXCSR); request = &req->request; fifo_count = min(max_ep_writesize(musb, musb_ep), (int)(request->length - request->actual)); if (csr & MUSB_TXCSR_TXPKTRDY) { dev_dbg(musb->controller, "%s old packet still ready , txcsr %03x\n", musb_ep->end_point.name, csr); return; } if (csr & MUSB_TXCSR_P_SENDSTALL) { dev_dbg(musb->controller, "%s stalling, txcsr %03x\n", musb_ep->end_point.name, csr); return; } dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n", epnum, musb_ep->packet_sz, fifo_count, csr); #ifndef CONFIG_MUSB_PIO_ONLY if (is_buffer_mapped(req)) { struct dma_controller *c = musb->dma_controller; size_t request_size; /* setup DMA, then program endpoint CSR */ request_size = min_t(size_t, request->length - request->actual, musb_ep->dma->max_len); use_dma = (request->dma != DMA_ADDR_INVALID); /* MUSB_TXCSR_P_ISO is still set correctly */ #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) { if (request_size < musb_ep->packet_sz) musb_ep->dma->desired_mode = 0; else musb_ep->dma->desired_mode = 1; use_dma = use_dma && c->channel_program( musb_ep->dma, musb_ep->packet_sz, musb_ep->dma->desired_mode, request->dma + request->actual, request_size); if (use_dma) { if (musb_ep->dma->desired_mode == 0) { /* * We must not clear the DMAMODE bit * before the DMAENAB bit -- and the * latter doesn't always get cleared * before we get here... */ csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB); musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_P_WZC_BITS); csr &= ~MUSB_TXCSR_DMAMODE; csr |= (MUSB_TXCSR_DMAENAB | MUSB_TXCSR_MODE); /* against programming guide */ } else { csr |= (MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE); /* * Enable Autoset according to table * below * bulk_split hb_mult Autoset_Enable * 0 0 Yes(Normal) * 0 >0 No(High BW ISO) * 1 0 Yes(HS bulk) * 1 >0 Yes(FS bulk) */ if (!musb_ep->hb_mult || (musb_ep->hb_mult && can_bulk_split(musb, musb_ep->type))) csr |= MUSB_TXCSR_AUTOSET; } csr &= ~MUSB_TXCSR_P_UNDERRUN; musb_writew(epio, MUSB_TXCSR, csr); } } #elif defined(CONFIG_USB_TI_CPPI_DMA) /* program endpoint CSR first, then setup DMA */ csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY); csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE; musb_writew(epio, MUSB_TXCSR, (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN) | csr); /* ensure writebuffer is empty */ csr = musb_readw(epio, MUSB_TXCSR); /* NOTE host side sets DMAENAB later than this; both are * OK since the transfer dma glue (between CPPI and Mentor * fifos) just tells CPPI it could start. Data only moves * to the USB TX fifo when both fifos are ready. */ /* "mode" is irrelevant here; handle terminating ZLPs like * PIO does, since the hardware RNDIS mode seems unreliable * except for the last-packet-is-already-short case. 
*/ use_dma = use_dma && c->channel_program( musb_ep->dma, musb_ep->packet_sz, 0, request->dma + request->actual, request_size); if (!use_dma) { c->channel_release(musb_ep->dma); musb_ep->dma = NULL; csr &= ~MUSB_TXCSR_DMAENAB; musb_writew(epio, MUSB_TXCSR, csr); /* invariant: prequest->buf is non-null */ } #elif defined(CONFIG_USB_TUSB_OMAP_DMA) use_dma = use_dma && c->channel_program( musb_ep->dma, musb_ep->packet_sz, request->zero, request->dma + request->actual, request_size); #endif } #endif if (!use_dma) { /* * Unmap the dma buffer back to cpu if dma channel * programming fails */ unmap_dma_buffer(req, musb); musb_write_fifo(musb_ep->hw_ep, fifo_count, (u8 *) (request->buf + request->actual)); request->actual += fifo_count; csr |= MUSB_TXCSR_TXPKTRDY; csr &= ~MUSB_TXCSR_P_UNDERRUN; musb_writew(epio, MUSB_TXCSR, csr); } /* host may already have the data when this message shows... */ dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n", musb_ep->end_point.name, use_dma ? "dma" : "pio", request->actual, request->length, musb_readw(epio, MUSB_TXCSR), fifo_count, musb_readw(epio, MUSB_TXMAXP)); } /* * FIFO state update (e.g. data ready). * Called from IRQ, with controller locked. */ void musb_g_tx(struct musb *musb, u8 epnum) { u16 csr; struct musb_request *req; struct usb_request *request; u8 __iomem *mbase = musb->mregs; struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in; void __iomem *epio = musb->endpoints[epnum].regs; struct dma_channel *dma; musb_ep_select(mbase, epnum); req = next_request(musb_ep); request = &req->request; csr = musb_readw(epio, MUSB_TXCSR); dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr); dma = is_dma_capable() ? musb_ep->dma : NULL; /* * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX * probably rates reporting as a host error. */ if (csr & MUSB_TXCSR_P_SENTSTALL) { csr |= MUSB_TXCSR_P_WZC_BITS; csr &= ~MUSB_TXCSR_P_SENTSTALL; musb_writew(epio, MUSB_TXCSR, csr); return; } if (csr & MUSB_TXCSR_P_UNDERRUN) { /* We NAKed, no big deal... little reason to care. */ csr |= MUSB_TXCSR_P_WZC_BITS; csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY); musb_writew(epio, MUSB_TXCSR, csr); dev_vdbg(musb->controller, "underrun on ep%d, req %p\n", epnum, request); } if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { /* * SHOULD NOT HAPPEN... has with CPPI though, after * changing SENDSTALL (and other cases); harmless? */ dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name); return; } if (request) { u8 is_dma = 0; if (dma && (csr & MUSB_TXCSR_DMAENAB)) { is_dma = 1; csr |= MUSB_TXCSR_P_WZC_BITS; csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET); musb_writew(epio, MUSB_TXCSR, csr); /* Ensure writebuffer is empty. */ csr = musb_readw(epio, MUSB_TXCSR); request->actual += musb_ep->dma->actual_len; dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n", epnum, csr, musb_ep->dma->actual_len, request); } /* * First, maybe a terminating short packet. Some DMA * engines might handle this by themselves. */ if ((request->zero && request->length && (request->length % musb_ep->packet_sz == 0) && (request->actual == request->length)) #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || (is_dma && (!dma->desired_mode || (request->actual & (musb_ep->packet_sz - 1)))) #endif ) { /* * On DMA completion, FIFO may not be * available yet... 
*/ if (csr & MUSB_TXCSR_TXPKTRDY) return; dev_dbg(musb->controller, "sending zero pkt\n"); musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY); request->zero = 0; } if (request->actual == request->length) { musb_g_giveback(musb_ep, request, 0); /* * In the giveback function the MUSB lock is * released and acquired after sometime. During * this time period the INDEX register could get * changed by the gadget_queue function especially * on SMP systems. Reselect the INDEX to be sure * we are reading/modifying the right registers */ musb_ep_select(mbase, epnum); req = musb_ep->desc ? next_request(musb_ep) : NULL; if (!req) { dev_dbg(musb->controller, "%s idle now\n", musb_ep->end_point.name); return; } } txstate(musb, req); } } /* ------------------------------------------------------------ */ #ifdef CONFIG_USB_INVENTRA_DMA /* Peripheral rx (OUT) using Mentor DMA works as follows: - Only mode 0 is used. - Request is queued by the gadget class driver. -> if queue was previously empty, rxstate() - Host sends OUT token which causes an endpoint interrupt /\ -> RxReady | -> if request queued, call rxstate | /\ -> setup DMA | | -> DMA interrupt on completion | | -> RxReady | | -> stop DMA | | -> ack the read | | -> if data recd = max expected | | by the request, or host | | sent a short packet, | | complete the request, | | and start the next one. | |_____________________________________| | else just wait for the host | to send the next OUT token. |__________________________________________________| * Non-Mentor DMA engines can of course work differently. */ #endif /* * Context: controller locked, IRQs blocked, endpoint selected */ static void rxstate(struct musb *musb, struct musb_request *req) { const u8 epnum = req->epnum; struct usb_request *request = &req->request; struct musb_ep *musb_ep; void __iomem *epio = musb->endpoints[epnum].regs; unsigned fifo_count = 0; u16 len; u16 csr = musb_readw(epio, MUSB_RXCSR); struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; u8 use_mode_1; if (hw_ep->is_shared_fifo) musb_ep = &hw_ep->ep_in; else musb_ep = &hw_ep->ep_out; len = musb_ep->packet_sz; /* We shouldn't get here while DMA is active, but we do... */ if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { dev_dbg(musb->controller, "DMA pending...\n"); return; } if (csr & MUSB_RXCSR_P_SENDSTALL) { dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n", musb_ep->end_point.name, csr); return; } if (is_cppi_enabled() && is_buffer_mapped(req)) { struct dma_controller *c = musb->dma_controller; struct dma_channel *channel = musb_ep->dma; /* NOTE: CPPI won't actually stop advancing the DMA * queue after short packet transfers, so this is almost * always going to run as IRQ-per-packet DMA so that * faults will be handled correctly. */ if (c->channel_program(channel, musb_ep->packet_sz, !request->short_not_ok, request->dma + request->actual, request->length - request->actual)) { /* make sure that if an rxpkt arrived after the irq, * the cppi engine will be ready to take it as soon * as DMA is enabled */ csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAMODE); csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS; musb_writew(epio, MUSB_RXCSR, csr); return; } } if (csr & MUSB_RXCSR_RXPKTRDY) { len = musb_readw(epio, MUSB_RXCOUNT); /* * Enable Mode 1 on RX transfers only when short_not_ok flag * is set. 
Currently short_not_ok flag is set only from * file_storage and f_mass_storage drivers */ if (request->short_not_ok && len == musb_ep->packet_sz) use_mode_1 = 1; else use_mode_1 = 0; if (request->actual < request->length) { #ifdef CONFIG_USB_INVENTRA_DMA if (is_buffer_mapped(req)) { struct dma_controller *c; struct dma_channel *channel; int use_dma = 0; c = musb->dma_controller; channel = musb_ep->dma; /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in * mode 0 only. So we do not get endpoint interrupts due to DMA * completion. We only get interrupts from DMA controller. * * We could operate in DMA mode 1 if we knew the size of the transfer * in advance. For mass storage class, request->length = what the host * sends, so that'd work. But for pretty much everything else, * request->length is routinely more than what the host sends. For * most of these gadgets, the end of a transfer is signified either by a short packet, * or filling the last byte of the buffer. (Sending extra data in * that last packet should trigger an overflow fault.) But in mode 1, * we don't get DMA completion interrupt for short packets. * * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1), * to get endpoint interrupt on every DMA req, but that didn't seem * to work reliably. * * REVISIT an updated g_file_storage can set req->short_not_ok, which * then becomes usable as a runtime "use mode 1" hint... */ /* Experimental: Mode1 works with mass storage use cases */ if (use_mode_1) { csr |= MUSB_RXCSR_AUTOCLEAR; musb_writew(epio, MUSB_RXCSR, csr); csr |= MUSB_RXCSR_DMAENAB; musb_writew(epio, MUSB_RXCSR, csr); /* * this special sequence (enabling and then * disabling MUSB_RXCSR_DMAMODE) is required * to get DMAReq to activate */ musb_writew(epio, MUSB_RXCSR, csr | MUSB_RXCSR_DMAMODE); musb_writew(epio, MUSB_RXCSR, csr); } else { if (!musb_ep->hb_mult && musb_ep->hw_ep->rx_double_buffered) csr |= MUSB_RXCSR_AUTOCLEAR; csr |= MUSB_RXCSR_DMAENAB; musb_writew(epio, MUSB_RXCSR, csr); } if (request->actual < request->length) { int transfer_size = 0; if (use_mode_1) { transfer_size = min(request->length - request->actual, channel->max_len); musb_ep->dma->desired_mode = 1; } else { transfer_size = min(request->length - request->actual, (unsigned)len); musb_ep->dma->desired_mode = 0; } use_dma = c->channel_program( channel, musb_ep->packet_sz, channel->desired_mode, request->dma + request->actual, transfer_size); } if (use_dma) return; } #elif defined(CONFIG_USB_UX500_DMA) if ((is_buffer_mapped(req)) && (request->actual < request->length)) { struct dma_controller *c; struct dma_channel *channel; int transfer_size = 0; c = musb->dma_controller; channel = musb_ep->dma; /* In case first packet is short */ if (len < musb_ep->packet_sz) transfer_size = len; else if (request->short_not_ok) transfer_size = min(request->length - request->actual, channel->max_len); else transfer_size = min(request->length - request->actual, (unsigned)len); csr &= ~MUSB_RXCSR_DMAMODE; csr |= (MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR); musb_writew(epio, MUSB_RXCSR, csr); if (transfer_size <= musb_ep->packet_sz) { musb_ep->dma->desired_mode = 0; } else { musb_ep->dma->desired_mode = 1; /* Mode must be set after DMAENAB */ csr |= MUSB_RXCSR_DMAMODE; musb_writew(epio, MUSB_RXCSR, csr); } if (c->channel_program(channel, musb_ep->packet_sz, channel->desired_mode, request->dma + request->actual, transfer_size)) return; } #endif /* Mentor's DMA */ fifo_count = request->length - request->actual; dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, 
maxpacket %d\n", musb_ep->end_point.name, len, fifo_count, musb_ep->packet_sz); fifo_count = min_t(unsigned, len, fifo_count); #ifdef CONFIG_USB_TUSB_OMAP_DMA if (tusb_dma_omap() && is_buffer_mapped(req)) { struct dma_controller *c = musb->dma_controller; struct dma_channel *channel = musb_ep->dma; u32 dma_addr = request->dma + request->actual; int ret; ret = c->channel_program(channel, musb_ep->packet_sz, channel->desired_mode, dma_addr, fifo_count); if (ret) return; } #endif /* * Unmap the dma buffer back to cpu if dma channel * programming fails. This buffer is mapped if the * channel allocation is successful */ if (is_buffer_mapped(req)) { unmap_dma_buffer(req, musb); /* * Clear DMAENAB and AUTOCLEAR for the * PIO mode transfer */ csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR); musb_writew(epio, MUSB_RXCSR, csr); } musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *) (request->buf + request->actual)); request->actual += fifo_count; /* REVISIT if we left anything in the fifo, flush * it and report -EOVERFLOW */ /* ack the read! */ csr |= MUSB_RXCSR_P_WZC_BITS; csr &= ~MUSB_RXCSR_RXPKTRDY; musb_writew(epio, MUSB_RXCSR, csr); } } /* reach the end or short packet detected */ if (request->actual == request->length || len < musb_ep->packet_sz) musb_g_giveback(musb_ep, request, 0); } /* * Data ready for a request; called from IRQ */ void musb_g_rx(struct musb *musb, u8 epnum) { u16 csr; struct musb_request *req; struct usb_request *request; void __iomem *mbase = musb->mregs; struct musb_ep *musb_ep; void __iomem *epio = musb->endpoints[epnum].regs; struct dma_channel *dma; struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; if (hw_ep->is_shared_fifo) musb_ep = &hw_ep->ep_in; else musb_ep = &hw_ep->ep_out; musb_ep_select(mbase, epnum); req = next_request(musb_ep); if (!req) return; request = &req->request; csr = musb_readw(epio, MUSB_RXCSR); dma = is_dma_capable() ? musb_ep->dma : NULL; dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name, csr, dma ? " (dma)" : "", request); if (csr & MUSB_RXCSR_P_SENTSTALL) { csr |= MUSB_RXCSR_P_WZC_BITS; csr &= ~MUSB_RXCSR_P_SENTSTALL; musb_writew(epio, MUSB_RXCSR, csr); return; } if (csr & MUSB_RXCSR_P_OVERRUN) { /* csr |= MUSB_RXCSR_P_WZC_BITS; */ csr &= ~MUSB_RXCSR_P_OVERRUN; musb_writew(epio, MUSB_RXCSR, csr); dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request); if (request->status == -EINPROGRESS) request->status = -EOVERFLOW; } if (csr & MUSB_RXCSR_INCOMPRX) { /* REVISIT not necessarily an error */ dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name); } if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { /* "should not happen"; likely RXPKTRDY pending for DMA */ dev_dbg(musb->controller, "%s busy, csr %04x\n", musb_ep->end_point.name, csr); return; } if (dma && (csr & MUSB_RXCSR_DMAENAB)) { csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB | MUSB_RXCSR_DMAMODE); musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_P_WZC_BITS | csr); request->actual += musb_ep->dma->actual_len; dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n", epnum, csr, musb_readw(epio, MUSB_RXCSR), musb_ep->dma->actual_len, request); #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \ defined(CONFIG_USB_UX500_DMA) /* Autoclear doesn't clear RxPktRdy for short packets */ if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered) || (dma->actual_len & (musb_ep->packet_sz - 1))) { /* ack the read! 
*/ csr &= ~MUSB_RXCSR_RXPKTRDY; musb_writew(epio, MUSB_RXCSR, csr); } /* incomplete, and not short? wait for next IN packet */ if ((request->actual < request->length) && (musb_ep->dma->actual_len == musb_ep->packet_sz)) { /* In double buffer case, continue to unload fifo if * there is Rx packet in FIFO. **/ csr = musb_readw(epio, MUSB_RXCSR); if ((csr & MUSB_RXCSR_RXPKTRDY) && hw_ep->rx_double_buffered) goto exit; return; } #endif musb_g_giveback(musb_ep, request, 0); /* * In the giveback function the MUSB lock is * released and acquired after sometime. During * this time period the INDEX register could get * changed by the gadget_queue function especially * on SMP systems. Reselect the INDEX to be sure * we are reading/modifying the right registers */ musb_ep_select(mbase, epnum); req = next_request(musb_ep); if (!req) return; } #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \ defined(CONFIG_USB_UX500_DMA) exit: #endif /* Analyze request */ rxstate(musb, req); } /* ------------------------------------------------------------ */ static int musb_gadget_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc) { unsigned long flags; struct musb_ep *musb_ep; struct musb_hw_ep *hw_ep; void __iomem *regs; struct musb *musb; void __iomem *mbase; u8 epnum; u16 csr; unsigned tmp; int status = -EINVAL; if (!ep || !desc) return -EINVAL; musb_ep = to_musb_ep(ep); hw_ep = musb_ep->hw_ep; regs = hw_ep->regs; musb = musb_ep->musb; mbase = musb->mregs; epnum = musb_ep->current_epnum; spin_lock_irqsave(&musb->lock, flags); if (musb_ep->desc) { status = -EBUSY; goto fail; } musb_ep->type = usb_endpoint_type(desc); /* check direction and (later) maxpacket size against endpoint */ if (usb_endpoint_num(desc) != epnum) goto fail; /* REVISIT this rules out high bandwidth periodic transfers */ tmp = usb_endpoint_maxp(desc); if (tmp & ~0x07ff) { int ok; if (usb_endpoint_dir_in(desc)) ok = musb->hb_iso_tx; else ok = musb->hb_iso_rx; if (!ok) { dev_dbg(musb->controller, "no support for high bandwidth ISO\n"); goto fail; } musb_ep->hb_mult = (tmp >> 11) & 3; } else { musb_ep->hb_mult = 0; } musb_ep->packet_sz = tmp & 0x7ff; tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1); /* enable the interrupts for the endpoint, set the endpoint * packet size (or fail), set the mode, clear the fifo */ musb_ep_select(mbase, epnum); if (usb_endpoint_dir_in(desc)) { u16 int_txe = musb_readw(mbase, MUSB_INTRTXE); if (hw_ep->is_shared_fifo) musb_ep->is_in = 1; if (!musb_ep->is_in) goto fail; if (tmp > hw_ep->max_packet_sz_tx) { dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n"); goto fail; } int_txe |= (1 << epnum); musb_writew(mbase, MUSB_INTRTXE, int_txe); /* REVISIT if can_bulk_split(), use by updating "tmp"; * likewise high bandwidth periodic tx */ /* Set TXMAXP with the FIFO size of the endpoint * to disable double buffering mode. */ if (musb->double_buffer_not_ok) { musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx); } else { if (can_bulk_split(musb, musb_ep->type)) musb_ep->hb_mult = (hw_ep->max_packet_sz_tx / musb_ep->packet_sz) - 1; musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11)); } csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; if (musb_readw(regs, MUSB_TXCSR) & MUSB_TXCSR_FIFONOTEMPTY) csr |= MUSB_TXCSR_FLUSHFIFO; if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) csr |= MUSB_TXCSR_P_ISO; /* set twice in case of double buffering */ musb_writew(regs, MUSB_TXCSR, csr); /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... 
*/ musb_writew(regs, MUSB_TXCSR, csr); } else { u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE); if (hw_ep->is_shared_fifo) musb_ep->is_in = 0; if (musb_ep->is_in) goto fail; if (tmp > hw_ep->max_packet_sz_rx) { dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n"); goto fail; } int_rxe |= (1 << epnum); musb_writew(mbase, MUSB_INTRRXE, int_rxe); /* REVISIT if can_bulk_combine() use by updating "tmp" * likewise high bandwidth periodic rx */ /* Set RXMAXP with the FIFO size of the endpoint * to disable double buffering mode. */ if (musb->double_buffer_not_ok) musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx); else musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11)); /* force shared fifo to OUT-only mode */ if (hw_ep->is_shared_fifo) { csr = musb_readw(regs, MUSB_TXCSR); csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY); musb_writew(regs, MUSB_TXCSR, csr); } csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG; if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) csr |= MUSB_RXCSR_P_ISO; else if (musb_ep->type == USB_ENDPOINT_XFER_INT) csr |= MUSB_RXCSR_DISNYET; /* set twice in case of double buffering */ musb_writew(regs, MUSB_RXCSR, csr); musb_writew(regs, MUSB_RXCSR, csr); } /* NOTE: all the I/O code _should_ work fine without DMA, in case * for some reason you run out of channels here. */ if (is_dma_capable() && musb->dma_controller) { struct dma_controller *c = musb->dma_controller; musb_ep->dma = c->channel_alloc(c, hw_ep, (desc->bEndpointAddress & USB_DIR_IN)); } else musb_ep->dma = NULL; musb_ep->desc = desc; musb_ep->busy = 0; musb_ep->wedged = 0; status = 0; pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n", musb_driver_name, musb_ep->end_point.name, ({ char *s; switch (musb_ep->type) { case USB_ENDPOINT_XFER_BULK: s = "bulk"; break; case USB_ENDPOINT_XFER_INT: s = "int"; break; default: s = "iso"; break; }; s; }), musb_ep->is_in ? "IN" : "OUT", musb_ep->dma ? "dma, " : "", musb_ep->packet_sz); schedule_work(&musb->irq_work); fail: spin_unlock_irqrestore(&musb->lock, flags); return status; } /* * Disable an endpoint flushing all requests queued. */ static int musb_gadget_disable(struct usb_ep *ep) { unsigned long flags; struct musb *musb; u8 epnum; struct musb_ep *musb_ep; void __iomem *epio; int status = 0; musb_ep = to_musb_ep(ep); musb = musb_ep->musb; epnum = musb_ep->current_epnum; epio = musb->endpoints[epnum].regs; spin_lock_irqsave(&musb->lock, flags); musb_ep_select(musb->mregs, epnum); /* zero the endpoint sizes */ if (musb_ep->is_in) { u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE); int_txe &= ~(1 << epnum); musb_writew(musb->mregs, MUSB_INTRTXE, int_txe); musb_writew(epio, MUSB_TXMAXP, 0); } else { u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE); int_rxe &= ~(1 << epnum); musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe); musb_writew(epio, MUSB_RXMAXP, 0); } musb_ep->desc = NULL; musb_ep->end_point.desc = NULL; /* abort all pending DMA and requests */ nuke(musb_ep, -ESHUTDOWN); schedule_work(&musb->irq_work); spin_unlock_irqrestore(&(musb->lock), flags); dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name); return status; } /* * Allocate a request for an endpoint. * Reused by ep0 code. 
*/ struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) { struct musb_ep *musb_ep = to_musb_ep(ep); struct musb *musb = musb_ep->musb; struct musb_request *request = NULL; request = kzalloc(sizeof *request, gfp_flags); if (!request) { dev_dbg(musb->controller, "not enough memory\n"); return NULL; } request->request.dma = DMA_ADDR_INVALID; request->epnum = musb_ep->current_epnum; request->ep = musb_ep; return &request->request; } /* * Free a request * Reused by ep0 code. */ void musb_free_request(struct usb_ep *ep, struct usb_request *req) { kfree(to_musb_request(req)); } static LIST_HEAD(buffers); struct free_record { struct list_head list; struct device *dev; unsigned bytes; dma_addr_t dma; }; /* * Context: controller locked, IRQs blocked. */ void musb_ep_restart(struct musb *musb, struct musb_request *req) { dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n", req->tx ? "TX/IN" : "RX/OUT", &req->request, req->request.length, req->epnum); musb_ep_select(musb->mregs, req->epnum); if (req->tx) txstate(musb, req); else rxstate(musb, req); } static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags) { struct musb_ep *musb_ep; struct musb_request *request; struct musb *musb; int status = 0; unsigned long lockflags; if (!ep || !req) return -EINVAL; if (!req->buf) return -ENODATA; musb_ep = to_musb_ep(ep); musb = musb_ep->musb; request = to_musb_request(req); request->musb = musb; if (request->ep != musb_ep) return -EINVAL; dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req); /* request is mine now... */ request->request.actual = 0; request->request.status = -EINPROGRESS; request->epnum = musb_ep->current_epnum; request->tx = musb_ep->is_in; map_dma_buffer(request, musb, musb_ep); spin_lock_irqsave(&musb->lock, lockflags); /* don't queue if the ep is down */ if (!musb_ep->desc) { dev_dbg(musb->controller, "req %p queued to %s while ep %s\n", req, ep->name, "disabled"); status = -ESHUTDOWN; goto cleanup; } /* add request to the list */ list_add_tail(&request->list, &musb_ep->req_list); /* it this is the head of the queue, start i/o ... */ if (!musb_ep->busy && &request->list == musb_ep->req_list.next) musb_ep_restart(musb, request); cleanup: spin_unlock_irqrestore(&musb->lock, lockflags); return status; } static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request) { struct musb_ep *musb_ep = to_musb_ep(ep); struct musb_request *req = to_musb_request(request); struct musb_request *r; unsigned long flags; int status = 0; struct musb *musb = musb_ep->musb; if (!ep || !request || to_musb_request(request)->ep != musb_ep) return -EINVAL; spin_lock_irqsave(&musb->lock, flags); list_for_each_entry(r, &musb_ep->req_list, list) { if (r == req) break; } if (r != req) { dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name); status = -EINVAL; goto done; } /* if the hardware doesn't have the request, easy ... */ if (musb_ep->req_list.next != &req->list || musb_ep->busy) musb_g_giveback(musb_ep, request, -ECONNRESET); /* ... else abort the dma transfer ... */ else if (is_dma_capable() && musb_ep->dma) { struct dma_controller *c = musb->dma_controller; musb_ep_select(musb->mregs, musb_ep->current_epnum); if (c->channel_abort) status = c->channel_abort(musb_ep->dma); else status = -EBUSY; if (status == 0) musb_g_giveback(musb_ep, request, -ECONNRESET); } else { /* NOTE: by sticking to easily tested hardware/driver states, * we leave counting of in-flight packets imprecise. 
*/ musb_g_giveback(musb_ep, request, -ECONNRESET); } done: spin_unlock_irqrestore(&musb->lock, flags); return status; } /* * Set or clear the halt bit of an endpoint. A halted enpoint won't tx/rx any * data but will queue requests. * * exported to ep0 code */ static int musb_gadget_set_halt(struct usb_ep *ep, int value) { struct musb_ep *musb_ep = to_musb_ep(ep); u8 epnum = musb_ep->current_epnum; struct musb *musb = musb_ep->musb; void __iomem *epio = musb->endpoints[epnum].regs; void __iomem *mbase; unsigned long flags; u16 csr; struct musb_request *request; int status = 0; if (!ep) return -EINVAL; mbase = musb->mregs; spin_lock_irqsave(&musb->lock, flags); if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) { status = -EINVAL; goto done; } musb_ep_select(mbase, epnum); request = next_request(musb_ep); if (value) { if (request) { dev_dbg(musb->controller, "request in progress, cannot halt %s\n", ep->name); status = -EAGAIN; goto done; } /* Cannot portably stall with non-empty FIFO */ if (musb_ep->is_in) { csr = musb_readw(epio, MUSB_TXCSR); if (csr & MUSB_TXCSR_FIFONOTEMPTY) { dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name); status = -EAGAIN; goto done; } } } else musb_ep->wedged = 0; /* set/clear the stall and toggle bits */ dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear"); if (musb_ep->is_in) { csr = musb_readw(epio, MUSB_TXCSR); csr |= MUSB_TXCSR_P_WZC_BITS | MUSB_TXCSR_CLRDATATOG; if (value) csr |= MUSB_TXCSR_P_SENDSTALL; else csr &= ~(MUSB_TXCSR_P_SENDSTALL | MUSB_TXCSR_P_SENTSTALL); csr &= ~MUSB_TXCSR_TXPKTRDY; musb_writew(epio, MUSB_TXCSR, csr); } else { csr = musb_readw(epio, MUSB_RXCSR); csr |= MUSB_RXCSR_P_WZC_BITS | MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG; if (value) csr |= MUSB_RXCSR_P_SENDSTALL; else csr &= ~(MUSB_RXCSR_P_SENDSTALL | MUSB_RXCSR_P_SENTSTALL); musb_writew(epio, MUSB_RXCSR, csr); } /* maybe start the first request in the queue */ if (!musb_ep->busy && !value && request) { dev_dbg(musb->controller, "restarting the request\n"); musb_ep_restart(musb, request); } done: spin_unlock_irqrestore(&musb->lock, flags); return status; } /* * Sets the halt feature with the clear requests ignored */ static int musb_gadget_set_wedge(struct usb_ep *ep) { struct musb_ep *musb_ep = to_musb_ep(ep); if (!ep) return -EINVAL; musb_ep->wedged = 1; return usb_ep_set_halt(ep); } static int musb_gadget_fifo_status(struct usb_ep *ep) { struct musb_ep *musb_ep = to_musb_ep(ep); void __iomem *epio = musb_ep->hw_ep->regs; int retval = -EINVAL; if (musb_ep->desc && !musb_ep->is_in) { struct musb *musb = musb_ep->musb; int epnum = musb_ep->current_epnum; void __iomem *mbase = musb->mregs; unsigned long flags; spin_lock_irqsave(&musb->lock, flags); musb_ep_select(mbase, epnum); /* FIXME return zero unless RXPKTRDY is set */ retval = musb_readw(epio, MUSB_RXCOUNT); spin_unlock_irqrestore(&musb->lock, flags); } return retval; } static void musb_gadget_fifo_flush(struct usb_ep *ep) { struct musb_ep *musb_ep = to_musb_ep(ep); struct musb *musb = musb_ep->musb; u8 epnum = musb_ep->current_epnum; void __iomem *epio = musb->endpoints[epnum].regs; void __iomem *mbase; unsigned long flags; u16 csr, int_txe; mbase = musb->mregs; spin_lock_irqsave(&musb->lock, flags); musb_ep_select(mbase, (u8) epnum); /* disable interrupts */ int_txe = musb_readw(mbase, MUSB_INTRTXE); musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum)); if (musb_ep->is_in) { csr = musb_readw(epio, MUSB_TXCSR); if (csr & MUSB_TXCSR_FIFONOTEMPTY) { csr |= MUSB_TXCSR_FLUSHFIFO | 
MUSB_TXCSR_P_WZC_BITS; /* * Setting both TXPKTRDY and FLUSHFIFO makes the controller * interrupt the current FIFO load, but does not flush * packets that are already loaded. */ csr &= ~MUSB_TXCSR_TXPKTRDY; musb_writew(epio, MUSB_TXCSR, csr); /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */ musb_writew(epio, MUSB_TXCSR, csr); } } else { csr = musb_readw(epio, MUSB_RXCSR); csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS; musb_writew(epio, MUSB_RXCSR, csr); musb_writew(epio, MUSB_RXCSR, csr); } /* re-enable interrupt */ musb_writew(mbase, MUSB_INTRTXE, int_txe); spin_unlock_irqrestore(&musb->lock, flags); } static const struct usb_ep_ops musb_ep_ops = { .enable = musb_gadget_enable, .disable = musb_gadget_disable, .alloc_request = musb_alloc_request, .free_request = musb_free_request, .queue = musb_gadget_queue, .dequeue = musb_gadget_dequeue, .set_halt = musb_gadget_set_halt, .set_wedge = musb_gadget_set_wedge, .fifo_status = musb_gadget_fifo_status, .fifo_flush = musb_gadget_fifo_flush }; /* ----------------------------------------------------------------------- */ static int musb_gadget_get_frame(struct usb_gadget *gadget) { struct musb *musb = gadget_to_musb(gadget); return (int)musb_readw(musb->mregs, MUSB_FRAME); } static int musb_gadget_wakeup(struct usb_gadget *gadget) { struct musb *musb = gadget_to_musb(gadget); void __iomem *mregs = musb->mregs; unsigned long flags; int status = -EINVAL; u8 power, devctl; int retries; spin_lock_irqsave(&musb->lock, flags); switch (musb->xceiv->state) { case OTG_STATE_B_PERIPHERAL: /* NOTE: OTG state machine doesn't include B_SUSPENDED; * that's part of the standard usb 1.1 state machine, and * doesn't affect OTG transitions. */ if (musb->may_wakeup && musb->is_suspended) break; goto done; case OTG_STATE_B_IDLE: /* Start SRP ... OTG not required. */ devctl = musb_readb(mregs, MUSB_DEVCTL); dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl); devctl |= MUSB_DEVCTL_SESSION; musb_writeb(mregs, MUSB_DEVCTL, devctl); devctl = musb_readb(mregs, MUSB_DEVCTL); retries = 100; while (!(devctl & MUSB_DEVCTL_SESSION)) { devctl = musb_readb(mregs, MUSB_DEVCTL); if (retries-- < 1) break; } retries = 10000; while (devctl & MUSB_DEVCTL_SESSION) { devctl = musb_readb(mregs, MUSB_DEVCTL); if (retries-- < 1) break; } spin_unlock_irqrestore(&musb->lock, flags); otg_start_srp(musb->xceiv->otg); spin_lock_irqsave(&musb->lock, flags); /* Block idling for at least 1s */ musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(1 * HZ)); status = 0; goto done; default: dev_dbg(musb->controller, "Unhandled wake: %s\n", otg_state_string(musb->xceiv->state)); goto done; } status = 0; power = musb_readb(mregs, MUSB_POWER); power |= MUSB_POWER_RESUME; musb_writeb(mregs, MUSB_POWER, power); dev_dbg(musb->controller, "issue wakeup\n"); /* FIXME do this next chunk in a timer callback, no udelay */ mdelay(2); power = musb_readb(mregs, MUSB_POWER); power &= ~MUSB_POWER_RESUME; musb_writeb(mregs, MUSB_POWER, power); done: spin_unlock_irqrestore(&musb->lock, flags); return status; } static int musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered) { struct musb *musb = gadget_to_musb(gadget); musb->is_self_powered = !!is_selfpowered; return 0; } static void musb_pullup(struct musb *musb, int is_on) { u8 power; power = musb_readb(musb->mregs, MUSB_POWER); if (is_on) power |= MUSB_POWER_SOFTCONN; else power &= ~MUSB_POWER_SOFTCONN; /* FIXME if on, HdrcStart; if off, HdrcStop */ dev_dbg(musb->controller, "gadget D+ pullup %s\n", is_on ?
"on" : "off"); musb_writeb(musb->mregs, MUSB_POWER, power); } #if 0 static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active) { dev_dbg(musb->controller, "<= %s =>\n", __func__); /* * FIXME iff driver's softconnect flag is set (as it is during probe, * though that can clear it), just musb_pullup(). */ return -EINVAL; } #endif static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) { struct musb *musb = gadget_to_musb(gadget); if (!musb->xceiv->set_power) return -EOPNOTSUPP; return usb_phy_set_power(musb->xceiv, mA); } static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) { struct musb *musb = gadget_to_musb(gadget); unsigned long flags; is_on = !!is_on; pm_runtime_get_sync(musb->controller); /* NOTE: this assumes we are sensing vbus; we'd rather * not pullup unless the B-session is active. */ spin_lock_irqsave(&musb->lock, flags); if (is_on != musb->softconnect) { musb->softconnect = is_on; musb_pullup(musb, is_on); } spin_unlock_irqrestore(&musb->lock, flags); pm_runtime_put(musb->controller); return 0; } static int musb_gadget_start(struct usb_gadget *g, struct usb_gadget_driver *driver); static int musb_gadget_stop(struct usb_gadget *g, struct usb_gadget_driver *driver); static const struct usb_gadget_ops musb_gadget_operations = { .get_frame = musb_gadget_get_frame, .wakeup = musb_gadget_wakeup, .set_selfpowered = musb_gadget_set_self_powered, /* .vbus_session = musb_gadget_vbus_session, */ .vbus_draw = musb_gadget_vbus_draw, .pullup = musb_gadget_pullup, .udc_start = musb_gadget_start, .udc_stop = musb_gadget_stop, }; /* ----------------------------------------------------------------------- */ /* Registration */ /* Only this registration code "knows" the rule (from USB standards) * about there being only one external upstream port. It assumes * all peripheral ports are external... */ static void musb_gadget_release(struct device *dev) { /* kref_put(WHAT) */ dev_dbg(dev, "%s\n", __func__); } static void __devinit init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in) { struct musb_hw_ep *hw_ep = musb->endpoints + epnum; memset(ep, 0, sizeof *ep); ep->current_epnum = epnum; ep->musb = musb; ep->hw_ep = hw_ep; ep->is_in = is_in; INIT_LIST_HEAD(&ep->req_list); sprintf(ep->name, "ep%d%s", epnum, (!epnum || hw_ep->is_shared_fifo) ? "" : ( is_in ? "in" : "out")); ep->end_point.name = ep->name; INIT_LIST_HEAD(&ep->end_point.ep_list); if (!epnum) { ep->end_point.maxpacket = 64; ep->end_point.ops = &musb_g_ep0_ops; musb->g.ep0 = &ep->end_point; } else { if (is_in) ep->end_point.maxpacket = hw_ep->max_packet_sz_tx; else ep->end_point.maxpacket = hw_ep->max_packet_sz_rx; ep->end_point.ops = &musb_ep_ops; list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list); } } /* * Initialize the endpoints exposed to peripheral drivers, with backlinks * to the rest of the driver state. 
*/ static inline void __devinit musb_g_init_endpoints(struct musb *musb) { u8 epnum; struct musb_hw_ep *hw_ep; unsigned count = 0; /* initialize endpoint list just once */ INIT_LIST_HEAD(&(musb->g.ep_list)); for (epnum = 0, hw_ep = musb->endpoints; epnum < musb->nr_endpoints; epnum++, hw_ep++) { if (hw_ep->is_shared_fifo /* || !epnum */) { init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0); count++; } else { if (hw_ep->max_packet_sz_tx) { init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 1); count++; } if (hw_ep->max_packet_sz_rx) { init_peripheral_ep(musb, &hw_ep->ep_out, epnum, 0); count++; } } } } /* called once during driver setup to initialize and link into * the driver model; memory is zeroed. */ int __devinit musb_gadget_setup(struct musb *musb) { int status; /* REVISIT minor race: if (erroneously) setting up two * musb peripherals at the same time, only the bus lock * is probably held. */ musb->g.ops = &musb_gadget_operations; musb->g.max_speed = USB_SPEED_HIGH; musb->g.speed = USB_SPEED_UNKNOWN; /* this "gadget" abstracts/virtualizes the controller */ dev_set_name(&musb->g.dev, "gadget"); musb->g.dev.parent = musb->controller; musb->g.dev.dma_mask = musb->controller->dma_mask; musb->g.dev.release = musb_gadget_release; musb->g.name = musb_driver_name; if (is_otg_enabled(musb)) musb->g.is_otg = 1; musb_g_init_endpoints(musb); musb->is_active = 0; musb_platform_try_idle(musb, 0); status = device_register(&musb->g.dev); if (status != 0) { put_device(&musb->g.dev); return status; } status = usb_add_gadget_udc(musb->controller, &musb->g); if (status) goto err; return 0; err: musb->g.dev.parent = NULL; device_unregister(&musb->g.dev); return status; } void musb_gadget_cleanup(struct musb *musb) { usb_del_gadget_udc(&musb->g); if (musb->g.dev.parent) device_unregister(&musb->g.dev); } /* * Register the gadget driver. Used by gadget drivers when * registering themselves with the controller. * * -EINVAL something went wrong (not driver) * -EBUSY another gadget is already using the controller * -ENOMEM no memory to perform the operation * * @param driver the gadget driver * @return <0 if error, 0 if everything is fine */ static int musb_gadget_start(struct usb_gadget *g, struct usb_gadget_driver *driver) { struct musb *musb = gadget_to_musb(g); struct usb_otg *otg = musb->xceiv->otg; unsigned long flags; int retval = -EINVAL; if (driver->max_speed < USB_SPEED_HIGH) goto err0; pm_runtime_get_sync(musb->controller); dev_dbg(musb->controller, "registering driver %s\n", driver->function); musb->softconnect = 0; musb->gadget_driver = driver; spin_lock_irqsave(&musb->lock, flags); musb->is_active = 1; otg_set_peripheral(otg, &musb->g); musb->xceiv->state = OTG_STATE_B_IDLE; /* * FIXME this ignores the softconnect flag. Drivers are * allowed to hold the peripheral inactive until, for example, * userspace hooks up printer hardware or DSP codecs, so * hosts only see fully functional devices. */ if (!is_otg_enabled(musb)) musb_start(musb); spin_unlock_irqrestore(&musb->lock, flags); if (is_otg_enabled(musb)) { struct usb_hcd *hcd = musb_to_hcd(musb); dev_dbg(musb->controller, "OTG startup...\n"); /* REVISIT: funcall to other code, which also * handles power budgeting ... this way also * ensures HdrcStart is indirectly called.
*/ retval = usb_add_hcd(musb_to_hcd(musb), 0, 0); if (retval < 0) { dev_dbg(musb->controller, "add_hcd failed, %d\n", retval); goto err2; } if ((musb->xceiv->last_event == USB_EVENT_ID) && otg->set_vbus) otg_set_vbus(otg, 1); hcd->self.uses_pio_for_control = 1; } if (musb->xceiv->last_event == USB_EVENT_NONE) pm_runtime_put(musb->controller); return 0; err2: if (!is_otg_enabled(musb)) musb_stop(musb); err0: return retval; } static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver) { int i; struct musb_hw_ep *hw_ep; /* don't disconnect if it's not connected */ if (musb->g.speed == USB_SPEED_UNKNOWN) driver = NULL; else musb->g.speed = USB_SPEED_UNKNOWN; /* deactivate the hardware */ if (musb->softconnect) { musb->softconnect = 0; musb_pullup(musb, 0); } musb_stop(musb); /* killing any outstanding requests will quiesce the driver; * then report disconnect */ if (driver) { for (i = 0, hw_ep = musb->endpoints; i < musb->nr_endpoints; i++, hw_ep++) { musb_ep_select(musb->mregs, i); if (hw_ep->is_shared_fifo /* || !epnum */) { nuke(&hw_ep->ep_in, -ESHUTDOWN); } else { if (hw_ep->max_packet_sz_tx) nuke(&hw_ep->ep_in, -ESHUTDOWN); if (hw_ep->max_packet_sz_rx) nuke(&hw_ep->ep_out, -ESHUTDOWN); } } } } /* * Unregister the gadget driver. Used by gadget drivers when * unregistering themselves from the controller. * * @param driver the gadget driver to unregister */ static int musb_gadget_stop(struct usb_gadget *g, struct usb_gadget_driver *driver) { struct musb *musb = gadget_to_musb(g); unsigned long flags; if (musb->xceiv->last_event == USB_EVENT_NONE) pm_runtime_get_sync(musb->controller); /* * REVISIT always use otg_set_peripheral() here too; * this needs to shut down the OTG engine. */ spin_lock_irqsave(&musb->lock, flags); musb_hnp_stop(musb); (void) musb_gadget_vbus_draw(&musb->g, 0); musb->xceiv->state = OTG_STATE_UNDEFINED; stop_activity(musb, driver); otg_set_peripheral(musb->xceiv->otg, NULL); dev_dbg(musb->controller, "unregistering driver %s\n", driver->function); musb->is_active = 0; musb_platform_try_idle(musb, 0); spin_unlock_irqrestore(&musb->lock, flags); if (is_otg_enabled(musb)) { usb_remove_hcd(musb_to_hcd(musb)); /* FIXME we need to be able to register another * gadget driver here and have everything work; * that currently misbehaves. 
*/ } if (!is_otg_enabled(musb)) musb_stop(musb); pm_runtime_put(musb->controller); return 0; } /* ----------------------------------------------------------------------- */ /* lifecycle operations called through plat_uds.c */ void musb_g_resume(struct musb *musb) { musb->is_suspended = 0; switch (musb->xceiv->state) { case OTG_STATE_B_IDLE: break; case OTG_STATE_B_WAIT_ACON: case OTG_STATE_B_PERIPHERAL: musb->is_active = 1; if (musb->gadget_driver && musb->gadget_driver->resume) { spin_unlock(&musb->lock); musb->gadget_driver->resume(&musb->g); spin_lock(&musb->lock); } break; default: WARNING("unhandled RESUME transition (%s)\n", otg_state_string(musb->xceiv->state)); } } /* called when SOF packets stop for 3+ msec */ void musb_g_suspend(struct musb *musb) { u8 devctl; devctl = musb_readb(musb->mregs, MUSB_DEVCTL); dev_dbg(musb->controller, "devctl %02x\n", devctl); switch (musb->xceiv->state) { case OTG_STATE_B_IDLE: if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) musb->xceiv->state = OTG_STATE_B_PERIPHERAL; break; case OTG_STATE_B_PERIPHERAL: musb->is_suspended = 1; if (musb->gadget_driver && musb->gadget_driver->suspend) { spin_unlock(&musb->lock); musb->gadget_driver->suspend(&musb->g); spin_lock(&musb->lock); } break; default: /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ; * A_PERIPHERAL may need care too */ WARNING("unhandled SUSPEND transition (%s)\n", otg_state_string(musb->xceiv->state)); } } /* Called during SRP */ void musb_g_wakeup(struct musb *musb) { musb_gadget_wakeup(&musb->g); } /* called when VBUS drops below session threshold, and in other cases */ void musb_g_disconnect(struct musb *musb) { void __iomem *mregs = musb->mregs; u8 devctl = musb_readb(mregs, MUSB_DEVCTL); dev_dbg(musb->controller, "devctl %02x\n", devctl); /* clear HR */ musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION); /* don't draw vbus until new b-default session */ (void) musb_gadget_vbus_draw(&musb->g, 0); musb->g.speed = USB_SPEED_UNKNOWN; if (musb->gadget_driver && musb->gadget_driver->disconnect) { spin_unlock(&musb->lock); musb->gadget_driver->disconnect(&musb->g); spin_lock(&musb->lock); } switch (musb->xceiv->state) { default: dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n", otg_state_string(musb->xceiv->state)); musb->xceiv->state = OTG_STATE_A_IDLE; MUSB_HST_MODE(musb); break; case OTG_STATE_A_PERIPHERAL: musb->xceiv->state = OTG_STATE_A_WAIT_BCON; MUSB_HST_MODE(musb); break; case OTG_STATE_B_WAIT_ACON: case OTG_STATE_B_HOST: case OTG_STATE_B_PERIPHERAL: case OTG_STATE_B_IDLE: musb->xceiv->state = OTG_STATE_B_IDLE; break; case OTG_STATE_B_SRP_INIT: break; } musb->is_active = 0; } void musb_g_reset(struct musb *musb) __releases(musb->lock) __acquires(musb->lock) { void __iomem *mbase = musb->mregs; u8 devctl = musb_readb(mbase, MUSB_DEVCTL); u8 power; dev_dbg(musb->controller, "<== %s addr=%x driver '%s'\n", (devctl & MUSB_DEVCTL_BDEVICE) ? "B-Device" : "A-Device", musb_readb(mbase, MUSB_FADDR), musb->gadget_driver ? musb->gadget_driver->driver.name : NULL ); /* report disconnect, if we didn't already (flushing EP state) */ if (musb->g.speed != USB_SPEED_UNKNOWN) musb_g_disconnect(musb); /* clear HR */ else if (devctl & MUSB_DEVCTL_HR) musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION); /* what speed did we negotiate? */ power = musb_readb(mbase, MUSB_POWER); musb->g.speed = (power & MUSB_POWER_HSMODE) ? 
USB_SPEED_HIGH : USB_SPEED_FULL; /* start in USB_STATE_DEFAULT */ musb->is_active = 1; musb->is_suspended = 0; MUSB_DEV_MODE(musb); musb->address = 0; musb->ep0_state = MUSB_EP0_STAGE_SETUP; musb->may_wakeup = 0; musb->g.b_hnp_enable = 0; musb->g.a_alt_hnp_support = 0; musb->g.a_hnp_support = 0; /* Normal reset, as B-Device; * or else after HNP, as A-Device */ if (devctl & MUSB_DEVCTL_BDEVICE) { musb->xceiv->state = OTG_STATE_B_PERIPHERAL; musb->g.is_a_peripheral = 0; } else if (is_otg_enabled(musb)) { musb->xceiv->state = OTG_STATE_A_PERIPHERAL; musb->g.is_a_peripheral = 1; } else WARN_ON(1); /* start with default limits on VBUS power draw */ (void) musb_gadget_vbus_draw(&musb->g, is_otg_enabled(musb) ? 8 : 100); }
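/*
 * Illustrative sketch only (not part of this driver): how a gadget
 * function driver exercises the musb_ep_ops implemented above through
 * the generic gadget API. The endpoint "ep" is assumed to be already
 * enabled by the composite layer; names prefixed "example_" are
 * hypothetical and error handling is trimmed to the essentials.
 */
#if 0
static void example_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* runs in interrupt context; req->status and req->actual are valid */
	usb_ep_free_request(ep, req);	/* ends up in musb_free_request() */
}

static int example_submit(struct usb_ep *ep, void *buf, unsigned len)
{
	/* usb_ep_alloc_request() dispatches to musb_alloc_request() */
	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);

	if (!req)
		return -ENOMEM;
	req->buf = buf;
	req->length = len;
	req->complete = example_complete;
	/* dispatches to musb_gadget_queue(), which may start I/O at once */
	return usb_ep_queue(ep, req, GFP_ATOMIC);
}
#endif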
gpl-2.0
Validus-Kernel/kernel_htc_flounder
arch/powerpc/kvm/book3s_pr_papr.c
2075
7680
/* * Copyright (C) 2011. Freescale Inc. All rights reserved. * * Authors: * Alexander Graf <agraf@suse.de> * Paul Mackerras <paulus@samba.org> * * Description: * * Hypercall handling for running PAPR guests in PR KVM on Book 3S * processors. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation. */ #include <linux/anon_inodes.h> #include <asm/uaccess.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index) { struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); unsigned long pteg_addr; pte_index <<= 4; pte_index &= ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1) << 7 | 0x70; pteg_addr = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL; pteg_addr |= pte_index; return pteg_addr; } static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu) { long flags = kvmppc_get_gpr(vcpu, 4); long pte_index = kvmppc_get_gpr(vcpu, 5); unsigned long pteg[2 * 8]; unsigned long pteg_addr, i, *hpte; pte_index &= ~7UL; pteg_addr = get_pteg_addr(vcpu, pte_index); copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)); hpte = pteg; if (likely((flags & H_EXACT) == 0)) { pte_index &= ~7UL; for (i = 0; ; ++i) { if (i == 8) return H_PTEG_FULL; if ((*hpte & HPTE_V_VALID) == 0) break; hpte += 2; } } else { i = kvmppc_get_gpr(vcpu, 5) & 7UL; hpte += i * 2; } hpte[0] = kvmppc_get_gpr(vcpu, 6); hpte[1] = kvmppc_get_gpr(vcpu, 7); copy_to_user((void __user *)pteg_addr, pteg, sizeof(pteg)); kvmppc_set_gpr(vcpu, 3, H_SUCCESS); kvmppc_set_gpr(vcpu, 4, pte_index | i); return EMULATE_DONE; } static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu) { unsigned long flags = kvmppc_get_gpr(vcpu, 4); unsigned long pte_index = kvmppc_get_gpr(vcpu, 5); unsigned long avpn = kvmppc_get_gpr(vcpu, 6); unsigned long v = 0, pteg, rb; unsigned long pte[2]; pteg = get_pteg_addr(vcpu, pte_index); copy_from_user(pte, (void __user *)pteg, sizeof(pte)); if ((pte[0] & HPTE_V_VALID) == 0 || ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) || ((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) { kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND); return EMULATE_DONE; } copy_to_user((void __user *)pteg, &v, sizeof(v)); rb = compute_tlbie_rb(pte[0], pte[1], pte_index); vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ?
true : false); kvmppc_set_gpr(vcpu, 3, H_SUCCESS); kvmppc_set_gpr(vcpu, 4, pte[0]); kvmppc_set_gpr(vcpu, 5, pte[1]); return EMULATE_DONE; } /* Request defs for kvmppc_h_pr_bulk_remove() */ #define H_BULK_REMOVE_TYPE 0xc000000000000000ULL #define H_BULK_REMOVE_REQUEST 0x4000000000000000ULL #define H_BULK_REMOVE_RESPONSE 0x8000000000000000ULL #define H_BULK_REMOVE_END 0xc000000000000000ULL #define H_BULK_REMOVE_CODE 0x3000000000000000ULL #define H_BULK_REMOVE_SUCCESS 0x0000000000000000ULL #define H_BULK_REMOVE_NOT_FOUND 0x1000000000000000ULL #define H_BULK_REMOVE_PARM 0x2000000000000000ULL #define H_BULK_REMOVE_HW 0x3000000000000000ULL #define H_BULK_REMOVE_RC 0x0c00000000000000ULL #define H_BULK_REMOVE_FLAGS 0x0300000000000000ULL #define H_BULK_REMOVE_ABSOLUTE 0x0000000000000000ULL #define H_BULK_REMOVE_ANDCOND 0x0100000000000000ULL #define H_BULK_REMOVE_AVPN 0x0200000000000000ULL #define H_BULK_REMOVE_PTEX 0x00ffffffffffffffULL #define H_BULK_REMOVE_MAX_BATCH 4 static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu) { int i; int paramnr = 4; int ret = H_SUCCESS; for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) { unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i)); unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1); unsigned long pteg, rb, flags; unsigned long pte[2]; unsigned long v = 0; if ((tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) { break; /* Exit success */ } else if ((tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) { ret = H_PARAMETER; break; /* Exit fail */ } tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS; tsh |= H_BULK_REMOVE_RESPONSE; if ((tsh & H_BULK_REMOVE_ANDCOND) && (tsh & H_BULK_REMOVE_AVPN)) { tsh |= H_BULK_REMOVE_PARM; kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh); ret = H_PARAMETER; break; /* Exit fail */ } pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX); copy_from_user(pte, (void __user *)pteg, sizeof(pte)); /* tsl = AVPN */ flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26; if ((pte[0] & HPTE_V_VALID) == 0 || ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != tsl) || ((flags & H_ANDCOND) && (pte[0] & tsl) != 0)) { tsh |= H_BULK_REMOVE_NOT_FOUND; } else { /* Splat the pteg in (userland) hpt */ copy_to_user((void __user *)pteg, &v, sizeof(v)); rb = compute_tlbie_rb(pte[0], pte[1], tsh & H_BULK_REMOVE_PTEX); vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); tsh |= H_BULK_REMOVE_SUCCESS; tsh |= (pte[1] & (HPTE_R_C | HPTE_R_R)) << 43; } kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh); } kvmppc_set_gpr(vcpu, 3, ret); return EMULATE_DONE; } static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) { unsigned long flags = kvmppc_get_gpr(vcpu, 4); unsigned long pte_index = kvmppc_get_gpr(vcpu, 5); unsigned long avpn = kvmppc_get_gpr(vcpu, 6); unsigned long rb, pteg, r, v; unsigned long pte[2]; pteg = get_pteg_addr(vcpu, pte_index); copy_from_user(pte, (void __user *)pteg, sizeof(pte)); if ((pte[0] & HPTE_V_VALID) == 0 || ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn)) { kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND); return EMULATE_DONE; } v = pte[0]; r = pte[1]; r &= ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_HI | HPTE_R_KEY_LO); r |= (flags << 55) & HPTE_R_PP0; r |= (flags << 48) & HPTE_R_KEY_HI; r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO); pte[1] = r; rb = compute_tlbie_rb(v, r, pte_index); vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? 
true : false); copy_to_user((void __user *)pteg, pte, sizeof(pte)); kvmppc_set_gpr(vcpu, 3, H_SUCCESS); return EMULATE_DONE; } static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu) { unsigned long liobn = kvmppc_get_gpr(vcpu, 4); unsigned long ioba = kvmppc_get_gpr(vcpu, 5); unsigned long tce = kvmppc_get_gpr(vcpu, 6); long rc; rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce); if (rc == H_TOO_HARD) return EMULATE_FAIL; kvmppc_set_gpr(vcpu, 3, rc); return EMULATE_DONE; } static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd) { long rc = kvmppc_xics_hcall(vcpu, cmd); kvmppc_set_gpr(vcpu, 3, rc); return EMULATE_DONE; } int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) { switch (cmd) { case H_ENTER: return kvmppc_h_pr_enter(vcpu); case H_REMOVE: return kvmppc_h_pr_remove(vcpu); case H_PROTECT: return kvmppc_h_pr_protect(vcpu); case H_BULK_REMOVE: return kvmppc_h_pr_bulk_remove(vcpu); case H_PUT_TCE: return kvmppc_h_pr_put_tce(vcpu); case H_CEDE: vcpu->arch.shared->msr |= MSR_EE; kvm_vcpu_block(vcpu); clear_bit(KVM_REQ_UNHALT, &vcpu->requests); vcpu->stat.halt_wakeup++; return EMULATE_DONE; case H_XIRR: case H_CPPR: case H_EOI: case H_IPI: case H_IPOLL: case H_XIRR_X: if (kvmppc_xics_enabled(vcpu)) return kvmppc_h_pr_xics_hcall(vcpu, cmd); break; case H_RTAS: if (list_empty(&vcpu->kvm->arch.rtas_tokens)) return RESUME_HOST; if (kvmppc_rtas_hcall(vcpu)) break; kvmppc_set_gpr(vcpu, 3, 0); return EMULATE_DONE; } return EMULATE_FAIL; }
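/*
 * Example only: decoding one H_BULK_REMOVE parameter word with the
 * masks defined above, mirroring what kvmppc_h_pr_bulk_remove() does
 * for each batch entry. The function name is hypothetical.
 */
#if 0
static void example_decode_tsh(unsigned long tsh)
{
	unsigned long type = tsh & H_BULK_REMOVE_TYPE;
	unsigned long ptex = tsh & H_BULK_REMOVE_PTEX;
	unsigned long flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26;

	if (type == H_BULK_REMOVE_END)
		return;		/* end of batch: no more requests */
	if (type != H_BULK_REMOVE_REQUEST)
		return;		/* malformed entry: the handler returns H_PARAMETER */
	/* "flags" now carries the H_AVPN/H_ANDCOND bits, "ptex" the HPTE index */
	(void)ptex;
}
#endif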
gpl-2.0
cellphone/lge_p880_kernel_3.0
arch/powerpc/kernel/pci_32.c
2331
11668
/* * Common pmac/prep/chrp pci routines. -- Cort */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/init.h> #include <linux/capability.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/bootmem.h> #include <linux/irq.h> #include <linux/list.h> #include <linux/of.h> #include <linux/slab.h> #include <asm/processor.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/sections.h> #include <asm/pci-bridge.h> #include <asm/ppc-pci.h> #include <asm/byteorder.h> #include <asm/uaccess.h> #include <asm/machdep.h> #undef DEBUG unsigned long isa_io_base = 0; unsigned long pci_dram_offset = 0; int pcibios_assign_bus_offset = 1; void pcibios_make_OF_bus_map(void); static void fixup_cpc710_pci64(struct pci_dev *dev); static u8 *pci_to_OF_bus_map; /* By default, we don't re-assign bus numbers. We do this only on * some pmacs */ static int pci_assign_all_buses; static int pci_bus_count; /* This will remain NULL for now, until isa-bridge.c is made common * to both 32-bit and 64-bit. */ struct pci_dev *isa_bridge_pcidev; EXPORT_SYMBOL_GPL(isa_bridge_pcidev); static void fixup_hide_host_resource_fsl(struct pci_dev *dev) { int i, class = dev->class >> 8; if ((class == PCI_CLASS_PROCESSOR_POWERPC || class == PCI_CLASS_BRIDGE_OTHER) && (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) && (dev->bus->parent == NULL)) { for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { dev->resource[i].start = 0; dev->resource[i].end = 0; dev->resource[i].flags = 0; } } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl); static void fixup_cpc710_pci64(struct pci_dev *dev) { /* Hide the PCI64 BARs from the kernel as their content doesn't * fit well in the resource management */ dev->resource[0].start = dev->resource[0].end = 0; dev->resource[0].flags = 0; dev->resource[1].start = dev->resource[1].end = 0; dev->resource[1].flags = 0; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CPC710_PCI64, fixup_cpc710_pci64); /* * Functions below are used on OpenFirmware machines. */ static void make_one_node_map(struct device_node *node, u8 pci_bus) { const int *bus_range; int len; if (pci_bus >= pci_bus_count) return; bus_range = of_get_property(node, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) { printk(KERN_WARNING "Can't get bus-range for %s, " "assuming it starts at 0\n", node->full_name); pci_to_OF_bus_map[pci_bus] = 0; } else pci_to_OF_bus_map[pci_bus] = bus_range[0]; for_each_child_of_node(node, node) { struct pci_dev *dev; const unsigned int *class_code, *reg; class_code = of_get_property(node, "class-code", NULL); if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI && (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) continue; reg = of_get_property(node, "reg", NULL); if (!reg) continue; dev = pci_get_bus_and_slot(pci_bus, ((reg[0] >> 8) & 0xff)); if (!dev || !dev->subordinate) { pci_dev_put(dev); continue; } make_one_node_map(node, dev->subordinate->number); pci_dev_put(dev); } } void pcibios_make_OF_bus_map(void) { int i; struct pci_controller *hose, *tmp; struct property *map_prop; struct device_node *dn; pci_to_OF_bus_map = kmalloc(pci_bus_count, GFP_KERNEL); if (!pci_to_OF_bus_map) { printk(KERN_ERR "Can't allocate OF bus map!\n"); return; } /* We fill the bus map with invalid values; that helps * debugging.
*/ for (i = 0; i < pci_bus_count; i++) pci_to_OF_bus_map[i] = 0xff; /* For each hose, we begin searching bridges */ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { struct device_node *node = hose->dn; if (!node) continue; make_one_node_map(node, hose->first_busno); } dn = of_find_node_by_path("/"); map_prop = of_find_property(dn, "pci-OF-bus-map", NULL); if (map_prop) { BUG_ON(pci_bus_count > map_prop->length); memcpy(map_prop->value, pci_to_OF_bus_map, pci_bus_count); } of_node_put(dn); #ifdef DEBUG printk("PCI->OF bus map:\n"); for (i = 0; i < pci_bus_count; i++) { if (pci_to_OF_bus_map[i] == 0xff) continue; printk("%d -> %d\n", i, pci_to_OF_bus_map[i]); } #endif } typedef int (*pci_OF_scan_iterator)(struct device_node *node, void *data); static struct device_node *scan_OF_pci_childs(struct device_node *parent, pci_OF_scan_iterator filter, void *data) { struct device_node *node; struct device_node *sub_node; for_each_child_of_node(parent, node) { const unsigned int *class_code; if (filter(node, data)) { of_node_put(node); return node; } /* For PCI<->PCI bridges or CardBus bridges, we go down. * Note: some OFs create a parent node "multifunc-device" as * a fake root for all functions of a multi-function device; * we go down them as well. */ class_code = of_get_property(node, "class-code", NULL); if ((!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI && (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) && strcmp(node->name, "multifunc-device")) continue; sub_node = scan_OF_pci_childs(node, filter, data); if (sub_node) { of_node_put(node); return sub_node; } } return NULL; } static struct device_node *scan_OF_for_pci_dev(struct device_node *parent, unsigned int devfn) { struct device_node *np, *cnp; const u32 *reg; unsigned int psize; for_each_child_of_node(parent, np) { reg = of_get_property(np, "reg", &psize); if (reg && psize >= 4 && ((reg[0] >> 8) & 0xff) == devfn) return np; /* Note: some OFs create a parent node "multifunc-device" as * a fake root for all functions of a multi-function device; * we go down them as well. */ if (!strcmp(np->name, "multifunc-device")) { cnp = scan_OF_for_pci_dev(np, devfn); if (cnp) return cnp; } } return NULL; } static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus) { struct device_node *parent, *np; /* Are we a root bus? */ if (bus->self == NULL || bus->parent == NULL) { struct pci_controller *hose = pci_bus_to_host(bus); if (hose == NULL) return NULL; return of_node_get(hose->dn); } /* not a root bus; we need to get our parent */ parent = scan_OF_for_pci_bus(bus->parent); if (parent == NULL) return NULL; /* now iterate over the children looking for a match */ np = scan_OF_for_pci_dev(parent, bus->self->devfn); of_node_put(parent); return np; } /* * Scans the OF tree for a device node matching a PCI device */ struct device_node * pci_busdev_to_OF_node(struct pci_bus *bus, int devfn) { struct device_node *parent, *np; pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn); parent = scan_OF_for_pci_bus(bus); if (parent == NULL) return NULL; pr_debug(" parent is %s\n", parent ? parent->full_name : "<NULL>"); np = scan_OF_for_pci_dev(parent, devfn); of_node_put(parent); pr_debug(" result is %s\n", np ? np->full_name : "<NULL>"); /* XXX most callers don't release the returned node, * mostly because ppc64 doesn't increase the refcount; * we need to fix that.
*/ return np; } EXPORT_SYMBOL(pci_busdev_to_OF_node); struct device_node *pci_device_to_OF_node(struct pci_dev *dev) { return pci_busdev_to_OF_node(dev->bus, dev->devfn); } EXPORT_SYMBOL(pci_device_to_OF_node); static int find_OF_pci_device_filter(struct device_node *node, void *data) { return ((void *)node == data); } /* * Returns the PCI device matching a given OF node */ int pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn) { const unsigned int *reg; struct pci_controller *hose; struct pci_dev *dev = NULL; /* Make sure it's really a PCI device */ hose = pci_find_hose_for_OF_device(node); if (!hose || !hose->dn) return -ENODEV; if (!scan_OF_pci_childs(hose->dn, find_OF_pci_device_filter, (void *)node)) return -ENODEV; reg = of_get_property(node, "reg", NULL); if (!reg) return -ENODEV; *bus = (reg[0] >> 16) & 0xff; *devfn = ((reg[0] >> 8) & 0xff); /* OK, here we need some tweak. If we have already renumbered * all busses, we can't rely on the OF bus number any more. * The pci_to_OF_bus_map is not enough, as several PCI busses * may match the same OF bus number. */ if (!pci_to_OF_bus_map) return 0; for_each_pci_dev(dev) if (pci_to_OF_bus_map[dev->bus->number] == *bus && dev->devfn == *devfn) { *bus = dev->bus->number; pci_dev_put(dev); return 0; } return -ENODEV; } EXPORT_SYMBOL(pci_device_from_OF_node); /* We create the "pci-OF-bus-map" property now so it appears in the * /proc device tree */ void __init pci_create_OF_bus_map(void) { struct property *of_prop; struct device_node *dn; of_prop = (struct property *) alloc_bootmem(sizeof(struct property) + 256); if (!of_prop) return; dn = of_find_node_by_path("/"); if (dn) { memset(of_prop, -1, sizeof(struct property) + 256); of_prop->name = "pci-OF-bus-map"; of_prop->length = 256; of_prop->value = &of_prop[1]; prom_add_property(dn, of_prop); of_node_put(dn); } } void __devinit pcibios_setup_phb_io_space(struct pci_controller *hose) { unsigned long io_offset; struct resource *res = &hose->io_resource; /* Fixup IO space offset */ io_offset = (unsigned long)hose->io_base_virt - isa_io_base; res->start = (res->start + io_offset) & 0xffffffffu; res->end = (res->end + io_offset) & 0xffffffffu; } static int __init pcibios_init(void) { struct pci_controller *hose, *tmp; int next_busno = 0; printk(KERN_INFO "PCI: Probing PCI hardware\n"); if (ppc_pci_flags & PPC_PCI_REASSIGN_ALL_BUS) pci_assign_all_buses = 1; /* Scan all of the recorded PCI controllers. */ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { if (pci_assign_all_buses) hose->first_busno = next_busno; hose->last_busno = 0xff; pcibios_scan_phb(hose); pci_bus_add_devices(hose->bus); if (pci_assign_all_buses || next_busno <= hose->last_busno) next_busno = hose->last_busno + pcibios_assign_bus_offset; } pci_bus_count = next_busno; /* OpenFirmware based machines need a map of OF bus * numbers vs. kernel bus numbers since we may have to * remap them. */ if (pci_assign_all_buses) pcibios_make_OF_bus_map(); /* Call common code to handle resource allocation */ pcibios_resource_survey(); /* Call machine dependent post-init code */ if (ppc_md.pcibios_after_init) ppc_md.pcibios_after_init(); return 0; } subsys_initcall(pcibios_init); static struct pci_controller *pci_bus_to_hose(int bus) { struct pci_controller *hose, *tmp; list_for_each_entry_safe(hose, tmp, &hose_list, list_node) if (bus >= hose->first_busno && bus <= hose->last_busno) return hose; return NULL; } /* Provide information on locations of various I/O regions in physical * memory.
Do this on a per-card basis so that we choose the right * root bridge. * Note that the returned IO or memory base is a physical address */ long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn) { struct pci_controller *hose; long result = -EOPNOTSUPP; hose = pci_bus_to_hose(bus); if (!hose) return -ENODEV; switch (which) { case IOBASE_BRIDGE_NUMBER: return (long)hose->first_busno; case IOBASE_MEMORY: return (long)hose->pci_mem_offset; case IOBASE_IO: return (long)hose->io_base_phys; case IOBASE_ISA_IO: return (long)isa_io_base; case IOBASE_ISA_MEM: return (long)isa_mem_base; } return result; }
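/*
 * Example only (powerpc userspace): querying the handler above through
 * the pciconfig_iobase system call. The IOBASE_ISA_IO value mirrors the
 * kernel's selector; bus 0 picks the first host bridge. Availability of
 * __NR_pciconfig_iobase is assumed (it is powerpc-specific).
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>

#define IOBASE_ISA_IO 3	/* same value as the kernel's IOBASE_ISA_IO */

static long example_isa_io_base(void)
{
	return syscall(__NR_pciconfig_iobase, IOBASE_ISA_IO, 0, 0);
}
#endif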
gpl-2.0
htc-mirror/jewel-ics-crc-3.0.8-3fd0422
drivers/infiniband/core/verbs.c
3099
21627
/* * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2004 Infinicon Corporation. All rights reserved. * Copyright (c) 2004 Intel Corporation. All rights reserved. * Copyright (c) 2004 Topspin Corporation. All rights reserved. * Copyright (c) 2004 Voltaire Corporation. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/errno.h> #include <linux/err.h> #include <linux/string.h> #include <rdma/ib_verbs.h> #include <rdma/ib_cache.h> int ib_rate_to_mult(enum ib_rate rate) { switch (rate) { case IB_RATE_2_5_GBPS: return 1; case IB_RATE_5_GBPS: return 2; case IB_RATE_10_GBPS: return 4; case IB_RATE_20_GBPS: return 8; case IB_RATE_30_GBPS: return 12; case IB_RATE_40_GBPS: return 16; case IB_RATE_60_GBPS: return 24; case IB_RATE_80_GBPS: return 32; case IB_RATE_120_GBPS: return 48; default: return -1; } } EXPORT_SYMBOL(ib_rate_to_mult); enum ib_rate mult_to_ib_rate(int mult) { switch (mult) { case 1: return IB_RATE_2_5_GBPS; case 2: return IB_RATE_5_GBPS; case 4: return IB_RATE_10_GBPS; case 8: return IB_RATE_20_GBPS; case 12: return IB_RATE_30_GBPS; case 16: return IB_RATE_40_GBPS; case 24: return IB_RATE_60_GBPS; case 32: return IB_RATE_80_GBPS; case 48: return IB_RATE_120_GBPS; default: return IB_RATE_PORT_CURRENT; } } EXPORT_SYMBOL(mult_to_ib_rate); enum rdma_transport_type rdma_node_get_transport(enum rdma_node_type node_type) { switch (node_type) { case RDMA_NODE_IB_CA: case RDMA_NODE_IB_SWITCH: case RDMA_NODE_IB_ROUTER: return RDMA_TRANSPORT_IB; case RDMA_NODE_RNIC: return RDMA_TRANSPORT_IWARP; default: BUG(); return 0; } } EXPORT_SYMBOL(rdma_node_get_transport); enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num) { if (device->get_link_layer) return device->get_link_layer(device, port_num); switch (rdma_node_get_transport(device->node_type)) { case RDMA_TRANSPORT_IB: return IB_LINK_LAYER_INFINIBAND; case RDMA_TRANSPORT_IWARP: return IB_LINK_LAYER_ETHERNET; default: return IB_LINK_LAYER_UNSPECIFIED; } } EXPORT_SYMBOL(rdma_port_get_link_layer); /* Protection domains */ struct ib_pd *ib_alloc_pd(struct ib_device *device) { struct ib_pd *pd; pd = 
device->alloc_pd(device, NULL, NULL); if (!IS_ERR(pd)) { pd->device = device; pd->uobject = NULL; atomic_set(&pd->usecnt, 0); } return pd; } EXPORT_SYMBOL(ib_alloc_pd); int ib_dealloc_pd(struct ib_pd *pd) { if (atomic_read(&pd->usecnt)) return -EBUSY; return pd->device->dealloc_pd(pd); } EXPORT_SYMBOL(ib_dealloc_pd); /* Address handles */ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) { struct ib_ah *ah; ah = pd->device->create_ah(pd, ah_attr); if (!IS_ERR(ah)) { ah->device = pd->device; ah->pd = pd; ah->uobject = NULL; atomic_inc(&pd->usecnt); } return ah; } EXPORT_SYMBOL(ib_create_ah); int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc, struct ib_grh *grh, struct ib_ah_attr *ah_attr) { u32 flow_class; u16 gid_index; int ret; memset(ah_attr, 0, sizeof *ah_attr); ah_attr->dlid = wc->slid; ah_attr->sl = wc->sl; ah_attr->src_path_bits = wc->dlid_path_bits; ah_attr->port_num = port_num; if (wc->wc_flags & IB_WC_GRH) { ah_attr->ah_flags = IB_AH_GRH; ah_attr->grh.dgid = grh->sgid; ret = ib_find_cached_gid(device, &grh->dgid, &port_num, &gid_index); if (ret) return ret; ah_attr->grh.sgid_index = (u8) gid_index; flow_class = be32_to_cpu(grh->version_tclass_flow); ah_attr->grh.flow_label = flow_class & 0xFFFFF; ah_attr->grh.hop_limit = 0xFF; ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF; } return 0; } EXPORT_SYMBOL(ib_init_ah_from_wc); struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc, struct ib_grh *grh, u8 port_num) { struct ib_ah_attr ah_attr; int ret; ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr); if (ret) return ERR_PTR(ret); return ib_create_ah(pd, &ah_attr); } EXPORT_SYMBOL(ib_create_ah_from_wc); int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr) { return ah->device->modify_ah ? ah->device->modify_ah(ah, ah_attr) : -ENOSYS; } EXPORT_SYMBOL(ib_modify_ah); int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr) { return ah->device->query_ah ? ah->device->query_ah(ah, ah_attr) : -ENOSYS; } EXPORT_SYMBOL(ib_query_ah); int ib_destroy_ah(struct ib_ah *ah) { struct ib_pd *pd; int ret; pd = ah->pd; ret = ah->device->destroy_ah(ah); if (!ret) atomic_dec(&pd->usecnt); return ret; } EXPORT_SYMBOL(ib_destroy_ah); /* Shared receive queues */ struct ib_srq *ib_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *srq_init_attr) { struct ib_srq *srq; if (!pd->device->create_srq) return ERR_PTR(-ENOSYS); srq = pd->device->create_srq(pd, srq_init_attr, NULL); if (!IS_ERR(srq)) { srq->device = pd->device; srq->pd = pd; srq->uobject = NULL; srq->event_handler = srq_init_attr->event_handler; srq->srq_context = srq_init_attr->srq_context; atomic_inc(&pd->usecnt); atomic_set(&srq->usecnt, 0); } return srq; } EXPORT_SYMBOL(ib_create_srq); int ib_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr, enum ib_srq_attr_mask srq_attr_mask) { return srq->device->modify_srq ? srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) : -ENOSYS; } EXPORT_SYMBOL(ib_modify_srq); int ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr) { return srq->device->query_srq ? 
srq->device->query_srq(srq, srq_attr) : -ENOSYS; } EXPORT_SYMBOL(ib_query_srq); int ib_destroy_srq(struct ib_srq *srq) { struct ib_pd *pd; int ret; if (atomic_read(&srq->usecnt)) return -EBUSY; pd = srq->pd; ret = srq->device->destroy_srq(srq); if (!ret) atomic_dec(&pd->usecnt); return ret; } EXPORT_SYMBOL(ib_destroy_srq); /* Queue pairs */ struct ib_qp *ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *qp_init_attr) { struct ib_qp *qp; qp = pd->device->create_qp(pd, qp_init_attr, NULL); if (!IS_ERR(qp)) { qp->device = pd->device; qp->pd = pd; qp->send_cq = qp_init_attr->send_cq; qp->recv_cq = qp_init_attr->recv_cq; qp->srq = qp_init_attr->srq; qp->uobject = NULL; qp->event_handler = qp_init_attr->event_handler; qp->qp_context = qp_init_attr->qp_context; qp->qp_type = qp_init_attr->qp_type; atomic_inc(&pd->usecnt); atomic_inc(&qp_init_attr->send_cq->usecnt); atomic_inc(&qp_init_attr->recv_cq->usecnt); if (qp_init_attr->srq) atomic_inc(&qp_init_attr->srq->usecnt); } return qp; } EXPORT_SYMBOL(ib_create_qp); static const struct { int valid; enum ib_qp_attr_mask req_param[IB_QPT_RAW_ETHERTYPE + 1]; enum ib_qp_attr_mask opt_param[IB_QPT_RAW_ETHERTYPE + 1]; } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { [IB_QPS_RESET] = { [IB_QPS_RESET] = { .valid = 1 }, [IB_QPS_INIT] = { .valid = 1, .req_param = { [IB_QPT_UD] = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY), [IB_QPT_UC] = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS), [IB_QPT_RC] = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS), [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY), [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY), } }, }, [IB_QPS_INIT] = { [IB_QPS_RESET] = { .valid = 1 }, [IB_QPS_ERR] = { .valid = 1 }, [IB_QPS_INIT] = { .valid = 1, .opt_param = { [IB_QPT_UD] = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY), [IB_QPT_UC] = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS), [IB_QPT_RC] = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS), [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY), [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY), } }, [IB_QPS_RTR] = { .valid = 1, .req_param = { [IB_QPT_UC] = (IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN | IB_QP_RQ_PSN), [IB_QPT_RC] = (IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN | IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER), }, .opt_param = { [IB_QPT_UD] = (IB_QP_PKEY_INDEX | IB_QP_QKEY), [IB_QPT_UC] = (IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX), [IB_QPT_RC] = (IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX), [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY), [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY), } } }, [IB_QPS_RTR] = { [IB_QPS_RESET] = { .valid = 1 }, [IB_QPS_ERR] = { .valid = 1 }, [IB_QPS_RTS] = { .valid = 1, .req_param = { [IB_QPT_UD] = IB_QP_SQ_PSN, [IB_QPT_UC] = IB_QP_SQ_PSN, [IB_QPT_RC] = (IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY | IB_QP_SQ_PSN | IB_QP_MAX_QP_RD_ATOMIC), [IB_QPT_SMI] = IB_QP_SQ_PSN, [IB_QPT_GSI] = IB_QP_SQ_PSN, }, .opt_param = { [IB_QPT_UD] = (IB_QP_CUR_STATE | IB_QP_QKEY), [IB_QPT_UC] = (IB_QP_CUR_STATE | IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PATH_MIG_STATE), [IB_QPT_RC] = (IB_QP_CUR_STATE | IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_MIN_RNR_TIMER | IB_QP_PATH_MIG_STATE), [IB_QPT_SMI] = (IB_QP_CUR_STATE | IB_QP_QKEY), [IB_QPT_GSI] = (IB_QP_CUR_STATE | IB_QP_QKEY), } } }, [IB_QPS_RTS] = { [IB_QPS_RESET] = { .valid = 1 }, [IB_QPS_ERR] = { .valid = 1 }, [IB_QPS_RTS] = { .valid = 1, .opt_param = { [IB_QPT_UD] = (IB_QP_CUR_STATE | IB_QP_QKEY), [IB_QPT_UC] = (IB_QP_CUR_STATE | IB_QP_ACCESS_FLAGS | 
IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE), [IB_QPT_RC] = (IB_QP_CUR_STATE | IB_QP_ACCESS_FLAGS | IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE | IB_QP_MIN_RNR_TIMER), [IB_QPT_SMI] = (IB_QP_CUR_STATE | IB_QP_QKEY), [IB_QPT_GSI] = (IB_QP_CUR_STATE | IB_QP_QKEY), } }, [IB_QPS_SQD] = { .valid = 1, .opt_param = { [IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY, [IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY, [IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY, [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY, [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY } }, }, [IB_QPS_SQD] = { [IB_QPS_RESET] = { .valid = 1 }, [IB_QPS_ERR] = { .valid = 1 }, [IB_QPS_RTS] = { .valid = 1, .opt_param = { [IB_QPT_UD] = (IB_QP_CUR_STATE | IB_QP_QKEY), [IB_QPT_UC] = (IB_QP_CUR_STATE | IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PATH_MIG_STATE), [IB_QPT_RC] = (IB_QP_CUR_STATE | IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_MIN_RNR_TIMER | IB_QP_PATH_MIG_STATE), [IB_QPT_SMI] = (IB_QP_CUR_STATE | IB_QP_QKEY), [IB_QPT_GSI] = (IB_QP_CUR_STATE | IB_QP_QKEY), } }, [IB_QPS_SQD] = { .valid = 1, .opt_param = { [IB_QPT_UD] = (IB_QP_PKEY_INDEX | IB_QP_QKEY), [IB_QPT_UC] = (IB_QP_AV | IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PATH_MIG_STATE), [IB_QPT_RC] = (IB_QP_PORT | IB_QP_AV | IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY | IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_MIN_RNR_TIMER | IB_QP_PATH_MIG_STATE), [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY), [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY), } } }, [IB_QPS_SQE] = { [IB_QPS_RESET] = { .valid = 1 }, [IB_QPS_ERR] = { .valid = 1 }, [IB_QPS_RTS] = { .valid = 1, .opt_param = { [IB_QPT_UD] = (IB_QP_CUR_STATE | IB_QP_QKEY), [IB_QPT_UC] = (IB_QP_CUR_STATE | IB_QP_ACCESS_FLAGS), [IB_QPT_SMI] = (IB_QP_CUR_STATE | IB_QP_QKEY), [IB_QPT_GSI] = (IB_QP_CUR_STATE | IB_QP_QKEY), } } }, [IB_QPS_ERR] = { [IB_QPS_RESET] = { .valid = 1 }, [IB_QPS_ERR] = { .valid = 1 } } }; int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, enum ib_qp_type type, enum ib_qp_attr_mask mask) { enum ib_qp_attr_mask req_param, opt_param; if (cur_state < 0 || cur_state > IB_QPS_ERR || next_state < 0 || next_state > IB_QPS_ERR) return 0; if (mask & IB_QP_CUR_STATE && cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS && cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE) return 0; if (!qp_state_table[cur_state][next_state].valid) return 0; req_param = qp_state_table[cur_state][next_state].req_param[type]; opt_param = qp_state_table[cur_state][next_state].opt_param[type]; if ((mask & req_param) != req_param) return 0; if (mask & ~(req_param | opt_param | IB_QP_STATE)) return 0; return 1; } EXPORT_SYMBOL(ib_modify_qp_is_ok); int ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask) { return qp->device->modify_qp(qp, qp_attr, qp_attr_mask, NULL); } EXPORT_SYMBOL(ib_modify_qp); int ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) { return qp->device->query_qp ? 
qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) : -ENOSYS; } EXPORT_SYMBOL(ib_query_qp); int ib_destroy_qp(struct ib_qp *qp) { struct ib_pd *pd; struct ib_cq *scq, *rcq; struct ib_srq *srq; int ret; pd = qp->pd; scq = qp->send_cq; rcq = qp->recv_cq; srq = qp->srq; ret = qp->device->destroy_qp(qp); if (!ret) { atomic_dec(&pd->usecnt); atomic_dec(&scq->usecnt); atomic_dec(&rcq->usecnt); if (srq) atomic_dec(&srq->usecnt); } return ret; } EXPORT_SYMBOL(ib_destroy_qp); /* Completion queues */ struct ib_cq *ib_create_cq(struct ib_device *device, ib_comp_handler comp_handler, void (*event_handler)(struct ib_event *, void *), void *cq_context, int cqe, int comp_vector) { struct ib_cq *cq; cq = device->create_cq(device, cqe, comp_vector, NULL, NULL); if (!IS_ERR(cq)) { cq->device = device; cq->uobject = NULL; cq->comp_handler = comp_handler; cq->event_handler = event_handler; cq->cq_context = cq_context; atomic_set(&cq->usecnt, 0); } return cq; } EXPORT_SYMBOL(ib_create_cq); int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) { return cq->device->modify_cq ? cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS; } EXPORT_SYMBOL(ib_modify_cq); int ib_destroy_cq(struct ib_cq *cq) { if (atomic_read(&cq->usecnt)) return -EBUSY; return cq->device->destroy_cq(cq); } EXPORT_SYMBOL(ib_destroy_cq); int ib_resize_cq(struct ib_cq *cq, int cqe) { return cq->device->resize_cq ? cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS; } EXPORT_SYMBOL(ib_resize_cq); /* Memory regions */ struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags) { struct ib_mr *mr; mr = pd->device->get_dma_mr(pd, mr_access_flags); if (!IS_ERR(mr)) { mr->device = pd->device; mr->pd = pd; mr->uobject = NULL; atomic_inc(&pd->usecnt); atomic_set(&mr->usecnt, 0); } return mr; } EXPORT_SYMBOL(ib_get_dma_mr); struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd, struct ib_phys_buf *phys_buf_array, int num_phys_buf, int mr_access_flags, u64 *iova_start) { struct ib_mr *mr; if (!pd->device->reg_phys_mr) return ERR_PTR(-ENOSYS); mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf, mr_access_flags, iova_start); if (!IS_ERR(mr)) { mr->device = pd->device; mr->pd = pd; mr->uobject = NULL; atomic_inc(&pd->usecnt); atomic_set(&mr->usecnt, 0); } return mr; } EXPORT_SYMBOL(ib_reg_phys_mr); int ib_rereg_phys_mr(struct ib_mr *mr, int mr_rereg_mask, struct ib_pd *pd, struct ib_phys_buf *phys_buf_array, int num_phys_buf, int mr_access_flags, u64 *iova_start) { struct ib_pd *old_pd; int ret; if (!mr->device->rereg_phys_mr) return -ENOSYS; if (atomic_read(&mr->usecnt)) return -EBUSY; old_pd = mr->pd; ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd, phys_buf_array, num_phys_buf, mr_access_flags, iova_start); if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) { atomic_dec(&old_pd->usecnt); atomic_inc(&pd->usecnt); } return ret; } EXPORT_SYMBOL(ib_rereg_phys_mr); int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr) { return mr->device->query_mr ? 
mr->device->query_mr(mr, mr_attr) : -ENOSYS; } EXPORT_SYMBOL(ib_query_mr); int ib_dereg_mr(struct ib_mr *mr) { struct ib_pd *pd; int ret; if (atomic_read(&mr->usecnt)) return -EBUSY; pd = mr->pd; ret = mr->device->dereg_mr(mr); if (!ret) atomic_dec(&pd->usecnt); return ret; } EXPORT_SYMBOL(ib_dereg_mr); struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len) { struct ib_mr *mr; if (!pd->device->alloc_fast_reg_mr) return ERR_PTR(-ENOSYS); mr = pd->device->alloc_fast_reg_mr(pd, max_page_list_len); if (!IS_ERR(mr)) { mr->device = pd->device; mr->pd = pd; mr->uobject = NULL; atomic_inc(&pd->usecnt); atomic_set(&mr->usecnt, 0); } return mr; } EXPORT_SYMBOL(ib_alloc_fast_reg_mr); struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(struct ib_device *device, int max_page_list_len) { struct ib_fast_reg_page_list *page_list; if (!device->alloc_fast_reg_page_list) return ERR_PTR(-ENOSYS); page_list = device->alloc_fast_reg_page_list(device, max_page_list_len); if (!IS_ERR(page_list)) { page_list->device = device; page_list->max_page_list_len = max_page_list_len; } return page_list; } EXPORT_SYMBOL(ib_alloc_fast_reg_page_list); void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list) { page_list->device->free_fast_reg_page_list(page_list); } EXPORT_SYMBOL(ib_free_fast_reg_page_list); /* Memory windows */ struct ib_mw *ib_alloc_mw(struct ib_pd *pd) { struct ib_mw *mw; if (!pd->device->alloc_mw) return ERR_PTR(-ENOSYS); mw = pd->device->alloc_mw(pd); if (!IS_ERR(mw)) { mw->device = pd->device; mw->pd = pd; mw->uobject = NULL; atomic_inc(&pd->usecnt); } return mw; } EXPORT_SYMBOL(ib_alloc_mw); int ib_dealloc_mw(struct ib_mw *mw) { struct ib_pd *pd; int ret; pd = mw->pd; ret = mw->device->dealloc_mw(mw); if (!ret) atomic_dec(&pd->usecnt); return ret; } EXPORT_SYMBOL(ib_dealloc_mw); /* "Fast" memory regions */ struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, int mr_access_flags, struct ib_fmr_attr *fmr_attr) { struct ib_fmr *fmr; if (!pd->device->alloc_fmr) return ERR_PTR(-ENOSYS); fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr); if (!IS_ERR(fmr)) { fmr->device = pd->device; fmr->pd = pd; atomic_inc(&pd->usecnt); } return fmr; } EXPORT_SYMBOL(ib_alloc_fmr); int ib_unmap_fmr(struct list_head *fmr_list) { struct ib_fmr *fmr; if (list_empty(fmr_list)) return 0; fmr = list_entry(fmr_list->next, struct ib_fmr, list); return fmr->device->unmap_fmr(fmr_list); } EXPORT_SYMBOL(ib_unmap_fmr); int ib_dealloc_fmr(struct ib_fmr *fmr) { struct ib_pd *pd; int ret; pd = fmr->pd; ret = fmr->device->dealloc_fmr(fmr); if (!ret) atomic_dec(&pd->usecnt); return ret; } EXPORT_SYMBOL(ib_dealloc_fmr); /* Multicast groups */ int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) { if (!qp->device->attach_mcast) return -ENOSYS; if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD) return -EINVAL; return qp->device->attach_mcast(qp, gid, lid); } EXPORT_SYMBOL(ib_attach_mcast); int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) { if (!qp->device->detach_mcast) return -ENOSYS; if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD) return -EINVAL; return qp->device->detach_mcast(qp, gid, lid); } EXPORT_SYMBOL(ib_detach_mcast);
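/*
 * Example only: the usual consumer sequence over the wrappers above.
 * "device" is assumed to come from an ib_client add callback; the CQE
 * count is arbitrary and error unwinding is abbreviated.
 */
#if 0
static int example_setup(struct ib_device *device)
{
	struct ib_pd *pd;
	struct ib_cq *cq;

	pd = ib_alloc_pd(device);
	if (IS_ERR(pd))
		return PTR_ERR(pd);
	/* 64 CQEs on completion vector 0, no handlers for brevity */
	cq = ib_create_cq(device, NULL, NULL, NULL, 64, 0);
	if (IS_ERR(cq)) {
		ib_dealloc_pd(pd);
		return PTR_ERR(cq);
	}
	/* a QP would be created next with ib_create_qp(pd, &init_attr) */
	ib_destroy_cq(cq);
	ib_dealloc_pd(pd);
	return 0;
}
#endif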
gpl-2.0
aatjitra/7105u1
drivers/video/omap/lcd_palmtt.c
4123
2905
/* * LCD panel support for Palm Tungsten|T * Current version : Marek Vasut <marek.vasut@gmail.com> * * Modified from lcd_inn1510.c * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* GPIO11 - backlight GPIO12 - screen blanking GPIO13 - screen blanking */ #include <linux/platform_device.h> #include <linux/module.h> #include <linux/io.h> #include <mach/gpio.h> #include "omapfb.h" static int palmtt_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev) { return 0; } static void palmtt_panel_cleanup(struct lcd_panel *panel) { } static int palmtt_panel_enable(struct lcd_panel *panel) { return 0; } static void palmtt_panel_disable(struct lcd_panel *panel) { } static unsigned long palmtt_panel_get_caps(struct lcd_panel *panel) { return OMAPFB_CAPS_SET_BACKLIGHT; } struct lcd_panel palmtt_panel = { .name = "palmtt", .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC | OMAP_LCDC_INV_HSYNC | OMAP_LCDC_HSVS_RISING_EDGE | OMAP_LCDC_HSVS_OPPOSITE, .bpp = 16, .data_lines = 16, .x_res = 320, .y_res = 320, .pixel_clock = 10000, .hsw = 4, .hfp = 8, .hbp = 28, .vsw = 1, .vfp = 8, .vbp = 7, .pcd = 0, .init = palmtt_panel_init, .cleanup = palmtt_panel_cleanup, .enable = palmtt_panel_enable, .disable = palmtt_panel_disable, .get_caps = palmtt_panel_get_caps, }; static int palmtt_panel_probe(struct platform_device *pdev) { omapfb_register_panel(&palmtt_panel); return 0; } static int palmtt_panel_remove(struct platform_device *pdev) { return 0; } static int palmtt_panel_suspend(struct platform_device *pdev, pm_message_t mesg) { return 0; } static int palmtt_panel_resume(struct platform_device *pdev) { return 0; } struct platform_driver palmtt_panel_driver = { .probe = palmtt_panel_probe, .remove = palmtt_panel_remove, .suspend = palmtt_panel_suspend, .resume = palmtt_panel_resume, .driver = { .name = "lcd_palmtt", .owner = THIS_MODULE, }, }; static int __init palmtt_panel_drv_init(void) { return platform_driver_register(&palmtt_panel_driver); } static void __exit palmtt_panel_drv_cleanup(void) { platform_driver_unregister(&palmtt_panel_driver); } module_init(palmtt_panel_drv_init); module_exit(palmtt_panel_drv_cleanup);
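/*
 * Example only: deriving the approximate refresh rate from the timing
 * fields above. For palmtt_panel: (320+4+8+28) * (320+1+8+7) = 120960
 * clocks per frame, so a 10000 kHz pixel clock gives roughly 82 Hz.
 */
#if 0
static unsigned long example_refresh_hz(const struct lcd_panel *p)
{
	unsigned long htotal = p->x_res + p->hsw + p->hfp + p->hbp;
	unsigned long vtotal = p->y_res + p->vsw + p->vfp + p->vbp;

	/* pixel_clock is given in kHz */
	return (p->pixel_clock * 1000UL) / (htotal * vtotal);
}
#endif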
gpl-2.0
smartboyhw/ubuntu-saucy-rt
arch/s390/kernel/lgr.c
4379
4337
/* * Linux Guest Relocation (LGR) detection * * Copyright IBM Corp. 2012 * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com> */ #include <linux/module.h> #include <linux/timer.h> #include <linux/slab.h> #include <asm/facility.h> #include <asm/sysinfo.h> #include <asm/ebcdic.h> #include <asm/debug.h> #include <asm/ipl.h> #define LGR_TIMER_INTERVAL_SECS (30 * 60) #define VM_LEVEL_MAX 2 /* Maximum is 8, but we only record two levels */ /* * LGR info: Contains stfle and stsi data */ struct lgr_info { /* Bit field with facility information: 4 DWORDs are stored */ u64 stfle_fac_list[4]; /* Level of system (1 = CEC, 2 = LPAR, 3 = z/VM */ u32 level; /* Level 1: CEC info (stsi 1.1.1) */ char manufacturer[16]; char type[4]; char sequence[16]; char plant[4]; char model[16]; /* Level 2: LPAR info (stsi 2.2.2) */ u16 lpar_number; char name[8]; /* Level 3: VM info (stsi 3.2.2) */ u8 vm_count; struct { char name[8]; char cpi[16]; } vm[VM_LEVEL_MAX]; } __packed __aligned(8); /* * LGR globals */ static char lgr_page[PAGE_SIZE] __aligned(PAGE_SIZE); static struct lgr_info lgr_info_last; static struct lgr_info lgr_info_cur; static struct debug_info *lgr_dbf; /* * Copy buffer and then convert it to ASCII */ static void cpascii(char *dst, char *src, int size) { memcpy(dst, src, size); EBCASC(dst, size); } /* * Fill LGR info with 1.1.1 stsi data */ static void lgr_stsi_1_1_1(struct lgr_info *lgr_info) { struct sysinfo_1_1_1 *si = (void *) lgr_page; if (stsi(si, 1, 1, 1)) return; cpascii(lgr_info->manufacturer, si->manufacturer, sizeof(si->manufacturer)); cpascii(lgr_info->type, si->type, sizeof(si->type)); cpascii(lgr_info->model, si->model, sizeof(si->model)); cpascii(lgr_info->sequence, si->sequence, sizeof(si->sequence)); cpascii(lgr_info->plant, si->plant, sizeof(si->plant)); } /* * Fill LGR info with 2.2.2 stsi data */ static void lgr_stsi_2_2_2(struct lgr_info *lgr_info) { struct sysinfo_2_2_2 *si = (void *) lgr_page; if (stsi(si, 2, 2, 2)) return; cpascii(lgr_info->name, si->name, sizeof(si->name)); memcpy(&lgr_info->lpar_number, &si->lpar_number, sizeof(lgr_info->lpar_number)); } /* * Fill LGR info with 3.2.2 stsi data */ static void lgr_stsi_3_2_2(struct lgr_info *lgr_info) { struct sysinfo_3_2_2 *si = (void *) lgr_page; int i; if (stsi(si, 3, 2, 2)) return; for (i = 0; i < min_t(u8, si->count, VM_LEVEL_MAX); i++) { cpascii(lgr_info->vm[i].name, si->vm[i].name, sizeof(si->vm[i].name)); cpascii(lgr_info->vm[i].cpi, si->vm[i].cpi, sizeof(si->vm[i].cpi)); } lgr_info->vm_count = si->count; } /* * Fill LGR info with current data */ static void lgr_info_get(struct lgr_info *lgr_info) { int level; memset(lgr_info, 0, sizeof(*lgr_info)); stfle(lgr_info->stfle_fac_list, ARRAY_SIZE(lgr_info->stfle_fac_list)); level = stsi(NULL, 0, 0, 0); lgr_info->level = level; if (level >= 1) lgr_stsi_1_1_1(lgr_info); if (level >= 2) lgr_stsi_2_2_2(lgr_info); if (level >= 3) lgr_stsi_3_2_2(lgr_info); } /* * Check if LGR info has changed and if yes log new LGR info to s390dbf */ void lgr_info_log(void) { static DEFINE_SPINLOCK(lgr_info_lock); unsigned long flags; if (!spin_trylock_irqsave(&lgr_info_lock, flags)) return; lgr_info_get(&lgr_info_cur); if (memcmp(&lgr_info_last, &lgr_info_cur, sizeof(lgr_info_cur)) != 0) { debug_event(lgr_dbf, 1, &lgr_info_cur, sizeof(lgr_info_cur)); lgr_info_last = lgr_info_cur; } spin_unlock_irqrestore(&lgr_info_lock, flags); } EXPORT_SYMBOL_GPL(lgr_info_log); static void lgr_timer_set(void); /* * LGR timer callback */ static void lgr_timer_fn(unsigned long ignored) { lgr_info_log(); 
lgr_timer_set(); } static struct timer_list lgr_timer = TIMER_DEFERRED_INITIALIZER(lgr_timer_fn, 0, 0); /* * Setup next LGR timer */ static void lgr_timer_set(void) { mod_timer(&lgr_timer, jiffies + LGR_TIMER_INTERVAL_SECS * HZ); } /* * Initialize LGR: Add s390dbf, write initial lgr_info and setup timer */ static int __init lgr_init(void) { lgr_dbf = debug_register("lgr", 1, 1, sizeof(struct lgr_info)); if (!lgr_dbf) return -ENOMEM; debug_register_view(lgr_dbf, &debug_hex_ascii_view); lgr_info_get(&lgr_info_last); debug_event(lgr_dbf, 1, &lgr_info_last, sizeof(lgr_info_last)); lgr_timer_set(); return 0; } module_init(lgr_init);
gpl-2.0
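The core of lgr_info_log() is a snapshot-and-compare idiom: rebuild the current state, memcmp() it against the last logged copy, and emit a record only when something differs. A self-contained userspace model of just that idiom (the snapshot struct and its values are invented for illustration):

#include <stdio.h>
#include <string.h>

struct snapshot {
	int level;
	char name[8];
};

static struct snapshot last;	/* zero-initialized, like lgr_info_last */

static void log_if_changed(const struct snapshot *cur)
{
	if (memcmp(&last, cur, sizeof(*cur)) != 0) {
		printf("changed: level=%d name=%s\n", cur->level, cur->name);
		last = *cur;	/* remember what we logged */
	}
}

int main(void)
{
	struct snapshot a = { 2, "LPAR01" };
	struct snapshot b = { 2, "LPAR02" };

	log_if_changed(&a);	/* logs: first observation */
	log_if_changed(&a);	/* silent: nothing changed */
	log_if_changed(&b);	/* logs: name changed */
	return 0;
}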
kamarush/yuga_aosp_kernel_lp
drivers/usb/storage/karma.c
4635
6019
/* Driver for Rio Karma * * (c) 2006 Bob Copeland <me@bobcopeland.com> * (c) 2006 Keith Bennett <keith@mcs.st-and.ac.uk> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/slab.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include "usb.h" #include "transport.h" #include "debug.h" MODULE_DESCRIPTION("Driver for Rio Karma"); MODULE_AUTHOR("Bob Copeland <me@bobcopeland.com>, Keith Bennett <keith@mcs.st-and.ac.uk>"); MODULE_LICENSE("GPL"); #define RIO_PREFIX "RIOP\x00" #define RIO_PREFIX_LEN 5 #define RIO_SEND_LEN 40 #define RIO_RECV_LEN 0x200 #define RIO_ENTER_STORAGE 0x1 #define RIO_LEAVE_STORAGE 0x2 #define RIO_RESET 0xC struct karma_data { int in_storage; char *recv; }; static int rio_karma_init(struct us_data *us); /* * The table of devices */ #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ vendorName, productName, useProtocol, useTransport, \ initFunction, flags) \ { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags)|(USB_US_TYPE_STOR<<24) } static struct usb_device_id karma_usb_ids[] = { # include "unusual_karma.h" { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, karma_usb_ids); #undef UNUSUAL_DEV /* * The flags table */ #define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \ vendor_name, product_name, use_protocol, use_transport, \ init_function, Flags) \ { \ .vendorName = vendor_name, \ .productName = product_name, \ .useProtocol = use_protocol, \ .useTransport = use_transport, \ .initFunction = init_function, \ } static struct us_unusual_dev karma_unusual_dev_list[] = { # include "unusual_karma.h" { } /* Terminating entry */ }; #undef UNUSUAL_DEV /* * Send commands to Rio Karma. * * For each command we send 40 bytes starting 'RIOP\0' followed by * the command number and a sequence number, which the device will ack * with a 512-byte packet with the high four bits set and everything * else null. Then we send 'RIOP\x80' followed by a zero and the * sequence number, until byte 5 in the response repeats the sequence * number. 
*/ static int rio_karma_send_command(char cmd, struct us_data *us) { int result, partial; unsigned long timeout; static unsigned char seq = 1; struct karma_data *data = (struct karma_data *) us->extra; US_DEBUGP("karma: sending command %04x\n", cmd); memset(us->iobuf, 0, RIO_SEND_LEN); memcpy(us->iobuf, RIO_PREFIX, RIO_PREFIX_LEN); us->iobuf[5] = cmd; us->iobuf[6] = seq; timeout = jiffies + msecs_to_jiffies(6000); for (;;) { result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, us->iobuf, RIO_SEND_LEN, &partial); if (result != USB_STOR_XFER_GOOD) goto err; result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, data->recv, RIO_RECV_LEN, &partial); if (result != USB_STOR_XFER_GOOD) goto err; if (data->recv[5] == seq) break; if (time_after(jiffies, timeout)) goto err; us->iobuf[4] = 0x80; us->iobuf[5] = 0; msleep(50); } seq++; if (seq == 0) seq = 1; US_DEBUGP("karma: sent command %04x\n", cmd); return 0; err: US_DEBUGP("karma: command %04x failed\n", cmd); return USB_STOR_TRANSPORT_FAILED; } /* * Trap START_STOP and READ_10 to leave/re-enter storage mode. * Everything else is propagated to the normal bulk layer. */ static int rio_karma_transport(struct scsi_cmnd *srb, struct us_data *us) { int ret; struct karma_data *data = (struct karma_data *) us->extra; if (srb->cmnd[0] == READ_10 && !data->in_storage) { ret = rio_karma_send_command(RIO_ENTER_STORAGE, us); if (ret) return ret; data->in_storage = 1; return usb_stor_Bulk_transport(srb, us); } else if (srb->cmnd[0] == START_STOP) { ret = rio_karma_send_command(RIO_LEAVE_STORAGE, us); if (ret) return ret; data->in_storage = 0; return rio_karma_send_command(RIO_RESET, us); } return usb_stor_Bulk_transport(srb, us); } static void rio_karma_destructor(void *extra) { struct karma_data *data = (struct karma_data *) extra; kfree(data->recv); } static int rio_karma_init(struct us_data *us) { int ret = 0; struct karma_data *data = kzalloc(sizeof(struct karma_data), GFP_NOIO); if (!data) goto out; data->recv = kmalloc(RIO_RECV_LEN, GFP_NOIO); if (!data->recv) { kfree(data); goto out; } us->extra = data; us->extra_destructor = rio_karma_destructor; ret = rio_karma_send_command(RIO_ENTER_STORAGE, us); data->in_storage = (ret == 0); out: return ret; } static int karma_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct us_data *us; int result; result = usb_stor_probe1(&us, intf, id, (id - karma_usb_ids) + karma_unusual_dev_list); if (result) return result; us->transport_name = "Rio Karma/Bulk"; us->transport = rio_karma_transport; us->transport_reset = usb_stor_Bulk_reset; result = usb_stor_probe2(us); return result; } static struct usb_driver karma_driver = { .name = "ums-karma", .probe = karma_probe, .disconnect = usb_stor_disconnect, .suspend = usb_stor_suspend, .resume = usb_stor_resume, .reset_resume = usb_stor_reset_resume, .pre_reset = usb_stor_pre_reset, .post_reset = usb_stor_post_reset, .id_table = karma_usb_ids, .soft_unbind = 1, .no_dynamic_id = 1, }; module_usb_driver(karma_driver);
gpl-2.0
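The command block format described in the comment above is simple to picture: 40 bytes, "RIOP\0" prefix, command byte at offset 5, sequence byte at offset 6, remainder zeroed. A standalone sketch that only builds and dumps the buffer, leaving out the USB bulk transfers and ack polling:

#include <stdio.h>
#include <string.h>

#define RIO_PREFIX     "RIOP\x00"
#define RIO_PREFIX_LEN 5
#define RIO_SEND_LEN   40

static void build_cmd(unsigned char *buf, unsigned char cmd, unsigned char seq)
{
	memset(buf, 0, RIO_SEND_LEN);
	memcpy(buf, RIO_PREFIX, RIO_PREFIX_LEN);
	buf[5] = cmd;	/* command number */
	buf[6] = seq;	/* sequence number echoed back by the device */
}

int main(void)
{
	unsigned char buf[RIO_SEND_LEN];
	int i;

	build_cmd(buf, 0x1 /* RIO_ENTER_STORAGE */, 1);
	for (i = 0; i < 8; i++)
		printf("%02x ", buf[i]);
	printf("...\n");
	return 0;
}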
ybmaker/linux-sunxi
drivers/platform/x86/panasonic-laptop.c
4891
18556
/* * Panasonic HotKey and LCD brightness control driver * (C) 2004 Hiroshi Miura <miura@da-cha.org> * (C) 2004 NTT DATA Intellilink Co. http://www.intellilink.co.jp/ * (C) YOKOTA Hiroshi <yokota (at) netlab. is. tsukuba. ac. jp> * (C) 2004 David Bronaugh <dbronaugh> * (C) 2006-2008 Harald Welte <laforge@gnumonks.org> * * derived from toshiba_acpi.c, Copyright (C) 2002-2004 John Belmonte * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * *--------------------------------------------------------------------------- * * ChangeLog: * Sep.23, 2008 Harald Welte <laforge@gnumonks.org> * -v0.95 rename driver from drivers/acpi/pcc_acpi.c to * drivers/misc/panasonic-laptop.c * * Jul.04, 2008 Harald Welte <laforge@gnumonks.org> * -v0.94 replace /proc interface with device attributes * support {set,get}keycode on the input device * * Jun.27, 2008 Harald Welte <laforge@gnumonks.org> * -v0.92 merge with 2.6.26-rc6 input API changes * remove broken <= 2.6.15 kernel support * resolve all compiler warnings * various coding style fixes (checkpatch.pl) * add support for backlight api * major code restructuring * * Dec.28, 2007 Harald Welte <laforge@gnumonks.org> * -v0.91 merge with 2.6.24-rc6 ACPI changes * * Nov.04, 2006 Hiroshi Miura <miura@da-cha.org> * -v0.9 remove warning about section reference. * remove acpi_os_free * add /proc/acpi/pcc/brightness interface for HAL access * merge dbronaugh's enhancement * Aug.17, 2004 David Bronaugh (dbronaugh) * - Added screen brightness setting interface * Thanks to FreeBSD crew (acpi_panasonic.c) * for the ideas I needed to accomplish it * * May.29, 2006 Hiroshi Miura <miura@da-cha.org> * -v0.8.4 follow to change keyinput structure * thanks Fabian Yamaguchi <fabs@cs.tu-berlin.de>, * Jacob Bower <jacob.bower@ic.ac.uk> and * Hiroshi Yokota for providing solutions. * * Oct.02, 2004 Hiroshi Miura <miura@da-cha.org> * -v0.8.2 merge code of YOKOTA Hiroshi * <yokota@netlab.is.tsukuba.ac.jp>. * Add sticky key mode interface. * Refactoring acpi_pcc_generate_keyinput(). * * Sep.15, 2004 Hiroshi Miura <miura@da-cha.org> * -v0.8 Generate key input event on input subsystem. * This is based on yet another driver written by * Ryuta Nakanishi. * * Sep.10, 2004 Hiroshi Miura <miura@da-cha.org> * -v0.7 Change proc interface functions using seq_file * facility as same as other ACPI drivers. * * Aug.28, 2004 Hiroshi Miura <miura@da-cha.org> * -v0.6.4 Fix a silly error with status checking * * Aug.25, 2004 Hiroshi Miura <miura@da-cha.org> * -v0.6.3 replace read_acpi_int by standard function * acpi_evaluate_integer * some clean up and make smart copyright notice.
* fix return value of pcc_acpi_get_key() * fix checking return value of acpi_bus_register_driver() * * Aug.22, 2004 David Bronaugh <dbronaugh@linuxboxen.org> * -v0.6.2 Add check on ACPI data (num_sifr) * Coding style cleanups, better error messages/handling * Fixed an off-by-one error in memory allocation * * Aug.21, 2004 David Bronaugh <dbronaugh@linuxboxen.org> * -v0.6.1 Fix a silly error with status checking * * Aug.20, 2004 David Bronaugh <dbronaugh@linuxboxen.org> * - v0.6 Correct brightness controls to reflect reality * based on information gleaned by Hiroshi Miura * and discussions with Hiroshi Miura * * Aug.10, 2004 Hiroshi Miura <miura@da-cha.org> * - v0.5 support LCD brightness control * based on the disclosed information by MEI. * * Jul.25, 2004 Hiroshi Miura <miura@da-cha.org> * - v0.4 first post version * add function to retrieve SIFR * * Jul.24, 2004 Hiroshi Miura <miura@da-cha.org> * - v0.3 get proper status of hotkey * * Jul.22, 2004 Hiroshi Miura <miura@da-cha.org> * - v0.2 add HotKey handler * * Jul.17, 2004 Hiroshi Miura <miura@da-cha.org> * - v0.1 start from toshiba_acpi driver written by John Belmonte * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/backlight.h> #include <linux/ctype.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include <linux/slab.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> #include <linux/input.h> #include <linux/input/sparse-keymap.h> #ifndef ACPI_HOTKEY_COMPONENT #define ACPI_HOTKEY_COMPONENT 0x10000000 #endif #define _COMPONENT ACPI_HOTKEY_COMPONENT MODULE_AUTHOR("Hiroshi Miura, David Bronaugh and Harald Welte"); MODULE_DESCRIPTION("ACPI HotKey driver for Panasonic Let's Note laptops"); MODULE_LICENSE("GPL"); #define LOGPREFIX "pcc_acpi: " /* Define ACPI PATHs */ /* Lets note hotkeys */ #define METHOD_HKEY_QUERY "HINF" #define METHOD_HKEY_SQTY "SQTY" #define METHOD_HKEY_SINF "SINF" #define METHOD_HKEY_SSET "SSET" #define HKEY_NOTIFY 0x80 #define ACPI_PCC_DRIVER_NAME "Panasonic Laptop Support" #define ACPI_PCC_DEVICE_NAME "Hotkey" #define ACPI_PCC_CLASS "pcc" #define ACPI_PCC_INPUT_PHYS "panasonic/hkey0" /* LCD_TYPEs: 0 = Normal, 1 = Semi-transparent ENV_STATEs: Normal temp=0x01, High temp=0x81, N/A=0x00 */ enum SINF_BITS { SINF_NUM_BATTERIES = 0, SINF_LCD_TYPE, SINF_AC_MAX_BRIGHT, SINF_AC_MIN_BRIGHT, SINF_AC_CUR_BRIGHT, SINF_DC_MAX_BRIGHT, SINF_DC_MIN_BRIGHT, SINF_DC_CUR_BRIGHT, SINF_MUTE, SINF_RESERVED, SINF_ENV_STATE, SINF_STICKY_KEY = 0x80, }; /* R1 handles SINF_AC_CUR_BRIGHT as SINF_CUR_BRIGHT, doesn't know AC state */ static int acpi_pcc_hotkey_add(struct acpi_device *device); static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type); static int acpi_pcc_hotkey_resume(struct acpi_device *device); static void acpi_pcc_hotkey_notify(struct acpi_device *device, u32 event); static const struct acpi_device_id pcc_device_ids[] = { { "MAT0012", 0}, { "MAT0013", 0}, { "MAT0018", 0}, { "MAT0019", 0}, { "", 0}, }; MODULE_DEVICE_TABLE(acpi, pcc_device_ids); static struct acpi_driver acpi_pcc_driver = { .name = ACPI_PCC_DRIVER_NAME, .class = ACPI_PCC_CLASS, .ids = pcc_device_ids, .ops = { .add = acpi_pcc_hotkey_add, .remove = acpi_pcc_hotkey_remove, .resume = acpi_pcc_hotkey_resume, .notify = acpi_pcc_hotkey_notify, }, }; static const struct key_entry panasonic_keymap[] = { { KE_KEY, 0, { KEY_RESERVED } }, { KE_KEY, 1, { KEY_BRIGHTNESSDOWN } }, { KE_KEY, 2, { KEY_BRIGHTNESSUP } }, { KE_KEY, 3, { KEY_DISPLAYTOGGLE } }, { KE_KEY, 4, {
KEY_MUTE } }, { KE_KEY, 5, { KEY_VOLUMEDOWN } }, { KE_KEY, 6, { KEY_VOLUMEUP } }, { KE_KEY, 7, { KEY_SLEEP } }, { KE_KEY, 8, { KEY_PROG1 } }, /* Change CPU boost */ { KE_KEY, 9, { KEY_BATTERY } }, { KE_KEY, 10, { KEY_SUSPEND } }, { KE_END, 0 } }; struct pcc_acpi { acpi_handle handle; unsigned long num_sifr; int sticky_mode; u32 *sinf; struct acpi_device *device; struct input_dev *input_dev; struct backlight_device *backlight; }; struct pcc_keyinput { struct acpi_hotkey *hotkey; }; /* method access functions */ static int acpi_pcc_write_sset(struct pcc_acpi *pcc, int func, int val) { union acpi_object in_objs[] = { { .integer.type = ACPI_TYPE_INTEGER, .integer.value = func, }, { .integer.type = ACPI_TYPE_INTEGER, .integer.value = val, }, }; struct acpi_object_list params = { .count = ARRAY_SIZE(in_objs), .pointer = in_objs, }; acpi_status status = AE_OK; status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SSET, &params, NULL); return (status == AE_OK) ? 0 : -EIO; } static inline int acpi_pcc_get_sqty(struct acpi_device *device) { unsigned long long s; acpi_status status; status = acpi_evaluate_integer(device->handle, METHOD_HKEY_SQTY, NULL, &s); if (ACPI_SUCCESS(status)) return s; else { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "evaluation error HKEY.SQTY\n")); return -EINVAL; } } static int acpi_pcc_retrieve_biosdata(struct pcc_acpi *pcc) { acpi_status status; struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; union acpi_object *hkey = NULL; int i; status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SINF, NULL, &buffer); if (ACPI_FAILURE(status)) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "evaluation error HKEY.SINF\n")); return 0; } hkey = buffer.pointer; if (!hkey || (hkey->type != ACPI_TYPE_PACKAGE)) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid HKEY.SINF\n")); status = AE_ERROR; goto end; } if (pcc->num_sifr < hkey->package.count) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "SQTY reports bad SINF length\n")); status = AE_ERROR; goto end; } for (i = 0; i < hkey->package.count; i++) { union acpi_object *element = &(hkey->package.elements[i]); if (likely(element->type == ACPI_TYPE_INTEGER)) { pcc->sinf[i] = element->integer.value; } else ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid HKEY.SINF data\n")); } pcc->sinf[hkey->package.count] = -1; end: kfree(buffer.pointer); return status == AE_OK; } /* backlight API interface functions */ /* This driver currently treats AC and DC brightness identical, * since we don't need to invent an interface to the core ACPI * logic to receive events in case a power supply is plugged in * or removed */ static int bl_get(struct backlight_device *bd) { struct pcc_acpi *pcc = bl_get_data(bd); if (!acpi_pcc_retrieve_biosdata(pcc)) return -EIO; return pcc->sinf[SINF_AC_CUR_BRIGHT]; } static int bl_set_status(struct backlight_device *bd) { struct pcc_acpi *pcc = bl_get_data(bd); int bright = bd->props.brightness; int rc; if (!acpi_pcc_retrieve_biosdata(pcc)) return -EIO; if (bright < pcc->sinf[SINF_AC_MIN_BRIGHT]) bright = pcc->sinf[SINF_AC_MIN_BRIGHT]; if (bright < pcc->sinf[SINF_DC_MIN_BRIGHT]) bright = pcc->sinf[SINF_DC_MIN_BRIGHT]; if (bright < pcc->sinf[SINF_AC_MIN_BRIGHT] || bright > pcc->sinf[SINF_AC_MAX_BRIGHT]) return -EINVAL; rc = acpi_pcc_write_sset(pcc, SINF_AC_CUR_BRIGHT, bright); if (rc < 0) return rc; return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright); } static const struct backlight_ops pcc_backlight_ops = { .get_brightness = bl_get, .update_status = bl_set_status, }; /* sysfs user interface functions */ static ssize_t show_numbatt(struct device *dev, 
struct device_attribute *attr, char *buf) { struct acpi_device *acpi = to_acpi_device(dev); struct pcc_acpi *pcc = acpi_driver_data(acpi); if (!acpi_pcc_retrieve_biosdata(pcc)) return -EIO; return snprintf(buf, PAGE_SIZE, "%u\n", pcc->sinf[SINF_NUM_BATTERIES]); } static ssize_t show_lcdtype(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi = to_acpi_device(dev); struct pcc_acpi *pcc = acpi_driver_data(acpi); if (!acpi_pcc_retrieve_biosdata(pcc)) return -EIO; return snprintf(buf, PAGE_SIZE, "%u\n", pcc->sinf[SINF_LCD_TYPE]); } static ssize_t show_mute(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi = to_acpi_device(dev); struct pcc_acpi *pcc = acpi_driver_data(acpi); if (!acpi_pcc_retrieve_biosdata(pcc)) return -EIO; return snprintf(buf, PAGE_SIZE, "%u\n", pcc->sinf[SINF_MUTE]); } static ssize_t show_sticky(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi = to_acpi_device(dev); struct pcc_acpi *pcc = acpi_driver_data(acpi); if (!acpi_pcc_retrieve_biosdata(pcc)) return -EIO; return snprintf(buf, PAGE_SIZE, "%u\n", pcc->sinf[SINF_STICKY_KEY]); } static ssize_t set_sticky(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct acpi_device *acpi = to_acpi_device(dev); struct pcc_acpi *pcc = acpi_driver_data(acpi); int val; if (count && sscanf(buf, "%i", &val) == 1 && (val == 0 || val == 1)) { acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, val); pcc->sticky_mode = val; } return count; } static DEVICE_ATTR(numbatt, S_IRUGO, show_numbatt, NULL); static DEVICE_ATTR(lcdtype, S_IRUGO, show_lcdtype, NULL); static DEVICE_ATTR(mute, S_IRUGO, show_mute, NULL); static DEVICE_ATTR(sticky_key, S_IRUGO | S_IWUSR, show_sticky, set_sticky); static struct attribute *pcc_sysfs_entries[] = { &dev_attr_numbatt.attr, &dev_attr_lcdtype.attr, &dev_attr_mute.attr, &dev_attr_sticky_key.attr, NULL, }; static struct attribute_group pcc_attr_group = { .name = NULL, /* put in device directory */ .attrs = pcc_sysfs_entries, }; /* hotkey input device driver */ static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc) { struct input_dev *hotk_input_dev = pcc->input_dev; int rc; unsigned long long result; rc = acpi_evaluate_integer(pcc->handle, METHOD_HKEY_QUERY, NULL, &result); if (!ACPI_SUCCESS(rc)) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "error getting hotkey status\n")); return; } acpi_bus_generate_proc_event(pcc->device, HKEY_NOTIFY, result); if (!sparse_keymap_report_event(hotk_input_dev, result & 0xf, result & 0x80, false)) ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown hotkey event: %d\n", result)); } static void acpi_pcc_hotkey_notify(struct acpi_device *device, u32 event) { struct pcc_acpi *pcc = acpi_driver_data(device); switch (event) { case HKEY_NOTIFY: acpi_pcc_generate_keyinput(pcc); break; default: /* nothing to do */ break; } } static int acpi_pcc_init_input(struct pcc_acpi *pcc) { struct input_dev *input_dev; int error; input_dev = input_allocate_device(); if (!input_dev) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Couldn't allocate input device for hotkey")); return -ENOMEM; } input_dev->name = ACPI_PCC_DRIVER_NAME; input_dev->phys = ACPI_PCC_INPUT_PHYS; input_dev->id.bustype = BUS_HOST; input_dev->id.vendor = 0x0001; input_dev->id.product = 0x0001; input_dev->id.version = 0x0100; error = sparse_keymap_setup(input_dev, panasonic_keymap, NULL); if (error) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unable to setup input device keymap\n")); goto err_free_dev; } error = 
input_register_device(input_dev); if (error) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unable to register input device\n")); goto err_free_keymap; } pcc->input_dev = input_dev; return 0; err_free_keymap: sparse_keymap_free(input_dev); err_free_dev: input_free_device(input_dev); return error; } static void acpi_pcc_destroy_input(struct pcc_acpi *pcc) { sparse_keymap_free(pcc->input_dev); input_unregister_device(pcc->input_dev); /* * No need to input_free_device() since core input API refcounts * and free()s the device. */ } /* kernel module interface */ static int acpi_pcc_hotkey_resume(struct acpi_device *device) { struct pcc_acpi *pcc = acpi_driver_data(device); if (device == NULL || pcc == NULL) return -EINVAL; ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Sticky mode restore: %d\n", pcc->sticky_mode)); return acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, pcc->sticky_mode); } static int acpi_pcc_hotkey_add(struct acpi_device *device) { struct backlight_properties props; struct pcc_acpi *pcc; int num_sifr, result; if (!device) return -EINVAL; num_sifr = acpi_pcc_get_sqty(device); if (num_sifr < 0 || num_sifr > 255) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "num_sifr out of range")); return -ENODEV; } pcc = kzalloc(sizeof(struct pcc_acpi), GFP_KERNEL); if (!pcc) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Couldn't allocate mem for pcc")); return -ENOMEM; } pcc->sinf = kzalloc(sizeof(u32) * (num_sifr + 1), GFP_KERNEL); if (!pcc->sinf) { result = -ENOMEM; goto out_hotkey; } pcc->device = device; pcc->handle = device->handle; pcc->num_sifr = num_sifr; device->driver_data = pcc; strcpy(acpi_device_name(device), ACPI_PCC_DEVICE_NAME); strcpy(acpi_device_class(device), ACPI_PCC_CLASS); result = acpi_pcc_init_input(pcc); if (result) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error installing keyinput handler\n")); goto out_sinf; } if (!acpi_pcc_retrieve_biosdata(pcc)) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Couldn't retrieve BIOS data\n")); result = -EIO; goto out_input; } /* initialize backlight */ memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_PLATFORM; props.max_brightness = pcc->sinf[SINF_AC_MAX_BRIGHT]; pcc->backlight = backlight_device_register("panasonic", NULL, pcc, &pcc_backlight_ops, &props); if (IS_ERR(pcc->backlight)) { result = PTR_ERR(pcc->backlight); goto out_input; } /* read the initial brightness setting from the hardware */ pcc->backlight->props.brightness = pcc->sinf[SINF_AC_CUR_BRIGHT]; /* read the initial sticky key mode from the hardware */ pcc->sticky_mode = pcc->sinf[SINF_STICKY_KEY]; /* add sysfs attributes */ result = sysfs_create_group(&device->dev.kobj, &pcc_attr_group); if (result) goto out_backlight; return 0; out_backlight: backlight_device_unregister(pcc->backlight); out_input: acpi_pcc_destroy_input(pcc); out_sinf: kfree(pcc->sinf); out_hotkey: kfree(pcc); return result; } static int __init acpi_pcc_init(void) { int result = 0; if (acpi_disabled) return -ENODEV; result = acpi_bus_register_driver(&acpi_pcc_driver); if (result < 0) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error registering hotkey driver\n")); return -ENODEV; } return 0; } static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type) { struct pcc_acpi *pcc = acpi_driver_data(device); if (!device || !pcc) return -EINVAL; sysfs_remove_group(&device->dev.kobj, &pcc_attr_group); backlight_device_unregister(pcc->backlight); acpi_pcc_destroy_input(pcc); kfree(pcc->sinf); kfree(pcc); return 0; } static void __exit acpi_pcc_exit(void) { acpi_bus_unregister_driver(&acpi_pcc_driver); } module_init(acpi_pcc_init); 
module_exit(acpi_pcc_exit);
gpl-2.0
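The hotkey decode in acpi_pcc_generate_keyinput() splits the HINF result into a keymap index (low nibble) and a press/release flag (bit 7) before handing it to the sparse keymap. That split in isolation, as a runnable sketch:

#include <stdio.h>

static void decode_hotkey(unsigned long result)
{
	unsigned int key = result & 0xf;	/* low nibble: keymap index */
	int pressed = (result & 0x80) != 0;	/* bit 7: press vs. release */

	printf("key %u %s\n", key, pressed ? "pressed" : "released");
}

int main(void)
{
	decode_hotkey(0x82);	/* index 2 (KEY_BRIGHTNESSUP in the keymap), press */
	decode_hotkey(0x02);	/* index 2, release */
	return 0;
}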
chaoskagami/android_kernel_nvidia_roth
drivers/staging/tidspbridge/rmgr/strm.c
5147
18667
/* * strm.c * * DSP-BIOS Bridge driver support functions for TI OMAP processors. * * DSP/BIOS Bridge Stream Manager. * * Copyright (C) 2005-2006 Texas Instruments, Inc. * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ #include <linux/types.h> /* ----------------------------------- Host OS */ #include <dspbridge/host_os.h> /* ----------------------------------- DSP/BIOS Bridge */ #include <dspbridge/dbdefs.h> /* ----------------------------------- OS Adaptation Layer */ #include <dspbridge/sync.h> /* ----------------------------------- Bridge Driver */ #include <dspbridge/dspdefs.h> /* ----------------------------------- Resource Manager */ #include <dspbridge/nodepriv.h> /* ----------------------------------- Others */ #include <dspbridge/cmm.h> /* ----------------------------------- This */ #include <dspbridge/strm.h> #include <dspbridge/resourcecleanup.h> /* ----------------------------------- Defines, Data Structures, Typedefs */ #define DEFAULTTIMEOUT 10000 #define DEFAULTNUMBUFS 2 /* * ======== strm_mgr ======== * The strm_mgr contains device information needed to open the underlying * channels of a stream. */ struct strm_mgr { struct dev_object *dev_obj; /* Device for this processor */ struct chnl_mgr *chnl_mgr; /* Channel manager */ /* Function interface to Bridge driver */ struct bridge_drv_interface *intf_fxns; }; /* * ======== strm_object ======== * This object is allocated in strm_open(). */ struct strm_object { struct strm_mgr *strm_mgr_obj; struct chnl_object *chnl_obj; u32 dir; /* DSP_TONODE or DSP_FROMNODE */ u32 timeout; u32 num_bufs; /* Max # of bufs allowed in stream */ u32 bufs_in_strm; /* Current # of bufs in stream */ u32 bytes; /* bytes transferred since idled */ /* STREAM_IDLE, STREAM_READY, ... */ enum dsp_streamstate strm_state; void *user_event; /* Saved for strm_get_info() */ enum dsp_strmmode strm_mode; /* STRMMODE_[PROCCOPY][ZEROCOPY]... */ u32 dma_chnl_id; /* DMA chnl id */ u32 dma_priority; /* DMA priority:DMAPRI_[LOW][HIGH] */ u32 segment_id; /* >0 is SM segment.=0 is local heap */ u32 buf_alignment; /* Alignment for stream bufs */ /* Stream's SM address translator */ struct cmm_xlatorobject *xlator; }; /* ----------------------------------- Function Prototypes */ static int delete_strm(struct strm_object *stream_obj); /* * ======== strm_allocate_buffer ======== * Purpose: * Allocates buffers for a stream. */ int strm_allocate_buffer(struct strm_res_object *strmres, u32 usize, u8 **ap_buffer, u32 num_bufs, struct process_context *pr_ctxt) { int status = 0; u32 alloc_cnt = 0; u32 i; struct strm_object *stream_obj = strmres->stream; if (stream_obj) { /* * Allocate from segment specified at time of stream open. */ if (usize == 0) status = -EINVAL; } else { status = -EFAULT; } if (status) goto func_end; for (i = 0; i < num_bufs; i++) { (void)cmm_xlator_alloc_buf(stream_obj->xlator, &ap_buffer[i], usize); if (ap_buffer[i] == NULL) { status = -ENOMEM; alloc_cnt = i; break; } } if (status) strm_free_buffer(strmres, ap_buffer, alloc_cnt, pr_ctxt); if (status) goto func_end; drv_proc_update_strm_res(num_bufs, strmres); func_end: return status; } /* * ======== strm_close ======== * Purpose: * Close a stream opened with strm_open(). 
*/ int strm_close(struct strm_res_object *strmres, struct process_context *pr_ctxt) { struct bridge_drv_interface *intf_fxns; struct chnl_info chnl_info_obj; int status = 0; struct strm_object *stream_obj = strmres->stream; if (!stream_obj) { status = -EFAULT; } else { /* Have all buffers been reclaimed? If not, return * -EPIPE */ intf_fxns = stream_obj->strm_mgr_obj->intf_fxns; status = (*intf_fxns->chnl_get_info) (stream_obj->chnl_obj, &chnl_info_obj); if (chnl_info_obj.cio_cs > 0 || chnl_info_obj.cio_reqs > 0) status = -EPIPE; else status = delete_strm(stream_obj); } if (status) goto func_end; idr_remove(pr_ctxt->stream_id, strmres->id); func_end: dev_dbg(bridge, "%s: stream_obj: %p, status 0x%x\n", __func__, stream_obj, status); return status; } /* * ======== strm_create ======== * Purpose: * Create a STRM manager object. */ int strm_create(struct strm_mgr **strm_man, struct dev_object *dev_obj) { struct strm_mgr *strm_mgr_obj; int status = 0; *strm_man = NULL; /* Allocate STRM manager object */ strm_mgr_obj = kzalloc(sizeof(struct strm_mgr), GFP_KERNEL); if (strm_mgr_obj == NULL) status = -ENOMEM; else strm_mgr_obj->dev_obj = dev_obj; /* Get Channel manager and Bridge function interface */ if (!status) { status = dev_get_chnl_mgr(dev_obj, &(strm_mgr_obj->chnl_mgr)); if (!status) { (void)dev_get_intf_fxns(dev_obj, &(strm_mgr_obj->intf_fxns)); } } if (!status) *strm_man = strm_mgr_obj; else kfree(strm_mgr_obj); return status; } /* * ======== strm_delete ======== * Purpose: * Delete the STRM Manager Object. */ void strm_delete(struct strm_mgr *strm_mgr_obj) { kfree(strm_mgr_obj); } /* * ======== strm_free_buffer ======== * Purpose: * Frees the buffers allocated for a stream. */ int strm_free_buffer(struct strm_res_object *strmres, u8 ** ap_buffer, u32 num_bufs, struct process_context *pr_ctxt) { int status = 0; u32 i = 0; struct strm_object *stream_obj = strmres->stream; if (!stream_obj) status = -EFAULT; if (!status) { for (i = 0; i < num_bufs; i++) { status = cmm_xlator_free_buf(stream_obj->xlator, ap_buffer[i]); if (status) break; ap_buffer[i] = NULL; } } drv_proc_update_strm_res(num_bufs - i, strmres); return status; } /* * ======== strm_get_info ======== * Purpose: * Retrieves information about a stream. 
*/ int strm_get_info(struct strm_object *stream_obj, struct stream_info *stream_info, u32 stream_info_size) { struct bridge_drv_interface *intf_fxns; struct chnl_info chnl_info_obj; int status = 0; void *virt_base = NULL; /* NULL if no SM used */ if (!stream_obj) { status = -EFAULT; } else { if (stream_info_size < sizeof(struct stream_info)) { /* size of users info */ status = -EINVAL; } } if (status) goto func_end; intf_fxns = stream_obj->strm_mgr_obj->intf_fxns; status = (*intf_fxns->chnl_get_info) (stream_obj->chnl_obj, &chnl_info_obj); if (status) goto func_end; if (stream_obj->xlator) { /* We have a translator */ cmm_xlator_info(stream_obj->xlator, (u8 **) &virt_base, 0, stream_obj->segment_id, false); } stream_info->segment_id = stream_obj->segment_id; stream_info->strm_mode = stream_obj->strm_mode; stream_info->virt_base = virt_base; stream_info->user_strm->number_bufs_allowed = stream_obj->num_bufs; stream_info->user_strm->number_bufs_in_stream = chnl_info_obj.cio_cs + chnl_info_obj.cio_reqs; /* # of bytes transferred since last call to DSPStream_Idle() */ stream_info->user_strm->number_bytes = chnl_info_obj.bytes_tx; stream_info->user_strm->sync_object_handle = chnl_info_obj.event_obj; /* Determine stream state based on channel state and info */ if (chnl_info_obj.state & CHNL_STATEEOS) { stream_info->user_strm->ss_stream_state = STREAM_DONE; } else { if (chnl_info_obj.cio_cs > 0) stream_info->user_strm->ss_stream_state = STREAM_READY; else if (chnl_info_obj.cio_reqs > 0) stream_info->user_strm->ss_stream_state = STREAM_PENDING; else stream_info->user_strm->ss_stream_state = STREAM_IDLE; } func_end: return status; } /* * ======== strm_idle ======== * Purpose: * Idles a particular stream. */ int strm_idle(struct strm_object *stream_obj, bool flush_data) { struct bridge_drv_interface *intf_fxns; int status = 0; if (!stream_obj) { status = -EFAULT; } else { intf_fxns = stream_obj->strm_mgr_obj->intf_fxns; status = (*intf_fxns->chnl_idle) (stream_obj->chnl_obj, stream_obj->timeout, flush_data); } dev_dbg(bridge, "%s: stream_obj: %p flush_data: 0x%x status: 0x%x\n", __func__, stream_obj, flush_data, status); return status; } /* * ======== strm_issue ======== * Purpose: * Issues a buffer on a stream */ int strm_issue(struct strm_object *stream_obj, u8 *pbuf, u32 ul_bytes, u32 ul_buf_size, u32 dw_arg) { struct bridge_drv_interface *intf_fxns; int status = 0; void *tmp_buf = NULL; if (!stream_obj) { status = -EFAULT; } else { intf_fxns = stream_obj->strm_mgr_obj->intf_fxns; if (stream_obj->segment_id != 0) { tmp_buf = cmm_xlator_translate(stream_obj->xlator, (void *)pbuf, CMM_VA2DSPPA); if (tmp_buf == NULL) status = -ESRCH; } if (!status) { status = (*intf_fxns->chnl_add_io_req) (stream_obj->chnl_obj, pbuf, ul_bytes, ul_buf_size, (u32) tmp_buf, dw_arg); } if (status == -EIO) status = -ENOSR; } dev_dbg(bridge, "%s: stream_obj: %p pbuf: %p ul_bytes: 0x%x dw_arg:" " 0x%x status: 0x%x\n", __func__, stream_obj, pbuf, ul_bytes, dw_arg, status); return status; } /* * ======== strm_open ======== * Purpose: * Open a stream for sending/receiving data buffers to/from a task or * XDAIS socket node on the DSP. 
*/ int strm_open(struct node_object *hnode, u32 dir, u32 index, struct strm_attr *pattr, struct strm_res_object **strmres, struct process_context *pr_ctxt) { struct strm_mgr *strm_mgr_obj; struct bridge_drv_interface *intf_fxns; u32 ul_chnl_id; struct strm_object *strm_obj = NULL; s8 chnl_mode; struct chnl_attr chnl_attr_obj; int status = 0; struct cmm_object *hcmm_mgr = NULL; /* Shared memory manager hndl */ void *stream_res; *strmres = NULL; if (dir != DSP_TONODE && dir != DSP_FROMNODE) { status = -EPERM; } else { /* Get the channel id from the node (set in node_connect()) */ status = node_get_channel_id(hnode, dir, index, &ul_chnl_id); } if (!status) status = node_get_strm_mgr(hnode, &strm_mgr_obj); if (!status) { strm_obj = kzalloc(sizeof(struct strm_object), GFP_KERNEL); if (strm_obj == NULL) { status = -ENOMEM; } else { strm_obj->strm_mgr_obj = strm_mgr_obj; strm_obj->dir = dir; strm_obj->strm_state = STREAM_IDLE; strm_obj->user_event = pattr->user_event; if (pattr->stream_attr_in != NULL) { strm_obj->timeout = pattr->stream_attr_in->timeout; strm_obj->num_bufs = pattr->stream_attr_in->num_bufs; strm_obj->strm_mode = pattr->stream_attr_in->strm_mode; strm_obj->segment_id = pattr->stream_attr_in->segment_id; strm_obj->buf_alignment = pattr->stream_attr_in->buf_alignment; strm_obj->dma_chnl_id = pattr->stream_attr_in->dma_chnl_id; strm_obj->dma_priority = pattr->stream_attr_in->dma_priority; chnl_attr_obj.uio_reqs = pattr->stream_attr_in->num_bufs; } else { strm_obj->timeout = DEFAULTTIMEOUT; strm_obj->num_bufs = DEFAULTNUMBUFS; strm_obj->strm_mode = STRMMODE_PROCCOPY; strm_obj->segment_id = 0; /* local mem */ strm_obj->buf_alignment = 0; strm_obj->dma_chnl_id = 0; strm_obj->dma_priority = 0; chnl_attr_obj.uio_reqs = DEFAULTNUMBUFS; } chnl_attr_obj.reserved1 = NULL; /* DMA chnl flush timeout */ chnl_attr_obj.reserved2 = strm_obj->timeout; chnl_attr_obj.event_obj = NULL; if (pattr->user_event != NULL) chnl_attr_obj.event_obj = pattr->user_event; } } if (status) goto func_cont; if ((pattr->virt_base == NULL) || !(pattr->virt_size > 0)) goto func_cont; /* No System DMA */ /* Get the shared mem mgr for this streams dev object */ status = dev_get_cmm_mgr(strm_mgr_obj->dev_obj, &hcmm_mgr); if (!status) { /*Allocate a SM addr translator for this strm. */ status = cmm_xlator_create(&strm_obj->xlator, hcmm_mgr, NULL); if (!status) { /* Set translators Virt Addr attributes */ status = cmm_xlator_info(strm_obj->xlator, (u8 **) &pattr->virt_base, pattr->virt_size, strm_obj->segment_id, true); } } func_cont: if (!status) { /* Open channel */ chnl_mode = (dir == DSP_TONODE) ? CHNL_MODETODSP : CHNL_MODEFROMDSP; intf_fxns = strm_mgr_obj->intf_fxns; status = (*intf_fxns->chnl_open) (&(strm_obj->chnl_obj), strm_mgr_obj->chnl_mgr, chnl_mode, ul_chnl_id, &chnl_attr_obj); if (status) { /* * over-ride non-returnable status codes so we return * something documented */ if (status != -ENOMEM && status != -EINVAL && status != -EPERM) { /* * We got a status that's not return-able. * Assert that we got something we were * expecting (-EFAULT isn't acceptable, * strm_mgr_obj->chnl_mgr better be valid or we * assert here), and then return -EPERM. 
*/ status = -EPERM; } } } if (!status) { status = drv_proc_insert_strm_res_element(strm_obj, &stream_res, pr_ctxt); if (status) delete_strm(strm_obj); else *strmres = (struct strm_res_object *)stream_res; } else { (void)delete_strm(strm_obj); } dev_dbg(bridge, "%s: hnode: %p dir: 0x%x index: 0x%x pattr: %p " "strmres: %p status: 0x%x\n", __func__, hnode, dir, index, pattr, strmres, status); return status; } /* * ======== strm_reclaim ======== * Purpose: * Reclaims a buffer from a stream. */ int strm_reclaim(struct strm_object *stream_obj, u8 ** buf_ptr, u32 *nbytes, u32 *buff_size, u32 *pdw_arg) { struct bridge_drv_interface *intf_fxns; struct chnl_ioc chnl_ioc_obj; int status = 0; void *tmp_buf = NULL; if (!stream_obj) { status = -EFAULT; goto func_end; } intf_fxns = stream_obj->strm_mgr_obj->intf_fxns; status = (*intf_fxns->chnl_get_ioc) (stream_obj->chnl_obj, stream_obj->timeout, &chnl_ioc_obj); if (!status) { *nbytes = chnl_ioc_obj.byte_size; if (buff_size) *buff_size = chnl_ioc_obj.buf_size; *pdw_arg = chnl_ioc_obj.arg; if (!CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) { if (CHNL_IS_TIMED_OUT(chnl_ioc_obj)) { status = -ETIME; } else { /* Allow reclaims after idle to succeed */ if (!CHNL_IS_IO_CANCELLED(chnl_ioc_obj)) status = -EPERM; } } /* Translate zerocopy buffer if channel not canceled. */ if (!status && (!CHNL_IS_IO_CANCELLED(chnl_ioc_obj)) && (stream_obj->strm_mode == STRMMODE_ZEROCOPY)) { /* * This is a zero-copy channel so chnl_ioc_obj.buf * contains the DSP address of SM. We need to * translate it to a virtual address for the user * thread to access. * Note: Could add CMM_DSPPA2VA to CMM in the future. */ tmp_buf = cmm_xlator_translate(stream_obj->xlator, chnl_ioc_obj.buf, CMM_DSPPA2PA); if (tmp_buf != NULL) { /* now convert this GPP Pa to Va */ tmp_buf = cmm_xlator_translate(stream_obj-> xlator, tmp_buf, CMM_PA2VA); } if (tmp_buf == NULL) status = -ESRCH; chnl_ioc_obj.buf = tmp_buf; } *buf_ptr = chnl_ioc_obj.buf; } func_end: dev_dbg(bridge, "%s: stream_obj: %p buf_ptr: %p nbytes: %p " "pdw_arg: %p status 0x%x\n", __func__, stream_obj, buf_ptr, nbytes, pdw_arg, status); return status; } /* * ======== strm_register_notify ======== * Purpose: * Register to be notified on specific events for this stream. */ int strm_register_notify(struct strm_object *stream_obj, u32 event_mask, u32 notify_type, struct dsp_notification * hnotification) { struct bridge_drv_interface *intf_fxns; int status = 0; if (!stream_obj) { status = -EFAULT; } else if ((event_mask & ~((DSP_STREAMIOCOMPLETION) | DSP_STREAMDONE)) != 0) { status = -EINVAL; } else { if (notify_type != DSP_SIGNALEVENT) status = -ENOSYS; } if (!status) { intf_fxns = stream_obj->strm_mgr_obj->intf_fxns; status = (*intf_fxns->chnl_register_notify) (stream_obj-> chnl_obj, event_mask, notify_type, hnotification); } return status; } /* * ======== strm_select ======== * Purpose: * Selects a ready stream.
*/ int strm_select(struct strm_object **strm_tab, u32 strms, u32 *pmask, u32 utimeout) { u32 index; struct chnl_info chnl_info_obj; struct bridge_drv_interface *intf_fxns; struct sync_object **sync_events = NULL; u32 i; int status = 0; *pmask = 0; for (i = 0; i < strms; i++) { if (!strm_tab[i]) { status = -EFAULT; break; } } if (status) goto func_end; /* Determine which channels have IO ready */ for (i = 0; i < strms; i++) { intf_fxns = strm_tab[i]->strm_mgr_obj->intf_fxns; status = (*intf_fxns->chnl_get_info) (strm_tab[i]->chnl_obj, &chnl_info_obj); if (status) { break; } else { if (chnl_info_obj.cio_cs > 0) *pmask |= (1 << i); } } if (!status && utimeout > 0 && *pmask == 0) { /* Non-zero timeout */ sync_events = kmalloc(strms * sizeof(struct sync_object *), GFP_KERNEL); if (sync_events == NULL) { status = -ENOMEM; } else { for (i = 0; i < strms; i++) { intf_fxns = strm_tab[i]->strm_mgr_obj->intf_fxns; status = (*intf_fxns->chnl_get_info) (strm_tab[i]->chnl_obj, &chnl_info_obj); if (status) break; else sync_events[i] = chnl_info_obj.sync_event; } } if (!status) { status = sync_wait_on_multiple_events(sync_events, strms, utimeout, &index); if (!status) { /* Since we waited on the event, we have to * reset it */ sync_set_event(sync_events[index]); *pmask = 1 << index; } } } func_end: kfree(sync_events); return status; } /* * ======== delete_strm ======== * Purpose: * Frees the resources allocated for a stream. */ static int delete_strm(struct strm_object *stream_obj) { struct bridge_drv_interface *intf_fxns; int status = 0; if (stream_obj) { if (stream_obj->chnl_obj) { intf_fxns = stream_obj->strm_mgr_obj->intf_fxns; /* Channel close can fail only if the channel handle * is invalid. */ status = (*intf_fxns->chnl_close) (stream_obj->chnl_obj); } /* Free all SM address translator resources */ kfree(stream_obj->xlator); kfree(stream_obj); } else { status = -EFAULT; } return status; }
gpl-2.0
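strm_select() reduces per-channel state to a readiness bitmask, setting bit i when stream i has completed I/O to reclaim. The mask construction on its own, with made-up completion counts standing in for chnl_info_obj.cio_cs:

#include <stdio.h>

int main(void)
{
	int completed[4] = { 0, 2, 0, 1 };	/* pretend cio_cs values */
	unsigned int mask = 0;
	unsigned int i;

	for (i = 0; i < 4; i++)
		if (completed[i] > 0)
			mask |= 1u << i;	/* stream i is ready */

	printf("ready mask = 0x%x\n", mask);	/* 0xa: streams 1 and 3 */
	return 0;
}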
dasago13/android_kernel_lenovo_s650
drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
5659
4398
/************************************************************************** * * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ /* * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> */ #include "vmwgfx_drv.h" #include "ttm/ttm_module.h" #include "ttm/ttm_bo_driver.h" #include "ttm/ttm_placement.h" #include <linux/idr.h> #include <linux/spinlock.h> #include <linux/kernel.h> struct vmwgfx_gmrid_man { spinlock_t lock; struct ida gmr_ida; uint32_t max_gmr_ids; uint32_t max_gmr_pages; uint32_t used_gmr_pages; }; static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, struct ttm_placement *placement, struct ttm_mem_reg *mem) { struct vmwgfx_gmrid_man *gman = (struct vmwgfx_gmrid_man *)man->priv; int ret = 0; int id; mem->mm_node = NULL; spin_lock(&gman->lock); if (gman->max_gmr_pages > 0) { gman->used_gmr_pages += bo->num_pages; if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages)) goto out_err_locked; } do { spin_unlock(&gman->lock); if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) { ret = -ENOMEM; goto out_err; } spin_lock(&gman->lock); ret = ida_get_new(&gman->gmr_ida, &id); if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) { ida_remove(&gman->gmr_ida, id); ret = 0; goto out_err_locked; } } while (ret == -EAGAIN); if (likely(ret == 0)) { mem->mm_node = gman; mem->start = id; mem->num_pages = bo->num_pages; } else goto out_err_locked; spin_unlock(&gman->lock); return 0; out_err: spin_lock(&gman->lock); out_err_locked: gman->used_gmr_pages -= bo->num_pages; spin_unlock(&gman->lock); return ret; } static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem) { struct vmwgfx_gmrid_man *gman = (struct vmwgfx_gmrid_man *)man->priv; if (mem->mm_node) { spin_lock(&gman->lock); ida_remove(&gman->gmr_ida, mem->start); gman->used_gmr_pages -= mem->num_pages; spin_unlock(&gman->lock); mem->mm_node = NULL; } } static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man, unsigned long p_size) { struct vmw_private *dev_priv = container_of(man->bdev, struct vmw_private, bdev); struct vmwgfx_gmrid_man *gman = kzalloc(sizeof(*gman), GFP_KERNEL); if (unlikely(gman == NULL)) return -ENOMEM; spin_lock_init(&gman->lock); gman->max_gmr_pages = dev_priv->max_gmr_pages; gman->used_gmr_pages = 0; 
ida_init(&gman->gmr_ida); gman->max_gmr_ids = p_size; man->priv = (void *) gman; return 0; } static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man) { struct vmwgfx_gmrid_man *gman = (struct vmwgfx_gmrid_man *)man->priv; if (gman) { ida_destroy(&gman->gmr_ida); kfree(gman); } return 0; } static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man, const char *prefix) { printk(KERN_INFO "%s: No debug info available for the GMR " "id manager.\n", prefix); } const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = { vmw_gmrid_man_init, vmw_gmrid_man_takedown, vmw_gmrid_man_get_node, vmw_gmrid_man_put_node, vmw_gmrid_man_debug };
gpl-2.0
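Apart from the (since-replaced) ida_pre_get()/ida_get_new() retry dance, the notable pattern in vmw_gmrid_man_get_node() is optimistic quota accounting: used_gmr_pages is charged up front and rolled back on every failure path. That invariant in isolation, stripped of locking and with invented numbers:

#include <stdio.h>

static unsigned int used_pages;
static unsigned int max_pages = 100;

static int reserve(unsigned int num)
{
	used_pages += num;	/* charge optimistically */
	if (used_pages > max_pages) {
		used_pages -= num;	/* roll back, as in out_err_locked */
		return -1;
	}
	return 0;
}

int main(void)
{
	int ret;

	ret = reserve(60);
	printf("%d (used=%u)\n", ret, used_pages);	/* 0 (used=60) */
	ret = reserve(60);
	printf("%d (used=%u)\n", ret, used_pages);	/* -1 (used=60): quota intact */
	return 0;
}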
RepoB/android_kernel_sony_msm8974-GreatDevs
arch/avr32/kernel/signal.c
8987
7612
/* * Copyright (C) 2004-2006 Atmel Corporation * * Based on linux/arch/sh/kernel/signal.c * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima * Copyright (C) 1991, 1992 Linus Torvalds * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/unistd.h> #include <linux/freezer.h> #include <linux/tracehook.h> #include <asm/uaccess.h> #include <asm/ucontext.h> #include <asm/syscalls.h> #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, struct pt_regs *regs) { return do_sigaltstack(uss, uoss, regs->sp); } struct rt_sigframe { struct siginfo info; struct ucontext uc; unsigned long retcode; }; static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) { int err = 0; #define COPY(x) err |= __get_user(regs->x, &sc->x) COPY(sr); COPY(pc); COPY(lr); COPY(sp); COPY(r12); COPY(r11); COPY(r10); COPY(r9); COPY(r8); COPY(r7); COPY(r6); COPY(r5); COPY(r4); COPY(r3); COPY(r2); COPY(r1); COPY(r0); #undef COPY /* * Don't allow anyone to pretend they're running in supervisor * mode or something... */ err |= !valid_user_regs(regs); return err; } asmlinkage int sys_rt_sigreturn(struct pt_regs *regs) { struct rt_sigframe __user *frame; sigset_t set; frame = (struct rt_sigframe __user *)regs->sp; pr_debug("SIG return: frame = %p\n", frame); if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); spin_lock_irq(&current->sighand->siglock); current->blocked = set; recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) goto badframe; if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT) goto badframe; pr_debug("Context restored: pc = %08lx, lr = %08lx, sp = %08lx\n", regs->pc, regs->lr, regs->sp); return regs->r12; badframe: force_sig(SIGSEGV, current); return 0; } static int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs) { int err = 0; #define COPY(x) err |= __put_user(regs->x, &sc->x) COPY(sr); COPY(pc); COPY(lr); COPY(sp); COPY(r12); COPY(r11); COPY(r10); COPY(r9); COPY(r8); COPY(r7); COPY(r6); COPY(r5); COPY(r4); COPY(r3); COPY(r2); COPY(r1); COPY(r0); #undef COPY return err; } static inline void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, int framesize) { unsigned long sp = regs->sp; if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) sp = current->sas_ss_sp + current->sas_ss_size; return (void __user *)((sp - framesize) & ~3); } static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { struct rt_sigframe __user *frame; int err = 0; frame = get_sigframe(ka, regs, sizeof(*frame)); err = -EFAULT; if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) goto out; /* * Set up the return code: * * mov r8, __NR_rt_sigreturn * scall * * Note: This will blow up since we're using a non-executable * stack. Better use SA_RESTORER. 
*/ #if __NR_rt_sigreturn > 127 # error __NR_rt_sigreturn must be < 127 to fit in a short mov #endif err = __put_user(0x3008d733 | (__NR_rt_sigreturn << 20), &frame->retcode); err |= copy_siginfo_to_user(&frame->info, info); /* Set up the ucontext */ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(NULL, &frame->uc.uc_link); err |= __put_user((void __user *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->sp), &frame->uc.uc_stack.ss_flags); err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); err |= setup_sigcontext(&frame->uc.uc_mcontext, regs); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) goto out; regs->r12 = sig; regs->r11 = (unsigned long) &frame->info; regs->r10 = (unsigned long) &frame->uc; regs->sp = (unsigned long) frame; if (ka->sa.sa_flags & SA_RESTORER) regs->lr = (unsigned long)ka->sa.sa_restorer; else { printk(KERN_NOTICE "[%s:%d] did not set SA_RESTORER\n", current->comm, current->pid); regs->lr = (unsigned long) &frame->retcode; } pr_debug("SIG deliver [%s:%d]: sig=%d sp=0x%lx pc=0x%lx->0x%p lr=0x%lx\n", current->comm, current->pid, sig, regs->sp, regs->pc, ka->sa.sa_handler, regs->lr); regs->pc = (unsigned long) ka->sa.sa_handler; out: return err; } static inline void setup_syscall_restart(struct pt_regs *regs) { if (regs->r12 == -ERESTART_RESTARTBLOCK) regs->r8 = __NR_restart_syscall; else regs->r12 = regs->r12_orig; regs->pc -= 2; } static inline void handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs *regs, int syscall) { int ret; /* * Set up the stack frame */ ret = setup_rt_frame(sig, ka, info, oldset, regs); /* * Check that the resulting registers are sane */ ret |= !valid_user_regs(regs); /* * Block the signal if we were unsuccessful. */ if (ret != 0 || !(ka->sa.sa_flags & SA_NODEFER)) { spin_lock_irq(&current->sighand->siglock); sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); sigaddset(&current->blocked, sig); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); } if (ret == 0) return; force_sigsegv(sig, current); } /* * Note that 'init' is a special process: it doesn't get signals it * doesn't want to handle. Thus you cannot kill init even with a * SIGKILL even by mistake. */ int do_signal(struct pt_regs *regs, sigset_t *oldset, int syscall) { siginfo_t info; int signr; struct k_sigaction ka; /* * We want the common case to go fast, which is why we may in * certain cases get here from kernel mode. Just return * without doing anything if so. 
*/ if (!user_mode(regs)) return 0; if (test_thread_flag(TIF_RESTORE_SIGMASK)) oldset = &current->saved_sigmask; else if (!oldset) oldset = &current->blocked; signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (syscall) { switch (regs->r12) { case -ERESTART_RESTARTBLOCK: case -ERESTARTNOHAND: if (signr > 0) { regs->r12 = -EINTR; break; } /* fall through */ case -ERESTARTSYS: if (signr > 0 && !(ka.sa.sa_flags & SA_RESTART)) { regs->r12 = -EINTR; break; } /* fall through */ case -ERESTARTNOINTR: setup_syscall_restart(regs); } } if (signr == 0) { /* No signal to deliver -- put the saved sigmask back */ if (test_thread_flag(TIF_RESTORE_SIGMASK)) { clear_thread_flag(TIF_RESTORE_SIGMASK); sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); } return 0; } handle_signal(signr, &ka, &info, oldset, regs, syscall); return 1; } asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti) { int syscall = 0; if ((sysreg_read(SR) & MODE_MASK) == MODE_SUPERVISOR) syscall = 1; if (ti->flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) do_signal(regs, &current->blocked, syscall); if (ti->flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); if (current->replacement_session_keyring) key_replace_session_keyring(); } }
gpl-2.0
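The trampoline constant in setup_rt_frame() packs the "mov r8, __NR_rt_sigreturn; scall" pair into one word, with the syscall number inserted by the << 20 shift (hence the guard that the number must fit the short mov form). A quick sketch that only computes the patched word, using an arbitrary example number rather than the real __NR_rt_sigreturn:

#include <stdio.h>

int main(void)
{
	unsigned int nr = 67;	/* example value; must fit the short mov */
	unsigned int retcode = 0x3008d733u | (nr << 20);

	printf("retcode = 0x%08x\n", retcode);	/* 0x3438d733 for nr = 67 */
	return 0;
}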
bgn9000/Shun-Andromeda
arch/sparc/prom/printf.c
12571
1728
/* * printf.c: Internal prom library printf facility. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) * Copyright (c) 2002 Pete Zaitcev (zaitcev@yahoo.com) * * We used to warn all over the code: DO NOT USE prom_printf(), * and yet people do. Anton's banking code was outputting banks * with prom_printf for most of the 2.4 lifetime. Since an effective * stick is not available, we deployed a carrot: an early printk * through PROM by means of -p boot option. This ought to fix it. * USE printk; if you need, deploy -p. */ #include <linux/kernel.h> #include <linux/compiler.h> #include <linux/spinlock.h> #include <asm/openprom.h> #include <asm/oplib.h> #define CONSOLE_WRITE_BUF_SIZE 1024 static char ppbuf[1024]; static char console_write_buf[CONSOLE_WRITE_BUF_SIZE]; static DEFINE_RAW_SPINLOCK(console_write_lock); void notrace prom_write(const char *buf, unsigned int n) { unsigned int dest_len; unsigned long flags; char *dest; dest = console_write_buf; raw_spin_lock_irqsave(&console_write_lock, flags); dest_len = 0; while (n-- != 0) { char ch = *buf++; if (ch == '\n') { *dest++ = '\r'; dest_len++; } *dest++ = ch; dest_len++; if (dest_len >= CONSOLE_WRITE_BUF_SIZE - 1) { prom_console_write_buf(console_write_buf, dest_len); dest = console_write_buf; dest_len = 0; } } if (dest_len) prom_console_write_buf(console_write_buf, dest_len); raw_spin_unlock_irqrestore(&console_write_lock, flags); } void notrace prom_printf(const char *fmt, ...) { va_list args; int i; va_start(args, fmt); i = vscnprintf(ppbuf, sizeof(ppbuf), fmt, args); va_end(args); prom_write(ppbuf, i); }
gpl-2.0
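The LF-to-CRLF staging in prom_write() above is a generic pattern: expand while copying into a bounded buffer, and flush through a sink whenever fewer than two bytes of room remain, since one source byte can expand to two. A minimal userspace sketch follows; crlf_write() and stdout_sink() are hypothetical names, and fwrite() stands in for prom_console_write_buf().

#include <stdio.h>

#define BUF_SIZE 1024

static void crlf_write(const char *buf, unsigned int n,
                       void (*sink)(const char *, unsigned int))
{
        static char out[BUF_SIZE];      /* single shared staging buffer */
        unsigned int len = 0;

        while (n-- != 0) {
                char ch = *buf++;
                if (ch == '\n')
                        out[len++] = '\r';      /* expand LF to CRLF */
                out[len++] = ch;
                if (len >= BUF_SIZE - 1) {      /* keep room for a pair */
                        sink(out, len);
                        len = 0;
                }
        }
        if (len)
                sink(out, len);
}

static void stdout_sink(const char *buf, unsigned int n)
{
        fwrite(buf, 1, n, stdout);
}

int main(void)
{
        crlf_write("one\ntwo\n", 8, stdout_sink);
        return 0;
}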
TEAM-RAZOR-DEVICES/kernel_asus_moorefield
arch/m68k/lib/lshrdi3.c
12827
1623
/* lshrdi3.c extracted from gcc-2.7.2/libgcc2.c which is: */ /* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc. This file is part of GNU CC. GNU CC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GNU CC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GNU CC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #define BITS_PER_UNIT 8 typedef int SItype __attribute__ ((mode (SI))); typedef unsigned int USItype __attribute__ ((mode (SI))); typedef int DItype __attribute__ ((mode (DI))); typedef int word_type __attribute__ ((mode (__word__))); struct DIstruct {SItype high, low;}; typedef union { struct DIstruct s; DItype ll; } DIunion; DItype __lshrdi3 (DItype u, word_type b) { DIunion w; word_type bm; DIunion uu; if (b == 0) return u; uu.ll = u; bm = (sizeof (SItype) * BITS_PER_UNIT) - b; if (bm <= 0) { w.s.high = 0; w.s.low = (USItype)uu.s.high >> -bm; } else { USItype carries = (USItype)uu.s.high << bm; w.s.high = (USItype)uu.s.high >> b; w.s.low = ((USItype)uu.s.low >> b) | carries; } return w.ll; }
gpl-2.0
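__lshrdi3() above builds a 64-bit logical right shift out of two 32-bit halves: for shifts under one word, the bits shifted out of the high word are OR-ed into the low word; for larger shifts, the high word alone supplies the low word. Below is a self-checking re-implementation with <stdint.h> types, written as an illustration rather than the libgcc code itself.

#include <assert.h>
#include <stdint.h>

static uint64_t lshr64(uint64_t u, unsigned int b)
{
        uint32_t hi = (uint32_t)(u >> 32);
        uint32_t lo = (uint32_t)u;
        uint32_t rhi, rlo;

        if (b == 0)
                return u;
        if (b >= 32) {                  /* whole high word shifted out */
                rhi = 0;
                rlo = hi >> (b - 32);
        } else {                        /* carry bits from hi into lo */
                rhi = hi >> b;
                rlo = (lo >> b) | (hi << (32 - b));
        }
        return ((uint64_t)rhi << 32) | rlo;
}

int main(void)
{
        uint64_t v = 0x8000000012345678ull;

        /* compare against the compiler's native 64-bit shift */
        for (unsigned int b = 0; b < 64; b++)
                assert(lshr64(v, b) == v >> b);
        return 0;
}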
wimpknocker/lge-kernel-lproj
arch/powerpc/boot/cuboot-85xx.c
13851
1673
/* * Old U-boot compatibility for 85xx * * Author: Scott Wood <scottwood@freescale.com> * * Copyright (c) 2007 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include "ops.h" #include "stdio.h" #include "cuboot.h" #define TARGET_85xx #define TARGET_HAS_ETH3 #include "ppcboot.h" static bd_t bd; static void platform_fixups(void) { void *soc; dt_fixup_memory(bd.bi_memstart, bd.bi_memsize); dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr); dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr); dt_fixup_mac_address_by_alias("ethernet2", bd.bi_enet2addr); dt_fixup_mac_address_by_alias("ethernet3", bd.bi_enet3addr); dt_fixup_cpu_clocks(bd.bi_intfreq, bd.bi_busfreq / 8, bd.bi_busfreq); /* Unfortunately, the specific model number is encoded in the * soc node name in existing dts files -- once that is fixed, * this can do a simple path lookup. */ soc = find_node_by_devtype(NULL, "soc"); if (soc) { void *serial = NULL; setprop(soc, "bus-frequency", &bd.bi_busfreq, sizeof(bd.bi_busfreq)); while ((serial = find_node_by_devtype(serial, "serial"))) { if (get_parent(serial) != soc) continue; setprop(serial, "clock-frequency", &bd.bi_busfreq, sizeof(bd.bi_busfreq)); } } } void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7) { CUBOOT_INIT(); fdt_init(_dtb_start); serial_console_init(); platform_ops.fixups = platform_fixups; }
gpl-2.0
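The fixup in platform_fixups() above walks every "serial" node and patches its clock-frequency only when the node's parent is the SoC node. The sketch below models just that parent filter over a flat in-memory list; struct node, tree[] and fixup_serial_clocks() are hypothetical stand-ins for the boot wrapper's find_node_by_devtype()/get_parent()/setprop() machinery.

#include <stdio.h>
#include <string.h>

struct node {
        const char *devtype;
        struct node *parent;
        unsigned int clock_frequency;   /* stands in for the DT property */
};

/* hypothetical flat "device tree": one soc node, two serial children,
 * and one serial node elsewhere that must be skipped */
static struct node soc     = { "soc",    NULL, 0 };
static struct node uart0   = { "serial", &soc, 0 };
static struct node uart1   = { "serial", &soc, 0 };
static struct node offsoc  = { "serial", NULL, 0 };
static struct node *tree[] = { &soc, &uart0, &uart1, &offsoc, NULL };

static void fixup_serial_clocks(unsigned int busfreq)
{
        for (struct node **n = tree; *n; n++) {
                if (strcmp((*n)->devtype, "serial") != 0)
                        continue;
                if ((*n)->parent != &soc)       /* only UARTs on the SoC bus */
                        continue;
                (*n)->clock_frequency = busfreq;
        }
}

int main(void)
{
        fixup_serial_clocks(333333333);
        printf("uart0=%u uart1=%u offsoc=%u\n",
               uart0.clock_frequency, uart1.clock_frequency,
               offsoc.clock_frequency);
        return 0;
}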
janrinze/loox7xxport
drivers/net/macsonic.c
28
19310
/* * macsonic.c * * (C) 2005 Finn Thain * * Converted to DMA API, converted to unified driver model, made it work as * a module again, and from the mac68k project, introduced more 32-bit cards * and dhd's support for 16-bit cards. * * (C) 1998 Alan Cox * * Debugging Andreas Ehliar, Michael Schmitz * * Based on code * (C) 1996 by Thomas Bogendoerfer (tsbogend@bigbug.franken.de) * * This driver is based on work from Andreas Busse, but most of * the code is rewritten. * * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de) * * A driver for the Mac onboard Sonic ethernet chip. * * 98/12/21 MSch: judged from tests on Q800, it's basically working, * but eating up both receive and transmit resources * and duplicating packets. Needs more testing. * * 99/01/03 MSch: upgraded to version 0.92 of the core driver, fixed. * * 00/10/31 sammy@oh.verio.com: Updated driver for 2.4 kernels, fixed problems * on centris. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/nubus.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/bitrev.h> #include <asm/bootinfo.h> #include <asm/system.h> #include <asm/pgtable.h> #include <asm/io.h> #include <asm/hwtest.h> #include <asm/dma.h> #include <asm/macintosh.h> #include <asm/macints.h> #include <asm/mac_via.h> static char mac_sonic_string[] = "macsonic"; static struct platform_device *mac_sonic_device; #include "sonic.h" /* These should basically be bus-size and endian independent (since the SONIC is at least smart enough that it uses the same endianness as the host, unlike certain less enlightened Macintosh NICs) */ #define SONIC_READ(reg) (nubus_readw(dev->base_addr + (reg * 4) \ + lp->reg_offset)) #define SONIC_WRITE(reg,val) (nubus_writew(val, dev->base_addr + (reg * 4) \ + lp->reg_offset)) /* use 0 for production, 1 for verification, >1 for debug */ #ifdef SONIC_DEBUG static unsigned int sonic_debug = SONIC_DEBUG; #else static unsigned int sonic_debug = 1; #endif static int sonic_version_printed; extern int mac_onboard_sonic_probe(struct net_device* dev); extern int mac_nubus_sonic_probe(struct net_device* dev); /* For onboard SONIC */ #define ONBOARD_SONIC_REGISTERS 0x50F0A000 #define ONBOARD_SONIC_PROM_BASE 0x50f08000 enum macsonic_type { MACSONIC_DUODOCK, MACSONIC_APPLE, MACSONIC_APPLE16, MACSONIC_DAYNA, MACSONIC_DAYNALINK }; /* For the built-in SONIC in the Duo Dock */ #define DUODOCK_SONIC_REGISTERS 0xe10000 #define DUODOCK_SONIC_PROM_BASE 0xe12000 /* For Apple-style NuBus SONIC */ #define APPLE_SONIC_REGISTERS 0 #define APPLE_SONIC_PROM_BASE 0x40000 /* Daynalink LC SONIC */ #define DAYNALINK_PROM_BASE 0x400000 /* For Dayna-style NuBus SONIC (haven't seen one yet) */ #define DAYNA_SONIC_REGISTERS 0x180000 /* This is what OpenBSD says. 
However, this is definitely in NuBus ROM space so we should be able to get it by walking the NuBus resource directories */ #define DAYNA_SONIC_MAC_ADDR 0xffe004 #define SONIC_READ_PROM(addr) nubus_readb(prom_addr+addr) /* * For reversing the PROM address */ static inline void bit_reverse_addr(unsigned char addr[6]) { int i; for(i = 0; i < 6; i++) addr[i] = bitrev8(addr[i]); } static irqreturn_t macsonic_interrupt(int irq, void *dev_id) { irqreturn_t result; unsigned long flags; local_irq_save(flags); result = sonic_interrupt(irq, dev_id); local_irq_restore(flags); return result; } static int macsonic_open(struct net_device* dev) { if (request_irq(dev->irq, &sonic_interrupt, IRQ_FLG_FAST, "sonic", dev)) { printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq); return -EAGAIN; } /* Under the A/UX interrupt scheme, the onboard SONIC interrupt comes * in at priority level 3. However, we sometimes get the level 2 inter- * rupt as well, which must prevent re-entrance of the sonic handler. */ if (dev->irq == IRQ_AUTO_3) if (request_irq(IRQ_NUBUS_9, &macsonic_interrupt, IRQ_FLG_FAST, "sonic", dev)) { printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, IRQ_NUBUS_9); free_irq(dev->irq, dev); return -EAGAIN; } return sonic_open(dev); } static int macsonic_close(struct net_device* dev) { int err; err = sonic_close(dev); free_irq(dev->irq, dev); if (dev->irq == IRQ_AUTO_3) free_irq(IRQ_NUBUS_9, dev); return err; } int __init macsonic_init(struct net_device* dev) { struct sonic_local* lp = netdev_priv(dev); /* Allocate the entire chunk of memory for the descriptors. Note that this cannot cross a 64K boundary. */ if ((lp->descriptors = dma_alloc_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), &lp->descriptors_laddr, GFP_KERNEL)) == NULL) { printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n", lp->device->bus_id); return -ENOMEM; } /* Now set up the pointers to point to the appropriate places */ lp->cda = lp->descriptors; lp->tda = lp->cda + (SIZEOF_SONIC_CDA * SONIC_BUS_SCALE(lp->dma_bitmode)); lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS * SONIC_BUS_SCALE(lp->dma_bitmode)); lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS * SONIC_BUS_SCALE(lp->dma_bitmode)); lp->cda_laddr = lp->descriptors_laddr; lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA * SONIC_BUS_SCALE(lp->dma_bitmode)); lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS * SONIC_BUS_SCALE(lp->dma_bitmode)); lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS * SONIC_BUS_SCALE(lp->dma_bitmode)); dev->open = macsonic_open; dev->stop = macsonic_close; dev->hard_start_xmit = sonic_send_packet; dev->get_stats = sonic_get_stats; dev->set_multicast_list = &sonic_multicast_list; dev->tx_timeout = sonic_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; /* * clear tally counter */ SONIC_WRITE(SONIC_CRCT, 0xffff); SONIC_WRITE(SONIC_FAET, 0xffff); SONIC_WRITE(SONIC_MPT, 0xffff); return 0; } int __init mac_onboard_sonic_ethernet_addr(struct net_device* dev) { struct sonic_local *lp = netdev_priv(dev); const int prom_addr = ONBOARD_SONIC_PROM_BASE; int i; DECLARE_MAC_BUF(mac); /* On NuBus boards we can sometimes look in the ROM resources. No such luck for comm-slot/onboard. */ for(i = 0; i < 6; i++) dev->dev_addr[i] = SONIC_READ_PROM(i); /* Most of the time, the address is bit-reversed. The NetBSD source has a rather long and detailed historical account of why this is so. 
*/ if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) && memcmp(dev->dev_addr, "\x00\xA0\x40", 3) && memcmp(dev->dev_addr, "\x00\x80\x19", 3) && memcmp(dev->dev_addr, "\x00\x05\x02", 3)) bit_reverse_addr(dev->dev_addr); else return 0; /* If we still have what seems to be a bogus address, we'll look in the CAM. The top entry should be ours. */ /* Danger! This only works if MacOS has already initialized the card... */ if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) && memcmp(dev->dev_addr, "\x00\xA0\x40", 3) && memcmp(dev->dev_addr, "\x00\x80\x19", 3) && memcmp(dev->dev_addr, "\x00\x05\x02", 3)) { unsigned short val; printk(KERN_INFO "macsonic: PROM seems to be wrong, trying CAM entry 15\n"); SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); SONIC_WRITE(SONIC_CEP, 15); val = SONIC_READ(SONIC_CAP2); dev->dev_addr[5] = val >> 8; dev->dev_addr[4] = val & 0xff; val = SONIC_READ(SONIC_CAP1); dev->dev_addr[3] = val >> 8; dev->dev_addr[2] = val & 0xff; val = SONIC_READ(SONIC_CAP0); dev->dev_addr[1] = val >> 8; dev->dev_addr[0] = val & 0xff; printk(KERN_INFO "HW Address from CAM 15: %s\n", print_mac(mac, dev->dev_addr)); } else return 0; if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) && memcmp(dev->dev_addr, "\x00\xA0\x40", 3) && memcmp(dev->dev_addr, "\x00\x80\x19", 3) && memcmp(dev->dev_addr, "\x00\x05\x02", 3)) { /* * Still nonsense ... messed up someplace! */ printk(KERN_ERR "macsonic: ERROR (INVALID MAC)\n"); return -EIO; } else return 0; } int __init mac_onboard_sonic_probe(struct net_device* dev) { /* Bwahahaha */ static int once_is_more_than_enough; struct sonic_local* lp = netdev_priv(dev); int sr; int commslot = 0; if (once_is_more_than_enough) return -ENODEV; once_is_more_than_enough = 1; if (!MACH_IS_MAC) return -ENODEV; if (macintosh_config->ether_type != MAC_ETHER_SONIC) return -ENODEV; printk(KERN_INFO "Checking for internal Macintosh ethernet (SONIC).. "); /* Bogus probing, on the models which may or may not have Ethernet (BTW, the Ethernet *is* always at the same address, and nothing else lives there, at least if Apple's documentation is to be believed) */ if (macintosh_config->ident == MAC_MODEL_Q630 || macintosh_config->ident == MAC_MODEL_P588 || macintosh_config->ident == MAC_MODEL_P575 || macintosh_config->ident == MAC_MODEL_C610) { unsigned long flags; int card_present; local_irq_save(flags); card_present = hwreg_present((void*)ONBOARD_SONIC_REGISTERS); local_irq_restore(flags); if (!card_present) { printk("none.\n"); return -ENODEV; } commslot = 1; } printk("yes\n"); /* Danger! My arms are flailing wildly! You *must* set lp->reg_offset * and dev->base_addr before using SONIC_READ() or SONIC_WRITE() */ dev->base_addr = ONBOARD_SONIC_REGISTERS; if (via_alt_mapping) dev->irq = IRQ_AUTO_3; else dev->irq = IRQ_NUBUS_9; if (!sonic_version_printed) { printk(KERN_INFO "%s", version); sonic_version_printed = 1; } printk(KERN_INFO "%s: onboard / comm-slot SONIC at 0x%08lx\n", lp->device->bus_id, dev->base_addr); /* The PowerBook's SONIC is 16 bit always. */ if (macintosh_config->ident == MAC_MODEL_PB520) { lp->reg_offset = 0; lp->dma_bitmode = SONIC_BITMODE16; sr = SONIC_READ(SONIC_SR); } else if (commslot) { /* Some of the comm-slot cards are 16 bit. But some of them are not. The 32-bit cards use offset 2 and have known revisions, we try reading the revision register at offset 2, if we don't get a known revision we assume 16 bit at offset 0. 
*/ lp->reg_offset = 2; lp->dma_bitmode = SONIC_BITMODE16; sr = SONIC_READ(SONIC_SR); if (sr == 0x0004 || sr == 0x0006 || sr == 0x0100 || sr == 0x0101) /* 83932 is 0x0004 or 0x0006, 83934 is 0x0100 or 0x0101 */ lp->dma_bitmode = SONIC_BITMODE32; else { lp->dma_bitmode = SONIC_BITMODE16; lp->reg_offset = 0; sr = SONIC_READ(SONIC_SR); } } else { /* All onboard cards are at offset 2 with 32 bit DMA. */ lp->reg_offset = 2; lp->dma_bitmode = SONIC_BITMODE32; sr = SONIC_READ(SONIC_SR); } printk(KERN_INFO "%s: revision 0x%04x, using %d bit DMA and register offset %d\n", lp->device->bus_id, sr, lp->dma_bitmode?32:16, lp->reg_offset); #if 0 /* This is sometimes useful to find out how MacOS configured the card. */ printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", lp->device->bus_id, SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff); #endif /* Software reset, then initialize control registers. */ SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); SONIC_WRITE(SONIC_DCR, SONIC_DCR_EXBUS | SONIC_DCR_BMS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | (lp->dma_bitmode ? SONIC_DCR_DW : 0)); /* This *must* be written back to in order to restore the * extended programmable output bits, as it may not have been * initialised since the hardware reset. */ SONIC_WRITE(SONIC_DCR2, 0); /* Clear *and* disable interrupts to be on the safe side */ SONIC_WRITE(SONIC_IMR, 0); SONIC_WRITE(SONIC_ISR, 0x7fff); /* Now look for the MAC address. */ if (mac_onboard_sonic_ethernet_addr(dev) != 0) return -ENODEV; /* Shared init code */ return macsonic_init(dev); } int __init mac_nubus_sonic_ethernet_addr(struct net_device* dev, unsigned long prom_addr, int id) { int i; for(i = 0; i < 6; i++) dev->dev_addr[i] = SONIC_READ_PROM(i); /* Some of the addresses are bit-reversed */ if (id != MACSONIC_DAYNA) bit_reverse_addr(dev->dev_addr); return 0; } int __init macsonic_ident(struct nubus_dev* ndev) { if (ndev->dr_hw == NUBUS_DRHW_ASANTE_LC && ndev->dr_sw == NUBUS_DRSW_SONIC_LC) return MACSONIC_DAYNALINK; if (ndev->dr_hw == NUBUS_DRHW_SONIC && ndev->dr_sw == NUBUS_DRSW_APPLE) { /* There has to be a better way to do this... */ if (strstr(ndev->board->name, "DuoDock")) return MACSONIC_DUODOCK; else return MACSONIC_APPLE; } if (ndev->dr_hw == NUBUS_DRHW_SMC9194 && ndev->dr_sw == NUBUS_DRSW_DAYNA) return MACSONIC_DAYNA; if (ndev->dr_hw == NUBUS_DRHW_APPLE_SONIC_LC && ndev->dr_sw == 0) { /* huh? */ return MACSONIC_APPLE16; } return -1; } int __init mac_nubus_sonic_probe(struct net_device* dev) { static int slots; struct nubus_dev* ndev = NULL; struct sonic_local* lp = netdev_priv(dev); unsigned long base_addr, prom_addr; u16 sonic_dcr; int id = -1; int reg_offset, dma_bitmode; /* Find the first SONIC that hasn't been initialized already */ while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET, ndev)) != NULL) { /* Have we seen it already? */ if (slots & (1<<ndev->board->slot)) continue; slots |= 1<<ndev->board->slot; /* Is it one of ours? 
*/ if ((id = macsonic_ident(ndev)) != -1) break; } if (ndev == NULL) return -ENODEV; switch (id) { case MACSONIC_DUODOCK: base_addr = ndev->board->slot_addr + DUODOCK_SONIC_REGISTERS; prom_addr = ndev->board->slot_addr + DUODOCK_SONIC_PROM_BASE; sonic_dcr = SONIC_DCR_EXBUS | SONIC_DCR_RFT0 | SONIC_DCR_RFT1 | SONIC_DCR_TFT0; reg_offset = 2; dma_bitmode = SONIC_BITMODE32; break; case MACSONIC_APPLE: base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS; prom_addr = ndev->board->slot_addr + APPLE_SONIC_PROM_BASE; sonic_dcr = SONIC_DCR_BMS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0; reg_offset = 0; dma_bitmode = SONIC_BITMODE32; break; case MACSONIC_APPLE16: base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS; prom_addr = ndev->board->slot_addr + APPLE_SONIC_PROM_BASE; sonic_dcr = SONIC_DCR_EXBUS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | SONIC_DCR_PO1 | SONIC_DCR_BMS; reg_offset = 0; dma_bitmode = SONIC_BITMODE16; break; case MACSONIC_DAYNALINK: base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS; prom_addr = ndev->board->slot_addr + DAYNALINK_PROM_BASE; sonic_dcr = SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | SONIC_DCR_PO1 | SONIC_DCR_BMS; reg_offset = 0; dma_bitmode = SONIC_BITMODE16; break; case MACSONIC_DAYNA: base_addr = ndev->board->slot_addr + DAYNA_SONIC_REGISTERS; prom_addr = ndev->board->slot_addr + DAYNA_SONIC_MAC_ADDR; sonic_dcr = SONIC_DCR_BMS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | SONIC_DCR_PO1; reg_offset = 0; dma_bitmode = SONIC_BITMODE16; break; default: printk(KERN_ERR "macsonic: WTF, id is %d\n", id); return -ENODEV; } /* Danger! My arms are flailing wildly! You *must* set lp->reg_offset * and dev->base_addr before using SONIC_READ() or SONIC_WRITE() */ dev->base_addr = base_addr; lp->reg_offset = reg_offset; lp->dma_bitmode = dma_bitmode; dev->irq = SLOT2IRQ(ndev->board->slot); if (!sonic_version_printed) { printk(KERN_INFO "%s", version); sonic_version_printed = 1; } printk(KERN_INFO "%s: %s in slot %X\n", lp->device->bus_id, ndev->board->name, ndev->board->slot); printk(KERN_INFO "%s: revision 0x%04x, using %d bit DMA and register offset %d\n", lp->device->bus_id, SONIC_READ(SONIC_SR), dma_bitmode?32:16, reg_offset); #if 0 /* This is sometimes useful to find out how MacOS configured the card. */ printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", lp->device->bus_id, SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff); #endif /* Software reset, then initialize control registers. */ SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); SONIC_WRITE(SONIC_DCR, sonic_dcr | (dma_bitmode ? SONIC_DCR_DW : 0)); /* This *must* be written back to in order to restore the * extended programmable output bits, since it may not have been * initialised since the hardware reset. */ SONIC_WRITE(SONIC_DCR2, 0); /* Clear *and* disable interrupts to be on the safe side */ SONIC_WRITE(SONIC_IMR, 0); SONIC_WRITE(SONIC_ISR, 0x7fff); /* Now look for the MAC address. 
*/ if (mac_nubus_sonic_ethernet_addr(dev, prom_addr, id) != 0) return -ENODEV; /* Shared init code */ return macsonic_init(dev); } static int __init mac_sonic_probe(struct platform_device *pdev) { struct net_device *dev; struct sonic_local *lp; int err; DECLARE_MAC_BUF(mac); dev = alloc_etherdev(sizeof(struct sonic_local)); if (!dev) return -ENOMEM; lp = netdev_priv(dev); lp->device = &pdev->dev; SET_NETDEV_DEV(dev, &pdev->dev); /* This will catch fatal stuff like -ENOMEM as well as success */ err = mac_onboard_sonic_probe(dev); if (err == 0) goto found; if (err != -ENODEV) goto out; err = mac_nubus_sonic_probe(dev); if (err) goto out; found: err = register_netdev(dev); if (err) goto out; printk("%s: MAC %s IRQ %d\n", dev->name, print_mac(mac, dev->dev_addr), dev->irq); return 0; out: free_netdev(dev); return err; } MODULE_DESCRIPTION("Macintosh SONIC ethernet driver"); module_param(sonic_debug, int, 0); MODULE_PARM_DESC(sonic_debug, "macsonic debug level (1-4)"); #include "sonic.c" static int __devexit mac_sonic_device_remove (struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct sonic_local* lp = netdev_priv(dev); unregister_netdev(dev); dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), lp->descriptors, lp->descriptors_laddr); free_netdev(dev); return 0; } static struct platform_driver mac_sonic_driver = { .probe = mac_sonic_probe, .remove = __devexit_p(mac_sonic_device_remove), .driver = { .name = mac_sonic_string, }, }; static int __init mac_sonic_init_module(void) { int err; if ((err = platform_driver_register(&mac_sonic_driver))) { printk(KERN_ERR "Driver registration failed\n"); return err; } mac_sonic_device = platform_device_alloc(mac_sonic_string, 0); if (!mac_sonic_device) goto out_unregister; if (platform_device_add(mac_sonic_device)) { platform_device_put(mac_sonic_device); mac_sonic_device = NULL; } return 0; out_unregister: platform_driver_unregister(&mac_sonic_driver); return -ENOMEM; } static void __exit mac_sonic_cleanup_module(void) { platform_driver_unregister(&mac_sonic_driver); if (mac_sonic_device) { platform_device_unregister(mac_sonic_device); mac_sonic_device = NULL; } } module_init(mac_sonic_init_module); module_exit(mac_sonic_cleanup_module);
gpl-2.0
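The PROM MAC addresses handled above are often stored bit-reversed, which bit_reverse_addr() undoes byte by byte. The sketch below is a self-contained version: the kernel takes bitrev8() from <linux/bitrev.h>, so the per-byte reversal is re-implemented here only to make the example run in userspace.

#include <stdio.h>

static unsigned char bitrev8(unsigned char b)
{
        unsigned char r = 0;

        for (int i = 0; i < 8; i++)
                if (b & (1 << i))
                        r |= 1 << (7 - i);      /* mirror bit i to bit 7-i */
        return r;
}

static void bit_reverse_addr(unsigned char addr[6])
{
        for (int i = 0; i < 6; i++)
                addr[i] = bitrev8(addr[i]);
}

int main(void)
{
        /* the Apple OUI 08:00:07 as it would appear bit-reversed in PROM */
        unsigned char mac[6] = { 0x10, 0x00, 0xe0, 0x12, 0x34, 0x56 };

        bit_reverse_addr(mac);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}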
HeydayGuan/linux-3.6.7
drivers/rtc/rtc-mc13xxx.c
284
9542
/* * Real Time Clock driver for Freescale MC13XXX PMIC * * (C) 2009 Sascha Hauer, Pengutronix * (C) 2009 Uwe Kleine-Koenig, Pengutronix * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/mfd/mc13xxx.h> #include <linux/platform_device.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/rtc.h> #define DRIVER_NAME "mc13xxx-rtc" #define MC13XXX_RTCTOD 20 #define MC13XXX_RTCTODA 21 #define MC13XXX_RTCDAY 22 #define MC13XXX_RTCDAYA 23 struct mc13xxx_rtc { struct rtc_device *rtc; struct mc13xxx *mc13xxx; int valid; }; static int mc13xxx_rtc_irq_enable_unlocked(struct device *dev, unsigned int enabled, int irq) { struct mc13xxx_rtc *priv = dev_get_drvdata(dev); int (*func)(struct mc13xxx *mc13xxx, int irq); if (!priv->valid) return -ENODATA; func = enabled ? mc13xxx_irq_unmask : mc13xxx_irq_mask; return func(priv->mc13xxx, irq); } static int mc13xxx_rtc_irq_enable(struct device *dev, unsigned int enabled, int irq) { struct mc13xxx_rtc *priv = dev_get_drvdata(dev); int ret; mc13xxx_lock(priv->mc13xxx); ret = mc13xxx_rtc_irq_enable_unlocked(dev, enabled, irq); mc13xxx_unlock(priv->mc13xxx); return ret; } static int mc13xxx_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct mc13xxx_rtc *priv = dev_get_drvdata(dev); unsigned int seconds, days1, days2; unsigned long s1970; int ret; mc13xxx_lock(priv->mc13xxx); if (!priv->valid) { ret = -ENODATA; goto out; } ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCDAY, &days1); if (unlikely(ret)) goto out; ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCTOD, &seconds); if (unlikely(ret)) goto out; ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCDAY, &days2); out: mc13xxx_unlock(priv->mc13xxx); if (ret) return ret; if (days2 == days1 + 1) { if (seconds >= 86400 / 2) days2 = days1; else days1 = days2; } if (days1 != days2) return -EIO; s1970 = days1 * 86400 + seconds; rtc_time_to_tm(s1970, tm); return rtc_valid_tm(tm); } static int mc13xxx_rtc_set_mmss(struct device *dev, unsigned long secs) { struct mc13xxx_rtc *priv = dev_get_drvdata(dev); unsigned int seconds, days; unsigned int alarmseconds; int ret; seconds = secs % 86400; days = secs / 86400; mc13xxx_lock(priv->mc13xxx); /* * temporarily invalidate alarm to prevent triggering it when the day is * already updated while the time isn't yet. 
*/ ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCTODA, &alarmseconds); if (unlikely(ret)) goto out; if (alarmseconds < 86400) { ret = mc13xxx_reg_write(priv->mc13xxx, MC13XXX_RTCTODA, 0x1ffff); if (unlikely(ret)) goto out; } /* * write seconds=0 to prevent a day switch between writing days * and seconds below */ ret = mc13xxx_reg_write(priv->mc13xxx, MC13XXX_RTCTOD, 0); if (unlikely(ret)) goto out; ret = mc13xxx_reg_write(priv->mc13xxx, MC13XXX_RTCDAY, days); if (unlikely(ret)) goto out; ret = mc13xxx_reg_write(priv->mc13xxx, MC13XXX_RTCTOD, seconds); if (unlikely(ret)) goto out; /* restore alarm */ if (alarmseconds < 86400) { ret = mc13xxx_reg_write(priv->mc13xxx, MC13XXX_RTCTODA, alarmseconds); if (unlikely(ret)) goto out; } ret = mc13xxx_irq_ack(priv->mc13xxx, MC13XXX_IRQ_RTCRST); if (unlikely(ret)) goto out; ret = mc13xxx_irq_unmask(priv->mc13xxx, MC13XXX_IRQ_RTCRST); out: priv->valid = !ret; mc13xxx_unlock(priv->mc13xxx); return ret; } static int mc13xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) { struct mc13xxx_rtc *priv = dev_get_drvdata(dev); unsigned seconds, days; unsigned long s1970; int enabled, pending; int ret; mc13xxx_lock(priv->mc13xxx); ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCTODA, &seconds); if (unlikely(ret)) goto out; if (seconds >= 86400) { ret = -ENODATA; goto out; } ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCDAY, &days); if (unlikely(ret)) goto out; ret = mc13xxx_irq_status(priv->mc13xxx, MC13XXX_IRQ_TODA, &enabled, &pending); out: mc13xxx_unlock(priv->mc13xxx); if (ret) return ret; alarm->enabled = enabled; alarm->pending = pending; s1970 = days * 86400 + seconds; rtc_time_to_tm(s1970, &alarm->time); dev_dbg(dev, "%s: %lu\n", __func__, s1970); return 0; } static int mc13xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) { struct mc13xxx_rtc *priv = dev_get_drvdata(dev); unsigned long s1970; unsigned seconds, days; int ret; mc13xxx_lock(priv->mc13xxx); /* disable alarm to prevent false triggering */ ret = mc13xxx_reg_write(priv->mc13xxx, MC13XXX_RTCTODA, 0x1ffff); if (unlikely(ret)) goto out; ret = mc13xxx_irq_ack(priv->mc13xxx, MC13XXX_IRQ_TODA); if (unlikely(ret)) goto out; ret = rtc_tm_to_time(&alarm->time, &s1970); if (unlikely(ret)) goto out; dev_dbg(dev, "%s: o%.2s %lu\n", __func__, alarm->enabled ?
"n" : "ff", s1970); ret = mc13xxx_rtc_irq_enable_unlocked(dev, alarm->enabled, MC13XXX_IRQ_TODA); if (unlikely(ret)) goto out; seconds = s1970 % 86400; days = s1970 / 86400; ret = mc13xxx_reg_write(priv->mc13xxx, MC13XXX_RTCDAYA, days); if (unlikely(ret)) goto out; ret = mc13xxx_reg_write(priv->mc13xxx, MC13XXX_RTCTODA, seconds); out: mc13xxx_unlock(priv->mc13xxx); return ret; } static irqreturn_t mc13xxx_rtc_alarm_handler(int irq, void *dev) { struct mc13xxx_rtc *priv = dev; struct mc13xxx *mc13xxx = priv->mc13xxx; dev_dbg(&priv->rtc->dev, "Alarm\n"); rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_AF); mc13xxx_irq_ack(mc13xxx, irq); return IRQ_HANDLED; } static irqreturn_t mc13xxx_rtc_update_handler(int irq, void *dev) { struct mc13xxx_rtc *priv = dev; struct mc13xxx *mc13xxx = priv->mc13xxx; dev_dbg(&priv->rtc->dev, "1HZ\n"); rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_UF); mc13xxx_irq_ack(mc13xxx, irq); return IRQ_HANDLED; } static int mc13xxx_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) { return mc13xxx_rtc_irq_enable(dev, enabled, MC13XXX_IRQ_TODA); } static const struct rtc_class_ops mc13xxx_rtc_ops = { .read_time = mc13xxx_rtc_read_time, .set_mmss = mc13xxx_rtc_set_mmss, .read_alarm = mc13xxx_rtc_read_alarm, .set_alarm = mc13xxx_rtc_set_alarm, .alarm_irq_enable = mc13xxx_rtc_alarm_irq_enable, }; static irqreturn_t mc13xxx_rtc_reset_handler(int irq, void *dev) { struct mc13xxx_rtc *priv = dev; struct mc13xxx *mc13xxx = priv->mc13xxx; dev_dbg(&priv->rtc->dev, "RTCRST\n"); priv->valid = 0; mc13xxx_irq_mask(mc13xxx, irq); return IRQ_HANDLED; } static int __init mc13xxx_rtc_probe(struct platform_device *pdev) { int ret; struct mc13xxx_rtc *priv; struct mc13xxx *mc13xxx; int rtcrst_pending; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; mc13xxx = dev_get_drvdata(pdev->dev.parent); priv->mc13xxx = mc13xxx; platform_set_drvdata(pdev, priv); mc13xxx_lock(mc13xxx); ret = mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_RTCRST, mc13xxx_rtc_reset_handler, DRIVER_NAME, priv); if (ret) goto err_reset_irq_request; ret = mc13xxx_irq_status(mc13xxx, MC13XXX_IRQ_RTCRST, NULL, &rtcrst_pending); if (ret) goto err_reset_irq_status; priv->valid = !rtcrst_pending; ret = mc13xxx_irq_request_nounmask(mc13xxx, MC13XXX_IRQ_1HZ, mc13xxx_rtc_update_handler, DRIVER_NAME, priv); if (ret) goto err_update_irq_request; ret = mc13xxx_irq_request_nounmask(mc13xxx, MC13XXX_IRQ_TODA, mc13xxx_rtc_alarm_handler, DRIVER_NAME, priv); if (ret) goto err_alarm_irq_request; mc13xxx_unlock(mc13xxx); priv->rtc = rtc_device_register(pdev->name, &pdev->dev, &mc13xxx_rtc_ops, THIS_MODULE); if (IS_ERR(priv->rtc)) { ret = PTR_ERR(priv->rtc); mc13xxx_lock(mc13xxx); mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_TODA, priv); err_alarm_irq_request: mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_1HZ, priv); err_update_irq_request: err_reset_irq_status: mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_RTCRST, priv); err_reset_irq_request: mc13xxx_unlock(mc13xxx); platform_set_drvdata(pdev, NULL); kfree(priv); } return ret; } static int __exit mc13xxx_rtc_remove(struct platform_device *pdev) { struct mc13xxx_rtc *priv = platform_get_drvdata(pdev); mc13xxx_lock(priv->mc13xxx); rtc_device_unregister(priv->rtc); mc13xxx_irq_free(priv->mc13xxx, MC13XXX_IRQ_TODA, priv); mc13xxx_irq_free(priv->mc13xxx, MC13XXX_IRQ_1HZ, priv); mc13xxx_irq_free(priv->mc13xxx, MC13XXX_IRQ_RTCRST, priv); mc13xxx_unlock(priv->mc13xxx); platform_set_drvdata(pdev, NULL); kfree(priv); return 0; } static const struct platform_device_id mc13xxx_rtc_idtable[] = { { .name = 
"mc13783-rtc", }, { .name = "mc13892-rtc", }, { .name = "mc34708-rtc", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(platform, mc13xxx_rtc_idtable); static struct platform_driver mc13xxx_rtc_driver = { .id_table = mc13xxx_rtc_idtable, .remove = __exit_p(mc13xxx_rtc_remove), .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, }, }; static int __init mc13xxx_rtc_init(void) { return platform_driver_probe(&mc13xxx_rtc_driver, &mc13xxx_rtc_probe); } module_init(mc13xxx_rtc_init); static void __exit mc13xxx_rtc_exit(void) { platform_driver_unregister(&mc13xxx_rtc_driver); } module_exit(mc13xxx_rtc_exit); MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); MODULE_DESCRIPTION("RTC driver for Freescale MC13XXX PMIC"); MODULE_LICENSE("GPL v2");
gpl-2.0
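mc13xxx_rtc_read_time() above guards against a torn read: DAY and TOD live in separate registers, so DAY is sampled before and after the TOD read and the pair is reconciled if the day counter ticked in between. A minimal sketch of just that reconciliation follows; read_day()/read_tod() are hypothetical accessors standing in for mc13xxx_reg_read().

#include <stdio.h>

/* pretend hardware state for the sketch */
static unsigned int hw_day = 15000, hw_tod = 86399;

static unsigned int read_day(void) { return hw_day; }
static unsigned int read_tod(void) { return hw_tod; }

static int read_time(unsigned long *s1970)
{
        unsigned int days1 = read_day();
        unsigned int seconds = read_tod();
        unsigned int days2 = read_day();

        if (days2 == days1 + 1) {
                /* day rolled over mid-read: pick the half that matches
                 * whether TOD was sampled before or after midnight */
                if (seconds >= 86400 / 2)
                        days2 = days1;
                else
                        days1 = days2;
        }
        if (days1 != days2)
                return -1;      /* still inconsistent, give up */

        *s1970 = (unsigned long)days1 * 86400 + seconds;
        return 0;
}

int main(void)
{
        unsigned long t;

        if (read_time(&t) == 0)
                printf("seconds since 1970: %lu\n", t);
        return 0;
}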
Victor-android/kernel_huawei
sound/soc/codecs/wm8900.c
540
40800
/* * wm8900.c -- WM8900 ALSA Soc Audio driver * * Copyright 2007, 2008 Wolfson Microelectronics PLC. * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * TODO: * - Tristating. * - TDM. * - Jack detect. * - FLL source configuration, currently only MCLK is supported. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/i2c.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/initval.h> #include <sound/tlv.h> #include "wm8900.h" /* WM8900 register space */ #define WM8900_REG_RESET 0x0 #define WM8900_REG_ID 0x0 #define WM8900_REG_POWER1 0x1 #define WM8900_REG_POWER2 0x2 #define WM8900_REG_POWER3 0x3 #define WM8900_REG_AUDIO1 0x4 #define WM8900_REG_AUDIO2 0x5 #define WM8900_REG_CLOCKING1 0x6 #define WM8900_REG_CLOCKING2 0x7 #define WM8900_REG_AUDIO3 0x8 #define WM8900_REG_AUDIO4 0x9 #define WM8900_REG_DACCTRL 0xa #define WM8900_REG_LDAC_DV 0xb #define WM8900_REG_RDAC_DV 0xc #define WM8900_REG_SIDETONE 0xd #define WM8900_REG_ADCCTRL 0xe #define WM8900_REG_LADC_DV 0xf #define WM8900_REG_RADC_DV 0x10 #define WM8900_REG_GPIO 0x12 #define WM8900_REG_INCTL 0x15 #define WM8900_REG_LINVOL 0x16 #define WM8900_REG_RINVOL 0x17 #define WM8900_REG_INBOOSTMIX1 0x18 #define WM8900_REG_INBOOSTMIX2 0x19 #define WM8900_REG_ADCPATH 0x1a #define WM8900_REG_AUXBOOST 0x1b #define WM8900_REG_ADDCTL 0x1e #define WM8900_REG_FLLCTL1 0x24 #define WM8900_REG_FLLCTL2 0x25 #define WM8900_REG_FLLCTL3 0x26 #define WM8900_REG_FLLCTL4 0x27 #define WM8900_REG_FLLCTL5 0x28 #define WM8900_REG_FLLCTL6 0x29 #define WM8900_REG_LOUTMIXCTL1 0x2c #define WM8900_REG_ROUTMIXCTL1 0x2d #define WM8900_REG_BYPASS1 0x2e #define WM8900_REG_BYPASS2 0x2f #define WM8900_REG_AUXOUT_CTL 0x30 #define WM8900_REG_LOUT1CTL 0x33 #define WM8900_REG_ROUT1CTL 0x34 #define WM8900_REG_LOUT2CTL 0x35 #define WM8900_REG_ROUT2CTL 0x36 #define WM8900_REG_HPCTL1 0x3a #define WM8900_REG_OUTBIASCTL 0x73 #define WM8900_MAXREG 0x80 #define WM8900_REG_ADDCTL_OUT1_DIS 0x80 #define WM8900_REG_ADDCTL_OUT2_DIS 0x40 #define WM8900_REG_ADDCTL_VMID_DIS 0x20 #define WM8900_REG_ADDCTL_BIAS_SRC 0x10 #define WM8900_REG_ADDCTL_VMID_SOFTST 0x04 #define WM8900_REG_ADDCTL_TEMP_SD 0x02 #define WM8900_REG_GPIO_TEMP_ENA 0x2 #define WM8900_REG_POWER1_STARTUP_BIAS_ENA 0x0100 #define WM8900_REG_POWER1_BIAS_ENA 0x0008 #define WM8900_REG_POWER1_VMID_BUF_ENA 0x0004 #define WM8900_REG_POWER1_FLL_ENA 0x0040 #define WM8900_REG_POWER2_SYSCLK_ENA 0x8000 #define WM8900_REG_POWER2_ADCL_ENA 0x0002 #define WM8900_REG_POWER2_ADCR_ENA 0x0001 #define WM8900_REG_POWER3_DACL_ENA 0x0002 #define WM8900_REG_POWER3_DACR_ENA 0x0001 #define WM8900_REG_AUDIO1_AIF_FMT_MASK 0x0018 #define WM8900_REG_AUDIO1_LRCLK_INV 0x0080 #define WM8900_REG_AUDIO1_BCLK_INV 0x0100 #define WM8900_REG_CLOCKING1_BCLK_DIR 0x1 #define WM8900_REG_CLOCKING1_MCLK_SRC 0x100 #define WM8900_REG_CLOCKING1_BCLK_MASK (~0x01e) #define WM8900_REG_CLOCKING1_OPCLK_MASK (~0x7000) #define WM8900_REG_CLOCKING2_ADC_CLKDIV 0xe0 #define WM8900_REG_CLOCKING2_DAC_CLKDIV 0x1c #define WM8900_REG_DACCTRL_MUTE 0x004 #define WM8900_REG_DACCTRL_DAC_SB_FILT 0x100 #define WM8900_REG_DACCTRL_AIF_LRCLKRATE 
0x400 #define WM8900_REG_AUDIO3_ADCLRC_DIR 0x0800 #define WM8900_REG_AUDIO4_DACLRC_DIR 0x0800 #define WM8900_REG_FLLCTL1_OSC_ENA 0x100 #define WM8900_REG_FLLCTL6_FLL_SLOW_LOCK_REF 0x100 #define WM8900_REG_HPCTL1_HP_IPSTAGE_ENA 0x80 #define WM8900_REG_HPCTL1_HP_OPSTAGE_ENA 0x40 #define WM8900_REG_HPCTL1_HP_CLAMP_IP 0x20 #define WM8900_REG_HPCTL1_HP_CLAMP_OP 0x10 #define WM8900_REG_HPCTL1_HP_SHORT 0x08 #define WM8900_REG_HPCTL1_HP_SHORT2 0x04 #define WM8900_LRC_MASK 0xfc00 struct snd_soc_codec_device soc_codec_dev_wm8900; struct wm8900_priv { struct snd_soc_codec codec; u16 reg_cache[WM8900_MAXREG]; u32 fll_in; /* FLL input frequency */ u32 fll_out; /* FLL output frequency */ }; /* * wm8900 register cache. We can't read the entire register space and we * have slow control buses so we cache the registers. */ static const u16 wm8900_reg_defaults[WM8900_MAXREG] = { 0x8900, 0x0000, 0xc000, 0x0000, 0x4050, 0x4000, 0x0008, 0x0000, 0x0040, 0x0040, 0x1004, 0x00c0, 0x00c0, 0x0000, 0x0100, 0x00c0, 0x00c0, 0x0000, 0xb001, 0x0000, 0x0000, 0x0044, 0x004c, 0x004c, 0x0044, 0x0044, 0x0000, 0x0044, 0x0000, 0x0000, 0x0002, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0008, 0x0000, 0x0000, 0x0008, 0x0097, 0x0100, 0x0000, 0x0000, 0x0050, 0x0050, 0x0055, 0x0055, 0x0055, 0x0000, 0x0000, 0x0079, 0x0079, 0x0079, 0x0079, 0x0000, /* Remaining registers all zero */ }; static int wm8900_volatile_register(unsigned int reg) { switch (reg) { case WM8900_REG_ID: case WM8900_REG_POWER1: return 1; default: return 0; } } static void wm8900_reset(struct snd_soc_codec *codec) { snd_soc_write(codec, WM8900_REG_RESET, 0); memcpy(codec->reg_cache, wm8900_reg_defaults, sizeof(wm8900_reg_defaults)); } static int wm8900_hp_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_codec *codec = w->codec; u16 hpctl1 = snd_soc_read(codec, WM8900_REG_HPCTL1); switch (event) { case SND_SOC_DAPM_PRE_PMU: /* Clamp headphone outputs */ hpctl1 = WM8900_REG_HPCTL1_HP_CLAMP_IP | WM8900_REG_HPCTL1_HP_CLAMP_OP; snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1); break; case SND_SOC_DAPM_POST_PMU: /* Enable the input stage */ hpctl1 &= ~WM8900_REG_HPCTL1_HP_CLAMP_IP; hpctl1 |= WM8900_REG_HPCTL1_HP_SHORT | WM8900_REG_HPCTL1_HP_SHORT2 | WM8900_REG_HPCTL1_HP_IPSTAGE_ENA; snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1); msleep(400); /* Enable the output stage */ hpctl1 &= ~WM8900_REG_HPCTL1_HP_CLAMP_OP; hpctl1 |= WM8900_REG_HPCTL1_HP_OPSTAGE_ENA; snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1); /* Remove the shorts */ hpctl1 &= ~WM8900_REG_HPCTL1_HP_SHORT2; snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1); hpctl1 &= ~WM8900_REG_HPCTL1_HP_SHORT; snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1); break; case SND_SOC_DAPM_PRE_PMD: /* Short the output */ hpctl1 |= WM8900_REG_HPCTL1_HP_SHORT; snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1); /* Disable the output stage */ hpctl1 &= ~WM8900_REG_HPCTL1_HP_OPSTAGE_ENA; snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1); /* Clamp the outputs and power down input */ hpctl1 |= WM8900_REG_HPCTL1_HP_CLAMP_IP | WM8900_REG_HPCTL1_HP_CLAMP_OP; hpctl1 &= ~WM8900_REG_HPCTL1_HP_IPSTAGE_ENA; snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1); break; case SND_SOC_DAPM_POST_PMD: /* Disable everything */ snd_soc_write(codec, WM8900_REG_HPCTL1, 0); break; default: BUG(); } return 0; } static const DECLARE_TLV_DB_SCALE(out_pga_tlv, -5700, 100, 0); static const DECLARE_TLV_DB_SCALE(out_mix_tlv, -1500, 300, 0); static const DECLARE_TLV_DB_SCALE(in_boost_tlv, -1200, 600, 0); static const 
DECLARE_TLV_DB_SCALE(in_pga_tlv, -1200, 100, 0); static const DECLARE_TLV_DB_SCALE(dac_boost_tlv, 0, 600, 0); static const DECLARE_TLV_DB_SCALE(dac_tlv, -7200, 75, 1); static const DECLARE_TLV_DB_SCALE(adc_svol_tlv, -3600, 300, 0); static const DECLARE_TLV_DB_SCALE(adc_tlv, -7200, 75, 1); static const char *mic_bias_level_txt[] = { "0.9*AVDD", "0.65*AVDD" }; static const struct soc_enum mic_bias_level = SOC_ENUM_SINGLE(WM8900_REG_INCTL, 8, 2, mic_bias_level_txt); static const char *dac_mute_rate_txt[] = { "Fast", "Slow" }; static const struct soc_enum dac_mute_rate = SOC_ENUM_SINGLE(WM8900_REG_DACCTRL, 7, 2, dac_mute_rate_txt); static const char *dac_deemphasis_txt[] = { "Disabled", "32kHz", "44.1kHz", "48kHz" }; static const struct soc_enum dac_deemphasis = SOC_ENUM_SINGLE(WM8900_REG_DACCTRL, 4, 4, dac_deemphasis_txt); static const char *adc_hpf_cut_txt[] = { "Hi-fi mode", "Voice mode 1", "Voice mode 2", "Voice mode 3" }; static const struct soc_enum adc_hpf_cut = SOC_ENUM_SINGLE(WM8900_REG_ADCCTRL, 5, 4, adc_hpf_cut_txt); static const char *lr_txt[] = { "Left", "Right" }; static const struct soc_enum aifl_src = SOC_ENUM_SINGLE(WM8900_REG_AUDIO1, 15, 2, lr_txt); static const struct soc_enum aifr_src = SOC_ENUM_SINGLE(WM8900_REG_AUDIO1, 14, 2, lr_txt); static const struct soc_enum dacl_src = SOC_ENUM_SINGLE(WM8900_REG_AUDIO2, 15, 2, lr_txt); static const struct soc_enum dacr_src = SOC_ENUM_SINGLE(WM8900_REG_AUDIO2, 14, 2, lr_txt); static const char *sidetone_txt[] = { "Disabled", "Left ADC", "Right ADC" }; static const struct soc_enum dacl_sidetone = SOC_ENUM_SINGLE(WM8900_REG_SIDETONE, 2, 3, sidetone_txt); static const struct soc_enum dacr_sidetone = SOC_ENUM_SINGLE(WM8900_REG_SIDETONE, 0, 3, sidetone_txt); static const struct snd_kcontrol_new wm8900_snd_controls[] = { SOC_ENUM("Mic Bias Level", mic_bias_level), SOC_SINGLE_TLV("Left Input PGA Volume", WM8900_REG_LINVOL, 0, 31, 0, in_pga_tlv), SOC_SINGLE("Left Input PGA Switch", WM8900_REG_LINVOL, 6, 1, 1), SOC_SINGLE("Left Input PGA ZC Switch", WM8900_REG_LINVOL, 7, 1, 0), SOC_SINGLE_TLV("Right Input PGA Volume", WM8900_REG_RINVOL, 0, 31, 0, in_pga_tlv), SOC_SINGLE("Right Input PGA Switch", WM8900_REG_RINVOL, 6, 1, 1), SOC_SINGLE("Right Input PGA ZC Switch", WM8900_REG_RINVOL, 7, 1, 0), SOC_SINGLE("DAC Soft Mute Switch", WM8900_REG_DACCTRL, 6, 1, 1), SOC_ENUM("DAC Mute Rate", dac_mute_rate), SOC_SINGLE("DAC Mono Switch", WM8900_REG_DACCTRL, 9, 1, 0), SOC_ENUM("DAC Deemphasis", dac_deemphasis), SOC_SINGLE("DAC Sigma-Delta Modulator Clock Switch", WM8900_REG_DACCTRL, 12, 1, 0), SOC_SINGLE("ADC HPF Switch", WM8900_REG_ADCCTRL, 8, 1, 0), SOC_ENUM("ADC HPF Cut-Off", adc_hpf_cut), SOC_DOUBLE("ADC Invert Switch", WM8900_REG_ADCCTRL, 1, 0, 1, 0), SOC_SINGLE_TLV("Left ADC Sidetone Volume", WM8900_REG_SIDETONE, 9, 12, 0, adc_svol_tlv), SOC_SINGLE_TLV("Right ADC Sidetone Volume", WM8900_REG_SIDETONE, 5, 12, 0, adc_svol_tlv), SOC_ENUM("Left Digital Audio Source", aifl_src), SOC_ENUM("Right Digital Audio Source", aifr_src), SOC_SINGLE_TLV("DAC Input Boost Volume", WM8900_REG_AUDIO2, 10, 4, 0, dac_boost_tlv), SOC_ENUM("Left DAC Source", dacl_src), SOC_ENUM("Right DAC Source", dacr_src), SOC_ENUM("Left DAC Sidetone", dacl_sidetone), SOC_ENUM("Right DAC Sidetone", dacr_sidetone), SOC_DOUBLE("DAC Invert Switch", WM8900_REG_DACCTRL, 1, 0, 1, 0), SOC_DOUBLE_R_TLV("Digital Playback Volume", WM8900_REG_LDAC_DV, WM8900_REG_RDAC_DV, 1, 96, 0, dac_tlv), SOC_DOUBLE_R_TLV("Digital Capture Volume", WM8900_REG_LADC_DV, WM8900_REG_RADC_DV, 1, 119, 0, adc_tlv), 
SOC_SINGLE_TLV("LINPUT3 Bypass Volume", WM8900_REG_LOUTMIXCTL1, 4, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("RINPUT3 Bypass Volume", WM8900_REG_ROUTMIXCTL1, 4, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("Left AUX Bypass Volume", WM8900_REG_AUXOUT_CTL, 4, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("Right AUX Bypass Volume", WM8900_REG_AUXOUT_CTL, 0, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("LeftIn to RightOut Mixer Volume", WM8900_REG_BYPASS1, 0, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("LeftIn to LeftOut Mixer Volume", WM8900_REG_BYPASS1, 4, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("RightIn to LeftOut Mixer Volume", WM8900_REG_BYPASS2, 0, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("RightIn to RightOut Mixer Volume", WM8900_REG_BYPASS2, 4, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("IN2L Boost Volume", WM8900_REG_INBOOSTMIX1, 0, 3, 0, in_boost_tlv), SOC_SINGLE_TLV("IN3L Boost Volume", WM8900_REG_INBOOSTMIX1, 4, 3, 0, in_boost_tlv), SOC_SINGLE_TLV("IN2R Boost Volume", WM8900_REG_INBOOSTMIX2, 0, 3, 0, in_boost_tlv), SOC_SINGLE_TLV("IN3R Boost Volume", WM8900_REG_INBOOSTMIX2, 4, 3, 0, in_boost_tlv), SOC_SINGLE_TLV("Left AUX Boost Volume", WM8900_REG_AUXBOOST, 4, 3, 0, in_boost_tlv), SOC_SINGLE_TLV("Right AUX Boost Volume", WM8900_REG_AUXBOOST, 0, 3, 0, in_boost_tlv), SOC_DOUBLE_R_TLV("LINEOUT1 Volume", WM8900_REG_LOUT1CTL, WM8900_REG_ROUT1CTL, 0, 63, 0, out_pga_tlv), SOC_DOUBLE_R("LINEOUT1 Switch", WM8900_REG_LOUT1CTL, WM8900_REG_ROUT1CTL, 6, 1, 1), SOC_DOUBLE_R("LINEOUT1 ZC Switch", WM8900_REG_LOUT1CTL, WM8900_REG_ROUT1CTL, 7, 1, 0), SOC_DOUBLE_R_TLV("LINEOUT2 Volume", WM8900_REG_LOUT2CTL, WM8900_REG_ROUT2CTL, 0, 63, 0, out_pga_tlv), SOC_DOUBLE_R("LINEOUT2 Switch", WM8900_REG_LOUT2CTL, WM8900_REG_ROUT2CTL, 6, 1, 1), SOC_DOUBLE_R("LINEOUT2 ZC Switch", WM8900_REG_LOUT2CTL, WM8900_REG_ROUT2CTL, 7, 1, 0), SOC_SINGLE("LINEOUT2 LP -12dB", WM8900_REG_LOUTMIXCTL1, 0, 1, 1), }; static const struct snd_kcontrol_new wm8900_dapm_loutput2_control = SOC_DAPM_SINGLE("LINEOUT2L Switch", WM8900_REG_POWER3, 6, 1, 0); static const struct snd_kcontrol_new wm8900_dapm_routput2_control = SOC_DAPM_SINGLE("LINEOUT2R Switch", WM8900_REG_POWER3, 5, 1, 0); static const struct snd_kcontrol_new wm8900_loutmix_controls[] = { SOC_DAPM_SINGLE("LINPUT3 Bypass Switch", WM8900_REG_LOUTMIXCTL1, 7, 1, 0), SOC_DAPM_SINGLE("AUX Bypass Switch", WM8900_REG_AUXOUT_CTL, 7, 1, 0), SOC_DAPM_SINGLE("Left Input Mixer Switch", WM8900_REG_BYPASS1, 7, 1, 0), SOC_DAPM_SINGLE("Right Input Mixer Switch", WM8900_REG_BYPASS2, 3, 1, 0), SOC_DAPM_SINGLE("DACL Switch", WM8900_REG_LOUTMIXCTL1, 8, 1, 0), }; static const struct snd_kcontrol_new wm8900_routmix_controls[] = { SOC_DAPM_SINGLE("RINPUT3 Bypass Switch", WM8900_REG_ROUTMIXCTL1, 7, 1, 0), SOC_DAPM_SINGLE("AUX Bypass Switch", WM8900_REG_AUXOUT_CTL, 3, 1, 0), SOC_DAPM_SINGLE("Left Input Mixer Switch", WM8900_REG_BYPASS1, 3, 1, 0), SOC_DAPM_SINGLE("Right Input Mixer Switch", WM8900_REG_BYPASS2, 7, 1, 0), SOC_DAPM_SINGLE("DACR Switch", WM8900_REG_ROUTMIXCTL1, 8, 1, 0), }; static const struct snd_kcontrol_new wm8900_linmix_controls[] = { SOC_DAPM_SINGLE("LINPUT2 Switch", WM8900_REG_INBOOSTMIX1, 2, 1, 1), SOC_DAPM_SINGLE("LINPUT3 Switch", WM8900_REG_INBOOSTMIX1, 6, 1, 1), SOC_DAPM_SINGLE("AUX Switch", WM8900_REG_AUXBOOST, 6, 1, 1), SOC_DAPM_SINGLE("Input PGA Switch", WM8900_REG_ADCPATH, 6, 1, 0), }; static const struct snd_kcontrol_new wm8900_rinmix_controls[] = { SOC_DAPM_SINGLE("RINPUT2 Switch", WM8900_REG_INBOOSTMIX2, 2, 1, 1), SOC_DAPM_SINGLE("RINPUT3 Switch", WM8900_REG_INBOOSTMIX2, 6, 1, 1), SOC_DAPM_SINGLE("AUX Switch", WM8900_REG_AUXBOOST, 2, 1, 
1), SOC_DAPM_SINGLE("Input PGA Switch", WM8900_REG_ADCPATH, 2, 1, 0), }; static const struct snd_kcontrol_new wm8900_linpga_controls[] = { SOC_DAPM_SINGLE("LINPUT1 Switch", WM8900_REG_INCTL, 6, 1, 0), SOC_DAPM_SINGLE("LINPUT2 Switch", WM8900_REG_INCTL, 5, 1, 0), SOC_DAPM_SINGLE("LINPUT3 Switch", WM8900_REG_INCTL, 4, 1, 0), }; static const struct snd_kcontrol_new wm8900_rinpga_controls[] = { SOC_DAPM_SINGLE("RINPUT1 Switch", WM8900_REG_INCTL, 2, 1, 0), SOC_DAPM_SINGLE("RINPUT2 Switch", WM8900_REG_INCTL, 1, 1, 0), SOC_DAPM_SINGLE("RINPUT3 Switch", WM8900_REG_INCTL, 0, 1, 0), }; static const char *wm9700_lp_mux[] = { "Disabled", "Enabled" }; static const struct soc_enum wm8900_lineout2_lp_mux = SOC_ENUM_SINGLE(WM8900_REG_LOUTMIXCTL1, 1, 2, wm9700_lp_mux); static const struct snd_kcontrol_new wm8900_lineout2_lp = SOC_DAPM_ENUM("Route", wm8900_lineout2_lp_mux); static const struct snd_soc_dapm_widget wm8900_dapm_widgets[] = { /* Externally visible pins */ SND_SOC_DAPM_OUTPUT("LINEOUT1L"), SND_SOC_DAPM_OUTPUT("LINEOUT1R"), SND_SOC_DAPM_OUTPUT("LINEOUT2L"), SND_SOC_DAPM_OUTPUT("LINEOUT2R"), SND_SOC_DAPM_OUTPUT("HP_L"), SND_SOC_DAPM_OUTPUT("HP_R"), SND_SOC_DAPM_INPUT("RINPUT1"), SND_SOC_DAPM_INPUT("LINPUT1"), SND_SOC_DAPM_INPUT("RINPUT2"), SND_SOC_DAPM_INPUT("LINPUT2"), SND_SOC_DAPM_INPUT("RINPUT3"), SND_SOC_DAPM_INPUT("LINPUT3"), SND_SOC_DAPM_INPUT("AUX"), SND_SOC_DAPM_VMID("VMID"), /* Input */ SND_SOC_DAPM_MIXER("Left Input PGA", WM8900_REG_POWER2, 3, 0, wm8900_linpga_controls, ARRAY_SIZE(wm8900_linpga_controls)), SND_SOC_DAPM_MIXER("Right Input PGA", WM8900_REG_POWER2, 2, 0, wm8900_rinpga_controls, ARRAY_SIZE(wm8900_rinpga_controls)), SND_SOC_DAPM_MIXER("Left Input Mixer", WM8900_REG_POWER2, 5, 0, wm8900_linmix_controls, ARRAY_SIZE(wm8900_linmix_controls)), SND_SOC_DAPM_MIXER("Right Input Mixer", WM8900_REG_POWER2, 4, 0, wm8900_rinmix_controls, ARRAY_SIZE(wm8900_rinmix_controls)), SND_SOC_DAPM_MICBIAS("Mic Bias", WM8900_REG_POWER1, 4, 0), SND_SOC_DAPM_ADC("ADCL", "Left HiFi Capture", WM8900_REG_POWER2, 1, 0), SND_SOC_DAPM_ADC("ADCR", "Right HiFi Capture", WM8900_REG_POWER2, 0, 0), /* Output */ SND_SOC_DAPM_DAC("DACL", "Left HiFi Playback", WM8900_REG_POWER3, 1, 0), SND_SOC_DAPM_DAC("DACR", "Right HiFi Playback", WM8900_REG_POWER3, 0, 0), SND_SOC_DAPM_PGA_E("Headphone Amplifier", WM8900_REG_POWER3, 7, 0, NULL, 0, wm8900_hp_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_PGA("LINEOUT1L PGA", WM8900_REG_POWER2, 8, 0, NULL, 0), SND_SOC_DAPM_PGA("LINEOUT1R PGA", WM8900_REG_POWER2, 7, 0, NULL, 0), SND_SOC_DAPM_MUX("LINEOUT2 LP", SND_SOC_NOPM, 0, 0, &wm8900_lineout2_lp), SND_SOC_DAPM_PGA("LINEOUT2L PGA", WM8900_REG_POWER3, 6, 0, NULL, 0), SND_SOC_DAPM_PGA("LINEOUT2R PGA", WM8900_REG_POWER3, 5, 0, NULL, 0), SND_SOC_DAPM_MIXER("Left Output Mixer", WM8900_REG_POWER3, 3, 0, wm8900_loutmix_controls, ARRAY_SIZE(wm8900_loutmix_controls)), SND_SOC_DAPM_MIXER("Right Output Mixer", WM8900_REG_POWER3, 2, 0, wm8900_routmix_controls, ARRAY_SIZE(wm8900_routmix_controls)), }; /* Target, Path, Source */ static const struct snd_soc_dapm_route audio_map[] = { /* Inputs */ {"Left Input PGA", "LINPUT1 Switch", "LINPUT1"}, {"Left Input PGA", "LINPUT2 Switch", "LINPUT2"}, {"Left Input PGA", "LINPUT3 Switch", "LINPUT3"}, {"Right Input PGA", "RINPUT1 Switch", "RINPUT1"}, {"Right Input PGA", "RINPUT2 Switch", "RINPUT2"}, {"Right Input PGA", "RINPUT3 Switch", "RINPUT3"}, {"Left Input Mixer", "LINPUT2 Switch", "LINPUT2"}, {"Left Input Mixer", "LINPUT3 Switch", 
"LINPUT3"}, {"Left Input Mixer", "AUX Switch", "AUX"}, {"Left Input Mixer", "Input PGA Switch", "Left Input PGA"}, {"Right Input Mixer", "RINPUT2 Switch", "RINPUT2"}, {"Right Input Mixer", "RINPUT3 Switch", "RINPUT3"}, {"Right Input Mixer", "AUX Switch", "AUX"}, {"Right Input Mixer", "Input PGA Switch", "Right Input PGA"}, {"ADCL", NULL, "Left Input Mixer"}, {"ADCR", NULL, "Right Input Mixer"}, /* Outputs */ {"LINEOUT1L", NULL, "LINEOUT1L PGA"}, {"LINEOUT1L PGA", NULL, "Left Output Mixer"}, {"LINEOUT1R", NULL, "LINEOUT1R PGA"}, {"LINEOUT1R PGA", NULL, "Right Output Mixer"}, {"LINEOUT2L PGA", NULL, "Left Output Mixer"}, {"LINEOUT2 LP", "Disabled", "LINEOUT2L PGA"}, {"LINEOUT2 LP", "Enabled", "Left Output Mixer"}, {"LINEOUT2L", NULL, "LINEOUT2 LP"}, {"LINEOUT2R PGA", NULL, "Right Output Mixer"}, {"LINEOUT2 LP", "Disabled", "LINEOUT2R PGA"}, {"LINEOUT2 LP", "Enabled", "Right Output Mixer"}, {"LINEOUT2R", NULL, "LINEOUT2 LP"}, {"Left Output Mixer", "LINPUT3 Bypass Switch", "LINPUT3"}, {"Left Output Mixer", "AUX Bypass Switch", "AUX"}, {"Left Output Mixer", "Left Input Mixer Switch", "Left Input Mixer"}, {"Left Output Mixer", "Right Input Mixer Switch", "Right Input Mixer"}, {"Left Output Mixer", "DACL Switch", "DACL"}, {"Right Output Mixer", "RINPUT3 Bypass Switch", "RINPUT3"}, {"Right Output Mixer", "AUX Bypass Switch", "AUX"}, {"Right Output Mixer", "Left Input Mixer Switch", "Left Input Mixer"}, {"Right Output Mixer", "Right Input Mixer Switch", "Right Input Mixer"}, {"Right Output Mixer", "DACR Switch", "DACR"}, /* Note that the headphone output stage needs to be connected * externally to LINEOUT2 via DC blocking capacitors. Other * configurations are not supported. * * Note also that left and right headphone paths are treated as a * mono path. */ {"Headphone Amplifier", NULL, "LINEOUT2 LP"}, {"Headphone Amplifier", NULL, "LINEOUT2 LP"}, {"HP_L", NULL, "Headphone Amplifier"}, {"HP_R", NULL, "Headphone Amplifier"}, }; static int wm8900_add_widgets(struct snd_soc_codec *codec) { snd_soc_dapm_new_controls(codec, wm8900_dapm_widgets, ARRAY_SIZE(wm8900_dapm_widgets)); snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); return 0; } static int wm8900_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_device *socdev = rtd->socdev; struct snd_soc_codec *codec = socdev->card->codec; u16 reg; reg = snd_soc_read(codec, WM8900_REG_AUDIO1) & ~0x60; switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: break; case SNDRV_PCM_FORMAT_S20_3LE: reg |= 0x20; break; case SNDRV_PCM_FORMAT_S24_LE: reg |= 0x40; break; case SNDRV_PCM_FORMAT_S32_LE: reg |= 0x60; break; default: return -EINVAL; } snd_soc_write(codec, WM8900_REG_AUDIO1, reg); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { reg = snd_soc_read(codec, WM8900_REG_DACCTRL); if (params_rate(params) <= 24000) reg |= WM8900_REG_DACCTRL_DAC_SB_FILT; else reg &= ~WM8900_REG_DACCTRL_DAC_SB_FILT; snd_soc_write(codec, WM8900_REG_DACCTRL, reg); } return 0; } /* FLL divisors */ struct _fll_div { u16 fll_ratio; u16 fllclk_div; u16 fll_slow_lock_ref; u16 n; u16 k; }; /* The size in bits of the FLL divide multiplied by 10 * to allow rounding later */ #define FIXED_FLL_SIZE ((1 << 16) * 10) static int fll_factors(struct _fll_div *fll_div, unsigned int Fref, unsigned int Fout) { u64 Kpart; unsigned int K, Ndiv, Nmod, target; unsigned int div; BUG_ON(!Fout); /* The FLL must run at 90-100MHz which is then scaled 
down to * the output value by FLLCLK_DIV. */ target = Fout; div = 1; while (target < 90000000) { div *= 2; target *= 2; } if (target > 100000000) printk(KERN_WARNING "wm8900: FLL rate %u out of range, Fref=%u" " Fout=%u\n", target, Fref, Fout); if (div > 32) { printk(KERN_ERR "wm8900: Invalid FLL division rate %u, " "Fref=%u, Fout=%u, target=%u\n", div, Fref, Fout, target); return -EINVAL; } fll_div->fllclk_div = div >> 2; if (Fref < 48000) fll_div->fll_slow_lock_ref = 1; else fll_div->fll_slow_lock_ref = 0; Ndiv = target / Fref; if (Fref < 1000000) fll_div->fll_ratio = 8; else fll_div->fll_ratio = 1; fll_div->n = Ndiv / fll_div->fll_ratio; Nmod = (target / fll_div->fll_ratio) % Fref; /* Calculate fractional part - scale up so we can round. */ Kpart = FIXED_FLL_SIZE * (long long)Nmod; do_div(Kpart, Fref); K = Kpart & 0xFFFFFFFF; if ((K % 10) >= 5) K += 5; /* Move down to proper range now rounding is done */ fll_div->k = K / 10; BUG_ON(target != Fout * (fll_div->fllclk_div << 2)); BUG_ON(!K && target != Fref * fll_div->fll_ratio * fll_div->n); return 0; } static int wm8900_set_fll(struct snd_soc_codec *codec, int fll_id, unsigned int freq_in, unsigned int freq_out) { struct wm8900_priv *wm8900 = snd_soc_codec_get_drvdata(codec); struct _fll_div fll_div; unsigned int reg; if (wm8900->fll_in == freq_in && wm8900->fll_out == freq_out) return 0; /* The digital side should be disabled during any change. */ reg = snd_soc_read(codec, WM8900_REG_POWER1); snd_soc_write(codec, WM8900_REG_POWER1, reg & (~WM8900_REG_POWER1_FLL_ENA)); /* Disable the FLL? */ if (!freq_in || !freq_out) { reg = snd_soc_read(codec, WM8900_REG_CLOCKING1); snd_soc_write(codec, WM8900_REG_CLOCKING1, reg & (~WM8900_REG_CLOCKING1_MCLK_SRC)); reg = snd_soc_read(codec, WM8900_REG_FLLCTL1); snd_soc_write(codec, WM8900_REG_FLLCTL1, reg & (~WM8900_REG_FLLCTL1_OSC_ENA)); wm8900->fll_in = freq_in; wm8900->fll_out = freq_out; return 0; } if (fll_factors(&fll_div, freq_in, freq_out) != 0) goto reenable; wm8900->fll_in = freq_in; wm8900->fll_out = freq_out; /* The oscillator *MUST* be enabled before we enable the * digital circuit.
*/ snd_soc_write(codec, WM8900_REG_FLLCTL1, fll_div.fll_ratio | WM8900_REG_FLLCTL1_OSC_ENA); snd_soc_write(codec, WM8900_REG_FLLCTL4, fll_div.n >> 5); snd_soc_write(codec, WM8900_REG_FLLCTL5, (fll_div.fllclk_div << 6) | (fll_div.n & 0x1f)); if (fll_div.k) { snd_soc_write(codec, WM8900_REG_FLLCTL2, (fll_div.k >> 8) | 0x100); snd_soc_write(codec, WM8900_REG_FLLCTL3, fll_div.k & 0xff); } else snd_soc_write(codec, WM8900_REG_FLLCTL2, 0); if (fll_div.fll_slow_lock_ref) snd_soc_write(codec, WM8900_REG_FLLCTL6, WM8900_REG_FLLCTL6_FLL_SLOW_LOCK_REF); else snd_soc_write(codec, WM8900_REG_FLLCTL6, 0); reg = snd_soc_read(codec, WM8900_REG_POWER1); snd_soc_write(codec, WM8900_REG_POWER1, reg | WM8900_REG_POWER1_FLL_ENA); reenable: reg = snd_soc_read(codec, WM8900_REG_CLOCKING1); snd_soc_write(codec, WM8900_REG_CLOCKING1, reg | WM8900_REG_CLOCKING1_MCLK_SRC); return 0; } static int wm8900_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id, int source, unsigned int freq_in, unsigned int freq_out) { return wm8900_set_fll(codec_dai->codec, pll_id, freq_in, freq_out); } static int wm8900_set_dai_clkdiv(struct snd_soc_dai *codec_dai, int div_id, int div) { struct snd_soc_codec *codec = codec_dai->codec; unsigned int reg; switch (div_id) { case WM8900_BCLK_DIV: reg = snd_soc_read(codec, WM8900_REG_CLOCKING1); snd_soc_write(codec, WM8900_REG_CLOCKING1, div | (reg & WM8900_REG_CLOCKING1_BCLK_MASK)); break; case WM8900_OPCLK_DIV: reg = snd_soc_read(codec, WM8900_REG_CLOCKING1); snd_soc_write(codec, WM8900_REG_CLOCKING1, div | (reg & WM8900_REG_CLOCKING1_OPCLK_MASK)); break; case WM8900_DAC_LRCLK: reg = snd_soc_read(codec, WM8900_REG_AUDIO4); snd_soc_write(codec, WM8900_REG_AUDIO4, div | (reg & WM8900_LRC_MASK)); break; case WM8900_ADC_LRCLK: reg = snd_soc_read(codec, WM8900_REG_AUDIO3); snd_soc_write(codec, WM8900_REG_AUDIO3, div | (reg & WM8900_LRC_MASK)); break; case WM8900_DAC_CLKDIV: reg = snd_soc_read(codec, WM8900_REG_CLOCKING2); snd_soc_write(codec, WM8900_REG_CLOCKING2, div | (reg & WM8900_REG_CLOCKING2_DAC_CLKDIV)); break; case WM8900_ADC_CLKDIV: reg = snd_soc_read(codec, WM8900_REG_CLOCKING2); snd_soc_write(codec, WM8900_REG_CLOCKING2, div | (reg & WM8900_REG_CLOCKING2_ADC_CLKDIV)); break; case WM8900_LRCLK_MODE: reg = snd_soc_read(codec, WM8900_REG_DACCTRL); snd_soc_write(codec, WM8900_REG_DACCTRL, div | (reg & WM8900_REG_DACCTRL_AIF_LRCLKRATE)); break; default: return -EINVAL; } return 0; } static int wm8900_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; unsigned int clocking1, aif1, aif3, aif4; clocking1 = snd_soc_read(codec, WM8900_REG_CLOCKING1); aif1 = snd_soc_read(codec, WM8900_REG_AUDIO1); aif3 = snd_soc_read(codec, WM8900_REG_AUDIO3); aif4 = snd_soc_read(codec, WM8900_REG_AUDIO4); /* set master/slave audio interface */ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBS_CFS: clocking1 &= ~WM8900_REG_CLOCKING1_BCLK_DIR; aif3 &= ~WM8900_REG_AUDIO3_ADCLRC_DIR; aif4 &= ~WM8900_REG_AUDIO4_DACLRC_DIR; break; case SND_SOC_DAIFMT_CBS_CFM: clocking1 &= ~WM8900_REG_CLOCKING1_BCLK_DIR; aif3 |= WM8900_REG_AUDIO3_ADCLRC_DIR; aif4 |= WM8900_REG_AUDIO4_DACLRC_DIR; break; case SND_SOC_DAIFMT_CBM_CFM: clocking1 |= WM8900_REG_CLOCKING1_BCLK_DIR; aif3 |= WM8900_REG_AUDIO3_ADCLRC_DIR; aif4 |= WM8900_REG_AUDIO4_DACLRC_DIR; break; case SND_SOC_DAIFMT_CBM_CFS: clocking1 |= WM8900_REG_CLOCKING1_BCLK_DIR; aif3 &= ~WM8900_REG_AUDIO3_ADCLRC_DIR; aif4 &= ~WM8900_REG_AUDIO4_DACLRC_DIR; break; default: return -EINVAL; } switch (fmt & 
SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_DSP_A: aif1 |= WM8900_REG_AUDIO1_AIF_FMT_MASK; aif1 &= ~WM8900_REG_AUDIO1_LRCLK_INV; break; case SND_SOC_DAIFMT_DSP_B: aif1 |= WM8900_REG_AUDIO1_AIF_FMT_MASK; aif1 |= WM8900_REG_AUDIO1_LRCLK_INV; break; case SND_SOC_DAIFMT_I2S: aif1 &= ~WM8900_REG_AUDIO1_AIF_FMT_MASK; aif1 |= 0x10; break; case SND_SOC_DAIFMT_RIGHT_J: aif1 &= ~WM8900_REG_AUDIO1_AIF_FMT_MASK; break; case SND_SOC_DAIFMT_LEFT_J: aif1 &= ~WM8900_REG_AUDIO1_AIF_FMT_MASK; aif1 |= 0x8; break; default: return -EINVAL; } /* Clock inversion */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_DSP_A: case SND_SOC_DAIFMT_DSP_B: /* frame inversion not valid for DSP modes */ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: aif1 &= ~WM8900_REG_AUDIO1_BCLK_INV; break; case SND_SOC_DAIFMT_IB_NF: aif1 |= WM8900_REG_AUDIO1_BCLK_INV; break; default: return -EINVAL; } break; case SND_SOC_DAIFMT_I2S: case SND_SOC_DAIFMT_RIGHT_J: case SND_SOC_DAIFMT_LEFT_J: switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: aif1 &= ~WM8900_REG_AUDIO1_BCLK_INV; aif1 &= ~WM8900_REG_AUDIO1_LRCLK_INV; break; case SND_SOC_DAIFMT_IB_IF: aif1 |= WM8900_REG_AUDIO1_BCLK_INV; aif1 |= WM8900_REG_AUDIO1_LRCLK_INV; break; case SND_SOC_DAIFMT_IB_NF: aif1 |= WM8900_REG_AUDIO1_BCLK_INV; aif1 &= ~WM8900_REG_AUDIO1_LRCLK_INV; break; case SND_SOC_DAIFMT_NB_IF: aif1 &= ~WM8900_REG_AUDIO1_BCLK_INV; aif1 |= WM8900_REG_AUDIO1_LRCLK_INV; break; default: return -EINVAL; } break; default: return -EINVAL; } snd_soc_write(codec, WM8900_REG_CLOCKING1, clocking1); snd_soc_write(codec, WM8900_REG_AUDIO1, aif1); snd_soc_write(codec, WM8900_REG_AUDIO3, aif3); snd_soc_write(codec, WM8900_REG_AUDIO4, aif4); return 0; } static int wm8900_digital_mute(struct snd_soc_dai *codec_dai, int mute) { struct snd_soc_codec *codec = codec_dai->codec; u16 reg; reg = snd_soc_read(codec, WM8900_REG_DACCTRL); if (mute) reg |= WM8900_REG_DACCTRL_MUTE; else reg &= ~WM8900_REG_DACCTRL_MUTE; snd_soc_write(codec, WM8900_REG_DACCTRL, reg); return 0; } #define WM8900_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |\ SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000) #define WM8900_PCM_FORMATS \ (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \ SNDRV_PCM_FMTBIT_S24_LE) static struct snd_soc_dai_ops wm8900_dai_ops = { .hw_params = wm8900_hw_params, .set_clkdiv = wm8900_set_dai_clkdiv, .set_pll = wm8900_set_dai_pll, .set_fmt = wm8900_set_dai_fmt, .digital_mute = wm8900_digital_mute, }; struct snd_soc_dai wm8900_dai = { .name = "WM8900 HiFi", .playback = { .stream_name = "HiFi Playback", .channels_min = 1, .channels_max = 2, .rates = WM8900_RATES, .formats = WM8900_PCM_FORMATS, }, .capture = { .stream_name = "HiFi Capture", .channels_min = 1, .channels_max = 2, .rates = WM8900_RATES, .formats = WM8900_PCM_FORMATS, }, .ops = &wm8900_dai_ops, }; EXPORT_SYMBOL_GPL(wm8900_dai); static int wm8900_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { u16 reg; switch (level) { case SND_SOC_BIAS_ON: /* Enable thermal shutdown */ reg = snd_soc_read(codec, WM8900_REG_GPIO); snd_soc_write(codec, WM8900_REG_GPIO, reg | WM8900_REG_GPIO_TEMP_ENA); reg = snd_soc_read(codec, WM8900_REG_ADDCTL); snd_soc_write(codec, WM8900_REG_ADDCTL, reg | WM8900_REG_ADDCTL_TEMP_SD); break; case SND_SOC_BIAS_PREPARE: break; case SND_SOC_BIAS_STANDBY: /* Charge capacitors if initial power up */ if (codec->bias_level == SND_SOC_BIAS_OFF) { /* STARTUP_BIAS_ENA on */ snd_soc_write(codec, 
WM8900_REG_POWER1, WM8900_REG_POWER1_STARTUP_BIAS_ENA); /* Startup bias mode */ snd_soc_write(codec, WM8900_REG_ADDCTL, WM8900_REG_ADDCTL_BIAS_SRC | WM8900_REG_ADDCTL_VMID_SOFTST); /* VMID 2x50k */ snd_soc_write(codec, WM8900_REG_POWER1, WM8900_REG_POWER1_STARTUP_BIAS_ENA | 0x1); /* Allow capacitors to charge */ schedule_timeout_interruptible(msecs_to_jiffies(400)); /* Enable bias */ snd_soc_write(codec, WM8900_REG_POWER1, WM8900_REG_POWER1_STARTUP_BIAS_ENA | WM8900_REG_POWER1_BIAS_ENA | 0x1); snd_soc_write(codec, WM8900_REG_ADDCTL, 0); snd_soc_write(codec, WM8900_REG_POWER1, WM8900_REG_POWER1_BIAS_ENA | 0x1); } reg = snd_soc_read(codec, WM8900_REG_POWER1); snd_soc_write(codec, WM8900_REG_POWER1, (reg & WM8900_REG_POWER1_FLL_ENA) | WM8900_REG_POWER1_BIAS_ENA | 0x1); snd_soc_write(codec, WM8900_REG_POWER2, WM8900_REG_POWER2_SYSCLK_ENA); snd_soc_write(codec, WM8900_REG_POWER3, 0); break; case SND_SOC_BIAS_OFF: /* Startup bias enable */ reg = snd_soc_read(codec, WM8900_REG_POWER1); snd_soc_write(codec, WM8900_REG_POWER1, reg & WM8900_REG_POWER1_STARTUP_BIAS_ENA); snd_soc_write(codec, WM8900_REG_ADDCTL, WM8900_REG_ADDCTL_BIAS_SRC | WM8900_REG_ADDCTL_VMID_SOFTST); /* Discharge caps */ snd_soc_write(codec, WM8900_REG_POWER1, WM8900_REG_POWER1_STARTUP_BIAS_ENA); schedule_timeout_interruptible(msecs_to_jiffies(500)); /* Remove clamp */ snd_soc_write(codec, WM8900_REG_HPCTL1, 0); /* Power down */ snd_soc_write(codec, WM8900_REG_ADDCTL, 0); snd_soc_write(codec, WM8900_REG_POWER1, 0); snd_soc_write(codec, WM8900_REG_POWER2, 0); snd_soc_write(codec, WM8900_REG_POWER3, 0); /* Need to let things settle before stopping the clock * to ensure that restart works, see "Stopping the * master clock" in the datasheet. */ schedule_timeout_interruptible(msecs_to_jiffies(1)); snd_soc_write(codec, WM8900_REG_POWER2, WM8900_REG_POWER2_SYSCLK_ENA); break; } codec->bias_level = level; return 0; } static int wm8900_suspend(struct platform_device *pdev, pm_message_t state) { struct snd_soc_device *socdev = platform_get_drvdata(pdev); struct snd_soc_codec *codec = socdev->card->codec; struct wm8900_priv *wm8900 = snd_soc_codec_get_drvdata(codec); int fll_out = wm8900->fll_out; int fll_in = wm8900->fll_in; int ret; /* Stop the FLL in an orderly fashion */ ret = wm8900_set_fll(codec, 0, 0, 0); if (ret != 0) { dev_err(&pdev->dev, "Failed to stop FLL\n"); return ret; } wm8900->fll_out = fll_out; wm8900->fll_in = fll_in; wm8900_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static int wm8900_resume(struct platform_device *pdev) { struct snd_soc_device *socdev = platform_get_drvdata(pdev); struct snd_soc_codec *codec = socdev->card->codec; struct wm8900_priv *wm8900 = snd_soc_codec_get_drvdata(codec); u16 *cache; int i, ret; cache = kmemdup(codec->reg_cache, sizeof(wm8900_reg_defaults), GFP_KERNEL); wm8900_reset(codec); wm8900_set_bias_level(codec, SND_SOC_BIAS_STANDBY); /* Restart the FLL? 
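* (The cached fll_in/fll_out are zeroed first so that wm8900_set_fll() does not take its "already configured" early return and actually reprograms the hardware after the reset above.)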
*/ if (wm8900->fll_out) { int fll_out = wm8900->fll_out; int fll_in = wm8900->fll_in; wm8900->fll_in = 0; wm8900->fll_out = 0; ret = wm8900_set_fll(codec, 0, fll_in, fll_out); if (ret != 0) { dev_err(&pdev->dev, "Failed to restart FLL\n"); return ret; } } if (cache) { for (i = 0; i < WM8900_MAXREG; i++) snd_soc_write(codec, i, cache[i]); kfree(cache); } else dev_err(&pdev->dev, "Unable to allocate register cache\n"); return 0; } static struct snd_soc_codec *wm8900_codec; static __devinit int wm8900_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct wm8900_priv *wm8900; struct snd_soc_codec *codec; unsigned int reg; int ret; wm8900 = kzalloc(sizeof(struct wm8900_priv), GFP_KERNEL); if (wm8900 == NULL) return -ENOMEM; codec = &wm8900->codec; snd_soc_codec_set_drvdata(codec, wm8900); codec->reg_cache = &wm8900->reg_cache[0]; codec->reg_cache_size = WM8900_MAXREG; mutex_init(&codec->mutex); INIT_LIST_HEAD(&codec->dapm_widgets); INIT_LIST_HEAD(&codec->dapm_paths); codec->name = "WM8900"; codec->owner = THIS_MODULE; codec->dai = &wm8900_dai; codec->num_dai = 1; codec->control_data = i2c; codec->set_bias_level = wm8900_set_bias_level; codec->volatile_register = wm8900_volatile_register; codec->dev = &i2c->dev; ret = snd_soc_codec_set_cache_io(codec, 8, 16, SND_SOC_I2C); if (ret != 0) { dev_err(&i2c->dev, "Failed to set cache I/O: %d\n", ret); goto err; } reg = snd_soc_read(codec, WM8900_REG_ID); if (reg != 0x8900) { dev_err(&i2c->dev, "Device is not a WM8900 - ID %x\n", reg); ret = -ENODEV; goto err; } /* Read back from the chip */ reg = snd_soc_read(codec, WM8900_REG_POWER1); reg = (reg >> 12) & 0xf; dev_info(&i2c->dev, "WM8900 revision %d\n", reg); wm8900_reset(codec); /* Turn the chip on */ wm8900_set_bias_level(codec, SND_SOC_BIAS_STANDBY); /* Latch the volume update bits */ snd_soc_write(codec, WM8900_REG_LINVOL, snd_soc_read(codec, WM8900_REG_LINVOL) | 0x100); snd_soc_write(codec, WM8900_REG_RINVOL, snd_soc_read(codec, WM8900_REG_RINVOL) | 0x100); snd_soc_write(codec, WM8900_REG_LOUT1CTL, snd_soc_read(codec, WM8900_REG_LOUT1CTL) | 0x100); snd_soc_write(codec, WM8900_REG_ROUT1CTL, snd_soc_read(codec, WM8900_REG_ROUT1CTL) | 0x100); snd_soc_write(codec, WM8900_REG_LOUT2CTL, snd_soc_read(codec, WM8900_REG_LOUT2CTL) | 0x100); snd_soc_write(codec, WM8900_REG_ROUT2CTL, snd_soc_read(codec, WM8900_REG_ROUT2CTL) | 0x100); snd_soc_write(codec, WM8900_REG_LDAC_DV, snd_soc_read(codec, WM8900_REG_LDAC_DV) | 0x100); snd_soc_write(codec, WM8900_REG_RDAC_DV, snd_soc_read(codec, WM8900_REG_RDAC_DV) | 0x100); snd_soc_write(codec, WM8900_REG_LADC_DV, snd_soc_read(codec, WM8900_REG_LADC_DV) | 0x100); snd_soc_write(codec, WM8900_REG_RADC_DV, snd_soc_read(codec, WM8900_REG_RADC_DV) | 0x100); /* Set the DAC and mixer output bias */ snd_soc_write(codec, WM8900_REG_OUTBIASCTL, 0x81); wm8900_dai.dev = &i2c->dev; wm8900_codec = codec; ret = snd_soc_register_codec(codec); if (ret != 0) { dev_err(&i2c->dev, "Failed to register codec: %d\n", ret); goto err; } ret = snd_soc_register_dai(&wm8900_dai); if (ret != 0) { dev_err(&i2c->dev, "Failed to register DAI: %d\n", ret); goto err_codec; } return ret; err_codec: snd_soc_unregister_codec(codec); err: kfree(wm8900); wm8900_codec = NULL; return ret; } static __devexit int wm8900_i2c_remove(struct i2c_client *client) { snd_soc_unregister_dai(&wm8900_dai); snd_soc_unregister_codec(wm8900_codec); wm8900_set_bias_level(wm8900_codec, SND_SOC_BIAS_OFF); wm8900_dai.dev = NULL; kfree(snd_soc_codec_get_drvdata(wm8900_codec)); wm8900_codec = NULL; return 0; } 
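/* * Illustrative sketch, not part of this driver: how a machine driver might * configure this DAI through the callbacks above. The clock numbers are * hypothetical, the runtime field names vary between kernel versions, and * the block is compiled out. */
#if 0
static int example_board_hw_params(struct snd_pcm_substream *substream,
				   struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;
	int ret;

	/* Codec as bit/frame clock master, I2S data, normal polarity */
	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
				  SND_SOC_DAIFMT_NB_NF |
				  SND_SOC_DAIFMT_CBM_CFM);
	if (ret != 0)
		return ret;

	/* Run the FLL from a 13MHz reference to generate 12.288MHz */
	return snd_soc_dai_set_pll(codec_dai, 0, 0, 13000000, 12288000);
}
#endif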
static const struct i2c_device_id wm8900_i2c_id[] = { { "wm8900", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, wm8900_i2c_id); static struct i2c_driver wm8900_i2c_driver = { .driver = { .name = "WM8900", .owner = THIS_MODULE, }, .probe = wm8900_i2c_probe, .remove = __devexit_p(wm8900_i2c_remove), .id_table = wm8900_i2c_id, }; static int wm8900_probe(struct platform_device *pdev) { struct snd_soc_device *socdev = platform_get_drvdata(pdev); struct snd_soc_codec *codec; int ret = 0; if (!wm8900_codec) { dev_err(&pdev->dev, "I2C client not yet instantiated\n"); return -ENODEV; } codec = wm8900_codec; socdev->card->codec = codec; /* Register pcms */ ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1); if (ret < 0) { dev_err(&pdev->dev, "Failed to register new PCMs\n"); goto pcm_err; } snd_soc_add_controls(codec, wm8900_snd_controls, ARRAY_SIZE(wm8900_snd_controls)); wm8900_add_widgets(codec); pcm_err: return ret; } /* power down chip */ static int wm8900_remove(struct platform_device *pdev) { struct snd_soc_device *socdev = platform_get_drvdata(pdev); snd_soc_free_pcms(socdev); snd_soc_dapm_free(socdev); return 0; } struct snd_soc_codec_device soc_codec_dev_wm8900 = { .probe = wm8900_probe, .remove = wm8900_remove, .suspend = wm8900_suspend, .resume = wm8900_resume, }; EXPORT_SYMBOL_GPL(soc_codec_dev_wm8900); static int __init wm8900_modinit(void) { return i2c_add_driver(&wm8900_i2c_driver); } module_init(wm8900_modinit); static void __exit wm8900_exit(void) { i2c_del_driver(&wm8900_i2c_driver); } module_exit(wm8900_exit); MODULE_DESCRIPTION("ASoC WM8900 driver"); MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); MODULE_LICENSE("GPL");
gpl-2.0
syhost/android_kernel_pantech_ef60s
arch/arm/mach-msm/msm_rtb.c
540
7307
/* * Copyright (c) 2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/atomic.h> #include <linux/export.h> #include <linux/kernel.h> #include <linux/memory_alloc.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/of.h> #include <asm/io.h> #include <asm-generic/sizes.h> #include <mach/memory.h> #include <mach/msm_rtb.h> #include <mach/system.h> #define SENTINEL_BYTE_1 0xFF #define SENTINEL_BYTE_2 0xAA #define SENTINEL_BYTE_3 0xFF #define RTB_COMPAT_STR "qcom,msm-rtb" /* Write * 1) 3 bytes sentinel * 2) 1 byte of log type * 3) 4 bytes of where the caller came from * 4) 4 bytes index * 5) 4 bytes extra data from the caller * * Total = 16 bytes. */ struct msm_rtb_layout { unsigned char sentinel[3]; unsigned char log_type; void *caller; unsigned long idx; void *data; } __attribute__ ((__packed__)); struct msm_rtb_state { struct msm_rtb_layout *rtb; phys_addr_t phys; int nentries; int size; int enabled; int initialized; uint32_t filter; int step_size; }; #if defined(CONFIG_MSM_RTB_SEPARATE_CPUS) DEFINE_PER_CPU(atomic_t, msm_rtb_idx_cpu); #else static atomic_t msm_rtb_idx; #endif struct msm_rtb_state msm_rtb = { .filter = 1 << LOGK_LOGBUF, .enabled = 1, }; module_param_named(filter, msm_rtb.filter, uint, 0644); module_param_named(enable, msm_rtb.enabled, int, 0644); static int msm_rtb_panic_notifier(struct notifier_block *this, unsigned long event, void *ptr) { msm_rtb.enabled = 0; return NOTIFY_DONE; } static struct notifier_block msm_rtb_panic_blk = { .notifier_call = msm_rtb_panic_notifier, }; int notrace msm_rtb_event_should_log(enum logk_event_type log_type) { return msm_rtb.initialized && msm_rtb.enabled && ((1 << (log_type & ~LOGTYPE_NOPC)) & msm_rtb.filter); } EXPORT_SYMBOL(msm_rtb_event_should_log); static void msm_rtb_emit_sentinel(struct msm_rtb_layout *start) { start->sentinel[0] = SENTINEL_BYTE_1; start->sentinel[1] = SENTINEL_BYTE_2; start->sentinel[2] = SENTINEL_BYTE_3; } static void msm_rtb_write_type(enum logk_event_type log_type, struct msm_rtb_layout *start) { start->log_type = (char)log_type; } static void msm_rtb_write_caller(void *caller, struct msm_rtb_layout *start) { start->caller = caller; } static void msm_rtb_write_idx(unsigned long idx, struct msm_rtb_layout *start) { start->idx = idx; } static void msm_rtb_write_data(void *data, struct msm_rtb_layout *start) { start->data = data; } static void uncached_logk_pc_idx(enum logk_event_type log_type, void *caller, void *data, int idx) { struct msm_rtb_layout *start; start = &msm_rtb.rtb[idx & (msm_rtb.nentries - 1)]; msm_rtb_emit_sentinel(start); msm_rtb_write_type(log_type, start); msm_rtb_write_caller(caller, start); msm_rtb_write_idx(idx, start); msm_rtb_write_data(data, start); mb(); return; } static void uncached_logk_timestamp(int idx) { unsigned long long timestamp; void *timestamp_upper, *timestamp_lower; timestamp = sched_clock(); timestamp_lower = (void *)lower_32_bits(timestamp); 
timestamp_upper = (void *)upper_32_bits(timestamp); uncached_logk_pc_idx(LOGK_TIMESTAMP|LOGTYPE_NOPC, timestamp_lower, timestamp_upper, idx); } #if defined(CONFIG_MSM_RTB_SEPARATE_CPUS) static int msm_rtb_get_idx(void) { int cpu, i, offset; atomic_t *index; /* * ideally we would use get_cpu but this is a close enough * approximation for our purposes. */ cpu = raw_smp_processor_id(); index = &per_cpu(msm_rtb_idx_cpu, cpu); i = atomic_add_return(msm_rtb.step_size, index); i -= msm_rtb.step_size; /* Check if index has wrapped around */ offset = (i & (msm_rtb.nentries - 1)) - ((i - msm_rtb.step_size) & (msm_rtb.nentries - 1)); if (offset < 0) { uncached_logk_timestamp(i); i = atomic_add_return(msm_rtb.step_size, index); i -= msm_rtb.step_size; } return i; } #else static int msm_rtb_get_idx(void) { int i, offset; i = atomic_inc_return(&msm_rtb_idx); i--; /* Check if index has wrapped around */ offset = (i & (msm_rtb.nentries - 1)) - ((i - 1) & (msm_rtb.nentries - 1)); if (offset < 0) { uncached_logk_timestamp(i); i = atomic_inc_return(&msm_rtb_idx); i--; } return i; } #endif int notrace uncached_logk_pc(enum logk_event_type log_type, void *caller, void *data) { int i; if (!msm_rtb_event_should_log(log_type)) return 0; i = msm_rtb_get_idx(); uncached_logk_pc_idx(log_type, caller, data, i); return 1; } EXPORT_SYMBOL(uncached_logk_pc); noinline int notrace uncached_logk(enum logk_event_type log_type, void *data) { return uncached_logk_pc(log_type, __builtin_return_address(0), data); } EXPORT_SYMBOL(uncached_logk); int msm_rtb_probe(struct platform_device *pdev) { struct msm_rtb_platform_data *d = pdev->dev.platform_data; #if defined(CONFIG_MSM_RTB_SEPARATE_CPUS) unsigned int cpu; #endif int ret; if (!pdev->dev.of_node) { msm_rtb.size = d->size; } else { int size; ret = of_property_read_u32((&pdev->dev)->of_node, "qcom,memory-reservation-size", &size); if (ret < 0) return ret; msm_rtb.size = size; } if (msm_rtb.size <= 0 || msm_rtb.size > SZ_1M) return -EINVAL; /* * The ioremap call is made separately to store the physical * address of the buffer. This is necessary for cases where * the only way to access the buffer is a physical address. */ msm_rtb.phys = allocate_contiguous_ebi_nomap(msm_rtb.size, SZ_4K); if (!msm_rtb.phys) return -ENOMEM; msm_rtb.rtb = ioremap(msm_rtb.phys, msm_rtb.size); if (!msm_rtb.rtb) { free_contiguous_memory_by_paddr(msm_rtb.phys); return -ENOMEM; } msm_rtb.nentries = msm_rtb.size / sizeof(struct msm_rtb_layout); /* Round this down to a power of 2 */ msm_rtb.nentries = __rounddown_pow_of_two(msm_rtb.nentries); memset(msm_rtb.rtb, 0, msm_rtb.size); #if defined(CONFIG_MSM_RTB_SEPARATE_CPUS) for_each_possible_cpu(cpu) { atomic_t *a = &per_cpu(msm_rtb_idx_cpu, cpu); atomic_set(a, cpu); } msm_rtb.step_size = num_possible_cpus(); #else atomic_set(&msm_rtb_idx, 0); msm_rtb.step_size = 1; #endif atomic_notifier_chain_register(&panic_notifier_list, &msm_rtb_panic_blk); msm_rtb.initialized = 1; return 0; } static struct of_device_id msm_match_table[] = { {.compatible = RTB_COMPAT_STR}, {}, }; EXPORT_COMPAT(RTB_COMPAT_STR); static struct platform_driver msm_rtb_driver = { .driver = { .name = "msm_rtb", .owner = THIS_MODULE, .of_match_table = msm_match_table }, }; static int __init msm_rtb_init(void) { return platform_driver_probe(&msm_rtb_driver, msm_rtb_probe); } static void __exit msm_rtb_exit(void) { platform_driver_unregister(&msm_rtb_driver); } module_init(msm_rtb_init) module_exit(msm_rtb_exit)
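/* * Illustrative sketch, not part of this driver: the wrap-around test used by * msm_rtb_get_idx() above. nentries is a power of two, so "idx & (nentries - 1)" * is the position in the ring; that position only moves backwards when an * atomic_add_return() crossed the end of the buffer, which is exactly when a * fresh timestamp entry gets logged. */
static inline int example_rtb_wrapped(int i, int step, int nentries)
{
	/* e.g. nentries = 8, step = 1: i = 8 gives (0 - 7) < 0, a wrap */
	return ((i & (nentries - 1)) - ((i - step) & (nentries - 1))) < 0;
}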
gpl-2.0
joolswills/linux
drivers/usb/gadget/function/f_serial.c
796
10611
/* * f_serial.c - generic USB serial function driver * * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com) * Copyright (C) 2008 by David Brownell * Copyright (C) 2008 by Nokia Corporation * * This software is distributed under the terms of the GNU General * Public License ("GPL") as published by the Free Software Foundation, * either version 2 of that License or (at your option) any later version. */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include "u_serial.h" #include "gadget_chips.h" /* * This function packages a simple "generic serial" port with no real * control mechanisms, just raw data transfer over two bulk endpoints. * * Because it's not standardized, this isn't as interoperable as the * CDC ACM driver. However, for many purposes it's just as functional * if you can arrange appropriate host side drivers. */ struct f_gser { struct gserial port; u8 data_id; u8 port_num; }; static inline struct f_gser *func_to_gser(struct usb_function *f) { return container_of(f, struct f_gser, port.func); } /*-------------------------------------------------------------------------*/ /* interface descriptor: */ static struct usb_interface_descriptor gser_interface_desc = { .bLength = USB_DT_INTERFACE_SIZE, .bDescriptorType = USB_DT_INTERFACE, /* .bInterfaceNumber = DYNAMIC */ .bNumEndpoints = 2, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = 0, .bInterfaceProtocol = 0, /* .iInterface = DYNAMIC */ }; /* full speed support: */ static struct usb_endpoint_descriptor gser_fs_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, }; static struct usb_endpoint_descriptor gser_fs_out_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, }; static struct usb_descriptor_header *gser_fs_function[] = { (struct usb_descriptor_header *) &gser_interface_desc, (struct usb_descriptor_header *) &gser_fs_in_desc, (struct usb_descriptor_header *) &gser_fs_out_desc, NULL, }; /* high speed support: */ static struct usb_endpoint_descriptor gser_hs_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(512), }; static struct usb_endpoint_descriptor gser_hs_out_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(512), }; static struct usb_descriptor_header *gser_hs_function[] = { (struct usb_descriptor_header *) &gser_interface_desc, (struct usb_descriptor_header *) &gser_hs_in_desc, (struct usb_descriptor_header *) &gser_hs_out_desc, NULL, }; static struct usb_endpoint_descriptor gser_ss_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(1024), }; static struct usb_endpoint_descriptor gser_ss_out_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(1024), }; static struct usb_ss_ep_comp_descriptor gser_ss_bulk_comp_desc = { .bLength = sizeof gser_ss_bulk_comp_desc, .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, }; static struct usb_descriptor_header *gser_ss_function[] = { (struct usb_descriptor_header *) &gser_interface_desc, (struct usb_descriptor_header *) 
&gser_ss_in_desc, (struct usb_descriptor_header *) &gser_ss_bulk_comp_desc, (struct usb_descriptor_header *) &gser_ss_out_desc, (struct usb_descriptor_header *) &gser_ss_bulk_comp_desc, NULL, }; /* string descriptors: */ static struct usb_string gser_string_defs[] = { [0].s = "Generic Serial", { } /* end of list */ }; static struct usb_gadget_strings gser_string_table = { .language = 0x0409, /* en-us */ .strings = gser_string_defs, }; static struct usb_gadget_strings *gser_strings[] = { &gser_string_table, NULL, }; /*-------------------------------------------------------------------------*/ static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt) { struct f_gser *gser = func_to_gser(f); struct usb_composite_dev *cdev = f->config->cdev; /* we know alt == 0, so this is an activation or a reset */ if (gser->port.in->driver_data) { dev_dbg(&cdev->gadget->dev, "reset generic ttyGS%d\n", gser->port_num); gserial_disconnect(&gser->port); } if (!gser->port.in->desc || !gser->port.out->desc) { dev_dbg(&cdev->gadget->dev, "activate generic ttyGS%d\n", gser->port_num); if (config_ep_by_speed(cdev->gadget, f, gser->port.in) || config_ep_by_speed(cdev->gadget, f, gser->port.out)) { gser->port.in->desc = NULL; gser->port.out->desc = NULL; return -EINVAL; } } gserial_connect(&gser->port, gser->port_num); return 0; } static void gser_disable(struct usb_function *f) { struct f_gser *gser = func_to_gser(f); struct usb_composite_dev *cdev = f->config->cdev; dev_dbg(&cdev->gadget->dev, "generic ttyGS%d deactivated\n", gser->port_num); gserial_disconnect(&gser->port); } /*-------------------------------------------------------------------------*/ /* serial function driver setup/binding */ static int gser_bind(struct usb_configuration *c, struct usb_function *f) { struct usb_composite_dev *cdev = c->cdev; struct f_gser *gser = func_to_gser(f); int status; struct usb_ep *ep; /* REVISIT might want instance-specific strings to help * distinguish instances ... */ /* maybe allocate device-global string ID */ if (gser_string_defs[0].id == 0) { status = usb_string_id(c->cdev); if (status < 0) return status; gser_string_defs[0].id = status; } /* allocate instance-specific interface IDs */ status = usb_interface_id(c, f); if (status < 0) goto fail; gser->data_id = status; gser_interface_desc.bInterfaceNumber = status; status = -ENODEV; /* allocate instance-specific endpoints */ ep = usb_ep_autoconfig(cdev->gadget, &gser_fs_in_desc); if (!ep) goto fail; gser->port.in = ep; ep->driver_data = cdev; /* claim */ ep = usb_ep_autoconfig(cdev->gadget, &gser_fs_out_desc); if (!ep) goto fail; gser->port.out = ep; ep->driver_data = cdev; /* claim */ /* support all relevant hardware speeds... we expect that when * hardware is dual speed, all bulk-capable endpoints work at * both speeds */ gser_hs_in_desc.bEndpointAddress = gser_fs_in_desc.bEndpointAddress; gser_hs_out_desc.bEndpointAddress = gser_fs_out_desc.bEndpointAddress; gser_ss_in_desc.bEndpointAddress = gser_fs_in_desc.bEndpointAddress; gser_ss_out_desc.bEndpointAddress = gser_fs_out_desc.bEndpointAddress; status = usb_assign_descriptors(f, gser_fs_function, gser_hs_function, gser_ss_function); if (status) goto fail; dev_dbg(&cdev->gadget->dev, "generic ttyGS%d: %s speed IN/%s OUT/%s\n", gser->port_num, gadget_is_superspeed(c->cdev->gadget) ? "super" : gadget_is_dualspeed(c->cdev->gadget) ? 
"dual" : "full", gser->port.in->name, gser->port.out->name); return 0; fail: /* we might as well release our claims on endpoints */ if (gser->port.out) gser->port.out->driver_data = NULL; if (gser->port.in) gser->port.in->driver_data = NULL; ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); return status; } static inline struct f_serial_opts *to_f_serial_opts(struct config_item *item) { return container_of(to_config_group(item), struct f_serial_opts, func_inst.group); } CONFIGFS_ATTR_STRUCT(f_serial_opts); static ssize_t f_serial_attr_show(struct config_item *item, struct configfs_attribute *attr, char *page) { struct f_serial_opts *opts = to_f_serial_opts(item); struct f_serial_opts_attribute *f_serial_opts_attr = container_of(attr, struct f_serial_opts_attribute, attr); ssize_t ret = 0; if (f_serial_opts_attr->show) ret = f_serial_opts_attr->show(opts, page); return ret; } static void serial_attr_release(struct config_item *item) { struct f_serial_opts *opts = to_f_serial_opts(item); usb_put_function_instance(&opts->func_inst); } static struct configfs_item_operations serial_item_ops = { .release = serial_attr_release, .show_attribute = f_serial_attr_show, }; static ssize_t f_serial_port_num_show(struct f_serial_opts *opts, char *page) { return sprintf(page, "%u\n", opts->port_num); } static struct f_serial_opts_attribute f_serial_port_num = __CONFIGFS_ATTR_RO(port_num, f_serial_port_num_show); static struct configfs_attribute *acm_attrs[] = { &f_serial_port_num.attr, NULL, }; static struct config_item_type serial_func_type = { .ct_item_ops = &serial_item_ops, .ct_attrs = acm_attrs, .ct_owner = THIS_MODULE, }; static void gser_free_inst(struct usb_function_instance *f) { struct f_serial_opts *opts; opts = container_of(f, struct f_serial_opts, func_inst); gserial_free_line(opts->port_num); kfree(opts); } static struct usb_function_instance *gser_alloc_inst(void) { struct f_serial_opts *opts; int ret; opts = kzalloc(sizeof(*opts), GFP_KERNEL); if (!opts) return ERR_PTR(-ENOMEM); opts->func_inst.free_func_inst = gser_free_inst; ret = gserial_alloc_line(&opts->port_num); if (ret) { kfree(opts); return ERR_PTR(ret); } config_group_init_type_name(&opts->func_inst.group, "", &serial_func_type); return &opts->func_inst; } static void gser_free(struct usb_function *f) { struct f_gser *serial; serial = func_to_gser(f); kfree(serial); } static void gser_unbind(struct usb_configuration *c, struct usb_function *f) { usb_free_all_descriptors(f); } static struct usb_function *gser_alloc(struct usb_function_instance *fi) { struct f_gser *gser; struct f_serial_opts *opts; /* allocate and initialize one new instance */ gser = kzalloc(sizeof(*gser), GFP_KERNEL); if (!gser) return ERR_PTR(-ENOMEM); opts = container_of(fi, struct f_serial_opts, func_inst); gser->port_num = opts->port_num; gser->port.func.name = "gser"; gser->port.func.strings = gser_strings; gser->port.func.bind = gser_bind; gser->port.func.unbind = gser_unbind; gser->port.func.set_alt = gser_set_alt; gser->port.func.disable = gser_disable; gser->port.func.free_func = gser_free; return &gser->port.func; } DECLARE_USB_FUNCTION_INIT(gser, gser_alloc_inst, gser_alloc); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Al Borchers"); MODULE_AUTHOR("David Brownell");
gpl-2.0
droidcore/Hydra
drivers/irqchip/irq-sun4i.c
2076
4191
/* * Allwinner A1X SoCs IRQ chip driver. * * Copyright (C) 2012 Maxime Ripard * * Maxime Ripard <maxime.ripard@free-electrons.com> * * Based on code from * Allwinner Technology Co., Ltd. <www.allwinnertech.com> * Benn Huang <benn@allwinnertech.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/io.h> #include <linux/irq.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <asm/exception.h> #include <asm/mach/irq.h> #include "irqchip.h" #define SUN4I_IRQ_VECTOR_REG 0x00 #define SUN4I_IRQ_PROTECTION_REG 0x08 #define SUN4I_IRQ_NMI_CTRL_REG 0x0c #define SUN4I_IRQ_PENDING_REG(x) (0x10 + 0x4 * x) #define SUN4I_IRQ_FIQ_PENDING_REG(x) (0x20 + 0x4 * x) #define SUN4I_IRQ_ENABLE_REG(x) (0x40 + 0x4 * x) #define SUN4I_IRQ_MASK_REG(x) (0x50 + 0x4 * x) static void __iomem *sun4i_irq_base; static struct irq_domain *sun4i_irq_domain; static asmlinkage void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs); void sun4i_irq_ack(struct irq_data *irqd) { unsigned int irq = irqd_to_hwirq(irqd); unsigned int irq_off = irq % 32; int reg = irq / 32; u32 val; val = readl(sun4i_irq_base + SUN4I_IRQ_PENDING_REG(reg)); writel(val | (1 << irq_off), sun4i_irq_base + SUN4I_IRQ_PENDING_REG(reg)); } static void sun4i_irq_mask(struct irq_data *irqd) { unsigned int irq = irqd_to_hwirq(irqd); unsigned int irq_off = irq % 32; int reg = irq / 32; u32 val; val = readl(sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg)); writel(val & ~(1 << irq_off), sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg)); } static void sun4i_irq_unmask(struct irq_data *irqd) { unsigned int irq = irqd_to_hwirq(irqd); unsigned int irq_off = irq % 32; int reg = irq / 32; u32 val; val = readl(sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg)); writel(val | (1 << irq_off), sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg)); } static struct irq_chip sun4i_irq_chip = { .name = "sun4i_irq", .irq_ack = sun4i_irq_ack, .irq_mask = sun4i_irq_mask, .irq_unmask = sun4i_irq_unmask, }; static int sun4i_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw) { irq_set_chip_and_handler(virq, &sun4i_irq_chip, handle_level_irq); set_irq_flags(virq, IRQF_VALID | IRQF_PROBE); return 0; } static struct irq_domain_ops sun4i_irq_ops = { .map = sun4i_irq_map, .xlate = irq_domain_xlate_onecell, }; static int __init sun4i_of_init(struct device_node *node, struct device_node *parent) { sun4i_irq_base = of_iomap(node, 0); if (!sun4i_irq_base) panic("%s: unable to map IC registers\n", node->full_name); /* Disable all interrupts */ writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(0)); writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(1)); writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(2)); /* Mask all the interrupts */ writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(0)); writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(1)); writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(2)); /* Clear all the pending interrupts */ writel(0xffffffff, sun4i_irq_base + SUN4I_IRQ_PENDING_REG(0)); writel(0xffffffff, sun4i_irq_base + SUN4I_IRQ_PENDING_REG(1)); writel(0xffffffff, sun4i_irq_base + SUN4I_IRQ_PENDING_REG(2)); /* Enable protection mode */ writel(0x01, sun4i_irq_base + SUN4I_IRQ_PROTECTION_REG); /* Configure the external interrupt source type */ writel(0x00, sun4i_irq_base + SUN4I_IRQ_NMI_CTRL_REG); sun4i_irq_domain = irq_domain_add_linear(node, 3 * 32, &sun4i_irq_ops, NULL); if (!sun4i_irq_domain) panic("%s: unable 
to create IRQ domain\n", node->full_name); set_handle_irq(sun4i_handle_irq); return 0; } IRQCHIP_DECLARE(allwinner_sun4i_ic, "allwinner,sun4i-ic", sun4i_of_init); static asmlinkage void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs) { u32 irq, hwirq; hwirq = readl(sun4i_irq_base + SUN4I_IRQ_VECTOR_REG) >> 2; while (hwirq != 0) { irq = irq_find_mapping(sun4i_irq_domain, hwirq); handle_IRQ(irq, regs); hwirq = readl(sun4i_irq_base + SUN4I_IRQ_VECTOR_REG) >> 2; } }
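/* * Illustrative note, inferred from sun4i_handle_irq() above rather than taken * from a datasheet: the vector register reports the pending interrupt as a * byte offset into a table of 4-byte vector slots, so ">> 2" converts the * value read into a hardware irq number (a read of 0x54 decodes to hwirq 21), * and a read of 0 terminates the dispatch loop. */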
gpl-2.0
detule/lge-linux-msm
sound/soc/pxa/mmp-pcm.c
2076
6918
/* * linux/sound/soc/pxa/mmp-pcm.c * * Copyright (C) 2011 Marvell International Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/platform_data/dma-mmp_tdma.h> #include <linux/platform_data/mmp_audio.h> #include <sound/pxa2xx-lib.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/dmaengine_pcm.h> struct mmp_dma_data { int ssp_id; struct resource *dma_res; }; #define MMP_PCM_INFO (SNDRV_PCM_INFO_MMAP | \ SNDRV_PCM_INFO_MMAP_VALID | \ SNDRV_PCM_INFO_INTERLEAVED | \ SNDRV_PCM_INFO_PAUSE | \ SNDRV_PCM_INFO_RESUME) #define MMP_PCM_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \ SNDRV_PCM_FMTBIT_S24_LE | \ SNDRV_PCM_FMTBIT_S32_LE) static struct snd_pcm_hardware mmp_pcm_hardware[] = { { .info = MMP_PCM_INFO, .formats = MMP_PCM_FORMATS, .period_bytes_min = 1024, .period_bytes_max = 2048, .periods_min = 2, .periods_max = 32, .buffer_bytes_max = 4096, .fifo_size = 32, }, { .info = MMP_PCM_INFO, .formats = MMP_PCM_FORMATS, .period_bytes_min = 1024, .period_bytes_max = 2048, .periods_min = 2, .periods_max = 32, .buffer_bytes_max = 4096, .fifo_size = 32, }, }; static int mmp_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream); struct snd_soc_pcm_runtime *rtd = substream->private_data; struct pxa2xx_pcm_dma_params *dma_params; struct dma_slave_config slave_config; int ret; dma_params = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream); if (!dma_params) return 0; ret = snd_hwparams_to_dma_slave_config(substream, params, &slave_config); if (ret) return ret; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { slave_config.dst_addr = dma_params->dev_addr; slave_config.dst_maxburst = 4; } else { slave_config.src_addr = dma_params->dev_addr; slave_config.src_maxburst = 4; } ret = dmaengine_slave_config(chan, &slave_config); if (ret) return ret; snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); return 0; } static bool filter(struct dma_chan *chan, void *param) { struct mmp_dma_data *dma_data = param; bool found = false; char *devname; devname = kasprintf(GFP_KERNEL, "%s.%d", dma_data->dma_res->name, dma_data->ssp_id); if ((strcmp(dev_name(chan->device->dev), devname) == 0) && (chan->chan_id == dma_data->dma_res->start)) { found = true; } kfree(devname); return found; } static int mmp_pcm_open(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct platform_device *pdev = to_platform_device(rtd->platform->dev); struct snd_soc_dai *cpu_dai = rtd->cpu_dai; struct mmp_dma_data dma_data; struct resource *r; r = platform_get_resource(pdev, IORESOURCE_DMA, substream->stream); if (!r) return -EBUSY; snd_soc_set_runtime_hwparams(substream, &mmp_pcm_hardware[substream->stream]); dma_data.dma_res = r; dma_data.ssp_id = cpu_dai->id; return snd_dmaengine_pcm_open_request_chan(substream, filter, &dma_data); } static int mmp_pcm_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *vma) { struct snd_pcm_runtime *runtime = substream->runtime; unsigned long off = vma->vm_pgoff; vma->vm_page_prot = 
pgprot_noncached(vma->vm_page_prot); return remap_pfn_range(vma, vma->vm_start, __phys_to_pfn(runtime->dma_addr) + off, vma->vm_end - vma->vm_start, vma->vm_page_prot); } struct snd_pcm_ops mmp_pcm_ops = { .open = mmp_pcm_open, .close = snd_dmaengine_pcm_close_release_chan, .ioctl = snd_pcm_lib_ioctl, .hw_params = mmp_pcm_hw_params, .trigger = snd_dmaengine_pcm_trigger, .pointer = snd_dmaengine_pcm_pointer, .mmap = mmp_pcm_mmap, }; static void mmp_pcm_free_dma_buffers(struct snd_pcm *pcm) { struct snd_pcm_substream *substream; struct snd_dma_buffer *buf; int stream; struct gen_pool *gpool; gpool = sram_get_gpool("asram"); if (!gpool) return; for (stream = 0; stream < 2; stream++) { size_t size = mmp_pcm_hardware[stream].buffer_bytes_max; substream = pcm->streams[stream].substream; if (!substream) continue; buf = &substream->dma_buffer; if (!buf->area) continue; gen_pool_free(gpool, (unsigned long)buf->area, size); buf->area = NULL; } return; } static int mmp_pcm_preallocate_dma_buffer(struct snd_pcm_substream *substream, int stream) { struct snd_dma_buffer *buf = &substream->dma_buffer; size_t size = mmp_pcm_hardware[stream].buffer_bytes_max; struct gen_pool *gpool; buf->dev.type = SNDRV_DMA_TYPE_DEV; buf->dev.dev = substream->pcm->card->dev; buf->private_data = NULL; gpool = sram_get_gpool("asram"); if (!gpool) return -ENOMEM; buf->area = (unsigned char *)gen_pool_alloc(gpool, size); if (!buf->area) return -ENOMEM; buf->addr = gen_pool_virt_to_phys(gpool, (unsigned long)buf->area); buf->bytes = size; return 0; } int mmp_pcm_new(struct snd_soc_pcm_runtime *rtd) { struct snd_pcm_substream *substream; struct snd_pcm *pcm = rtd->pcm; int ret = 0, stream; for (stream = 0; stream < 2; stream++) { substream = pcm->streams[stream].substream; ret = mmp_pcm_preallocate_dma_buffer(substream, stream); if (ret) goto err; } return 0; err: mmp_pcm_free_dma_buffers(pcm); return ret; } struct snd_soc_platform_driver mmp_soc_platform = { .ops = &mmp_pcm_ops, .pcm_new = mmp_pcm_new, .pcm_free = mmp_pcm_free_dma_buffers, }; static int mmp_pcm_probe(struct platform_device *pdev) { struct mmp_audio_platdata *pdata = pdev->dev.platform_data; if (pdata) { mmp_pcm_hardware[SNDRV_PCM_STREAM_PLAYBACK].buffer_bytes_max = pdata->buffer_max_playback; mmp_pcm_hardware[SNDRV_PCM_STREAM_PLAYBACK].period_bytes_max = pdata->period_max_playback; mmp_pcm_hardware[SNDRV_PCM_STREAM_CAPTURE].buffer_bytes_max = pdata->buffer_max_capture; mmp_pcm_hardware[SNDRV_PCM_STREAM_CAPTURE].period_bytes_max = pdata->period_max_capture; } return snd_soc_register_platform(&pdev->dev, &mmp_soc_platform); } static int mmp_pcm_remove(struct platform_device *pdev) { snd_soc_unregister_platform(&pdev->dev); return 0; } static struct platform_driver mmp_pcm_driver = { .driver = { .name = "mmp-pcm-audio", .owner = THIS_MODULE, }, .probe = mmp_pcm_probe, .remove = mmp_pcm_remove, }; module_platform_driver(mmp_pcm_driver); MODULE_AUTHOR("Leo Yan <leoy@marvell.com>"); MODULE_DESCRIPTION("MMP Soc Audio DMA module"); MODULE_LICENSE("GPL");
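/* * Illustrative example with hypothetical names, not from any board file: if * the platform device carries an IORESOURCE_DMA resource named "mmp-adma" * with start = 2 and the cpu_dai id is 1, filter() above accepts only the * DMA channel whose device is named "mmp-adma.1" and whose chan_id is 2, so * each substream binds to exactly the channel its platform resources describe. */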
gpl-2.0
lgics/lge-kernel-msm7x27-3.4
fs/dcache.c
2076
79666
/* * fs/dcache.c * * Complete reimplementation * (C) 1997 Thomas Schoebel-Theuer, * with heavy changes by Linus Torvalds */ /* * Notes on the allocation strategy: * * The dcache is a master of the icache - whenever a dcache entry * exists, the inode will always exist. "iput()" is done either when * the dcache entry is deleted or garbage collected. */ #include <linux/syscalls.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/fsnotify.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/hash.h> #include <linux/cache.h> #include <linux/export.h> #include <linux/mount.h> #include <linux/file.h> #include <asm/uaccess.h> #include <linux/security.h> #include <linux/seqlock.h> #include <linux/swap.h> #include <linux/bootmem.h> #include <linux/fs_struct.h> #include <linux/hardirq.h> #include <linux/bit_spinlock.h> #include <linux/rculist_bl.h> #include <linux/prefetch.h> #include <linux/ratelimit.h> #include "internal.h" #include "mount.h" /* * Usage: * dcache->d_inode->i_lock protects: * - i_dentry, d_alias, d_inode of aliases * dcache_hash_bucket lock protects: * - the dcache hash table * s_anon bl list spinlock protects: * - the s_anon list (see __d_drop) * dcache_lru_lock protects: * - the dcache lru lists and counters * d_lock protects: * - d_flags * - d_name * - d_lru * - d_count * - d_unhashed() * - d_parent and d_subdirs * - children's d_child and d_parent * - d_alias, d_inode * * Ordering: * dentry->d_inode->i_lock * dentry->d_lock * dcache_lru_lock * dcache_hash_bucket lock * s_anon lock * * If there is an ancestor relationship: * dentry->d_parent->...->d_parent->d_lock * ... * dentry->d_parent->d_lock * dentry->d_lock * * If no ancestor relationship: * if (dentry1 < dentry2) * dentry1->d_lock * dentry2->d_lock */ int sysctl_vfs_cache_pressure __read_mostly = 100; EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock); __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock); EXPORT_SYMBOL(rename_lock); static struct kmem_cache *dentry_cache __read_mostly; /* * This is the single most critical data structure when it comes * to the dcache: the hashtable for lookups. Somebody should try * to make this good - I've just made it work. * * This hash-function tries to avoid losing too many bits of hash * information, yet avoid using a prime hash-size or similar. */ #define D_HASHBITS d_hash_shift #define D_HASHMASK d_hash_mask static unsigned int d_hash_mask __read_mostly; static unsigned int d_hash_shift __read_mostly; static struct hlist_bl_head *dentry_hashtable __read_mostly; static inline struct hlist_bl_head *d_hash(const struct dentry *parent, unsigned int hash) { hash += (unsigned long) parent / L1_CACHE_BYTES; hash = hash + (hash >> D_HASHBITS); return dentry_hashtable + (hash & D_HASHMASK); } /* Statistics gathering. */ struct dentry_stat_t dentry_stat = { .age_limit = 45, }; static DEFINE_PER_CPU(unsigned int, nr_dentry); #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS) static int get_nr_dentry(void) { int i; int sum = 0; for_each_possible_cpu(i) sum += per_cpu(nr_dentry, i); return sum < 0 ? 0 : sum; } int proc_nr_dentry(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { dentry_stat.nr_dentry = get_nr_dentry(); return proc_dointvec(table, write, buffer, lenp, ppos); } #endif /* * Compare 2 name strings, return 0 if they match, otherwise non-zero. * The strings are both count bytes long, and count is non-zero. 
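* * Illustrative worked case, an addition rather than part of the original comment: on a 64-bit machine comparing two 5-byte names, the word-at-a-time variant below loads one 8-byte word from each side, breaks out at once (5 < sizeof(unsigned long)) and masks the XOR with ~(~0ul << 40), so only the low five bytes take part in the comparison and padding bytes beyond the names are ignored.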
*/ #ifdef CONFIG_DCACHE_WORD_ACCESS #include <asm/word-at-a-time.h> /* * NOTE! 'cs' and 'scount' come from a dentry, so it has an * aligned allocation for this particular component. We don't * strictly need the load_unaligned_zeropad() safety, but it * doesn't hurt either. * * In contrast, 'ct' and 'tcount' can be from a pathname, and do * need the careful unaligned handling. */ static inline int dentry_cmp(const unsigned char *cs, size_t scount, const unsigned char *ct, size_t tcount) { unsigned long a,b,mask; if (unlikely(scount != tcount)) return 1; for (;;) { a = load_unaligned_zeropad(cs); b = load_unaligned_zeropad(ct); if (tcount < sizeof(unsigned long)) break; if (unlikely(a != b)) return 1; cs += sizeof(unsigned long); ct += sizeof(unsigned long); tcount -= sizeof(unsigned long); if (!tcount) return 0; } mask = ~(~0ul << tcount*8); return unlikely(!!((a ^ b) & mask)); } #else static inline int dentry_cmp(const unsigned char *cs, size_t scount, const unsigned char *ct, size_t tcount) { if (scount != tcount) return 1; do { if (*cs != *ct) return 1; cs++; ct++; tcount--; } while (tcount); return 0; } #endif static void __d_free(struct rcu_head *head) { struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu); WARN_ON(!list_empty(&dentry->d_alias)); if (dname_external(dentry)) kfree(dentry->d_name.name); kmem_cache_free(dentry_cache, dentry); } /* * no locks, please. */ static void d_free(struct dentry *dentry) { BUG_ON(dentry->d_count); this_cpu_dec(nr_dentry); if (dentry->d_op && dentry->d_op->d_release) dentry->d_op->d_release(dentry); /* if dentry was never visible to RCU, immediate free is OK */ if (!(dentry->d_flags & DCACHE_RCUACCESS)) __d_free(&dentry->d_u.d_rcu); else call_rcu(&dentry->d_u.d_rcu, __d_free); } /** * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups * @dentry: the target dentry * After this call, in-progress rcu-walk path lookup will fail. This * should be called after unhashing, and after changing d_inode (if * the dentry has not already been unhashed). */ static inline void dentry_rcuwalk_barrier(struct dentry *dentry) { assert_spin_locked(&dentry->d_lock); /* Go through a barrier */ write_seqcount_barrier(&dentry->d_seq); } /* * Release the dentry's inode, using the filesystem * d_iput() operation if defined. Dentry has no refcount * and is unhashed. */ static void dentry_iput(struct dentry * dentry) __releases(dentry->d_lock) __releases(dentry->d_inode->i_lock) { struct inode *inode = dentry->d_inode; if (inode) { dentry->d_inode = NULL; list_del_init(&dentry->d_alias); spin_unlock(&dentry->d_lock); spin_unlock(&inode->i_lock); if (!inode->i_nlink) fsnotify_inoderemove(inode); if (dentry->d_op && dentry->d_op->d_iput) dentry->d_op->d_iput(dentry, inode); else iput(inode); } else { spin_unlock(&dentry->d_lock); } } /* * Release the dentry's inode, using the filesystem * d_iput() operation if defined. dentry remains in-use. */ static void dentry_unlink_inode(struct dentry * dentry) __releases(dentry->d_lock) __releases(dentry->d_inode->i_lock) { struct inode *inode = dentry->d_inode; dentry->d_inode = NULL; list_del_init(&dentry->d_alias); dentry_rcuwalk_barrier(dentry); spin_unlock(&dentry->d_lock); spin_unlock(&inode->i_lock); if (!inode->i_nlink) fsnotify_inoderemove(inode); if (dentry->d_op && dentry->d_op->d_iput) dentry->d_op->d_iput(dentry, inode); else iput(inode); } /* * dentry_lru_(add|del|prune|move_tail) must be called with d_lock held. 
*/ static void dentry_lru_add(struct dentry *dentry) { if (list_empty(&dentry->d_lru)) { spin_lock(&dcache_lru_lock); list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru); dentry->d_sb->s_nr_dentry_unused++; dentry_stat.nr_unused++; spin_unlock(&dcache_lru_lock); } } static void __dentry_lru_del(struct dentry *dentry) { list_del_init(&dentry->d_lru); dentry->d_flags &= ~DCACHE_SHRINK_LIST; dentry->d_sb->s_nr_dentry_unused--; dentry_stat.nr_unused--; } /* * Remove a dentry with references from the LRU. */ static void dentry_lru_del(struct dentry *dentry) { if (!list_empty(&dentry->d_lru)) { spin_lock(&dcache_lru_lock); __dentry_lru_del(dentry); spin_unlock(&dcache_lru_lock); } } /* * Remove a dentry that is unreferenced and about to be pruned * (unhashed and destroyed) from the LRU, and inform the file system. * This wrapper should be called _prior_ to unhashing a victim dentry. */ static void dentry_lru_prune(struct dentry *dentry) { if (!list_empty(&dentry->d_lru)) { if (dentry->d_flags & DCACHE_OP_PRUNE) dentry->d_op->d_prune(dentry); spin_lock(&dcache_lru_lock); __dentry_lru_del(dentry); spin_unlock(&dcache_lru_lock); } } static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list) { spin_lock(&dcache_lru_lock); if (list_empty(&dentry->d_lru)) { list_add_tail(&dentry->d_lru, list); dentry->d_sb->s_nr_dentry_unused++; dentry_stat.nr_unused++; } else { list_move_tail(&dentry->d_lru, list); } spin_unlock(&dcache_lru_lock); } /** * d_kill - kill dentry and return parent * @dentry: dentry to kill * @parent: parent dentry * * The dentry must already be unhashed and removed from the LRU. * * If this is the root of the dentry tree, return NULL. * * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by * d_kill. */ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent) __releases(dentry->d_lock) __releases(parent->d_lock) __releases(dentry->d_inode->i_lock) { list_del(&dentry->d_u.d_child); /* * Inform try_to_ascend() that we are no longer attached to the * dentry tree */ dentry->d_flags |= DCACHE_DISCONNECTED; if (parent) spin_unlock(&parent->d_lock); dentry_iput(dentry); /* * dentry_iput drops the locks, at which point nobody (except * transient RCU lookups) can reach this dentry. */ d_free(dentry); return parent; } /* * Unhash a dentry without inserting an RCU walk barrier or checking that * dentry->d_lock is locked. The caller must take care of that, if * appropriate. */ static void __d_shrink(struct dentry *dentry) { if (!d_unhashed(dentry)) { struct hlist_bl_head *b; if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED)) b = &dentry->d_sb->s_anon; else b = d_hash(dentry->d_parent, dentry->d_name.hash); hlist_bl_lock(b); __hlist_bl_del(&dentry->d_hash); dentry->d_hash.pprev = NULL; hlist_bl_unlock(b); } } /** * d_drop - drop a dentry * @dentry: dentry to drop * * d_drop() unhashes the entry from the parent dentry hashes, so that it won't * be found through a VFS lookup any more. Note that this is different from * deleting the dentry - d_delete will try to mark the dentry negative if * possible, giving a successful _negative_ lookup, while d_drop will * just make the cache lookup fail. * * d_drop() is used mainly for stuff that wants to invalidate a dentry for some * reason (NFS timeouts or autofs deletes). * * __d_drop requires dentry->d_lock. 
*/ void __d_drop(struct dentry *dentry) { if (!d_unhashed(dentry)) { __d_shrink(dentry); dentry_rcuwalk_barrier(dentry); } } EXPORT_SYMBOL(__d_drop); void d_drop(struct dentry *dentry) { spin_lock(&dentry->d_lock); __d_drop(dentry); spin_unlock(&dentry->d_lock); } EXPORT_SYMBOL(d_drop); /* * d_clear_need_lookup - drop a dentry from cache and clear the need lookup flag * @dentry: dentry to drop * * This is called when we do a lookup on a placeholder dentry that needed to be * looked up. The dentry should have been hashed in order for it to be found by * the lookup code, but now needs to be unhashed while we do the actual lookup * and clear the DCACHE_NEED_LOOKUP flag. */ void d_clear_need_lookup(struct dentry *dentry) { spin_lock(&dentry->d_lock); __d_drop(dentry); dentry->d_flags &= ~DCACHE_NEED_LOOKUP; spin_unlock(&dentry->d_lock); } EXPORT_SYMBOL(d_clear_need_lookup); /* * Finish off a dentry we've decided to kill. * dentry->d_lock must be held, returns with it unlocked. * If ref is non-zero, then decrement the refcount too. * Returns dentry requiring refcount drop, or NULL if we're done. */ static inline struct dentry *dentry_kill(struct dentry *dentry, int ref) __releases(dentry->d_lock) { struct inode *inode; struct dentry *parent; inode = dentry->d_inode; if (inode && !spin_trylock(&inode->i_lock)) { relock: spin_unlock(&dentry->d_lock); cpu_relax(); return dentry; /* try again with same dentry */ } if (IS_ROOT(dentry)) parent = NULL; else parent = dentry->d_parent; if (parent && !spin_trylock(&parent->d_lock)) { if (inode) spin_unlock(&inode->i_lock); goto relock; } if (ref) dentry->d_count--; /* * if dentry was on the d_lru list delete it from there. * inform the fs via d_prune that this dentry is about to be * unhashed and destroyed. */ dentry_lru_prune(dentry); /* if it was on the hash then remove it */ __d_drop(dentry); return d_kill(dentry, parent); } /* * This is dput * * This is complicated by the fact that we do not want to put * dentries that are no longer on any hash chain on the unused * list: we'd much rather just get rid of them immediately. * * However, that implies that we have to traverse the dentry * tree upwards to the parents which might _also_ now be * scheduled for deletion (it may have been only waiting for * its last child to go away). * * This tail recursion is done by hand as we don't want to depend * on the compiler to always get this right (gcc generally doesn't). * Real recursion would eat up our stack space. */ /* * dput - release a dentry * @dentry: dentry to release * * Release a dentry. This will drop the usage count and if appropriate * call the dentry unlink method as well as removing it from the queues and * releasing its resources. If the parent dentries were scheduled for release * they too may now get deleted. */ void dput(struct dentry *dentry) { if (!dentry) return; repeat: if (dentry->d_count == 1) might_sleep(); spin_lock(&dentry->d_lock); BUG_ON(!dentry->d_count); if (dentry->d_count > 1) { dentry->d_count--; spin_unlock(&dentry->d_lock); return; } if (dentry->d_flags & DCACHE_OP_DELETE) { if (dentry->d_op->d_delete(dentry)) goto kill_it; } /* Unreachable? Get rid of it */ if (d_unhashed(dentry)) goto kill_it; /* * If this dentry needs lookup, don't set the referenced flag so that it * is more likely to be cleaned up by the dcache shrinker in case of * memory pressure. 
*/ if (!d_need_lookup(dentry)) dentry->d_flags |= DCACHE_REFERENCED; dentry_lru_add(dentry); dentry->d_count--; spin_unlock(&dentry->d_lock); return; kill_it: dentry = dentry_kill(dentry, 1); if (dentry) goto repeat; } EXPORT_SYMBOL(dput); /** * d_invalidate - invalidate a dentry * @dentry: dentry to invalidate * * Try to invalidate the dentry if it turns out to be * possible. If there are other dentries that can be * reached through this one we can't delete it and we * return -EBUSY. On success we return 0. * * no dcache lock. */ int d_invalidate(struct dentry * dentry) { /* * If it's already been dropped, return OK. */ spin_lock(&dentry->d_lock); if (d_unhashed(dentry)) { spin_unlock(&dentry->d_lock); return 0; } /* * Check whether to do a partial shrink_dcache * to get rid of unused child entries. */ if (!list_empty(&dentry->d_subdirs)) { spin_unlock(&dentry->d_lock); shrink_dcache_parent(dentry); spin_lock(&dentry->d_lock); } /* * Somebody else still using it? * * If it's a directory, we can't drop it * for fear of somebody re-populating it * with children (even though dropping it * would make it unreachable from the root, * we might still populate it if it was a * working directory or similar). * We also need to leave mountpoints alone, * directory or not. */ if (dentry->d_count > 1 && dentry->d_inode) { if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) { spin_unlock(&dentry->d_lock); return -EBUSY; } } __d_drop(dentry); spin_unlock(&dentry->d_lock); return 0; } EXPORT_SYMBOL(d_invalidate); /* This must be called with d_lock held */ static inline void __dget_dlock(struct dentry *dentry) { dentry->d_count++; } static inline void __dget(struct dentry *dentry) { spin_lock(&dentry->d_lock); __dget_dlock(dentry); spin_unlock(&dentry->d_lock); } struct dentry *dget_parent(struct dentry *dentry) { struct dentry *ret; repeat: /* * Don't need rcu_dereference because we re-check it was correct under * the lock. */ rcu_read_lock(); ret = dentry->d_parent; spin_lock(&ret->d_lock); if (unlikely(ret != dentry->d_parent)) { spin_unlock(&ret->d_lock); rcu_read_unlock(); goto repeat; } rcu_read_unlock(); BUG_ON(!ret->d_count); ret->d_count++; spin_unlock(&ret->d_lock); return ret; } EXPORT_SYMBOL(dget_parent); /** * d_find_alias - grab a hashed alias of inode * @inode: inode in question * @want_discon: flag, used by d_splice_alias, to request * that only a DISCONNECTED alias be returned. * * If inode has a hashed alias, or is a directory and has any alias, * acquire the reference to alias and return it. Otherwise return NULL. * Notice that if inode is a directory there can be only one alias and * it can be unhashed only if it has no children, or if it is the root * of a filesystem. * * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer * any other hashed alias over that one unless @want_discon is set, * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias. 
*/ static struct dentry *__d_find_alias(struct inode *inode, int want_discon) { struct dentry *alias, *discon_alias; again: discon_alias = NULL; list_for_each_entry(alias, &inode->i_dentry, d_alias) { spin_lock(&alias->d_lock); if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) { if (IS_ROOT(alias) && (alias->d_flags & DCACHE_DISCONNECTED)) { discon_alias = alias; } else if (!want_discon) { __dget_dlock(alias); spin_unlock(&alias->d_lock); return alias; } } spin_unlock(&alias->d_lock); } if (discon_alias) { alias = discon_alias; spin_lock(&alias->d_lock); if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) { if (IS_ROOT(alias) && (alias->d_flags & DCACHE_DISCONNECTED)) { __dget_dlock(alias); spin_unlock(&alias->d_lock); return alias; } } spin_unlock(&alias->d_lock); goto again; } return NULL; } struct dentry *d_find_alias(struct inode *inode) { struct dentry *de = NULL; if (!list_empty(&inode->i_dentry)) { spin_lock(&inode->i_lock); de = __d_find_alias(inode, 0); spin_unlock(&inode->i_lock); } return de; } EXPORT_SYMBOL(d_find_alias); /* * Try to kill dentries associated with this inode. * WARNING: you must own a reference to inode. */ void d_prune_aliases(struct inode *inode) { struct dentry *dentry; restart: spin_lock(&inode->i_lock); list_for_each_entry(dentry, &inode->i_dentry, d_alias) { spin_lock(&dentry->d_lock); if (!dentry->d_count) { __dget_dlock(dentry); __d_drop(dentry); spin_unlock(&dentry->d_lock); spin_unlock(&inode->i_lock); dput(dentry); goto restart; } spin_unlock(&dentry->d_lock); } spin_unlock(&inode->i_lock); } EXPORT_SYMBOL(d_prune_aliases); /* * Try to throw away a dentry - free the inode, dput the parent. * Requires dentry->d_lock is held, and dentry->d_count == 0. * Releases dentry->d_lock. * * This may fail if locks cannot be acquired - no problem, just try again. */ static void try_prune_one_dentry(struct dentry *dentry) __releases(dentry->d_lock) { struct dentry *parent; parent = dentry_kill(dentry, 0); /* * If dentry_kill returns NULL, we have nothing more to do. * If it returns the same dentry, trylocks failed. In either * case, just loop again. * * Otherwise, we need to prune ancestors too. This is necessary * to prevent quadratic behavior of shrink_dcache_parent(), but * is also expected to be beneficial in reducing dentry cache * fragmentation. */ if (!parent) return; if (parent == dentry) return; /* Prune ancestors. */ dentry = parent; while (dentry) { spin_lock(&dentry->d_lock); if (dentry->d_count > 1) { dentry->d_count--; spin_unlock(&dentry->d_lock); return; } dentry = dentry_kill(dentry, 1); } } static void shrink_dentry_list(struct list_head *list) { struct dentry *dentry; rcu_read_lock(); for (;;) { dentry = list_entry_rcu(list->prev, struct dentry, d_lru); if (&dentry->d_lru == list) break; /* empty */ spin_lock(&dentry->d_lock); if (dentry != list_entry(list->prev, struct dentry, d_lru)) { spin_unlock(&dentry->d_lock); continue; } /* * We found an inuse dentry which was not removed from * the LRU because of laziness during lookup. Do not free * it - just keep it off the LRU list. */ if (dentry->d_count) { dentry_lru_del(dentry); spin_unlock(&dentry->d_lock); continue; } rcu_read_unlock(); try_prune_one_dentry(dentry); rcu_read_lock(); } rcu_read_unlock(); } /** * prune_dcache_sb - shrink the dcache * @sb: superblock * @count: number of entries to try to free * * Attempt to shrink the superblock dcache LRU by @count entries. This is * done when we need more memory and is called from the superblock shrinker * function. 
* * This function may fail to free any resources if all the dentries are in * use. */ void prune_dcache_sb(struct super_block *sb, int count) { struct dentry *dentry; LIST_HEAD(referenced); LIST_HEAD(tmp); relock: spin_lock(&dcache_lru_lock); while (!list_empty(&sb->s_dentry_lru)) { dentry = list_entry(sb->s_dentry_lru.prev, struct dentry, d_lru); BUG_ON(dentry->d_sb != sb); if (!spin_trylock(&dentry->d_lock)) { spin_unlock(&dcache_lru_lock); cpu_relax(); goto relock; } if (dentry->d_flags & DCACHE_REFERENCED) { dentry->d_flags &= ~DCACHE_REFERENCED; list_move(&dentry->d_lru, &referenced); spin_unlock(&dentry->d_lock); } else { list_move_tail(&dentry->d_lru, &tmp); dentry->d_flags |= DCACHE_SHRINK_LIST; spin_unlock(&dentry->d_lock); if (!--count) break; } cond_resched_lock(&dcache_lru_lock); } if (!list_empty(&referenced)) list_splice(&referenced, &sb->s_dentry_lru); spin_unlock(&dcache_lru_lock); shrink_dentry_list(&tmp); } /** * shrink_dcache_sb - shrink dcache for a superblock * @sb: superblock * * Shrink the dcache for the specified super block. This is used to free * the dcache before unmounting a file system. */ void shrink_dcache_sb(struct super_block *sb) { LIST_HEAD(tmp); spin_lock(&dcache_lru_lock); while (!list_empty(&sb->s_dentry_lru)) { list_splice_init(&sb->s_dentry_lru, &tmp); spin_unlock(&dcache_lru_lock); shrink_dentry_list(&tmp); spin_lock(&dcache_lru_lock); } spin_unlock(&dcache_lru_lock); } EXPORT_SYMBOL(shrink_dcache_sb); /* * destroy a single subtree of dentries for unmount * - see the comments on shrink_dcache_for_umount() for a description of the * locking */ static void shrink_dcache_for_umount_subtree(struct dentry *dentry) { struct dentry *parent; BUG_ON(!IS_ROOT(dentry)); for (;;) { /* descend to the first leaf in the current subtree */ while (!list_empty(&dentry->d_subdirs)) dentry = list_entry(dentry->d_subdirs.next, struct dentry, d_u.d_child); /* consume the dentries from this leaf up through its parents * until we find one with children or run out altogether */ do { struct inode *inode; /* * remove the dentry from the lru, and inform * the fs that this dentry is about to be * unhashed and destroyed. */ dentry_lru_prune(dentry); __d_shrink(dentry); if (dentry->d_count != 0) { printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%s}" " still in use (%d)" " [unmount of %s %s]\n", dentry, dentry->d_inode ? 
dentry->d_inode->i_ino : 0UL, dentry->d_name.name, dentry->d_count, dentry->d_sb->s_type->name, dentry->d_sb->s_id); BUG(); } if (IS_ROOT(dentry)) { parent = NULL; list_del(&dentry->d_u.d_child); } else { parent = dentry->d_parent; parent->d_count--; list_del(&dentry->d_u.d_child); } inode = dentry->d_inode; if (inode) { dentry->d_inode = NULL; list_del_init(&dentry->d_alias); if (dentry->d_op && dentry->d_op->d_iput) dentry->d_op->d_iput(dentry, inode); else iput(inode); } d_free(dentry); /* finished when we fall off the top of the tree, * otherwise we ascend to the parent and move to the * next sibling if there is one */ if (!parent) return; dentry = parent; } while (list_empty(&dentry->d_subdirs)); dentry = list_entry(dentry->d_subdirs.next, struct dentry, d_u.d_child); } } /* * destroy the dentries attached to a superblock on unmounting * - we don't need to use dentry->d_lock because: * - the superblock is detached from all mountings and open files, so the * dentry trees will not be rearranged by the VFS * - s_umount is write-locked, so the memory pressure shrinker will ignore * any dentries belonging to this superblock that it comes across * - the filesystem itself is no longer permitted to rearrange the dentries * in this superblock */ void shrink_dcache_for_umount(struct super_block *sb) { struct dentry *dentry; if (down_read_trylock(&sb->s_umount)) BUG(); dentry = sb->s_root; sb->s_root = NULL; dentry->d_count--; shrink_dcache_for_umount_subtree(dentry); while (!hlist_bl_empty(&sb->s_anon)) { dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash); shrink_dcache_for_umount_subtree(dentry); } } /* * This tries to ascend one level of parenthood, but * we can race with renaming, so we need to re-check * the parenthood after dropping the lock and check * that the sequence number still matches. */ static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq) { struct dentry *new = old->d_parent; rcu_read_lock(); spin_unlock(&old->d_lock); spin_lock(&new->d_lock); /* * might go back up the wrong parent if we have had a rename * or deletion */ if (new != old->d_parent || (old->d_flags & DCACHE_DISCONNECTED) || (!locked && read_seqretry(&rename_lock, seq))) { spin_unlock(&new->d_lock); new = NULL; } rcu_read_unlock(); return new; } /* * Search for at least 1 mount point in the dentry's subdirs. * We descend to the next level whenever the d_subdirs * list is non-empty and continue searching. */ /** * have_submounts - check for mounts over a dentry * @parent: dentry to check. * * Return true if the parent or its subdirectories contain * a mount point */ int have_submounts(struct dentry *parent) { struct dentry *this_parent; struct list_head *next; unsigned seq; int locked = 0; seq = read_seqbegin(&rename_lock); again: this_parent = parent; if (d_mountpoint(parent)) goto positive; spin_lock(&this_parent->d_lock); repeat: next = this_parent->d_subdirs.next; resume: while (next != &this_parent->d_subdirs) { struct list_head *tmp = next; struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); next = tmp->next; spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); /* Have we found a mount point ? 
*/ if (d_mountpoint(dentry)) { spin_unlock(&dentry->d_lock); spin_unlock(&this_parent->d_lock); goto positive; } if (!list_empty(&dentry->d_subdirs)) { spin_unlock(&this_parent->d_lock); spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_); this_parent = dentry; spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); goto repeat; } spin_unlock(&dentry->d_lock); } /* * All done at this level ... ascend and resume the search. */ if (this_parent != parent) { struct dentry *child = this_parent; this_parent = try_to_ascend(this_parent, locked, seq); if (!this_parent) goto rename_retry; next = child->d_u.d_child.next; goto resume; } spin_unlock(&this_parent->d_lock); if (!locked && read_seqretry(&rename_lock, seq)) goto rename_retry; if (locked) write_sequnlock(&rename_lock); return 0; /* No mount points found in tree */ positive: if (!locked && read_seqretry(&rename_lock, seq)) goto rename_retry; if (locked) write_sequnlock(&rename_lock); return 1; rename_retry: locked = 1; write_seqlock(&rename_lock); goto again; } EXPORT_SYMBOL(have_submounts); /* * Search the dentry child list for the specified parent, * and move any unused dentries to the end of the unused * list for prune_dcache(). We descend to the next level * whenever the d_subdirs list is non-empty and continue * searching. * * It returns zero iff there are no unused children, * otherwise it returns the number of children moved to * the end of the unused list. This may not be the total * number of unused children, because select_parent can * drop the lock and return early due to latency * constraints. */ static int select_parent(struct dentry *parent, struct list_head *dispose) { struct dentry *this_parent; struct list_head *next; unsigned seq; int found = 0; int locked = 0; seq = read_seqbegin(&rename_lock); again: this_parent = parent; spin_lock(&this_parent->d_lock); repeat: next = this_parent->d_subdirs.next; resume: while (next != &this_parent->d_subdirs) { struct list_head *tmp = next; struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); next = tmp->next; spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); /* * move only zero ref count dentries to the dispose list. * * Those which are presently on the shrink list, being processed * by shrink_dentry_list(), shouldn't be moved. Otherwise the * loop in shrink_dcache_parent() might not make any progress * and loop forever. */ if (dentry->d_count) { dentry_lru_del(dentry); } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) { dentry_lru_move_list(dentry, dispose); dentry->d_flags |= DCACHE_SHRINK_LIST; found++; } /* * We can return to the caller if we have found some (this * ensures forward progress). We'll be coming back to find * the rest. */ if (found && need_resched()) { spin_unlock(&dentry->d_lock); goto out; } /* * Descend a level if the d_subdirs list is non-empty. */ if (!list_empty(&dentry->d_subdirs)) { spin_unlock(&this_parent->d_lock); spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_); this_parent = dentry; spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); goto repeat; } spin_unlock(&dentry->d_lock); } /* * All done at this level ... ascend and resume the search. 
*/ if (this_parent != parent) { struct dentry *child = this_parent; this_parent = try_to_ascend(this_parent, locked, seq); if (!this_parent) goto rename_retry; next = child->d_u.d_child.next; goto resume; } out: spin_unlock(&this_parent->d_lock); if (!locked && read_seqretry(&rename_lock, seq)) goto rename_retry; if (locked) write_sequnlock(&rename_lock); return found; rename_retry: if (found) return found; locked = 1; write_seqlock(&rename_lock); goto again; } /** * shrink_dcache_parent - prune dcache * @parent: parent of entries to prune * * Prune the dcache to remove unused children of the parent dentry. */ void shrink_dcache_parent(struct dentry * parent) { LIST_HEAD(dispose); int found; while ((found = select_parent(parent, &dispose)) != 0) shrink_dentry_list(&dispose); } EXPORT_SYMBOL(shrink_dcache_parent); /** * __d_alloc - allocate a dcache entry * @sb: filesystem it will belong to * @name: qstr of the name * * Allocates a dentry. It returns %NULL if there is insufficient memory * available. On success the dentry is returned. The name passed in is * copied, so the buffer passed in may be reused after this call. */ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) { struct dentry *dentry; char *dname; dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL); if (!dentry) return NULL; if (name->len > DNAME_INLINE_LEN-1) { dname = kmalloc(name->len + 1, GFP_KERNEL); if (!dname) { kmem_cache_free(dentry_cache, dentry); return NULL; } } else { dname = dentry->d_iname; } dentry->d_name.name = dname; dentry->d_name.len = name->len; dentry->d_name.hash = name->hash; memcpy(dname, name->name, name->len); dname[name->len] = 0; dentry->d_count = 1; dentry->d_flags = 0; spin_lock_init(&dentry->d_lock); seqcount_init(&dentry->d_seq); dentry->d_inode = NULL; dentry->d_parent = dentry; dentry->d_sb = sb; dentry->d_op = NULL; dentry->d_fsdata = NULL; INIT_HLIST_BL_NODE(&dentry->d_hash); INIT_LIST_HEAD(&dentry->d_lru); INIT_LIST_HEAD(&dentry->d_subdirs); INIT_LIST_HEAD(&dentry->d_alias); INIT_LIST_HEAD(&dentry->d_u.d_child); d_set_d_op(dentry, dentry->d_sb->s_d_op); this_cpu_inc(nr_dentry); return dentry; } /** * d_alloc - allocate a dcache entry * @parent: parent of entry to allocate * @name: qstr of the name * * Allocates a dentry. It returns %NULL if there is insufficient memory * available. On success the dentry is returned. The name passed in is * copied, so the buffer passed in may be reused after this call.
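 *
 * Illustrative use (hypothetical caller): most users go through
 * d_alloc_name(), defined below, which builds the qstr from a C string:
 *
 *	struct dentry *d = d_alloc_name(parent, "example");
 *	if (!d)
 *		return -ENOMEM;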
*/ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) { struct dentry *dentry = __d_alloc(parent->d_sb, name); if (!dentry) return NULL; spin_lock(&parent->d_lock); /* * don't need child lock because it is not subject * to concurrency here */ __dget_dlock(parent); dentry->d_parent = parent; list_add(&dentry->d_u.d_child, &parent->d_subdirs); spin_unlock(&parent->d_lock); return dentry; } EXPORT_SYMBOL(d_alloc); struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name) { struct dentry *dentry = __d_alloc(sb, name); if (dentry) dentry->d_flags |= DCACHE_DISCONNECTED; return dentry; } EXPORT_SYMBOL(d_alloc_pseudo); struct dentry *d_alloc_name(struct dentry *parent, const char *name) { struct qstr q; q.name = name; q.len = strlen(name); q.hash = full_name_hash(q.name, q.len); return d_alloc(parent, &q); } EXPORT_SYMBOL(d_alloc_name); void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op) { WARN_ON_ONCE(dentry->d_op); WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH | DCACHE_OP_COMPARE | DCACHE_OP_REVALIDATE | DCACHE_OP_DELETE )); dentry->d_op = op; if (!op) return; if (op->d_hash) dentry->d_flags |= DCACHE_OP_HASH; if (op->d_compare) dentry->d_flags |= DCACHE_OP_COMPARE; if (op->d_revalidate) dentry->d_flags |= DCACHE_OP_REVALIDATE; if (op->d_delete) dentry->d_flags |= DCACHE_OP_DELETE; if (op->d_prune) dentry->d_flags |= DCACHE_OP_PRUNE; } EXPORT_SYMBOL(d_set_d_op); static void __d_instantiate(struct dentry *dentry, struct inode *inode) { spin_lock(&dentry->d_lock); if (inode) { if (unlikely(IS_AUTOMOUNT(inode))) dentry->d_flags |= DCACHE_NEED_AUTOMOUNT; list_add(&dentry->d_alias, &inode->i_dentry); } dentry->d_inode = inode; dentry_rcuwalk_barrier(dentry); spin_unlock(&dentry->d_lock); fsnotify_d_instantiate(dentry, inode); } /** * d_instantiate - fill in inode information for a dentry * @entry: dentry to complete * @inode: inode to attach to this dentry * * Fill in inode information in the entry. * * This turns negative dentries into productive full members * of society. * * NOTE! This assumes that the inode count has been incremented * (or otherwise set) by the caller to indicate that it is now * in use by the dcache. */ void d_instantiate(struct dentry *entry, struct inode * inode) { BUG_ON(!list_empty(&entry->d_alias)); if (inode) spin_lock(&inode->i_lock); __d_instantiate(entry, inode); if (inode) spin_unlock(&inode->i_lock); security_d_instantiate(entry, inode); } EXPORT_SYMBOL(d_instantiate); /** * d_instantiate_unique - instantiate a non-aliased dentry * @entry: dentry to instantiate * @inode: inode to attach to this dentry * * Fill in inode information in the entry. On success, it returns NULL. * If an unhashed alias of "entry" already exists, then we return the * aliased dentry instead and drop one reference to inode. * * Note that in order to avoid conflicts with rename() etc, the caller * had better be holding the parent directory semaphore. * * This also assumes that the inode count has been incremented * (or otherwise set) by the caller to indicate that it is now * in use by the dcache. 
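 *
 * A sketch of the expected calling pattern (hypothetical filesystem code,
 * rehashing and error handling elided):
 *
 *	alias = d_instantiate_unique(dentry, inode);
 *	if (alias)
 *		return alias;	// existing alias; our inode ref was dropped
 *	return NULL;		// @entry was instantiated in place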
*/ static struct dentry *__d_instantiate_unique(struct dentry *entry, struct inode *inode) { struct dentry *alias; int len = entry->d_name.len; const char *name = entry->d_name.name; unsigned int hash = entry->d_name.hash; if (!inode) { __d_instantiate(entry, NULL); return NULL; } list_for_each_entry(alias, &inode->i_dentry, d_alias) { struct qstr *qstr = &alias->d_name; /* * Don't need alias->d_lock here, because aliases with * d_parent == entry->d_parent are not subject to name or * parent changes, because the parent inode i_mutex is held. */ if (qstr->hash != hash) continue; if (alias->d_parent != entry->d_parent) continue; if (dentry_cmp(qstr->name, qstr->len, name, len)) continue; __dget(alias); return alias; } __d_instantiate(entry, inode); return NULL; } struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode) { struct dentry *result; BUG_ON(!list_empty(&entry->d_alias)); if (inode) spin_lock(&inode->i_lock); result = __d_instantiate_unique(entry, inode); if (inode) spin_unlock(&inode->i_lock); if (!result) { security_d_instantiate(entry, inode); return NULL; } BUG_ON(!d_unhashed(result)); iput(inode); return result; } EXPORT_SYMBOL(d_instantiate_unique); struct dentry *d_make_root(struct inode *root_inode) { struct dentry *res = NULL; if (root_inode) { static const struct qstr name = { .name = "/", .len = 1 }; res = __d_alloc(root_inode->i_sb, &name); if (res) d_instantiate(res, root_inode); else iput(root_inode); } return res; } EXPORT_SYMBOL(d_make_root); static struct dentry * __d_find_any_alias(struct inode *inode) { struct dentry *alias; if (list_empty(&inode->i_dentry)) return NULL; alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias); __dget(alias); return alias; } /** * d_find_any_alias - find any alias for a given inode * @inode: inode to find an alias for * * If any aliases exist for the given inode, take and return a * reference for one of them. If no aliases exist, return %NULL. */ struct dentry *d_find_any_alias(struct inode *inode) { struct dentry *de; spin_lock(&inode->i_lock); de = __d_find_any_alias(inode); spin_unlock(&inode->i_lock); return de; } EXPORT_SYMBOL(d_find_any_alias); /** * d_obtain_alias - find or allocate a dentry for a given inode * @inode: inode to allocate the dentry for * * Obtain a dentry for an inode resulting from NFS filehandle conversion or * similar open by handle operations. The returned dentry may be anonymous, * or may have a full name (if the inode was already in the cache). * * When called on a directory inode, we must ensure that the inode only ever * has one dentry. If a dentry is found, that is returned instead of * allocating a new one. * * On successful return, the reference to the inode has been transferred * to the dentry. In case of an error the reference on the inode is released. * To make it easier to use in export operations, a %NULL or IS_ERR inode may * be passed in, and the error will be propagated to the return value, * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
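 *
 * Typical exportfs usage, sketched with hypothetical names:
 *
 *	static struct dentry *foo_fh_to_dentry(struct super_block *sb, u64 ino)
 *	{
 *		struct inode *inode = foo_iget(sb, ino);	// assumed helper
 *		return d_obtain_alias(inode);	// copes with NULL/IS_ERR inodes
 *	}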
*/ struct dentry *d_obtain_alias(struct inode *inode) { static const struct qstr anonstring = { .name = "" }; struct dentry *tmp; struct dentry *res; if (!inode) return ERR_PTR(-ESTALE); if (IS_ERR(inode)) return ERR_CAST(inode); res = d_find_any_alias(inode); if (res) goto out_iput; tmp = __d_alloc(inode->i_sb, &anonstring); if (!tmp) { res = ERR_PTR(-ENOMEM); goto out_iput; } spin_lock(&inode->i_lock); res = __d_find_any_alias(inode); if (res) { spin_unlock(&inode->i_lock); dput(tmp); goto out_iput; } /* attach a disconnected dentry */ spin_lock(&tmp->d_lock); tmp->d_inode = inode; tmp->d_flags |= DCACHE_DISCONNECTED; list_add(&tmp->d_alias, &inode->i_dentry); hlist_bl_lock(&tmp->d_sb->s_anon); hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon); hlist_bl_unlock(&tmp->d_sb->s_anon); spin_unlock(&tmp->d_lock); spin_unlock(&inode->i_lock); security_d_instantiate(tmp, inode); return tmp; out_iput: if (res && !IS_ERR(res)) security_d_instantiate(res, inode); iput(inode); return res; } EXPORT_SYMBOL(d_obtain_alias); /** * d_splice_alias - splice a disconnected dentry into the tree if one exists * @inode: the inode which may have a disconnected dentry * @dentry: a negative dentry which we want to point to the inode. * * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and * DCACHE_DISCONNECTED), then d_move that in place of the given dentry * and return it, else simply d_add the inode to the dentry and return NULL. * * This is needed in the lookup routine of any filesystem that is exportable * (via knfsd) so that we can build dcache paths to directories effectively. * * If a dentry was found and moved, then it is returned. Otherwise NULL * is returned. This matches the expected return value of ->lookup. * */ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) { struct dentry *new = NULL; if (IS_ERR(inode)) return ERR_CAST(inode); if (inode && S_ISDIR(inode->i_mode)) { spin_lock(&inode->i_lock); new = __d_find_alias(inode, 1); if (new) { BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED)); spin_unlock(&inode->i_lock); security_d_instantiate(new, inode); d_move(new, dentry); iput(inode); } else { /* already taking inode->i_lock, so d_add() by hand */ __d_instantiate(dentry, inode); spin_unlock(&inode->i_lock); security_d_instantiate(dentry, inode); d_rehash(dentry); } } else d_add(dentry, inode); return new; } EXPORT_SYMBOL(d_splice_alias); /** * d_add_ci - lookup or allocate new dentry with case-exact name * @inode: the inode case-insensitive lookup has found * @dentry: the negative dentry that was passed to the parent's lookup func * @name: the case-exact name to be associated with the returned dentry * * This is to avoid filling the dcache with case-insensitive names to the * same inode; only the actual correct case is stored in the dcache for * case-insensitive filesystems. * * For a case-insensitive lookup match, if the case-exact dentry * already exists in the dcache, use it and return it. * * If no entry exists with the exact case name, allocate a new dentry with * the exact case, and return the spliced entry. */ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode, struct qstr *name) { int error; struct dentry *found; struct dentry *new; /* * First check if a dentry matching the name already exists, * if not go ahead and create it now.
*/ found = d_hash_and_lookup(dentry->d_parent, name); if (!found) { new = d_alloc(dentry->d_parent, name); if (!new) { error = -ENOMEM; goto err_out; } found = d_splice_alias(inode, new); if (found) { dput(new); return found; } return new; } /* * If a matching dentry exists, and it's not negative, use it. * * Decrement the reference count to balance the iget() done * earlier on. */ if (found->d_inode) { if (unlikely(found->d_inode != inode)) { /* This can't happen because bad inodes are unhashed. */ BUG_ON(!is_bad_inode(inode)); BUG_ON(!is_bad_inode(found->d_inode)); } iput(inode); return found; } /* * We are going to instantiate this dentry, unhash it and clear the * lookup flag so we can do that. */ if (unlikely(d_need_lookup(found))) d_clear_need_lookup(found); /* * Negative dentry: instantiate it unless the inode is a directory and * already has a dentry. */ new = d_splice_alias(inode, found); if (new) { dput(found); found = new; } return found; err_out: iput(inode); return ERR_PTR(error); } EXPORT_SYMBOL(d_add_ci); /** * __d_lookup_rcu - search for a dentry (racy, store-free) * @parent: parent dentry * @name: qstr of name we wish to find * @seqp: returns d_seq value at the point where the dentry was found * @inode: returns dentry->d_inode when the inode was found valid. * Returns: dentry, or NULL * * __d_lookup_rcu is the dcache lookup function for rcu-walk name * resolution (store-free path walking) design described in * Documentation/filesystems/path-lookup.txt. * * This is not to be used outside core vfs. * * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock * held, and rcu_read_lock held. The returned dentry must not be stored * anywhere without taking d_lock and checking the d_seq sequence count * against @seq returned here. * * A refcount may be taken on the found dentry with the __d_rcu_to_refcount * function. * * Alternatively, __d_lookup_rcu may be called again to look up the child of * the returned dentry, so long as its parent's seqlock is checked after the * child is looked up. Thus, an interlocking stepping of sequence lock checks * is formed, giving integrity down the path walk. */ struct dentry *__d_lookup_rcu(const struct dentry *parent, const struct qstr *name, unsigned *seqp, struct inode **inode) { unsigned int len = name->len; unsigned int hash = name->hash; const unsigned char *str = name->name; struct hlist_bl_head *b = d_hash(parent, hash); struct hlist_bl_node *node; struct dentry *dentry; /* * Note: There is significant duplication with __d_lookup which is * required to prevent single threaded performance regressions * especially on architectures where smp_rmb (in seqcounts) are costly. * Keep the two functions in sync. */ /* * The hash list is protected using RCU. * * Carefully use d_seq when comparing a candidate dentry, to avoid * races with d_move(). * * It is possible that concurrent renames can mess up our list * walk here and result in missing our dentry, resulting in the * false-negative result. d_lookup() protects against concurrent * renames using rename_lock seqlock. * * See Documentation/filesystems/path-lookup.txt for more details.
*/ hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) { unsigned seq; struct inode *i; const char *tname; int tlen; if (dentry->d_name.hash != hash) continue; seqretry: seq = read_seqcount_begin(&dentry->d_seq); if (dentry->d_parent != parent) continue; if (d_unhashed(dentry)) continue; tlen = dentry->d_name.len; tname = dentry->d_name.name; i = dentry->d_inode; prefetch(tname); /* * This seqcount check is required to ensure name and * len are loaded atomically, so as not to walk off the * edge of memory when walking. If we could load this * atomically some other way, we could drop this check. */ if (read_seqcount_retry(&dentry->d_seq, seq)) goto seqretry; if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) { if (parent->d_op->d_compare(parent, *inode, dentry, i, tlen, tname, name)) continue; } else { if (dentry_cmp(tname, tlen, str, len)) continue; } /* * No extra seqcount check is required after the name * compare. The caller must perform a seqcount check in * order to do anything useful with the returned dentry * anyway. */ *seqp = seq; *inode = i; return dentry; } return NULL; } /** * d_lookup - search for a dentry * @parent: parent dentry * @name: qstr of name we wish to find * Returns: dentry, or NULL * * d_lookup searches the children of the parent dentry for the name in * question. If the dentry is found its reference count is incremented and the * dentry is returned. The caller must use dput to free the entry when it has * finished using it. %NULL is returned if the dentry does not exist. */ struct dentry *d_lookup(struct dentry *parent, struct qstr *name) { struct dentry *dentry; unsigned seq; do { seq = read_seqbegin(&rename_lock); dentry = __d_lookup(parent, name); if (dentry) break; } while (read_seqretry(&rename_lock, seq)); return dentry; } EXPORT_SYMBOL(d_lookup); /** * __d_lookup - search for a dentry (racy) * @parent: parent dentry * @name: qstr of name we wish to find * Returns: dentry, or NULL * * __d_lookup is like d_lookup, however it may (rarely) return a * false-negative result due to unrelated rename activity. * * __d_lookup is slightly faster by avoiding rename_lock read seqlock, * however it must be used carefully, eg. with a following d_lookup in * the case of failure. * * __d_lookup callers must be commented. */ struct dentry *__d_lookup(struct dentry *parent, struct qstr *name) { unsigned int len = name->len; unsigned int hash = name->hash; const unsigned char *str = name->name; struct hlist_bl_head *b = d_hash(parent, hash); struct hlist_bl_node *node; struct dentry *found = NULL; struct dentry *dentry; /* * Note: There is significant duplication with __d_lookup_rcu which is * required to prevent single threaded performance regressions * especially on architectures where smp_rmb (in seqcounts) are costly. * Keep the two functions in sync. */ /* * The hash list is protected using RCU. * * Take d_lock when comparing a candidate dentry, to avoid races * with d_move(). * * It is possible that concurrent renames can mess up our list * walk here and result in missing our dentry, resulting in the * false-negative result. d_lookup() protects against concurrent * renames using rename_lock seqlock. * * See Documentation/filesystems/path-lookup.txt for more details. 
*/ rcu_read_lock(); hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) { const char *tname; int tlen; if (dentry->d_name.hash != hash) continue; spin_lock(&dentry->d_lock); if (dentry->d_parent != parent) goto next; if (d_unhashed(dentry)) goto next; /* * It is safe to compare names since d_move() cannot * change the qstr (protected by d_lock). */ tlen = dentry->d_name.len; tname = dentry->d_name.name; if (parent->d_flags & DCACHE_OP_COMPARE) { if (parent->d_op->d_compare(parent, parent->d_inode, dentry, dentry->d_inode, tlen, tname, name)) goto next; } else { if (dentry_cmp(tname, tlen, str, len)) goto next; } dentry->d_count++; found = dentry; spin_unlock(&dentry->d_lock); break; next: spin_unlock(&dentry->d_lock); } rcu_read_unlock(); return found; } /** * d_hash_and_lookup - hash the qstr then search for a dentry * @dir: Directory to search in * @name: qstr of name we wish to find * * On hash failure or on lookup failure NULL is returned. */ struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name) { struct dentry *dentry = NULL; /* * Check for a fs-specific hash function. Note that we must * calculate the standard hash first, as the d_op->d_hash() * routine may choose to leave the hash value unchanged. */ name->hash = full_name_hash(name->name, name->len); if (dir->d_flags & DCACHE_OP_HASH) { if (dir->d_op->d_hash(dir, dir->d_inode, name) < 0) goto out; } dentry = d_lookup(dir, name); out: return dentry; } /** * d_validate - verify dentry provided from insecure source (deprecated) * @dentry: The dentry alleged to be valid child of @dparent * @dparent: The parent dentry (known to be valid) * * An insecure source has sent us a dentry, here we verify it and dget() it. * This is used by ncpfs in its readdir implementation. * Zero is returned if the dentry is invalid. * * This function is slow for big directories, and deprecated; do not use it. */ int d_validate(struct dentry *dentry, struct dentry *dparent) { struct dentry *child; spin_lock(&dparent->d_lock); list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) { if (dentry == child) { spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); __dget_dlock(dentry); spin_unlock(&dentry->d_lock); spin_unlock(&dparent->d_lock); return 1; } } spin_unlock(&dparent->d_lock); return 0; } EXPORT_SYMBOL(d_validate); /* * When a file is deleted, we have two options: * - turn this dentry into a negative dentry * - unhash this dentry and free it. * * Usually, we want to just turn this into * a negative dentry, but if anybody else is * currently using the dentry or the inode * we can't do that and we fall back on removing * it from the hash queues and waiting for * it to be deleted later when it has no users */ /** * d_delete - delete a dentry * @dentry: The dentry to delete * * Turn the dentry into a negative dentry if possible, otherwise * remove it from the hash queues so it can be deleted later */ void d_delete(struct dentry * dentry) { struct inode *inode; int isdir = 0; /* * Are we the only user?
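 *
 * If so we can turn the dentry negative right here; when the inode lock
 * cannot be taken without inverting the usual lock order we drop d_lock,
 * cpu_relax() and retry from the "again" label below.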
*/ again: spin_lock(&dentry->d_lock); inode = dentry->d_inode; isdir = S_ISDIR(inode->i_mode); if (dentry->d_count == 1) { if (inode && !spin_trylock(&inode->i_lock)) { spin_unlock(&dentry->d_lock); cpu_relax(); goto again; } dentry->d_flags &= ~DCACHE_CANT_MOUNT; dentry_unlink_inode(dentry); fsnotify_nameremove(dentry, isdir); return; } if (!d_unhashed(dentry)) __d_drop(dentry); spin_unlock(&dentry->d_lock); fsnotify_nameremove(dentry, isdir); } EXPORT_SYMBOL(d_delete); static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b) { BUG_ON(!d_unhashed(entry)); hlist_bl_lock(b); entry->d_flags |= DCACHE_RCUACCESS; hlist_bl_add_head_rcu(&entry->d_hash, b); hlist_bl_unlock(b); } static void _d_rehash(struct dentry * entry) { __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash)); } /** * d_rehash - add an entry back to the hash * @entry: dentry to add to the hash * * Adds a dentry to the hash according to its name. */ void d_rehash(struct dentry * entry) { spin_lock(&entry->d_lock); _d_rehash(entry); spin_unlock(&entry->d_lock); } EXPORT_SYMBOL(d_rehash); /** * dentry_update_name_case - update case insensitive dentry with a new name * @dentry: dentry to be updated * @name: new name * * Update a case insensitive dentry with new case of name. * * dentry must have been returned by d_lookup with name @name. Old and new * name lengths must match (ie. no d_compare which allows mismatched name * lengths). * * Parent inode i_mutex must be held over d_lookup and into this call (to * keep renames and concurrent inserts, and readdir(2) away). */ void dentry_update_name_case(struct dentry *dentry, struct qstr *name) { BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex)); BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */ spin_lock(&dentry->d_lock); write_seqcount_begin(&dentry->d_seq); memcpy((unsigned char *)dentry->d_name.name, name->name, name->len); write_seqcount_end(&dentry->d_seq); spin_unlock(&dentry->d_lock); } EXPORT_SYMBOL(dentry_update_name_case); static void switch_names(struct dentry *dentry, struct dentry *target) { if (dname_external(target)) { if (dname_external(dentry)) { /* * Both external: swap the pointers */ swap(target->d_name.name, dentry->d_name.name); } else { /* * dentry:internal, target:external. Steal target's * storage and make target internal. */ memcpy(target->d_iname, dentry->d_name.name, dentry->d_name.len + 1); dentry->d_name.name = target->d_name.name; target->d_name.name = target->d_iname; } } else { if (dname_external(dentry)) { /* * dentry:external, target:internal. Give dentry's * storage to target and make dentry internal */ memcpy(dentry->d_iname, target->d_name.name, target->d_name.len + 1); target->d_name.name = dentry->d_name.name; dentry->d_name.name = dentry->d_iname; } else { /* * Both are internal. Just copy target to dentry */ memcpy(dentry->d_iname, target->d_name.name, target->d_name.len + 1); dentry->d_name.len = target->d_name.len; return; } } swap(dentry->d_name.len, target->d_name.len); } static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target) { /* * XXXX: do we really need to take target->d_lock? 
*/ if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent) spin_lock(&target->d_parent->d_lock); else { if (d_ancestor(dentry->d_parent, target->d_parent)) { spin_lock(&dentry->d_parent->d_lock); spin_lock_nested(&target->d_parent->d_lock, DENTRY_D_LOCK_NESTED); } else { spin_lock(&target->d_parent->d_lock); spin_lock_nested(&dentry->d_parent->d_lock, DENTRY_D_LOCK_NESTED); } } if (target < dentry) { spin_lock_nested(&target->d_lock, 2); spin_lock_nested(&dentry->d_lock, 3); } else { spin_lock_nested(&dentry->d_lock, 2); spin_lock_nested(&target->d_lock, 3); } } static void dentry_unlock_parents_for_move(struct dentry *dentry, struct dentry *target) { if (target->d_parent != dentry->d_parent) spin_unlock(&dentry->d_parent->d_lock); if (target->d_parent != target) spin_unlock(&target->d_parent->d_lock); } /* * When switching names, the actual string doesn't strictly have to * be preserved in the target - because we're dropping the target * anyway. As such, we can just do a simple memcpy() to copy over * the new name before we switch. * * Note that we have to be a lot more careful about getting the hash * switched - we have to switch the hash value properly even if it * then no longer matches the actual (corrupted) string of the target. * The hash value has to match the hash queue that the dentry is on.. */ /* * __d_move - move a dentry * @dentry: entry to move * @target: new dentry * * Update the dcache to reflect the move of a file name. Negative * dcache entries should not be moved in this way. Caller must hold * rename_lock, the i_mutex of the source and target directories, * and the sb->s_vfs_rename_mutex if they differ. See lock_rename(). */ static void __d_move(struct dentry * dentry, struct dentry * target) { if (!dentry->d_inode) printk(KERN_WARNING "VFS: moving negative dcache entry\n"); BUG_ON(d_ancestor(dentry, target)); BUG_ON(d_ancestor(target, dentry)); dentry_lock_for_move(dentry, target); write_seqcount_begin(&dentry->d_seq); write_seqcount_begin(&target->d_seq); /* __d_drop does write_seqcount_barrier, but they're OK to nest. */ /* * Move the dentry to the target hash queue. Don't bother checking * for the same hash queue because of how unlikely it is. */ __d_drop(dentry); __d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash)); /* Unhash the target: dput() will then get rid of it */ __d_drop(target); list_del(&dentry->d_u.d_child); list_del(&target->d_u.d_child); /* Switch the names.. */ switch_names(dentry, target); swap(dentry->d_name.hash, target->d_name.hash); /* ... and switch the parents */ if (IS_ROOT(dentry)) { dentry->d_parent = target->d_parent; target->d_parent = target; INIT_LIST_HEAD(&target->d_u.d_child); } else { swap(dentry->d_parent, target->d_parent); /* And add them back to the (new) parent lists */ list_add(&target->d_u.d_child, &target->d_parent->d_subdirs); } list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); write_seqcount_end(&target->d_seq); write_seqcount_end(&dentry->d_seq); dentry_unlock_parents_for_move(dentry, target); spin_unlock(&target->d_lock); fsnotify_d_move(dentry); spin_unlock(&dentry->d_lock); } /* * d_move - move a dentry * @dentry: entry to move * @target: new dentry * * Update the dcache to reflect the move of a file name. Negative * dcache entries should not be moved in this way. See the locking * requirements for __d_move. 
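 *
 * Sketch of the usual caller context (names illustrative): a rename
 * implementation typically finishes with
 *
 *	d_move(old_dentry, new_dentry);
 *
 * while the VFS still holds the locks that lock_rename() took before
 * ->rename() was invoked.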
*/ void d_move(struct dentry *dentry, struct dentry *target) { write_seqlock(&rename_lock); __d_move(dentry, target); write_sequnlock(&rename_lock); } EXPORT_SYMBOL(d_move); /** * d_ancestor - search for an ancestor * @p1: ancestor dentry * @p2: child dentry * * Returns the ancestor dentry of p2 which is a child of p1, if p1 is * an ancestor of p2, else NULL. */ struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2) { struct dentry *p; for (p = p2; !IS_ROOT(p); p = p->d_parent) { if (p->d_parent == p1) return p; } return NULL; } /* * This helper attempts to cope with remotely renamed directories * * It assumes that the caller is already holding * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock * * Note: If ever the locking in lock_rename() changes, then please * remember to update this too... */ static struct dentry *__d_unalias(struct inode *inode, struct dentry *dentry, struct dentry *alias) { struct mutex *m1 = NULL, *m2 = NULL; struct dentry *ret; /* If alias and dentry share a parent, then no extra locks required */ if (alias->d_parent == dentry->d_parent) goto out_unalias; /* See lock_rename() */ ret = ERR_PTR(-EBUSY); if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex)) goto out_err; m1 = &dentry->d_sb->s_vfs_rename_mutex; if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex)) goto out_err; m2 = &alias->d_parent->d_inode->i_mutex; out_unalias: __d_move(alias, dentry); ret = alias; out_err: spin_unlock(&inode->i_lock); if (m2) mutex_unlock(m2); if (m1) mutex_unlock(m1); return ret; } /* * Prepare an anonymous dentry for life in the superblock's dentry tree as a * named dentry in place of the dentry to be replaced. * returns with anon->d_lock held! */ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon) { struct dentry *dparent, *aparent; dentry_lock_for_move(anon, dentry); write_seqcount_begin(&dentry->d_seq); write_seqcount_begin(&anon->d_seq); dparent = dentry->d_parent; aparent = anon->d_parent; switch_names(dentry, anon); swap(dentry->d_name.hash, anon->d_name.hash); dentry->d_parent = (aparent == anon) ? dentry : aparent; list_del(&dentry->d_u.d_child); if (!IS_ROOT(dentry)) list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); else INIT_LIST_HEAD(&dentry->d_u.d_child); anon->d_parent = (dparent == dentry) ? anon : dparent; list_del(&anon->d_u.d_child); if (!IS_ROOT(anon)) list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs); else INIT_LIST_HEAD(&anon->d_u.d_child); write_seqcount_end(&dentry->d_seq); write_seqcount_end(&anon->d_seq); dentry_unlock_parents_for_move(anon, dentry); spin_unlock(&dentry->d_lock); /* anon->d_lock still locked, returns locked */ anon->d_flags &= ~DCACHE_DISCONNECTED; } /** * d_materialise_unique - introduce an inode into the tree * @dentry: candidate dentry * @inode: inode to bind to the dentry, to which aliases may be attached * * Introduces a dentry into the tree, substituting an extant disconnected * root directory alias in its place if there is one. Caller must hold the * i_mutex of the parent directory. */ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) { struct dentry *actual; BUG_ON(!d_unhashed(dentry)); if (!inode) { actual = dentry; __d_instantiate(dentry, NULL); d_rehash(actual); goto out_nolock; } spin_lock(&inode->i_lock); if (S_ISDIR(inode->i_mode)) { struct dentry *alias; /* Does an aliased dentry already exist?
*/ alias = __d_find_alias(inode, 0); if (alias) { actual = alias; write_seqlock(&rename_lock); if (d_ancestor(alias, dentry)) { /* Check for loops */ actual = ERR_PTR(-ELOOP); spin_unlock(&inode->i_lock); } else if (IS_ROOT(alias)) { /* Is this an anonymous mountpoint that we * could splice into our tree? */ __d_materialise_dentry(dentry, alias); write_sequnlock(&rename_lock); __d_drop(alias); goto found; } else { /* Nope, but we must(!) avoid directory * aliasing. This drops inode->i_lock */ actual = __d_unalias(inode, dentry, alias); } write_sequnlock(&rename_lock); if (IS_ERR(actual)) { if (PTR_ERR(actual) == -ELOOP) pr_warn_ratelimited( "VFS: Lookup of '%s' in %s %s" " would have caused loop\n", dentry->d_name.name, inode->i_sb->s_type->name, inode->i_sb->s_id); dput(alias); } goto out_nolock; } } /* Add a unique reference */ actual = __d_instantiate_unique(dentry, inode); if (!actual) actual = dentry; else BUG_ON(!d_unhashed(actual)); spin_lock(&actual->d_lock); found: _d_rehash(actual); spin_unlock(&actual->d_lock); spin_unlock(&inode->i_lock); out_nolock: if (actual == dentry) { security_d_instantiate(dentry, inode); return NULL; } iput(inode); return actual; } EXPORT_SYMBOL_GPL(d_materialise_unique); static int prepend(char **buffer, int *buflen, const char *str, int namelen) { *buflen -= namelen; if (*buflen < 0) return -ENAMETOOLONG; *buffer -= namelen; memcpy(*buffer, str, namelen); return 0; } static int prepend_name(char **buffer, int *buflen, struct qstr *name) { return prepend(buffer, buflen, name->name, name->len); } /** * prepend_path - Prepend path string to a buffer * @path: the dentry/vfsmount to report * @root: root vfsmnt/dentry * @buffer: pointer to the end of the buffer * @buflen: pointer to buffer length * * Caller holds the rename_lock. */ static int prepend_path(const struct path *path, const struct path *root, char **buffer, int *buflen) { struct dentry *dentry = path->dentry; struct vfsmount *vfsmnt = path->mnt; struct mount *mnt = real_mount(vfsmnt); bool slash = false; int error = 0; br_read_lock(vfsmount_lock); while (dentry != root->dentry || vfsmnt != root->mnt) { struct dentry * parent; if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) { /* Global root? */ if (!mnt_has_parent(mnt)) goto global_root; dentry = mnt->mnt_mountpoint; mnt = mnt->mnt_parent; vfsmnt = &mnt->mnt; continue; } parent = dentry->d_parent; prefetch(parent); spin_lock(&dentry->d_lock); error = prepend_name(buffer, buflen, &dentry->d_name); spin_unlock(&dentry->d_lock); if (!error) error = prepend(buffer, buflen, "/", 1); if (error) break; slash = true; dentry = parent; } if (!error && !slash) error = prepend(buffer, buflen, "/", 1); out: br_read_unlock(vfsmount_lock); return error; global_root: /* * Filesystems needing to implement special "root names" * should do so with ->d_dname() */ if (IS_ROOT(dentry) && (dentry->d_name.len != 1 || dentry->d_name.name[0] != '/')) { WARN(1, "Root dentry has weird name <%.*s>\n", (int) dentry->d_name.len, dentry->d_name.name); } if (!slash) error = prepend(buffer, buflen, "/", 1); if (!error) error = real_mount(vfsmnt)->mnt_ns ? 1 : 2; goto out; } /** * __d_path - return the path of a dentry * @path: the dentry/vfsmount to report * @root: root vfsmnt/dentry * @buf: buffer to return value in * @buflen: buffer length * * Convert a dentry into an ASCII path name. * * Returns a pointer into the buffer or an error code if the * path was too long. * * "buflen" should be positive. * * If the path is not reachable from the supplied root, return %NULL. 
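 *
 * A minimal calling sketch (hypothetical caller; freeing the page
 * afterwards is elided):
 *
 *	char *page = (char *)__get_free_page(GFP_KERNEL);
 *	char *p = __d_path(&file->f_path, &root, page, PAGE_SIZE);
 *	if (IS_ERR(p))		// path did not fit in the buffer
 *		return PTR_ERR(p);
 *	if (!p)			// not reachable from @root
 *		// ... fall back or report ...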
*/ char *__d_path(const struct path *path, const struct path *root, char *buf, int buflen) { char *res = buf + buflen; int error; prepend(&res, &buflen, "\0", 1); write_seqlock(&rename_lock); error = prepend_path(path, root, &res, &buflen); write_sequnlock(&rename_lock); if (error < 0) return ERR_PTR(error); if (error > 0) return NULL; return res; } char *d_absolute_path(const struct path *path, char *buf, int buflen) { struct path root = {}; char *res = buf + buflen; int error; prepend(&res, &buflen, "\0", 1); write_seqlock(&rename_lock); error = prepend_path(path, &root, &res, &buflen); write_sequnlock(&rename_lock); if (error > 1) error = -EINVAL; if (error < 0) return ERR_PTR(error); return res; } /* * same as __d_path but appends "(deleted)" for unlinked files. */ static int path_with_deleted(const struct path *path, const struct path *root, char **buf, int *buflen) { prepend(buf, buflen, "\0", 1); if (d_unlinked(path->dentry)) { int error = prepend(buf, buflen, " (deleted)", 10); if (error) return error; } return prepend_path(path, root, buf, buflen); } static int prepend_unreachable(char **buffer, int *buflen) { return prepend(buffer, buflen, "(unreachable)", 13); } /** * d_path - return the path of a dentry * @path: path to report * @buf: buffer to return value in * @buflen: buffer length * * Convert a dentry into an ASCII path name. If the entry has been deleted * the string " (deleted)" is appended. Note that this is ambiguous. * * Returns a pointer into the buffer or an error code if the path was * too long. Note: Callers should use the returned pointer, not the passed * in buffer, to use the name! The implementation often starts at an offset * into the buffer, and may leave 0 bytes at the start. * * "buflen" should be positive. */ char *d_path(const struct path *path, char *buf, int buflen) { char *res = buf + buflen; struct path root; int error; /* * We have various synthetic filesystems that never get mounted. On * these filesystems dentries are never used for lookup purposes, and * thus don't need to be hashed. They also don't need a name until a * user wants to identify the object in /proc/pid/fd/. The little hack * below allows us to generate a name for these objects on demand: */ if (path->dentry->d_op && path->dentry->d_op->d_dname) return path->dentry->d_op->d_dname(path->dentry, buf, buflen); get_fs_root(current->fs, &root); write_seqlock(&rename_lock); error = path_with_deleted(path, &root, &res, &buflen); if (error < 0) res = ERR_PTR(error); write_sequnlock(&rename_lock); path_put(&root); return res; } EXPORT_SYMBOL(d_path); /** * d_path_with_unreachable - return the path of a dentry * @path: path to report * @buf: buffer to return value in * @buflen: buffer length * * The difference from d_path() is that this prepends "(unreachable)" * to paths which are unreachable from the current process' root. */ char *d_path_with_unreachable(const struct path *path, char *buf, int buflen) { char *res = buf + buflen; struct path root; int error; if (path->dentry->d_op && path->dentry->d_op->d_dname) return path->dentry->d_op->d_dname(path->dentry, buf, buflen); get_fs_root(current->fs, &root); write_seqlock(&rename_lock); error = path_with_deleted(path, &root, &res, &buflen); if (error > 0) error = prepend_unreachable(&res, &buflen); write_sequnlock(&rename_lock); path_put(&root); if (error) res = ERR_PTR(error); return res; } /* * Helper function for dentry_operations.d_dname() members */ char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen, const char *fmt, ...) 
{ va_list args; char temp[64]; int sz; va_start(args, fmt); sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1; va_end(args); if (sz > sizeof(temp) || sz > buflen) return ERR_PTR(-ENAMETOOLONG); buffer += buflen - sz; return memcpy(buffer, temp, sz); } /* * Write full pathname from the root of the filesystem into the buffer. */ static char *__dentry_path(struct dentry *dentry, char *buf, int buflen) { char *end = buf + buflen; char *retval; prepend(&end, &buflen, "\0", 1); if (buflen < 1) goto Elong; /* Get '/' right */ retval = end-1; *retval = '/'; while (!IS_ROOT(dentry)) { struct dentry *parent = dentry->d_parent; int error; prefetch(parent); spin_lock(&dentry->d_lock); error = prepend_name(&end, &buflen, &dentry->d_name); spin_unlock(&dentry->d_lock); if (error != 0 || prepend(&end, &buflen, "/", 1) != 0) goto Elong; retval = end; dentry = parent; } return retval; Elong: return ERR_PTR(-ENAMETOOLONG); } char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen) { char *retval; write_seqlock(&rename_lock); retval = __dentry_path(dentry, buf, buflen); write_sequnlock(&rename_lock); return retval; } EXPORT_SYMBOL(dentry_path_raw); char *dentry_path(struct dentry *dentry, char *buf, int buflen) { char *p = NULL; char *retval; write_seqlock(&rename_lock); if (d_unlinked(dentry)) { p = buf + buflen; if (prepend(&p, &buflen, "//deleted", 10) != 0) goto Elong; buflen++; } retval = __dentry_path(dentry, buf, buflen); write_sequnlock(&rename_lock); if (!IS_ERR(retval) && p) *p = '/'; /* restore '/' overridden with '\0' */ return retval; Elong: return ERR_PTR(-ENAMETOOLONG); } /* * NOTE! The user-level library version returns a * character pointer. The kernel system call just * returns the length of the buffer filled (which * includes the ending '\0' character), or a negative * error value. So libc would do something like * * char *getcwd(char * buf, size_t size) * { * int retval; * * retval = sys_getcwd(buf, size); * if (retval >= 0) * return buf; * errno = -retval; * return NULL; * } */ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size) { int error; struct path pwd, root; char *page = (char *) __get_free_page(GFP_USER); if (!page) return -ENOMEM; get_fs_root_and_pwd(current->fs, &root, &pwd); error = -ENOENT; write_seqlock(&rename_lock); if (!d_unlinked(pwd.dentry)) { unsigned long len; char *cwd = page + PAGE_SIZE; int buflen = PAGE_SIZE; prepend(&cwd, &buflen, "\0", 1); error = prepend_path(&pwd, &root, &cwd, &buflen); write_sequnlock(&rename_lock); if (error < 0) goto out; /* Unreachable from current root */ if (error > 0) { error = prepend_unreachable(&cwd, &buflen); if (error) goto out; } error = -ERANGE; len = PAGE_SIZE + page - cwd; if (len <= size) { error = len; if (copy_to_user(buf, cwd, len)) error = -EFAULT; } } else { write_sequnlock(&rename_lock); } out: path_put(&pwd); path_put(&root); free_page((unsigned long) page); return error; } /* * Test whether new_dentry is a subdirectory of old_dentry. * * Trivially implemented using the dcache structure */ /** * is_subdir - is new dentry a subdirectory of old_dentry * @new_dentry: new dentry * @old_dentry: old dentry * * Returns 1 if new_dentry is a subdirectory of old_dentry (at any depth). * Returns 0 otherwise.
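 *
 * Illustrative check (hypothetical caller), e.g. deciding whether a dentry
 * lives below a given root:
 *
 *	if (is_subdir(dentry, root_dentry))
 *		// ... dentry is inside root_dentry's subtree ...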
* Caller must ensure that "new_dentry" is pinned before calling is_subdir() */ int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry) { int result; unsigned seq; if (new_dentry == old_dentry) return 1; do { /* for restarting inner loop in case of seq retry */ seq = read_seqbegin(&rename_lock); /* * Need rcu_read_lock() to protect against d_parent changing * under us due to a concurrent d_move() */ rcu_read_lock(); if (d_ancestor(old_dentry, new_dentry)) result = 1; else result = 0; rcu_read_unlock(); } while (read_seqretry(&rename_lock, seq)); return result; } void d_genocide(struct dentry *root) { struct dentry *this_parent; struct list_head *next; unsigned seq; int locked = 0; seq = read_seqbegin(&rename_lock); again: this_parent = root; spin_lock(&this_parent->d_lock); repeat: next = this_parent->d_subdirs.next; resume: while (next != &this_parent->d_subdirs) { struct list_head *tmp = next; struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); next = tmp->next; spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); if (d_unhashed(dentry) || !dentry->d_inode) { spin_unlock(&dentry->d_lock); continue; } if (!list_empty(&dentry->d_subdirs)) { spin_unlock(&this_parent->d_lock); spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_); this_parent = dentry; spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); goto repeat; } if (!(dentry->d_flags & DCACHE_GENOCIDE)) { dentry->d_flags |= DCACHE_GENOCIDE; dentry->d_count--; } spin_unlock(&dentry->d_lock); } if (this_parent != root) { struct dentry *child = this_parent; if (!(this_parent->d_flags & DCACHE_GENOCIDE)) { this_parent->d_flags |= DCACHE_GENOCIDE; this_parent->d_count--; } this_parent = try_to_ascend(this_parent, locked, seq); if (!this_parent) goto rename_retry; next = child->d_u.d_child.next; goto resume; } spin_unlock(&this_parent->d_lock); if (!locked && read_seqretry(&rename_lock, seq)) goto rename_retry; if (locked) write_sequnlock(&rename_lock); return; rename_retry: locked = 1; write_seqlock(&rename_lock); goto again; } /** * find_inode_number - check for dentry with name * @dir: directory to check * @name: Name to find. * * Check whether a dentry already exists for the given name, * and return the inode number if it has an inode. Otherwise * 0 is returned. * * This routine is used to post-process directory listings for * filesystems using synthetic inode numbers, and is necessary * to keep getcwd() working. */ ino_t find_inode_number(struct dentry *dir, struct qstr *name) { struct dentry * dentry; ino_t ino = 0; dentry = d_hash_and_lookup(dir, name); if (dentry) { if (dentry->d_inode) ino = dentry->d_inode->i_ino; dput(dentry); } return ino; } EXPORT_SYMBOL(find_inode_number); static __initdata unsigned long dhash_entries; static int __init set_dhash_entries(char *str) { if (!str) return 0; dhash_entries = simple_strtoul(str, &str, 0); return 1; } __setup("dhash_entries=", set_dhash_entries); static void __init dcache_init_early(void) { unsigned int loop; /* If hashes are distributed across NUMA nodes, defer * hash allocation until vmalloc space is available.
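 *
 * The table size may also be pinned from the kernel command line via the
 * "dhash_entries=" parameter parsed by set_dhash_entries() above, e.g.
 * booting with dhash_entries=65536.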
*/ if (hashdist) return; dentry_hashtable = alloc_large_system_hash("Dentry cache", sizeof(struct hlist_bl_head), dhash_entries, 13, HASH_EARLY, &d_hash_shift, &d_hash_mask, 0); for (loop = 0; loop < (1U << d_hash_shift); loop++) INIT_HLIST_BL_HEAD(dentry_hashtable + loop); } static void __init dcache_init(void) { unsigned int loop; /* * A constructor could be added for stable state like the lists, * but it is probably not worth it because of the cache nature * of the dcache. */ dentry_cache = KMEM_CACHE(dentry, SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD); /* Hash may have been set up in dcache_init_early */ if (!hashdist) return; dentry_hashtable = alloc_large_system_hash("Dentry cache", sizeof(struct hlist_bl_head), dhash_entries, 13, 0, &d_hash_shift, &d_hash_mask, 0); for (loop = 0; loop < (1U << d_hash_shift); loop++) INIT_HLIST_BL_HEAD(dentry_hashtable + loop); } /* SLAB cache for __getname() consumers */ struct kmem_cache *names_cachep __read_mostly; EXPORT_SYMBOL(names_cachep); EXPORT_SYMBOL(d_genocide); void __init vfs_caches_init_early(void) { dcache_init_early(); inode_init_early(); } void __init vfs_caches_init(unsigned long mempages) { unsigned long reserve; /* Base hash sizes on available memory, with a reserve equal to 150% of current kernel size */ reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1); mempages -= reserve; names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); dcache_init(); inode_init(); files_init(mempages); mnt_init(); bdev_cache_init(); chrdev_init(); }
gpl-2.0
lvming/linux
arch/sparc/kernel/power.c
3100
1531
/* power.c: Power management driver. * * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net) */ #include <linux/kernel.h> #include <linux/export.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/reboot.h> #include <linux/of_device.h> #include <asm/prom.h> #include <asm/io.h> static void __iomem *power_reg; static irqreturn_t power_handler(int irq, void *dev_id) { orderly_poweroff(true); /* FIXME: Check registers for status... */ return IRQ_HANDLED; } static int has_button_interrupt(unsigned int irq, struct device_node *dp) { if (irq == 0xffffffff) return 0; if (!of_find_property(dp, "button", NULL)) return 0; return 1; } static int power_probe(struct platform_device *op) { struct resource *res = &op->resource[0]; unsigned int irq = op->archdata.irqs[0]; power_reg = of_ioremap(res, 0, 0x4, "power"); printk(KERN_INFO "%s: Control reg at %llx\n", op->dev.of_node->name, res->start); if (has_button_interrupt(irq, op->dev.of_node)) { if (request_irq(irq, power_handler, 0, "power", NULL) < 0) printk(KERN_ERR "power: Cannot setup IRQ handler.\n"); } return 0; } static const struct of_device_id power_match[] = { { .name = "power", }, {}, }; static struct platform_driver power_driver = { .probe = power_probe, .driver = { .name = "power", .owner = THIS_MODULE, .of_match_table = power_match, }, }; static int __init power_init(void) { return platform_driver_register(&power_driver); } device_initcall(power_init);
gpl-2.0
voidz777/android_kernel_htc_shooterk
drivers/power/jz4740-battery.c
3100
11527
/* * Battery measurement code for Ingenic JZ SOC. * * Copyright (C) 2009 Jiejing Zhang <kzjeef@gmail.com> * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de> * * based on tosa_battery.c * * Copyright (C) 2008 Marek Vasut <marek.vasut@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/mfd/core.h> #include <linux/power_supply.h> #include <linux/power/jz4740-battery.h> #include <linux/jz4740-adc.h> struct jz_battery { struct jz_battery_platform_data *pdata; struct platform_device *pdev; struct resource *mem; void __iomem *base; int irq; int charge_irq; const struct mfd_cell *cell; int status; long voltage; struct completion read_completion; struct power_supply battery; struct delayed_work work; struct mutex lock; }; static inline struct jz_battery *psy_to_jz_battery(struct power_supply *psy) { return container_of(psy, struct jz_battery, battery); } static irqreturn_t jz_battery_irq_handler(int irq, void *devid) { struct jz_battery *battery = devid; complete(&battery->read_completion); return IRQ_HANDLED; } static long jz_battery_read_voltage(struct jz_battery *battery) { unsigned long t; unsigned long val; long voltage; mutex_lock(&battery->lock); INIT_COMPLETION(battery->read_completion); enable_irq(battery->irq); battery->cell->enable(battery->pdev); t = wait_for_completion_interruptible_timeout(&battery->read_completion, HZ); if (t > 0) { val = readw(battery->base) & 0xfff; if (battery->pdata->info.voltage_max_design <= 2500000) val = (val * 78125UL) >> 7UL; else val = ((val * 924375UL) >> 9UL) + 33000; voltage = (long)val; } else { voltage = t ? 
t : -ETIMEDOUT; } battery->cell->disable(battery->pdev); disable_irq(battery->irq); mutex_unlock(&battery->lock); return voltage; } static int jz_battery_get_capacity(struct power_supply *psy) { struct jz_battery *jz_battery = psy_to_jz_battery(psy); struct power_supply_info *info = &jz_battery->pdata->info; long voltage; int ret; int voltage_span; voltage = jz_battery_read_voltage(jz_battery); if (voltage < 0) return voltage; voltage_span = info->voltage_max_design - info->voltage_min_design; ret = ((voltage - info->voltage_min_design) * 100) / voltage_span; if (ret > 100) ret = 100; else if (ret < 0) ret = 0; return ret; } static int jz_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct jz_battery *jz_battery = psy_to_jz_battery(psy); struct power_supply_info *info = &jz_battery->pdata->info; long voltage; switch (psp) { case POWER_SUPPLY_PROP_STATUS: val->intval = jz_battery->status; break; case POWER_SUPPLY_PROP_TECHNOLOGY: val->intval = jz_battery->pdata->info.technology; break; case POWER_SUPPLY_PROP_HEALTH: voltage = jz_battery_read_voltage(jz_battery); if (voltage < info->voltage_min_design) val->intval = POWER_SUPPLY_HEALTH_DEAD; else val->intval = POWER_SUPPLY_HEALTH_GOOD; break; case POWER_SUPPLY_PROP_CAPACITY: val->intval = jz_battery_get_capacity(psy); break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: val->intval = jz_battery_read_voltage(jz_battery); if (val->intval < 0) return val->intval; break; case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: val->intval = info->voltage_max_design; break; case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: val->intval = info->voltage_min_design; break; case POWER_SUPPLY_PROP_PRESENT: val->intval = 1; break; default: return -EINVAL; } return 0; } static void jz_battery_external_power_changed(struct power_supply *psy) { struct jz_battery *jz_battery = psy_to_jz_battery(psy); cancel_delayed_work(&jz_battery->work); schedule_delayed_work(&jz_battery->work, 0); } static irqreturn_t jz_battery_charge_irq(int irq, void *data) { struct jz_battery *jz_battery = data; cancel_delayed_work(&jz_battery->work); schedule_delayed_work(&jz_battery->work, 0); return IRQ_HANDLED; } static void jz_battery_update(struct jz_battery *jz_battery) { int status; long voltage; bool has_changed = false; int is_charging; if (gpio_is_valid(jz_battery->pdata->gpio_charge)) { is_charging = gpio_get_value(jz_battery->pdata->gpio_charge); is_charging ^= jz_battery->pdata->gpio_charge_active_low; if (is_charging) status = POWER_SUPPLY_STATUS_CHARGING; else status = POWER_SUPPLY_STATUS_NOT_CHARGING; if (status != jz_battery->status) { jz_battery->status = status; has_changed = true; } } voltage = jz_battery_read_voltage(jz_battery); if (abs(voltage - jz_battery->voltage) >= 50000) { /* notify only on changes of at least 50 mV */ jz_battery->voltage = voltage; has_changed = true; } if (has_changed) power_supply_changed(&jz_battery->battery); } static enum power_supply_property jz_battery_properties[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_TECHNOLOGY, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, POWER_SUPPLY_PROP_PRESENT, }; static void jz_battery_work(struct work_struct *work) { /* Too small an interval will increase system workload */ const int interval = HZ * 30; struct jz_battery *jz_battery = container_of(work, struct jz_battery, work.work); jz_battery_update(jz_battery); schedule_delayed_work(&jz_battery->work, interval); } static int __devinit
jz_battery_probe(struct platform_device *pdev) { int ret = 0; struct jz_battery_platform_data *pdata = pdev->dev.parent->platform_data; struct jz_battery *jz_battery; struct power_supply *battery; if (!pdata) { dev_err(&pdev->dev, "No platform_data supplied\n"); return -ENXIO; } jz_battery = kzalloc(sizeof(*jz_battery), GFP_KERNEL); if (!jz_battery) { dev_err(&pdev->dev, "Failed to allocate driver structure\n"); return -ENOMEM; } jz_battery->cell = mfd_get_cell(pdev); jz_battery->irq = platform_get_irq(pdev, 0); if (jz_battery->irq < 0) { ret = jz_battery->irq; dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret); goto err_free; } jz_battery->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!jz_battery->mem) { ret = -ENOENT; dev_err(&pdev->dev, "Failed to get platform mmio resource\n"); goto err_free; } jz_battery->mem = request_mem_region(jz_battery->mem->start, resource_size(jz_battery->mem), pdev->name); if (!jz_battery->mem) { ret = -EBUSY; dev_err(&pdev->dev, "Failed to request mmio memory region\n"); goto err_free; } jz_battery->base = ioremap_nocache(jz_battery->mem->start, resource_size(jz_battery->mem)); if (!jz_battery->base) { ret = -EBUSY; dev_err(&pdev->dev, "Failed to ioremap mmio memory\n"); goto err_release_mem_region; } battery = &jz_battery->battery; battery->name = pdata->info.name; battery->type = POWER_SUPPLY_TYPE_BATTERY; battery->properties = jz_battery_properties; battery->num_properties = ARRAY_SIZE(jz_battery_properties); battery->get_property = jz_battery_get_property; battery->external_power_changed = jz_battery_external_power_changed; battery->use_for_apm = 1; jz_battery->pdata = pdata; jz_battery->pdev = pdev; init_completion(&jz_battery->read_completion); mutex_init(&jz_battery->lock); INIT_DELAYED_WORK(&jz_battery->work, jz_battery_work); ret = request_irq(jz_battery->irq, jz_battery_irq_handler, 0, pdev->name, jz_battery); if (ret) { dev_err(&pdev->dev, "Failed to request irq %d\n", ret); goto err_iounmap; } disable_irq(jz_battery->irq); if (gpio_is_valid(pdata->gpio_charge)) { ret = gpio_request(pdata->gpio_charge, dev_name(&pdev->dev)); if (ret) { dev_err(&pdev->dev, "charger state gpio request failed.\n"); goto err_free_irq; } ret = gpio_direction_input(pdata->gpio_charge); if (ret) { dev_err(&pdev->dev, "charger state gpio set direction failed.\n"); goto err_free_gpio; } jz_battery->charge_irq = gpio_to_irq(pdata->gpio_charge); if (jz_battery->charge_irq >= 0) { ret = request_irq(jz_battery->charge_irq, jz_battery_charge_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), jz_battery); if (ret) { dev_err(&pdev->dev, "Failed to request charge irq: %d\n", ret); goto err_free_gpio; } } } else { jz_battery->charge_irq = -1; } if (jz_battery->pdata->info.voltage_max_design <= 2500000) jz4740_adc_set_config(pdev->dev.parent, JZ_ADC_CONFIG_BAT_MB, JZ_ADC_CONFIG_BAT_MB); else jz4740_adc_set_config(pdev->dev.parent, JZ_ADC_CONFIG_BAT_MB, 0); ret = power_supply_register(&pdev->dev, &jz_battery->battery); if (ret) { dev_err(&pdev->dev, "power supply battery register failed.\n"); goto err_free_charge_irq; } platform_set_drvdata(pdev, jz_battery); schedule_delayed_work(&jz_battery->work, 0); return 0; err_free_charge_irq: if (jz_battery->charge_irq >= 0) free_irq(jz_battery->charge_irq, jz_battery); err_free_gpio: if (gpio_is_valid(pdata->gpio_charge)) gpio_free(jz_battery->pdata->gpio_charge); err_free_irq: free_irq(jz_battery->irq, jz_battery); err_iounmap: platform_set_drvdata(pdev, NULL); iounmap(jz_battery->base); 
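/* Remaining unwind: release the MMIO region, then free the driver state. */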
err_release_mem_region: release_mem_region(jz_battery->mem->start, resource_size(jz_battery->mem)); err_free: kfree(jz_battery); return ret; } static int __devexit jz_battery_remove(struct platform_device *pdev) { struct jz_battery *jz_battery = platform_get_drvdata(pdev); cancel_delayed_work_sync(&jz_battery->work); if (gpio_is_valid(jz_battery->pdata->gpio_charge)) { if (jz_battery->charge_irq >= 0) free_irq(jz_battery->charge_irq, jz_battery); gpio_free(jz_battery->pdata->gpio_charge); } power_supply_unregister(&jz_battery->battery); free_irq(jz_battery->irq, jz_battery); iounmap(jz_battery->base); release_mem_region(jz_battery->mem->start, resource_size(jz_battery->mem)); kfree(jz_battery); return 0; } #ifdef CONFIG_PM static int jz_battery_suspend(struct device *dev) { struct jz_battery *jz_battery = dev_get_drvdata(dev); cancel_delayed_work_sync(&jz_battery->work); jz_battery->status = POWER_SUPPLY_STATUS_UNKNOWN; return 0; } static int jz_battery_resume(struct device *dev) { struct jz_battery *jz_battery = dev_get_drvdata(dev); schedule_delayed_work(&jz_battery->work, 0); return 0; } static const struct dev_pm_ops jz_battery_pm_ops = { .suspend = jz_battery_suspend, .resume = jz_battery_resume, }; #define JZ_BATTERY_PM_OPS (&jz_battery_pm_ops) #else #define JZ_BATTERY_PM_OPS NULL #endif static struct platform_driver jz_battery_driver = { .probe = jz_battery_probe, .remove = __devexit_p(jz_battery_remove), .driver = { .name = "jz4740-battery", .owner = THIS_MODULE, .pm = JZ_BATTERY_PM_OPS, }, }; static int __init jz_battery_init(void) { return platform_driver_register(&jz_battery_driver); } module_init(jz_battery_init); static void __exit jz_battery_exit(void) { platform_driver_unregister(&jz_battery_driver); } module_exit(jz_battery_exit); MODULE_ALIAS("platform:jz4740-battery"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); MODULE_DESCRIPTION("JZ4740 SoC battery driver");
gpl-2.0
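jz_battery_get_capacity() above maps the measured voltage linearly onto the [voltage_min_design, voltage_max_design] window and clamps the result to 0..100. A standalone sketch of that arithmetic, with made-up design values for illustration:

#include <stdio.h>

static int capacity_percent(long uv, long min_uv, long max_uv)
{
	long span = max_uv - min_uv;             /* design voltage window */
	long pct = ((uv - min_uv) * 100) / span; /* linear interpolation, as in the driver */

	if (pct > 100)                           /* clamp exactly like jz_battery_get_capacity() */
		pct = 100;
	else if (pct < 0)
		pct = 0;
	return (int)pct;
}

int main(void)
{
	/* 3.9 V measured in an assumed 3.6 V..4.2 V design window -> 50% */
	printf("%d%%\n", capacity_percent(3900000, 3600000, 4200000));
	return 0;
}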
talnoah/Leaping_Lemur
drivers/media/dvb/frontends/dib0090.c
5148
76928
/* * Linux-DVB Driver for DiBcom's DiB0090 base-band RF Tuner. * * Copyright (C) 2005-9 DiBcom (http://www.dibcom.fr/) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * * This code is more or less generated from another driver, please * excuse some codingstyle oddities. * */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/mutex.h> #include "dvb_frontend.h" #include "dib0090.h" #include "dibx000_common.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "turn on debugging (default: 0)"); #define dprintk(args...) do { \ if (debug) { \ printk(KERN_DEBUG "DiB0090: "); \ printk(args); \ printk("\n"); \ } \ } while (0) #define CONFIG_SYS_DVBT #define CONFIG_SYS_ISDBT #define CONFIG_BAND_CBAND #define CONFIG_BAND_VHF #define CONFIG_BAND_UHF #define CONFIG_DIB0090_USE_PWM_AGC #define EN_LNA0 0x8000 #define EN_LNA1 0x4000 #define EN_LNA2 0x2000 #define EN_LNA3 0x1000 #define EN_MIX0 0x0800 #define EN_MIX1 0x0400 #define EN_MIX2 0x0200 #define EN_MIX3 0x0100 #define EN_IQADC 0x0040 #define EN_PLL 0x0020 #define EN_TX 0x0010 #define EN_BB 0x0008 #define EN_LO 0x0004 #define EN_BIAS 0x0001 #define EN_IQANA 0x0002 #define EN_DIGCLK 0x0080 /* not in the 0x24 reg, only in 0x1b */ #define EN_CRYSTAL 0x0002 #define EN_UHF 0x22E9 #define EN_VHF 0x44E9 #define EN_LBD 0x11E9 #define EN_SBD 0x44E9 #define EN_CAB 0x88E9 /* Calibration defines */ #define DC_CAL 0x1 #define WBD_CAL 0x2 #define TEMP_CAL 0x4 #define CAPTRIM_CAL 0x8 #define KROSUS_PLL_LOCKED 0x800 #define KROSUS 0x2 /* Use those defines to identify SOC version */ #define SOC 0x02 #define SOC_7090_P1G_11R1 0x82 #define SOC_7090_P1G_21R1 0x8a #define SOC_8090_P1G_11R1 0x86 #define SOC_8090_P1G_21R1 0x8e /* else use those ones to check */ #define P1A_B 0x0 #define P1C 0x1 #define P1D_E_F 0x3 #define P1G 0x7 #define P1G_21R2 0xf #define MP001 0x1 /* Single 9090/8096 */ #define MP005 0x4 /* Single Sband */ #define MP008 0x6 /* Dual diversity VHF-UHF-LBAND */ #define MP009 0x7 /* Dual diversity 29098 CBAND-UHF-LBAND-SBAND */ #define pgm_read_word(w) (*w) struct dc_calibration; struct dib0090_tuning { u32 max_freq; /* for every frequency less than or equal to that field: this information is correct */ u8 switch_trim; u8 lna_tune; u16 lna_bias; u16 v2i; u16 mix; u16 load; u16 tuner_enable; }; struct dib0090_pll { u32 max_freq; /* for every frequency less than or equal to that field: this information is correct */ u8 vco_band; u8 hfdiv_code; u8 hfdiv; u8 topresc; }; struct dib0090_identity { u8 version; u8 product; u8 p1g; u8 in_soc; }; struct dib0090_state { struct i2c_adapter *i2c; struct dvb_frontend *fe; const struct dib0090_config *config; u8 current_band; enum frontend_tune_state tune_state; u32 current_rf; u16 wbd_offset; s16 wbd_target; /* in dB */ s16 rf_gain_limit; /* take-over-point: where to split between bb and rf gain */ s16 current_gain; /* keeps the
currently programmed gain */ u8 agc_step; /* new binary search */ u16 gain[2]; /* for channel monitoring */ const u16 *rf_ramp; const u16 *bb_ramp; /* for the software AGC ramps */ u16 bb_1_def; u16 rf_lt_def; u16 gain_reg[4]; /* for the captrim/dc-offset search */ s8 step; s16 adc_diff; s16 min_adc_diff; s8 captrim; s8 fcaptrim; const struct dc_calibration *dc; u16 bb6, bb7; const struct dib0090_tuning *current_tune_table_index; const struct dib0090_pll *current_pll_table_index; u8 tuner_is_tuned; u8 agc_freeze; struct dib0090_identity identity; u32 rf_request; u8 current_standard; u8 calibrate; u32 rest; u16 bias; s16 temperature; u8 wbd_calibration_gain; const struct dib0090_wbd_slope *current_wbd_table; u16 wbdmux; /* for the I2C transfer */ struct i2c_msg msg[2]; u8 i2c_write_buffer[3]; u8 i2c_read_buffer[2]; struct mutex i2c_buffer_lock; }; struct dib0090_fw_state { struct i2c_adapter *i2c; struct dvb_frontend *fe; struct dib0090_identity identity; const struct dib0090_config *config; /* for the I2C transfer */ struct i2c_msg msg; u8 i2c_write_buffer[2]; u8 i2c_read_buffer[2]; struct mutex i2c_buffer_lock; }; static u16 dib0090_read_reg(struct dib0090_state *state, u8 reg) { u16 ret; if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) { dprintk("could not acquire lock"); return 0; } state->i2c_write_buffer[0] = reg; memset(state->msg, 0, 2 * sizeof(struct i2c_msg)); state->msg[0].addr = state->config->i2c_address; state->msg[0].flags = 0; state->msg[0].buf = state->i2c_write_buffer; state->msg[0].len = 1; state->msg[1].addr = state->config->i2c_address; state->msg[1].flags = I2C_M_RD; state->msg[1].buf = state->i2c_read_buffer; state->msg[1].len = 2; if (i2c_transfer(state->i2c, state->msg, 2) != 2) { printk(KERN_WARNING "DiB0090 I2C read failed\n"); ret = 0; } else ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1]; mutex_unlock(&state->i2c_buffer_lock); return ret; } static int dib0090_write_reg(struct dib0090_state *state, u32 reg, u16 val) { int ret; if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) { dprintk("could not acquire lock"); return -EINVAL; } state->i2c_write_buffer[0] = reg & 0xff; state->i2c_write_buffer[1] = val >> 8; state->i2c_write_buffer[2] = val & 0xff; memset(state->msg, 0, sizeof(struct i2c_msg)); state->msg[0].addr = state->config->i2c_address; state->msg[0].flags = 0; state->msg[0].buf = state->i2c_write_buffer; state->msg[0].len = 3; if (i2c_transfer(state->i2c, state->msg, 1) != 1) { printk(KERN_WARNING "DiB0090 I2C write failed\n"); ret = -EREMOTEIO; } else ret = 0; mutex_unlock(&state->i2c_buffer_lock); return ret; } static u16 dib0090_fw_read_reg(struct dib0090_fw_state *state, u8 reg) { u16 ret; if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) { dprintk("could not acquire lock"); return 0; } state->i2c_write_buffer[0] = reg; memset(&state->msg, 0, sizeof(struct i2c_msg)); state->msg.addr = reg; state->msg.flags = I2C_M_RD; state->msg.buf = state->i2c_read_buffer; state->msg.len = 2; if (i2c_transfer(state->i2c, &state->msg, 1) != 1) { printk(KERN_WARNING "DiB0090 I2C read failed\n"); ret = 0; } else ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1]; mutex_unlock(&state->i2c_buffer_lock); return ret; } static int dib0090_fw_write_reg(struct dib0090_fw_state *state, u8 reg, u16 val) { int ret; if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) { dprintk("could not acquire lock"); return -EINVAL; } state->i2c_write_buffer[0] = val >> 8; state->i2c_write_buffer[1] = val & 0xff; 
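/* Firmware-mediated register access: the register index is carried in the I2C message address field (msg.addr below), so the two payload bytes hold only the 16-bit value, MSB first. */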
memset(&state->msg, 0, sizeof(struct i2c_msg)); state->msg.addr = reg; state->msg.flags = 0; state->msg.buf = state->i2c_write_buffer; state->msg.len = 2; if (i2c_transfer(state->i2c, &state->msg, 1) != 1) { printk(KERN_WARNING "DiB0090 I2C write failed\n"); ret = -EREMOTEIO; } else ret = 0; mutex_unlock(&state->i2c_buffer_lock); return ret; } #define HARD_RESET(state) do { if (cfg->reset) { if (cfg->sleep) cfg->sleep(fe, 0); msleep(10); cfg->reset(fe, 1); msleep(10); cfg->reset(fe, 0); msleep(10); } } while (0) #define ADC_TARGET -220 #define GAIN_ALPHA 5 #define WBD_ALPHA 6 #define LPF 100 static void dib0090_write_regs(struct dib0090_state *state, u8 r, const u16 * b, u8 c) { do { dib0090_write_reg(state, r++, *b++); } while (--c); } static int dib0090_identify(struct dvb_frontend *fe) { struct dib0090_state *state = fe->tuner_priv; u16 v; struct dib0090_identity *identity = &state->identity; v = dib0090_read_reg(state, 0x1a); identity->p1g = 0; identity->in_soc = 0; dprintk("Tuner identification (Version = 0x%04x)", v); /* without PLL lock info */ v &= ~KROSUS_PLL_LOCKED; identity->version = v & 0xff; identity->product = (v >> 8) & 0xf; if (identity->product != KROSUS) goto identification_error; if ((identity->version & 0x3) == SOC) { identity->in_soc = 1; switch (identity->version) { case SOC_8090_P1G_11R1: dprintk("SOC 8090 P1-G11R1 Has been detected"); identity->p1g = 1; break; case SOC_8090_P1G_21R1: dprintk("SOC 8090 P1-G21R1 Has been detected"); identity->p1g = 1; break; case SOC_7090_P1G_11R1: dprintk("SOC 7090 P1-G11R1 Has been detected"); identity->p1g = 1; break; case SOC_7090_P1G_21R1: dprintk("SOC 7090 P1-G21R1 Has been detected"); identity->p1g = 1; break; default: goto identification_error; } } else { switch ((identity->version >> 5) & 0x7) { case MP001: dprintk("MP001 : 9090/8096"); break; case MP005: dprintk("MP005 : Single Sband"); break; case MP008: dprintk("MP008 : diversity VHF-UHF-LBAND"); break; case MP009: dprintk("MP009 : diversity 29098 CBAND-UHF-LBAND-SBAND"); break; default: goto identification_error; } switch (identity->version & 0x1f) { case P1G_21R2: dprintk("P1G_21R2 detected"); identity->p1g = 1; break; case P1G: dprintk("P1G detected"); identity->p1g = 1; break; case P1D_E_F: dprintk("P1D/E/F detected"); break; case P1C: dprintk("P1C detected"); break; case P1A_B: dprintk("P1-A/B detected: driver is deactivated - not available"); goto identification_error; break; default: goto identification_error; } } return 0; identification_error: return -EIO; } static int dib0090_fw_identify(struct dvb_frontend *fe) { struct dib0090_fw_state *state = fe->tuner_priv; struct dib0090_identity *identity = &state->identity; u16 v = dib0090_fw_read_reg(state, 0x1a); identity->p1g = 0; identity->in_soc = 0; dprintk("FE: Tuner identification (Version = 0x%04x)", v); /* without PLL lock info */ v &= ~KROSUS_PLL_LOCKED; identity->version = v & 0xff; identity->product = (v >> 8) & 0xf; if (identity->product != KROSUS) goto identification_error; if ((identity->version & 0x3) == SOC) { identity->in_soc = 1; switch (identity->version) { case SOC_8090_P1G_11R1: dprintk("SOC 8090 P1-G11R1 Has been detected"); identity->p1g = 1; break; case SOC_8090_P1G_21R1: dprintk("SOC 8090 P1-G21R1 Has been detected"); identity->p1g = 1; break; case SOC_7090_P1G_11R1: dprintk("SOC 7090 P1-G11R1 Has been detected"); identity->p1g = 1; break; case SOC_7090_P1G_21R1: dprintk("SOC 7090 P1-G21R1 Has been detected"); identity->p1g = 1; break; default: goto identification_error; } } else { switch 
((identity->version >> 5) & 0x7) { case MP001: dprintk("MP001 : 9090/8096"); break; case MP005: dprintk("MP005 : Single Sband"); break; case MP008: dprintk("MP008 : diversity VHF-UHF-LBAND"); break; case MP009: dprintk("MP009 : diversity 29098 CBAND-UHF-LBAND-SBAND"); break; default: goto identification_error; } switch (identity->version & 0x1f) { case P1G_21R2: dprintk("P1G_21R2 detected"); identity->p1g = 1; break; case P1G: dprintk("P1G detected"); identity->p1g = 1; break; case P1D_E_F: dprintk("P1D/E/F detected"); break; case P1C: dprintk("P1C detected"); break; case P1A_B: dprintk("P1-A/B detected: driver is deactivated - not available"); goto identification_error; break; default: goto identification_error; } } return 0; identification_error: return -EIO; } static void dib0090_reset_digital(struct dvb_frontend *fe, const struct dib0090_config *cfg) { struct dib0090_state *state = fe->tuner_priv; u16 PllCfg, i, v; HARD_RESET(state); dib0090_write_reg(state, 0x24, EN_PLL | EN_CRYSTAL); dib0090_write_reg(state, 0x1b, EN_DIGCLK | EN_PLL | EN_CRYSTAL); /* PLL, DIG_CLK and CRYSTAL remain */ if (!cfg->in_soc) { /* adcClkOutRatio=8->7, release reset */ dib0090_write_reg(state, 0x20, ((cfg->io.adc_clock_ratio - 1) << 11) | (0 << 10) | (1 << 9) | (1 << 8) | (0 << 4) | 0); if (cfg->clkoutdrive != 0) dib0090_write_reg(state, 0x23, (0 << 15) | ((!cfg->analog_output) << 14) | (2 << 10) | (1 << 9) | (0 << 8) | (cfg->clkoutdrive << 5) | (cfg->clkouttobamse << 4) | (0 << 2) | (0)); else dib0090_write_reg(state, 0x23, (0 << 15) | ((!cfg->analog_output) << 14) | (2 << 10) | (1 << 9) | (0 << 8) | (7 << 5) | (cfg->clkouttobamse << 4) | (0 << 2) | (0)); } /* Read Pll current config * */ PllCfg = dib0090_read_reg(state, 0x21); /** Reconfigure PLL if current setting is different from default setting **/ if ((PllCfg & 0x1FFF) != ((cfg->io.pll_range << 12) | (cfg->io.pll_loopdiv << 6) | (cfg->io.pll_prediv)) && (!cfg->in_soc) && !cfg->io.pll_bypass) { /* Set Bypass mode */ PllCfg |= (1 << 15); dib0090_write_reg(state, 0x21, PllCfg); /* Set Reset Pll */ PllCfg &= ~(1 << 13); dib0090_write_reg(state, 0x21, PllCfg); /*** Set new Pll configuration in bypass and reset state ***/ PllCfg = (1 << 15) | (0 << 13) | (cfg->io.pll_range << 12) | (cfg->io.pll_loopdiv << 6) | (cfg->io.pll_prediv); dib0090_write_reg(state, 0x21, PllCfg); /* Remove Reset Pll */ PllCfg |= (1 << 13); dib0090_write_reg(state, 0x21, PllCfg); /*** Wait for PLL lock ***/ i = 100; do { v = !!(dib0090_read_reg(state, 0x1a) & 0x800); if (v) break; } while (--i); if (i == 0) { dprintk("Pll: Unable to lock Pll"); return; } /* Finally Remove Bypass mode */ PllCfg &= ~(1 << 15); dib0090_write_reg(state, 0x21, PllCfg); } if (cfg->io.pll_bypass) { PllCfg |= (cfg->io.pll_bypass << 15); dib0090_write_reg(state, 0x21, PllCfg); } } static int dib0090_fw_reset_digital(struct dvb_frontend *fe, const struct dib0090_config *cfg) { struct dib0090_fw_state *state = fe->tuner_priv; u16 PllCfg; u16 v; int i; dprintk("fw reset digital"); HARD_RESET(state); dib0090_fw_write_reg(state, 0x24, EN_PLL | EN_CRYSTAL); dib0090_fw_write_reg(state, 0x1b, EN_DIGCLK | EN_PLL | EN_CRYSTAL); /* PLL, DIG_CLK and CRYSTAL remain */ dib0090_fw_write_reg(state, 0x20, ((cfg->io.adc_clock_ratio - 1) << 11) | (0 << 10) | (1 << 9) | (1 << 8) | (cfg->data_tx_drv << 4) | cfg->ls_cfg_pad_drv); v = (0 << 15) | ((!cfg->analog_output) << 14) | (1 << 9) | (0 << 8) | (cfg->clkouttobamse << 4) | (0 << 2) | (0); if (cfg->clkoutdrive != 0) v |= cfg->clkoutdrive << 5; else v |= 7 << 5; v |= 2 << 10; 
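/* The PLL reprogramming that follows mirrors dib0090_reset_digital(): enter bypass (bit 15), hold the PLL in reset (clear bit 13), program range/loopdiv/prediv, release the reset, poll the lock flag (reg 0x1a, bit 11) up to 100 times, then leave bypass. */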
dib0090_fw_write_reg(state, 0x23, v); /* Read Pll current config * */ PllCfg = dib0090_fw_read_reg(state, 0x21); /** Reconfigure PLL if current setting is different from default setting **/ if ((PllCfg & 0x1FFF) != ((cfg->io.pll_range << 12) | (cfg->io.pll_loopdiv << 6) | (cfg->io.pll_prediv)) && !cfg->io.pll_bypass) { /* Set Bypass mode */ PllCfg |= (1 << 15); dib0090_fw_write_reg(state, 0x21, PllCfg); /* Set Reset Pll */ PllCfg &= ~(1 << 13); dib0090_fw_write_reg(state, 0x21, PllCfg); /*** Set new Pll configuration in bypass and reset state ***/ PllCfg = (1 << 15) | (0 << 13) | (cfg->io.pll_range << 12) | (cfg->io.pll_loopdiv << 6) | (cfg->io.pll_prediv); dib0090_fw_write_reg(state, 0x21, PllCfg); /* Remove Reset Pll */ PllCfg |= (1 << 13); dib0090_fw_write_reg(state, 0x21, PllCfg); /*** Wait for PLL lock ***/ i = 100; do { v = !!(dib0090_fw_read_reg(state, 0x1a) & 0x800); if (v) break; } while (--i); if (i == 0) { dprintk("Pll: Unable to lock Pll"); return -EIO; } /* Finally Remove Bypass mode */ PllCfg &= ~(1 << 15); dib0090_fw_write_reg(state, 0x21, PllCfg); } if (cfg->io.pll_bypass) { PllCfg |= (cfg->io.pll_bypass << 15); dib0090_fw_write_reg(state, 0x21, PllCfg); } return dib0090_fw_identify(fe); } static int dib0090_wakeup(struct dvb_frontend *fe) { struct dib0090_state *state = fe->tuner_priv; if (state->config->sleep) state->config->sleep(fe, 0); /* enable dataTX in case we have been restarted in the wrong moment */ dib0090_write_reg(state, 0x23, dib0090_read_reg(state, 0x23) | (1 << 14)); return 0; } static int dib0090_sleep(struct dvb_frontend *fe) { struct dib0090_state *state = fe->tuner_priv; if (state->config->sleep) state->config->sleep(fe, 1); return 0; } void dib0090_dcc_freq(struct dvb_frontend *fe, u8 fast) { struct dib0090_state *state = fe->tuner_priv; if (fast) dib0090_write_reg(state, 0x04, 0); else dib0090_write_reg(state, 0x04, 1); } EXPORT_SYMBOL(dib0090_dcc_freq); static const u16 bb_ramp_pwm_normal_socs[] = { 550, /* max BB gain in 10th of dB */ (1 << 9) | 8, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> BB_RAMP2 */ 440, (4 << 9) | 0, /* BB_RAMP3 = 26dB */ (0 << 9) | 208, /* BB_RAMP4 */ (4 << 9) | 208, /* BB_RAMP5 = 29dB */ (0 << 9) | 440, /* BB_RAMP6 */ }; static const u16 rf_ramp_pwm_cband_7090[] = { 280, /* max RF gain in 10th of dB */ 18, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> RF_RAMP2 */ 504, /* ramp_max = maximum X used on the ramp */ (29 << 10) | 364, /* RF_RAMP5, LNA 1 = 8dB */ (0 << 10) | 504, /* RF_RAMP6, LNA 1 */ (60 << 10) | 228, /* RF_RAMP7, LNA 2 = 7.7dB */ (0 << 10) | 364, /* RF_RAMP8, LNA 2 */ (34 << 10) | 109, /* GAIN_4_1, LNA 3 = 6.8dB */ (0 << 10) | 228, /* GAIN_4_2, LNA 3 */ (37 << 10) | 0, /* RF_RAMP3, LNA 4 = 6.2dB */ (0 << 10) | 109, /* RF_RAMP4, LNA 4 */ }; static const uint16_t rf_ramp_pwm_cband_7090e_sensitivity[] = { 186, 40, 746, (10 << 10) | 345, (0 << 10) | 746, (0 << 10) | 0, (0 << 10) | 0, (28 << 10) | 200, (0 << 10) | 345, (20 << 10) | 0, (0 << 10) | 200, }; static const uint16_t rf_ramp_pwm_cband_7090e_aci[] = { 86, 40, 345, (0 << 10) | 0, (0 << 10) | 0, (0 << 10) | 0, (0 << 10) | 0, (28 << 10) | 200, (0 << 10) | 345, (20 << 10) | 0, (0 << 10) | 200, }; static const u16 rf_ramp_pwm_cband_8090[] = { 345, /* max RF gain in 10th of dB */ 29, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> RF_RAMP2 */ 1000, /* ramp_max = maximum X used on the ramp */ (35 << 10) | 772, /* RF_RAMP3, LNA 1 = 8dB */ (0 << 10) | 1000, /* RF_RAMP4, LNA 1 */ 
(58 << 10) | 496, /* RF_RAMP5, LNA 2 = 9.5dB */ (0 << 10) | 772, /* RF_RAMP6, LNA 2 */ (27 << 10) | 200, /* RF_RAMP7, LNA 3 = 10.5dB */ (0 << 10) | 496, /* RF_RAMP8, LNA 3 */ (40 << 10) | 0, /* GAIN_4_1, LNA 4 = 7dB */ (0 << 10) | 200, /* GAIN_4_2, LNA 4 */ }; static const u16 rf_ramp_pwm_uhf_7090[] = { 407, /* max RF gain in 10th of dB */ 13, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> RF_RAMP2 */ 529, /* ramp_max = maximum X used on the ramp */ (23 << 10) | 0, /* RF_RAMP3, LNA 1 = 14.7dB */ (0 << 10) | 176, /* RF_RAMP4, LNA 1 */ (63 << 10) | 400, /* RF_RAMP5, LNA 2 = 8dB */ (0 << 10) | 529, /* RF_RAMP6, LNA 2 */ (48 << 10) | 316, /* RF_RAMP7, LNA 3 = 6.8dB */ (0 << 10) | 400, /* RF_RAMP8, LNA 3 */ (29 << 10) | 176, /* GAIN_4_1, LNA 4 = 11.5dB */ (0 << 10) | 316, /* GAIN_4_2, LNA 4 */ }; static const u16 rf_ramp_pwm_uhf_8090[] = { 388, /* max RF gain in 10th of dB */ 26, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> RF_RAMP2 */ 1008, /* ramp_max = maximum X used on the ramp */ (11 << 10) | 0, /* RF_RAMP3, LNA 1 = 14.7dB */ (0 << 10) | 369, /* RF_RAMP4, LNA 1 */ (41 << 10) | 809, /* RF_RAMP5, LNA 2 = 8dB */ (0 << 10) | 1008, /* RF_RAMP6, LNA 2 */ (27 << 10) | 659, /* RF_RAMP7, LNA 3 = 6dB */ (0 << 10) | 809, /* RF_RAMP8, LNA 3 */ (14 << 10) | 369, /* GAIN_4_1, LNA 4 = 11.5dB */ (0 << 10) | 659, /* GAIN_4_2, LNA 4 */ }; static const u16 rf_ramp_pwm_cband[] = { 0, /* max RF gain in 10th of dB */ 0, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> 0x2b */ 0, /* ramp_max = maximum X used on the ramp */ (0 << 10) | 0, /* 0x2c, LNA 1 = 0dB */ (0 << 10) | 0, /* 0x2d, LNA 1 */ (0 << 10) | 0, /* 0x2e, LNA 2 = 0dB */ (0 << 10) | 0, /* 0x2f, LNA 2 */ (0 << 10) | 0, /* 0x30, LNA 3 = 0dB */ (0 << 10) | 0, /* 0x31, LNA 3 */ (0 << 10) | 0, /* GAIN_4_1, LNA 4 = 0dB */ (0 << 10) | 0, /* GAIN_4_2, LNA 4 */ }; static const u16 rf_ramp_vhf[] = { 412, /* max RF gain in 10th of dB */ 132, 307, 127, /* LNA1, 13.2dB */ 105, 412, 255, /* LNA2, 10.5dB */ 50, 50, 127, /* LNA3, 5dB */ 125, 175, 127, /* LNA4, 12.5dB */ 0, 0, 127, /* CBAND, 0dB */ }; static const u16 rf_ramp_uhf[] = { 412, /* max RF gain in 10th of dB */ 132, 307, 127, /* LNA1 : total gain = 13.2dB, point on the ramp where this amp is full gain, value to write to get full gain */ 105, 412, 255, /* LNA2 : 10.5 dB */ 50, 50, 127, /* LNA3 : 5.0 dB */ 125, 175, 127, /* LNA4 : 12.5 dB */ 0, 0, 127, /* CBAND : 0.0 dB */ }; static const u16 rf_ramp_cband_broadmatching[] = /* for p1G only */ { 314, /* Calibrated at 200MHz order has been changed g4-g3-g2-g1 */ 84, 314, 127, /* LNA1 */ 80, 230, 255, /* LNA2 */ 80, 150, 127, /* LNA3 It was measured 12dB, do not lock if 120 */ 70, 70, 127, /* LNA4 */ 0, 0, 127, /* CBAND */ }; static const u16 rf_ramp_cband[] = { 332, /* max RF gain in 10th of dB */ 132, 252, 127, /* LNA1, dB */ 80, 332, 255, /* LNA2, dB */ 0, 0, 127, /* LNA3, dB */ 0, 0, 127, /* LNA4, dB */ 120, 120, 127, /* LT1 CBAND */ }; static const u16 rf_ramp_pwm_vhf[] = { 404, /* max RF gain in 10th of dB */ 25, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> 0x2b */ 1011, /* ramp_max = maximum X used on the ramp */ (6 << 10) | 417, /* 0x2c, LNA 1 = 13.2dB */ (0 << 10) | 756, /* 0x2d, LNA 1 */ (16 << 10) | 756, /* 0x2e, LNA 2 = 10.5dB */ (0 << 10) | 1011, /* 0x2f, LNA 2 */ (16 << 10) | 290, /* 0x30, LNA 3 = 5dB */ (0 << 10) | 417, /* 0x31, LNA 3 */ (7 << 10) | 0, /* GAIN_4_1, LNA 4 = 12.5dB */ (0 << 10) | 290, /* GAIN_4_2, LNA 4 */ }; static 
const u16 rf_ramp_pwm_uhf[] = { 404, /* max RF gain in 10th of dB */ 25, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> 0x2b */ 1011, /* ramp_max = maximum X used on the ramp */ (6 << 10) | 417, /* 0x2c, LNA 1 = 13.2dB */ (0 << 10) | 756, /* 0x2d, LNA 1 */ (16 << 10) | 756, /* 0x2e, LNA 2 = 10.5dB */ (0 << 10) | 1011, /* 0x2f, LNA 2 */ (16 << 10) | 0, /* 0x30, LNA 3 = 5dB */ (0 << 10) | 127, /* 0x31, LNA 3 */ (7 << 10) | 127, /* GAIN_4_1, LNA 4 = 12.5dB */ (0 << 10) | 417, /* GAIN_4_2, LNA 4 */ }; static const u16 bb_ramp_boost[] = { 550, /* max BB gain in 10th of dB */ 260, 260, 26, /* BB1, 26dB */ 290, 550, 29, /* BB2, 29dB */ }; static const u16 bb_ramp_pwm_normal[] = { 500, /* max RF gain in 10th of dB */ 8, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> 0x34 */ 400, (2 << 9) | 0, /* 0x35 = 21dB */ (0 << 9) | 168, /* 0x36 */ (2 << 9) | 168, /* 0x37 = 29dB */ (0 << 9) | 400, /* 0x38 */ }; struct slope { s16 range; s16 slope; }; static u16 slopes_to_scale(const struct slope *slopes, u8 num, s16 val) { u8 i; u16 rest; u16 ret = 0; for (i = 0; i < num; i++) { if (val > slopes[i].range) rest = slopes[i].range; else rest = val; ret += (rest * slopes[i].slope) / slopes[i].range; val -= rest; } return ret; } static const struct slope dib0090_wbd_slopes[3] = { {66, 120}, /* -64,-52: offset - 65 */ {600, 170}, /* -52,-35: 65 - 665 */ {170, 250}, /* -45,-10: 665 - 835 */ }; static s16 dib0090_wbd_to_db(struct dib0090_state *state, u16 wbd) { wbd &= 0x3ff; if (wbd < state->wbd_offset) wbd = 0; else wbd -= state->wbd_offset; /* -64dB is the floor */ return -640 + (s16) slopes_to_scale(dib0090_wbd_slopes, ARRAY_SIZE(dib0090_wbd_slopes), wbd); } static void dib0090_wbd_target(struct dib0090_state *state, u32 rf) { u16 offset = 250; /* TODO : DAB digital N+/-1 interferer perfs : offset = 10 */ if (state->current_band == BAND_VHF) offset = 650; #ifndef FIRMWARE_FIREFLY if (state->current_band == BAND_VHF) offset = state->config->wbd_vhf_offset; if (state->current_band == BAND_CBAND) offset = state->config->wbd_cband_offset; #endif state->wbd_target = dib0090_wbd_to_db(state, state->wbd_offset + offset); dprintk("wbd-target: %d dB", (u32) state->wbd_target); } static const int gain_reg_addr[4] = { 0x08, 0x0a, 0x0f, 0x01 }; static void dib0090_gain_apply(struct dib0090_state *state, s16 gain_delta, s16 top_delta, u8 force) { u16 rf, bb, ref; u16 i, v, gain_reg[4] = { 0 }, gain; const u16 *g; if (top_delta < -511) top_delta = -511; if (top_delta > 511) top_delta = 511; if (force) { top_delta *= (1 << WBD_ALPHA); gain_delta *= (1 << GAIN_ALPHA); } if (top_delta >= ((s16) (state->rf_ramp[0] << WBD_ALPHA) - state->rf_gain_limit)) /* overflow */ state->rf_gain_limit = state->rf_ramp[0] << WBD_ALPHA; else state->rf_gain_limit += top_delta; if (state->rf_gain_limit < 0) /*underflow */ state->rf_gain_limit = 0; /* use gain as a temporary variable and correct current_gain */ gain = ((state->rf_gain_limit >> WBD_ALPHA) + state->bb_ramp[0]) << GAIN_ALPHA; if (gain_delta >= ((s16) gain - state->current_gain)) /* overflow */ state->current_gain = gain; else state->current_gain += gain_delta; /* cannot be less than 0 (only if gain_delta is less than 0 we can have current_gain < 0) */ if (state->current_gain < 0) state->current_gain = 0; /* now split total gain to rf and bb gain */ gain = state->current_gain >> GAIN_ALPHA; /* requested gain is bigger than rf gain limit - ACI/WBD adjustment */ if (gain > (state->rf_gain_limit >> WBD_ALPHA)) { rf = 
state->rf_gain_limit >> WBD_ALPHA; bb = gain - rf; if (bb > state->bb_ramp[0]) bb = state->bb_ramp[0]; } else { /* high signal level -> all gains put on RF */ rf = gain; bb = 0; } state->gain[0] = rf; state->gain[1] = bb; /* software ramp */ /* Start with RF gains */ g = state->rf_ramp + 1; /* point on RF LNA1 max gain */ ref = rf; for (i = 0; i < 7; i++) { /* Go over all amplifiers => 5RF amps + 2 BB amps = 7 amps */ if (g[0] == 0 || ref < (g[1] - g[0])) /* if total gain of the current amp is null or this amp is not concerned because it starts to work from a higher gain value */ v = 0; /* force the gain to write for the current amp to be null */ else if (ref >= g[1]) /* Gain to set is higher than the high working point of this amp */ v = g[2]; /* force this amp to be full gain */ else /* compute the value to set to this amp because we are somewhere in its range */ v = ((ref - (g[1] - g[0])) * g[2]) / g[0]; if (i == 0) /* LNA 1 reg mapping */ gain_reg[0] = v; else if (i == 1) /* LNA 2 reg mapping */ gain_reg[0] |= v << 7; else if (i == 2) /* LNA 3 reg mapping */ gain_reg[1] = v; else if (i == 3) /* LNA 4 reg mapping */ gain_reg[1] |= v << 7; else if (i == 4) /* CBAND LNA reg mapping */ gain_reg[2] = v | state->rf_lt_def; else if (i == 5) /* BB gain 1 reg mapping */ gain_reg[3] = v << 3; else if (i == 6) /* BB gain 2 reg mapping */ gain_reg[3] |= v << 8; g += 3; /* go to the next gain block */ /* When RF is finished, start with BB */ if (i == 4) { g = state->bb_ramp + 1; /* point on BB gain 1 max gain */ ref = bb; } } gain_reg[3] |= state->bb_1_def; gain_reg[3] |= ((bb % 10) * 100) / 125; #ifdef DEBUG_AGC dprintk("GA CALC: DB: %3d(rf) + %3d(bb) = %3d gain_reg[0]=%04x gain_reg[1]=%04x gain_reg[2]=%04x gain_reg[3]=%04x", rf, bb, rf + bb, gain_reg[0], gain_reg[1], gain_reg[2], gain_reg[3]); #endif /* Write the amplifier regs */ for (i = 0; i < 4; i++) { v = gain_reg[i]; if (force || state->gain_reg[i] != v) { state->gain_reg[i] = v; dib0090_write_reg(state, gain_reg_addr[i], v); } } } static void dib0090_set_boost(struct dib0090_state *state, int onoff) { state->bb_1_def &= 0xdfff; state->bb_1_def |= onoff << 13; } static void dib0090_set_rframp(struct dib0090_state *state, const u16 * cfg) { state->rf_ramp = cfg; } static void dib0090_set_rframp_pwm(struct dib0090_state *state, const u16 * cfg) { state->rf_ramp = cfg; dib0090_write_reg(state, 0x2a, 0xffff); dprintk("total RF gain: %ddB, step: %d", (u32) cfg[0], dib0090_read_reg(state, 0x2a)); dib0090_write_regs(state, 0x2c, cfg + 3, 6); dib0090_write_regs(state, 0x3e, cfg + 9, 2); } static void dib0090_set_bbramp(struct dib0090_state *state, const u16 * cfg) { state->bb_ramp = cfg; dib0090_set_boost(state, cfg[0] > 500); /* we want the boost if the gain is higher than 50dB */ } static void dib0090_set_bbramp_pwm(struct dib0090_state *state, const u16 * cfg) { state->bb_ramp = cfg; dib0090_set_boost(state, cfg[0] > 500); /* we want the boost if the gain is higher than 50dB */ dib0090_write_reg(state, 0x33, 0xffff); dprintk("total BB gain: %ddB, step: %d", (u32) cfg[0], dib0090_read_reg(state, 0x33)); dib0090_write_regs(state, 0x35, cfg + 3, 4); } void dib0090_pwm_gain_reset(struct dvb_frontend *fe) { struct dib0090_state *state = fe->tuner_priv; /* reset the AGC */ if (state->config->use_pwm_agc) { #ifdef CONFIG_BAND_SBAND if (state->current_band == BAND_SBAND) { dib0090_set_rframp_pwm(state, rf_ramp_pwm_sband); dib0090_set_bbramp_pwm(state, bb_ramp_pwm_boost); } else #endif #ifdef CONFIG_BAND_CBAND if (state->current_band == BAND_CBAND) { if
(state->identity.in_soc) { dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal_socs); if (state->identity.version == SOC_8090_P1G_11R1 || state->identity.version == SOC_8090_P1G_21R1) dib0090_set_rframp_pwm(state, rf_ramp_pwm_cband_8090); else if (state->identity.version == SOC_7090_P1G_11R1 || state->identity.version == SOC_7090_P1G_21R1) { if (state->config->is_dib7090e) { if (state->rf_ramp == NULL) dib0090_set_rframp_pwm(state, rf_ramp_pwm_cband_7090e_sensitivity); else dib0090_set_rframp_pwm(state, state->rf_ramp); } else dib0090_set_rframp_pwm(state, rf_ramp_pwm_cband_7090); } } else { dib0090_set_rframp_pwm(state, rf_ramp_pwm_cband); dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal); } } else #endif #ifdef CONFIG_BAND_VHF if (state->current_band == BAND_VHF) { if (state->identity.in_soc) { dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal_socs); } else { dib0090_set_rframp_pwm(state, rf_ramp_pwm_vhf); dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal); } } else #endif { if (state->identity.in_soc) { if (state->identity.version == SOC_8090_P1G_11R1 || state->identity.version == SOC_8090_P1G_21R1) dib0090_set_rframp_pwm(state, rf_ramp_pwm_uhf_8090); else if (state->identity.version == SOC_7090_P1G_11R1 || state->identity.version == SOC_7090_P1G_21R1) dib0090_set_rframp_pwm(state, rf_ramp_pwm_uhf_7090); dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal_socs); } else { dib0090_set_rframp_pwm(state, rf_ramp_pwm_uhf); dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal); } } if (state->rf_ramp[0] != 0) dib0090_write_reg(state, 0x32, (3 << 11)); else dib0090_write_reg(state, 0x32, (0 << 11)); dib0090_write_reg(state, 0x04, 0x03); dib0090_write_reg(state, 0x39, (1 << 10)); } } EXPORT_SYMBOL(dib0090_pwm_gain_reset); void dib0090_set_dc_servo(struct dvb_frontend *fe, u8 DC_servo_cutoff) { struct dib0090_state *state = fe->tuner_priv; if (DC_servo_cutoff < 4) dib0090_write_reg(state, 0x04, DC_servo_cutoff); } EXPORT_SYMBOL(dib0090_set_dc_servo); static u32 dib0090_get_slow_adc_val(struct dib0090_state *state) { u16 adc_val = dib0090_read_reg(state, 0x1d); if (state->identity.in_soc) adc_val >>= 2; return adc_val; } int dib0090_gain_control(struct dvb_frontend *fe) { struct dib0090_state *state = fe->tuner_priv; enum frontend_tune_state *tune_state = &state->tune_state; int ret = 10; u16 wbd_val = 0; u8 apply_gain_immediatly = 1; s16 wbd_error = 0, adc_error = 0; if (*tune_state == CT_AGC_START) { state->agc_freeze = 0; dib0090_write_reg(state, 0x04, 0x0); #ifdef CONFIG_BAND_SBAND if (state->current_band == BAND_SBAND) { dib0090_set_rframp(state, rf_ramp_sband); dib0090_set_bbramp(state, bb_ramp_boost); } else #endif #ifdef CONFIG_BAND_VHF if (state->current_band == BAND_VHF && !state->identity.p1g) { dib0090_set_rframp(state, rf_ramp_vhf); dib0090_set_bbramp(state, bb_ramp_boost); } else #endif #ifdef CONFIG_BAND_CBAND if (state->current_band == BAND_CBAND && !state->identity.p1g) { dib0090_set_rframp(state, rf_ramp_cband); dib0090_set_bbramp(state, bb_ramp_boost); } else #endif if ((state->current_band == BAND_CBAND || state->current_band == BAND_VHF) && state->identity.p1g) { dib0090_set_rframp(state, rf_ramp_cband_broadmatching); dib0090_set_bbramp(state, bb_ramp_boost); } else { dib0090_set_rframp(state, rf_ramp_uhf); dib0090_set_bbramp(state, bb_ramp_boost); } dib0090_write_reg(state, 0x32, 0); dib0090_write_reg(state, 0x39, 0); dib0090_wbd_target(state, state->current_rf); state->rf_gain_limit = state->rf_ramp[0] << WBD_ALPHA; state->current_gain = ((state->rf_ramp[0] + state->bb_ramp[0]) / 2) << 
GAIN_ALPHA; *tune_state = CT_AGC_STEP_0; } else if (!state->agc_freeze) { s16 wbd = 0, i, cnt; int adc; wbd_val = dib0090_get_slow_adc_val(state); if (*tune_state == CT_AGC_STEP_0) cnt = 5; else cnt = 1; for (i = 0; i < cnt; i++) { wbd_val = dib0090_get_slow_adc_val(state); wbd += dib0090_wbd_to_db(state, wbd_val); } wbd /= cnt; wbd_error = state->wbd_target - wbd; if (*tune_state == CT_AGC_STEP_0) { if (wbd_error < 0 && state->rf_gain_limit > 0 && !state->identity.p1g) { #ifdef CONFIG_BAND_CBAND /* in case of CBAND tune reduce first the lt_gain2 before adjusting the RF gain */ u8 ltg2 = (state->rf_lt_def >> 10) & 0x7; if (state->current_band == BAND_CBAND && ltg2) { ltg2 >>= 1; state->rf_lt_def &= ltg2 << 10; /* reduce in 3 steps from 7 to 0 */ } #endif } else { state->agc_step = 0; *tune_state = CT_AGC_STEP_1; } } else { /* calc the adc power */ adc = state->config->get_adc_power(fe); adc = (adc * ((s32) 355774) + (((s32) 1) << 20)) >> 21; /* included in [0:-700] */ adc_error = (s16) (((s32) ADC_TARGET) - adc); #ifdef CONFIG_STANDARD_DAB if (state->fe->dtv_property_cache.delivery_system == STANDARD_DAB) adc_error -= 10; #endif #ifdef CONFIG_STANDARD_DVBT if (state->fe->dtv_property_cache.delivery_system == STANDARD_DVBT && (state->fe->dtv_property_cache.modulation == QAM_64 || state->fe->dtv_property_cache.modulation == QAM_16)) adc_error += 60; #endif #ifdef CONFIG_SYS_ISDBT if ((state->fe->dtv_property_cache.delivery_system == SYS_ISDBT) && (((state->fe->dtv_property_cache.layer[0].segment_count > 0) && ((state->fe->dtv_property_cache.layer[0].modulation == QAM_64) || (state->fe->dtv_property_cache. layer[0].modulation == QAM_16))) || ((state->fe->dtv_property_cache.layer[1].segment_count > 0) && ((state->fe->dtv_property_cache.layer[1].modulation == QAM_64) || (state->fe->dtv_property_cache. layer[1].modulation == QAM_16))) || ((state->fe->dtv_property_cache.layer[2].segment_count > 0) && ((state->fe->dtv_property_cache.layer[2].modulation == QAM_64) || (state->fe->dtv_property_cache. 
layer[2].modulation == QAM_16))) ) ) adc_error += 60; #endif if (*tune_state == CT_AGC_STEP_1) { /* quickly go to the correct range of the ADC power */ if (ABS(adc_error) < 50 || state->agc_step++ > 5) { #ifdef CONFIG_STANDARD_DAB if (state->fe->dtv_property_cache.delivery_system == STANDARD_DAB) { dib0090_write_reg(state, 0x02, (1 << 15) | (15 << 11) | (31 << 6) | (63)); /* cap value = 63 : narrow BB filter : Fc = 1.8MHz */ dib0090_write_reg(state, 0x04, 0x0); } else #endif { dib0090_write_reg(state, 0x02, (1 << 15) | (3 << 11) | (6 << 6) | (32)); dib0090_write_reg(state, 0x04, 0x01); /*0 = 1KHz ; 1 = 150Hz ; 2 = 50Hz ; 3 = 50KHz ; 4 = servo fast */ } *tune_state = CT_AGC_STOP; } } else { /* everything higher than or equal to CT_AGC_STOP means tracking */ ret = 100; /* 10ms interval */ apply_gain_immediatly = 0; } } #ifdef DEBUG_AGC dprintk ("tune state %d, ADC = %3ddB (ADC err %3d) WBD %3ddB (WBD err %3d, WBD val SADC: %4d), RFGainLimit (TOP): %3d, signal: %3ddBm", (u32) *tune_state, (u32) adc, (u32) adc_error, (u32) wbd, (u32) wbd_error, (u32) wbd_val, (u32) state->rf_gain_limit >> WBD_ALPHA, (s32) 200 + adc - (state->current_gain >> GAIN_ALPHA)); #endif } /* apply gain */ if (!state->agc_freeze) dib0090_gain_apply(state, adc_error, wbd_error, apply_gain_immediatly); return ret; } EXPORT_SYMBOL(dib0090_gain_control); void dib0090_get_current_gain(struct dvb_frontend *fe, u16 * rf, u16 * bb, u16 * rf_gain_limit, u16 * rflt) { struct dib0090_state *state = fe->tuner_priv; if (rf) *rf = state->gain[0]; if (bb) *bb = state->gain[1]; if (rf_gain_limit) *rf_gain_limit = state->rf_gain_limit; if (rflt) *rflt = (state->rf_lt_def >> 10) & 0x7; } EXPORT_SYMBOL(dib0090_get_current_gain); u16 dib0090_get_wbd_target(struct dvb_frontend *fe) { struct dib0090_state *state = fe->tuner_priv; u32 f_MHz = state->fe->dtv_property_cache.frequency / 1000000; s32 current_temp = state->temperature; s32 wbd_thot, wbd_tcold; const struct dib0090_wbd_slope *wbd = state->current_wbd_table; while (f_MHz > wbd->max_freq) wbd++; dprintk("using wbd-table-entry with max freq %d", wbd->max_freq); if (current_temp < 0) current_temp = 0; if (current_temp > 128) current_temp = 128; state->wbdmux &= ~(7 << 13); if (wbd->wbd_gain != 0) state->wbdmux |= (wbd->wbd_gain << 13); else state->wbdmux |= (4 << 13); dib0090_write_reg(state, 0x10, state->wbdmux); wbd_thot = wbd->offset_hot - (((u32) wbd->slope_hot * f_MHz) >> 6); wbd_tcold = wbd->offset_cold - (((u32) wbd->slope_cold * f_MHz) >> 6); wbd_tcold += ((wbd_thot - wbd_tcold) * current_temp) >> 7; state->wbd_target = dib0090_wbd_to_db(state, state->wbd_offset + wbd_tcold); dprintk("wbd-target: %d dB", (u32) state->wbd_target); dprintk("wbd offset applied is %d", wbd_tcold); return state->wbd_offset + wbd_tcold; } EXPORT_SYMBOL(dib0090_get_wbd_target); u16 dib0090_get_wbd_offset(struct dvb_frontend *fe) { struct dib0090_state *state = fe->tuner_priv; return state->wbd_offset; } EXPORT_SYMBOL(dib0090_get_wbd_offset); int dib0090_set_switch(struct dvb_frontend *fe, u8 sw1, u8 sw2, u8 sw3) { struct dib0090_state *state = fe->tuner_priv; dib0090_write_reg(state, 0x0b, (dib0090_read_reg(state, 0x0b) & 0xfff8) | ((sw3 & 1) << 2) | ((sw2 & 1) << 1) | (sw1 & 1)); return 0; } EXPORT_SYMBOL(dib0090_set_switch); int dib0090_set_vga(struct dvb_frontend *fe, u8 onoff) { struct dib0090_state *state = fe->tuner_priv; dib0090_write_reg(state, 0x09, (dib0090_read_reg(state, 0x09) & 0x7fff) | ((onoff & 1) << 15)); return 0; } EXPORT_SYMBOL(dib0090_set_vga); int 
dib0090_update_rframp_7090(struct dvb_frontend *fe, u8 cfg_sensitivity) { struct dib0090_state *state = fe->tuner_priv; if ((!state->identity.p1g) || (!state->identity.in_soc) || ((state->identity.version != SOC_7090_P1G_21R1) && (state->identity.version != SOC_7090_P1G_11R1))) { dprintk("%s() function can only be used for dib7090P", __func__); return -ENODEV; } if (cfg_sensitivity) state->rf_ramp = (const u16 *)&rf_ramp_pwm_cband_7090e_sensitivity; else state->rf_ramp = (const u16 *)&rf_ramp_pwm_cband_7090e_aci; dib0090_pwm_gain_reset(fe); return 0; } EXPORT_SYMBOL(dib0090_update_rframp_7090); static const u16 dib0090_defaults[] = { 25, 0x01, 0x0000, 0x99a0, 0x6008, 0x0000, 0x8bcb, 0x0000, 0x0405, 0x0000, 0x0000, 0x0000, 0xb802, 0x0300, 0x2d12, 0xbac0, 0x7c00, 0xdbb9, 0x0954, 0x0743, 0x8000, 0x0001, 0x0040, 0x0100, 0x0000, 0xe910, 0x149e, 1, 0x1c, 0xff2d, 1, 0x39, 0x0000, 2, 0x1e, 0x07FF, 0x0007, 1, 0x24, EN_UHF | EN_CRYSTAL, 2, 0x3c, 0x3ff, 0x111, 0 }; static const u16 dib0090_p1g_additionnal_defaults[] = { 1, 0x05, 0xabcd, 1, 0x11, 0x00b4, 1, 0x1c, 0xfffd, 1, 0x40, 0x108, 0 }; static void dib0090_set_default_config(struct dib0090_state *state, const u16 * n) { u16 l, r; l = pgm_read_word(n++); while (l) { r = pgm_read_word(n++); do { dib0090_write_reg(state, r, pgm_read_word(n++)); r++; } while (--l); l = pgm_read_word(n++); } } #define CAP_VALUE_MIN (u8) 9 #define CAP_VALUE_MAX (u8) 40 #define HR_MIN (u8) 25 #define HR_MAX (u8) 40 #define POLY_MIN (u8) 0 #define POLY_MAX (u8) 8 static void dib0090_set_EFUSE(struct dib0090_state *state) { u8 c, h, n; u16 e2, e4; u16 cal; e2 = dib0090_read_reg(state, 0x26); e4 = dib0090_read_reg(state, 0x28); if ((state->identity.version == P1D_E_F) || (state->identity.version == P1G) || (e2 == 0xffff)) { dib0090_write_reg(state, 0x22, 0x10); cal = (dib0090_read_reg(state, 0x22) >> 6) & 0x3ff; if ((cal < 670) || (cal == 1023)) cal = 850; n = 165 - ((cal * 10)>>6) ; e2 = e4 = (3<<12) | (34<<6) | (n); } if (e2 != e4) e2 &= e4; /* Remove the redundancy */ if (e2 != 0xffff) { c = e2 & 0x3f; n = (e2 >> 12) & 0xf; h = (e2 >> 6) & 0x3f; if ((c >= CAP_VALUE_MAX) || (c <= CAP_VALUE_MIN)) c = 32; if ((h >= HR_MAX) || (h <= HR_MIN)) h = 34; if ((n >= POLY_MAX) || (n <= POLY_MIN)) n = 3; dib0090_write_reg(state, 0x13, (h << 10)) ; e2 = (n<<11) | ((h>>2)<<6) | (c); dib0090_write_reg(state, 0x2, e2) ; /* Load the BB_2 */ } } static int dib0090_reset(struct dvb_frontend *fe) { struct dib0090_state *state = fe->tuner_priv; dib0090_reset_digital(fe, state->config); if (dib0090_identify(fe) < 0) return -EIO; #ifdef CONFIG_TUNER_DIB0090_P1B_SUPPORT if (!(state->identity.version & 0x1)) /* it is P1B - reset is already done */ return 0; #endif if (!state->identity.in_soc) { if ((dib0090_read_reg(state, 0x1a) >> 5) & 0x2) dib0090_write_reg(state, 0x1b, (EN_IQADC | EN_BB | EN_BIAS | EN_DIGCLK | EN_PLL | EN_CRYSTAL)); else dib0090_write_reg(state, 0x1b, (EN_DIGCLK | EN_PLL | EN_CRYSTAL)); } dib0090_set_default_config(state, dib0090_defaults); if (state->identity.in_soc) dib0090_write_reg(state, 0x18, 0x2910); /* charge pump current = 0 */ if (state->identity.p1g) dib0090_set_default_config(state, dib0090_p1g_additionnal_defaults); /* Update the efuse : Only available for KROSUS > P1C and SOC as well */ if (((state->identity.version & 0x1f) >= P1D_E_F) || (state->identity.in_soc)) dib0090_set_EFUSE(state); /* Configure according to the crystal */ if (state->config->force_crystal_mode != 0) dib0090_write_reg(state, 0x14, state->config->force_crystal_mode & 3); else if
(state->config->io.clock_khz >= 24000) dib0090_write_reg(state, 0x14, 1); else dib0090_write_reg(state, 0x14, 2); dprintk("Pll lock : %d", (dib0090_read_reg(state, 0x1a) >> 11) & 0x1); state->calibrate = DC_CAL | WBD_CAL | TEMP_CAL; /* enable iq-offset-calibration and wbd-calibration when tuning next time */ return 0; } #define steps(u) (((u) > 15) ? ((u)-16) : (u)) #define INTERN_WAIT 10 static int dib0090_get_offset(struct dib0090_state *state, enum frontend_tune_state *tune_state) { int ret = INTERN_WAIT * 10; switch (*tune_state) { case CT_TUNER_STEP_2: /* Turns to positive */ dib0090_write_reg(state, 0x1f, 0x7); *tune_state = CT_TUNER_STEP_3; break; case CT_TUNER_STEP_3: state->adc_diff = dib0090_read_reg(state, 0x1d); /* Turns to negative */ dib0090_write_reg(state, 0x1f, 0x4); *tune_state = CT_TUNER_STEP_4; break; case CT_TUNER_STEP_4: state->adc_diff -= dib0090_read_reg(state, 0x1d); *tune_state = CT_TUNER_STEP_5; ret = 0; break; default: break; } return ret; } struct dc_calibration { u8 addr; u8 offset; u8 pga:1; u16 bb1; u8 i:1; }; static const struct dc_calibration dc_table[] = { /* Step1 BB gain1= 26 with boost 1, gain 2 = 0 */ {0x06, 5, 1, (1 << 13) | (0 << 8) | (26 << 3), 1}, {0x07, 11, 1, (1 << 13) | (0 << 8) | (26 << 3), 0}, /* Step 2 BB gain 1 = 26 with boost = 1 & gain 2 = 29 */ {0x06, 0, 0, (1 << 13) | (29 << 8) | (26 << 3), 1}, {0x06, 10, 0, (1 << 13) | (29 << 8) | (26 << 3), 0}, {0}, }; static const struct dc_calibration dc_p1g_table[] = { /* Step1 BB gain1= 26 with boost 1, gain 2 = 0 */ /* addr ; trim reg offset ; pga ; CTRL_BB1 value ; i or q */ {0x06, 5, 1, (1 << 13) | (0 << 8) | (15 << 3), 1}, {0x07, 11, 1, (1 << 13) | (0 << 8) | (15 << 3), 0}, /* Step 2 BB gain 1 = 26 with boost = 1 & gain 2 = 29 */ {0x06, 0, 0, (1 << 13) | (29 << 8) | (15 << 3), 1}, {0x06, 10, 0, (1 << 13) | (29 << 8) | (15 << 3), 0}, {0}, }; static void dib0090_set_trim(struct dib0090_state *state) { u16 *val; if (state->dc->addr == 0x07) val = &state->bb7; else val = &state->bb6; *val &= ~(0x1f << state->dc->offset); *val |= state->step << state->dc->offset; dib0090_write_reg(state, state->dc->addr, *val); } static int dib0090_dc_offset_calibration(struct dib0090_state *state, enum frontend_tune_state *tune_state) { int ret = 0; u16 reg; switch (*tune_state) { case CT_TUNER_START: dprintk("Start DC offset calibration"); /* force vcm2 = 0.8V */ state->bb6 = 0; state->bb7 = 0x040d; /* the LNA AND LO are off */ reg = dib0090_read_reg(state, 0x24) & 0x0ffb; /* shutdown lna and lo */ dib0090_write_reg(state, 0x24, reg); state->wbdmux = dib0090_read_reg(state, 0x10); dib0090_write_reg(state, 0x10, (state->wbdmux & ~(0xff << 3)) | (0x7 << 3) | 0x3); dib0090_write_reg(state, 0x23, dib0090_read_reg(state, 0x23) & ~(1 << 14)); state->dc = dc_table; if (state->identity.p1g) state->dc = dc_p1g_table; *tune_state = CT_TUNER_STEP_0; /* fall through */ case CT_TUNER_STEP_0: dprintk("Start/continue DC calibration for %s path", (state->dc->i == 1) ?
"I" : "Q"); dib0090_write_reg(state, 0x01, state->dc->bb1); dib0090_write_reg(state, 0x07, state->bb7 | (state->dc->i << 7)); state->step = 0; state->min_adc_diff = 1023; *tune_state = CT_TUNER_STEP_1; ret = 50; break; case CT_TUNER_STEP_1: dib0090_set_trim(state); *tune_state = CT_TUNER_STEP_2; break; case CT_TUNER_STEP_2: case CT_TUNER_STEP_3: case CT_TUNER_STEP_4: ret = dib0090_get_offset(state, tune_state); break; case CT_TUNER_STEP_5: /* found an offset */ dprintk("adc_diff = %d, current step= %d", (u32) state->adc_diff, state->step); if (state->step == 0 && state->adc_diff < 0) { state->min_adc_diff = -1023; dprintk("Change of sign of the minimum adc diff"); } dprintk("adc_diff = %d, min_adc_diff = %d current_step = %d", state->adc_diff, state->min_adc_diff, state->step); /* first turn for this frequency */ if (state->step == 0) { if (state->dc->pga && state->adc_diff < 0) state->step = 0x10; if (state->dc->pga == 0 && state->adc_diff > 0) state->step = 0x10; } /* Look for a change of Sign in the Adc_diff.min_adc_diff is used to STORE the setp N-1 */ if ((state->adc_diff & 0x8000) == (state->min_adc_diff & 0x8000) && steps(state->step) < 15) { /* stop search when the delta the sign is changing and Steps =15 and Step=0 is force for continuance */ state->step++; state->min_adc_diff = state->adc_diff; *tune_state = CT_TUNER_STEP_1; } else { /* the minimum was what we have seen in the step before */ if (ABS(state->adc_diff) > ABS(state->min_adc_diff)) { dprintk("Since adc_diff N = %d > adc_diff step N-1 = %d, Come back one step", state->adc_diff, state->min_adc_diff); state->step--; } dib0090_set_trim(state); dprintk("BB Offset Cal, BBreg=%hd,Offset=%hd,Value Set=%hd", state->dc->addr, state->adc_diff, state->step); state->dc++; if (state->dc->addr == 0) /* done */ *tune_state = CT_TUNER_STEP_6; else *tune_state = CT_TUNER_STEP_0; } break; case CT_TUNER_STEP_6: dib0090_write_reg(state, 0x07, state->bb7 & ~0x0008); dib0090_write_reg(state, 0x1f, 0x7); *tune_state = CT_TUNER_START; /* reset done -> real tuning can now begin */ state->calibrate &= ~DC_CAL; default: break; } return ret; } static int dib0090_wbd_calibration(struct dib0090_state *state, enum frontend_tune_state *tune_state) { u8 wbd_gain; const struct dib0090_wbd_slope *wbd = state->current_wbd_table; switch (*tune_state) { case CT_TUNER_START: while (state->current_rf / 1000 > wbd->max_freq) wbd++; if (wbd->wbd_gain != 0) wbd_gain = wbd->wbd_gain; else { wbd_gain = 4; #if defined(CONFIG_BAND_LBAND) || defined(CONFIG_BAND_SBAND) if ((state->current_band == BAND_LBAND) || (state->current_band == BAND_SBAND)) wbd_gain = 2; #endif } if (wbd_gain == state->wbd_calibration_gain) { /* the WBD calibration has already been done */ *tune_state = CT_TUNER_START; state->calibrate &= ~WBD_CAL; return 0; } dib0090_write_reg(state, 0x10, 0x1b81 | (1 << 10) | (wbd_gain << 13) | (1 << 3)); dib0090_write_reg(state, 0x24, ((EN_UHF & 0x0fff) | (1 << 1))); *tune_state = CT_TUNER_STEP_0; state->wbd_calibration_gain = wbd_gain; return 90; /* wait for the WBDMUX to switch and for the ADC to sample */ case CT_TUNER_STEP_0: state->wbd_offset = dib0090_get_slow_adc_val(state); dprintk("WBD calibration offset = %d", state->wbd_offset); *tune_state = CT_TUNER_START; /* reset done -> real tuning can now begin */ state->calibrate &= ~WBD_CAL; break; default: break; } return 0; } static void dib0090_set_bandwidth(struct dib0090_state *state) { u16 tmp; if (state->fe->dtv_property_cache.bandwidth_hz / 1000 <= 5000) tmp = (3 << 14); else if 
(state->fe->dtv_property_cache.bandwidth_hz / 1000 <= 6000) tmp = (2 << 14); else if (state->fe->dtv_property_cache.bandwidth_hz / 1000 <= 7000) tmp = (1 << 14); else tmp = (0 << 14); state->bb_1_def &= 0x3fff; state->bb_1_def |= tmp; dib0090_write_reg(state, 0x01, state->bb_1_def); /* be sure that we have the right bb-filter */ dib0090_write_reg(state, 0x03, 0x6008); /* = 0x6008 : vcm3_trim = 1 ; filter2_gm1_trim = 8 ; filter2_cutoff_freq = 0 */ dib0090_write_reg(state, 0x04, 0x1); /* 0 = 1KHz ; 1 = 50Hz ; 2 = 150Hz ; 3 = 50KHz ; 4 = servo fast */ if (state->identity.in_soc) { dib0090_write_reg(state, 0x05, 0x9bcf); /* attenuator_ibias_tri = 2 ; input_stage_ibias_tr = 1 ; nc = 11 ; ext_gm_trim = 1 ; obuf_ibias_trim = 4 ; filter13_gm2_ibias_t = 15 */ } else { dib0090_write_reg(state, 0x02, (5 << 11) | (8 << 6) | (22 & 0x3f)); /* 22 = cap_value */ dib0090_write_reg(state, 0x05, 0xabcd); /* = 0xabcd : attenuator_ibias_tri = 2 ; input_stage_ibias_tr = 2 ; nc = 11 ; ext_gm_trim = 1 ; obuf_ibias_trim = 4 ; filter13_gm2_ibias_t = 13 */ } } static const struct dib0090_pll dib0090_pll_table[] = { #ifdef CONFIG_BAND_CBAND {56000, 0, 9, 48, 6}, {70000, 1, 9, 48, 6}, {87000, 0, 8, 32, 4}, {105000, 1, 8, 32, 4}, {115000, 0, 7, 24, 6}, {140000, 1, 7, 24, 6}, {170000, 0, 6, 16, 4}, #endif #ifdef CONFIG_BAND_VHF {200000, 1, 6, 16, 4}, {230000, 0, 5, 12, 6}, {280000, 1, 5, 12, 6}, {340000, 0, 4, 8, 4}, {380000, 1, 4, 8, 4}, {450000, 0, 3, 6, 6}, #endif #ifdef CONFIG_BAND_UHF {580000, 1, 3, 6, 6}, {700000, 0, 2, 4, 4}, {860000, 1, 2, 4, 4}, #endif #ifdef CONFIG_BAND_LBAND {1800000, 1, 0, 2, 4}, #endif #ifdef CONFIG_BAND_SBAND {2900000, 0, 14, 1, 4}, #endif }; static const struct dib0090_tuning dib0090_tuning_table_fm_vhf_on_cband[] = { #ifdef CONFIG_BAND_CBAND {184000, 4, 1, 15, 0x280, 0x2912, 0xb94e, EN_CAB}, {227000, 4, 3, 15, 0x280, 0x2912, 0xb94e, EN_CAB}, {380000, 4, 7, 15, 0x280, 0x2912, 0xb94e, EN_CAB}, #endif #ifdef CONFIG_BAND_UHF {520000, 2, 0, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF}, {550000, 2, 2, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF}, {650000, 2, 3, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF}, {750000, 2, 5, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF}, {850000, 2, 6, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF}, {900000, 2, 7, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF}, #endif #ifdef CONFIG_BAND_LBAND {1500000, 4, 0, 20, 0x300, 0x1912, 0x82c9, EN_LBD}, {1600000, 4, 1, 20, 0x300, 0x1912, 0x82c9, EN_LBD}, {1800000, 4, 3, 20, 0x300, 0x1912, 0x82c9, EN_LBD}, #endif #ifdef CONFIG_BAND_SBAND {2300000, 1, 4, 20, 0x300, 0x2d2A, 0x82c7, EN_SBD}, {2900000, 1, 7, 20, 0x280, 0x2deb, 0x8347, EN_SBD}, #endif }; static const struct dib0090_tuning dib0090_tuning_table[] = { #ifdef CONFIG_BAND_CBAND {170000, 4, 1, 15, 0x280, 0x2912, 0xb94e, EN_CAB}, #endif #ifdef CONFIG_BAND_VHF {184000, 1, 1, 15, 0x300, 0x4d12, 0xb94e, EN_VHF}, {227000, 1, 3, 15, 0x300, 0x4d12, 0xb94e, EN_VHF}, {380000, 1, 7, 15, 0x300, 0x4d12, 0xb94e, EN_VHF}, #endif #ifdef CONFIG_BAND_UHF {520000, 2, 0, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF}, {550000, 2, 2, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF}, {650000, 2, 3, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF}, {750000, 2, 5, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF}, {850000, 2, 6, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF}, {900000, 2, 7, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF}, #endif #ifdef CONFIG_BAND_LBAND {1500000, 4, 0, 20, 0x300, 0x1912, 0x82c9, EN_LBD}, {1600000, 4, 1, 20, 0x300, 0x1912, 0x82c9, EN_LBD}, {1800000, 4, 3, 20, 0x300, 0x1912, 0x82c9, EN_LBD}, #endif #ifdef CONFIG_BAND_SBAND {2300000, 1, 4, 20, 0x300, 0x2d2A, 0x82c7, EN_SBD}, {2900000, 1, 7, 20, 0x280, 
0x2deb, 0x8347, EN_SBD}, #endif }; static const struct dib0090_tuning dib0090_p1g_tuning_table[] = { #ifdef CONFIG_BAND_CBAND {170000, 4, 1, 0x820f, 0x300, 0x2d22, 0x82cb, EN_CAB}, #endif #ifdef CONFIG_BAND_VHF {184000, 1, 1, 15, 0x300, 0x4d12, 0xb94e, EN_VHF}, {227000, 1, 3, 15, 0x300, 0x4d12, 0xb94e, EN_VHF}, {380000, 1, 7, 15, 0x300, 0x4d12, 0xb94e, EN_VHF}, #endif #ifdef CONFIG_BAND_UHF {510000, 2, 0, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF}, {540000, 2, 1, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF}, {600000, 2, 3, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF}, {630000, 2, 4, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF}, {680000, 2, 5, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF}, {720000, 2, 6, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF}, {900000, 2, 7, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF}, #endif #ifdef CONFIG_BAND_LBAND {1500000, 4, 0, 20, 0x300, 0x1912, 0x82c9, EN_LBD}, {1600000, 4, 1, 20, 0x300, 0x1912, 0x82c9, EN_LBD}, {1800000, 4, 3, 20, 0x300, 0x1912, 0x82c9, EN_LBD}, #endif #ifdef CONFIG_BAND_SBAND {2300000, 1, 4, 20, 0x300, 0x2d2A, 0x82c7, EN_SBD}, {2900000, 1, 7, 20, 0x280, 0x2deb, 0x8347, EN_SBD}, #endif }; static const struct dib0090_pll dib0090_p1g_pll_table[] = { #ifdef CONFIG_BAND_CBAND {57000, 0, 11, 48, 6}, {70000, 1, 11, 48, 6}, {86000, 0, 10, 32, 4}, {105000, 1, 10, 32, 4}, {115000, 0, 9, 24, 6}, {140000, 1, 9, 24, 6}, {170000, 0, 8, 16, 4}, #endif #ifdef CONFIG_BAND_VHF {200000, 1, 8, 16, 4}, {230000, 0, 7, 12, 6}, {280000, 1, 7, 12, 6}, {340000, 0, 6, 8, 4}, {380000, 1, 6, 8, 4}, {455000, 0, 5, 6, 6}, #endif #ifdef CONFIG_BAND_UHF {580000, 1, 5, 6, 6}, {680000, 0, 4, 4, 4}, {860000, 1, 4, 4, 4}, #endif #ifdef CONFIG_BAND_LBAND {1800000, 1, 2, 2, 4}, #endif #ifdef CONFIG_BAND_SBAND {2900000, 0, 1, 1, 6}, #endif }; static const struct dib0090_tuning dib0090_p1g_tuning_table_fm_vhf_on_cband[] = { #ifdef CONFIG_BAND_CBAND {184000, 4, 3, 0x4187, 0x2c0, 0x2d22, 0x81cb, EN_CAB}, {227000, 4, 3, 0x4187, 0x2c0, 0x2d22, 0x81cb, EN_CAB}, {380000, 4, 3, 0x4187, 0x2c0, 0x2d22, 0x81cb, EN_CAB}, #endif #ifdef CONFIG_BAND_UHF {520000, 2, 0, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF}, {550000, 2, 2, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF}, {650000, 2, 3, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF}, {750000, 2, 5, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF}, {850000, 2, 6, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF}, {900000, 2, 7, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF}, #endif #ifdef CONFIG_BAND_LBAND {1500000, 4, 0, 20, 0x300, 0x1912, 0x82c9, EN_LBD}, {1600000, 4, 1, 20, 0x300, 0x1912, 0x82c9, EN_LBD}, {1800000, 4, 3, 20, 0x300, 0x1912, 0x82c9, EN_LBD}, #endif #ifdef CONFIG_BAND_SBAND {2300000, 1, 4, 20, 0x300, 0x2d2A, 0x82c7, EN_SBD}, {2900000, 1, 7, 20, 0x280, 0x2deb, 0x8347, EN_SBD}, #endif }; static const struct dib0090_tuning dib0090_tuning_table_cband_7090[] = { #ifdef CONFIG_BAND_CBAND {300000, 4, 3, 0x018F, 0x2c0, 0x2d22, 0xb9ce, EN_CAB}, {380000, 4, 10, 0x018F, 0x2c0, 0x2d22, 0xb9ce, EN_CAB}, {570000, 4, 10, 0x8190, 0x2c0, 0x2d22, 0xb9ce, EN_CAB}, {858000, 4, 5, 0x8190, 0x2c0, 0x2d22, 0xb9ce, EN_CAB}, #endif }; static const struct dib0090_tuning dib0090_tuning_table_cband_7090e_sensitivity[] = { #ifdef CONFIG_BAND_CBAND { 300000, 0 , 3, 0x8105, 0x2c0, 0x2d12, 0xb84e, EN_CAB }, { 380000, 0 , 10, 0x810F, 0x2c0, 0x2d12, 0xb84e, EN_CAB }, { 600000, 0 , 10, 0x815E, 0x280, 0x2d12, 0xb84e, EN_CAB }, { 660000, 0 , 5, 0x85E3, 0x280, 0x2d12, 0xb84e, EN_CAB }, { 720000, 0 , 5, 0x852E, 0x280, 0x2d12, 0xb84e, EN_CAB }, { 860000, 0 , 4, 0x85E5, 0x280, 0x2d12, 0xb84e, EN_CAB }, #endif }; int dib0090_update_tuning_table_7090(struct dvb_frontend *fe, u8 cfg_sensitivity) { struct 
dib0090_state *state = fe->tuner_priv; const struct dib0090_tuning *tune = dib0090_tuning_table_cband_7090e_sensitivity; const struct dib0090_tuning dib0090_tuning_table_cband_7090e_aci[] = { { 300000, 0 , 3, 0x8165, 0x2c0, 0x2d12, 0xb84e, EN_CAB }, { 650000, 0 , 4, 0x815B, 0x280, 0x2d12, 0xb84e, EN_CAB }, { 860000, 0 , 5, 0x84EF, 0x280, 0x2d12, 0xb84e, EN_CAB }, }; if ((!state->identity.p1g) || (!state->identity.in_soc) || ((state->identity.version != SOC_7090_P1G_21R1) && (state->identity.version != SOC_7090_P1G_11R1))) { dprintk("%s() function can only be used for dib7090", __func__); return -ENODEV; } if (cfg_sensitivity) tune = dib0090_tuning_table_cband_7090e_sensitivity; else tune = dib0090_tuning_table_cband_7090e_aci; while (state->rf_request > tune->max_freq) tune++; dib0090_write_reg(state, 0x09, (dib0090_read_reg(state, 0x09) & 0x8000) | (tune->lna_bias & 0x7fff)); dib0090_write_reg(state, 0x0b, (dib0090_read_reg(state, 0x0b) & 0xf83f) | ((tune->lna_tune << 6) & 0x07c0)); return 0; } EXPORT_SYMBOL(dib0090_update_tuning_table_7090); static int dib0090_captrim_search(struct dib0090_state *state, enum frontend_tune_state *tune_state) { int ret = 0; u16 lo4 = 0xe900; s16 adc_target; u16 adc; s8 step_sign; u8 force_soft_search = 0; if (state->identity.version == SOC_8090_P1G_11R1 || state->identity.version == SOC_8090_P1G_21R1) force_soft_search = 1; if (*tune_state == CT_TUNER_START) { dprintk("Start Captrim search : %s", (force_soft_search == 1) ? "FORCE SOFT SEARCH" : "AUTO"); dib0090_write_reg(state, 0x10, 0x2B1); dib0090_write_reg(state, 0x1e, 0x0032); if (!state->tuner_is_tuned) { /* prepare a complete captrim */ if (!state->identity.p1g || force_soft_search) state->step = state->captrim = state->fcaptrim = 64; state->current_rf = state->rf_request; } else { /* we are already tuned to this frequency - the configuration is correct */ if (!state->identity.p1g || force_soft_search) { /* do a minimal captrim even if the frequency has not changed */ state->step = 4; state->captrim = state->fcaptrim = dib0090_read_reg(state, 0x18) & 0x7f; } } state->adc_diff = 3000; *tune_state = CT_TUNER_STEP_0; } else if (*tune_state == CT_TUNER_STEP_0) { if (state->identity.p1g && !force_soft_search) { u8 ratio = 31; dib0090_write_reg(state, 0x40, (3 << 7) | (ratio << 2) | (1 << 1) | 1); dib0090_read_reg(state, 0x40); ret = 50; } else { state->step /= 2; dib0090_write_reg(state, 0x18, lo4 | state->captrim); if (state->identity.in_soc) ret = 25; } *tune_state = CT_TUNER_STEP_1; } else if (*tune_state == CT_TUNER_STEP_1) { if (state->identity.p1g && !force_soft_search) { dib0090_write_reg(state, 0x40, 0x18c | (0 << 1) | 0); dib0090_read_reg(state, 0x40); state->fcaptrim = dib0090_read_reg(state, 0x18) & 0x7F; dprintk("***Final Captrim= 0x%x", state->fcaptrim); *tune_state = CT_TUNER_STEP_3; } else { /* MERGE for all krosus before P1G */ adc = dib0090_get_slow_adc_val(state); dprintk("CAPTRIM=%d; ADC = %d (ADC) & %dmV", (u32) state->captrim, (u32) adc, (u32) (adc) * (u32) 1800 / (u32) 1024); if (state->rest == 0 || state->identity.in_soc) { /* Just for 8090P SOCS where auto captrim HW bug : TO CHECK IN ACI for SOCS !!! if 400 for 8090p SOC => tune issue !!! 
*/ adc_target = 200; } else adc_target = 400; if (adc >= adc_target) { adc -= adc_target; step_sign = -1; } else { adc = adc_target - adc; step_sign = 1; } if (adc < state->adc_diff) { dprintk("CAPTRIM=%d is closer to target (%d/%d)", (u32) state->captrim, (u32) adc, (u32) state->adc_diff); state->adc_diff = adc; state->fcaptrim = state->captrim; } state->captrim += step_sign * state->step; if (state->step >= 1) *tune_state = CT_TUNER_STEP_0; else *tune_state = CT_TUNER_STEP_2; ret = 25; } } else if (*tune_state == CT_TUNER_STEP_2) { /* this step is only used by krosus < P1G */ /*write the final cptrim config */ dib0090_write_reg(state, 0x18, lo4 | state->fcaptrim); *tune_state = CT_TUNER_STEP_3; } else if (*tune_state == CT_TUNER_STEP_3) { state->calibrate &= ~CAPTRIM_CAL; *tune_state = CT_TUNER_STEP_0; } return ret; } static int dib0090_get_temperature(struct dib0090_state *state, enum frontend_tune_state *tune_state) { int ret = 15; s16 val; switch (*tune_state) { case CT_TUNER_START: state->wbdmux = dib0090_read_reg(state, 0x10); dib0090_write_reg(state, 0x10, (state->wbdmux & ~(0xff << 3)) | (0x8 << 3)); state->bias = dib0090_read_reg(state, 0x13); dib0090_write_reg(state, 0x13, state->bias | (0x3 << 8)); *tune_state = CT_TUNER_STEP_0; /* wait for the WBDMUX to switch and for the ADC to sample */ break; case CT_TUNER_STEP_0: state->adc_diff = dib0090_get_slow_adc_val(state); dib0090_write_reg(state, 0x13, (state->bias & ~(0x3 << 8)) | (0x2 << 8)); *tune_state = CT_TUNER_STEP_1; break; case CT_TUNER_STEP_1: val = dib0090_get_slow_adc_val(state); state->temperature = ((s16) ((val - state->adc_diff) * 180) >> 8) + 55; dprintk("temperature: %d C", state->temperature - 30); *tune_state = CT_TUNER_STEP_2; break; case CT_TUNER_STEP_2: dib0090_write_reg(state, 0x13, state->bias); dib0090_write_reg(state, 0x10, state->wbdmux); /* write back original WBDMUX */ *tune_state = CT_TUNER_START; state->calibrate &= ~TEMP_CAL; if (state->config->analog_output == 0) dib0090_write_reg(state, 0x23, dib0090_read_reg(state, 0x23) | (1 << 14)); break; default: ret = 0; break; } return ret; } #define WBD 0x781 /* 1 1 1 1 0000 0 0 1 */ static int dib0090_tune(struct dvb_frontend *fe) { struct dib0090_state *state = fe->tuner_priv; const struct dib0090_tuning *tune = state->current_tune_table_index; const struct dib0090_pll *pll = state->current_pll_table_index; enum frontend_tune_state *tune_state = &state->tune_state; u16 lo5, lo6, Den, tmp; u32 FBDiv, Rest, FREF, VCOF_kHz = 0; int ret = 10; /* 1ms is the default delay most of the time */ u8 c, i; /************************* VCO ***************************/ /* Default values for FG */ /* from these are needed : */ /* Cp,HFdiv,VCOband,SD,Num,Den,FB and REFDiv */ /* in any case we first need to do a calibration if needed */ if (*tune_state == CT_TUNER_START) { /* deactivate DataTX before some calibrations */ if (state->calibrate & (DC_CAL | TEMP_CAL | WBD_CAL)) dib0090_write_reg(state, 0x23, dib0090_read_reg(state, 0x23) & ~(1 << 14)); else /* Activate DataTX in case a calibration has been done before */ if (state->config->analog_output == 0) dib0090_write_reg(state, 0x23, dib0090_read_reg(state, 0x23) | (1 << 14)); } if (state->calibrate & DC_CAL) return dib0090_dc_offset_calibration(state, tune_state); else if (state->calibrate & WBD_CAL) { if (state->current_rf == 0) state->current_rf = state->fe->dtv_property_cache.frequency / 1000; return dib0090_wbd_calibration(state, tune_state); } else if (state->calibrate & TEMP_CAL) return 
dib0090_get_temperature(state, tune_state); else if (state->calibrate & CAPTRIM_CAL) return dib0090_captrim_search(state, tune_state); if (*tune_state == CT_TUNER_START) { /* if soc and AGC pwm control, disengage mux to be able to R/W access to 0x01 register to set the right filter (cutoff_freq_select) during the tune sequence, otherwise, SOC SERPAR error when accessing to 0x01 */ if (state->config->use_pwm_agc && state->identity.in_soc) { tmp = dib0090_read_reg(state, 0x39); if ((tmp >> 10) & 0x1) dib0090_write_reg(state, 0x39, tmp & ~(1 << 10)); } state->current_band = (u8) BAND_OF_FREQUENCY(state->fe->dtv_property_cache.frequency / 1000); state->rf_request = state->fe->dtv_property_cache.frequency / 1000 + (state->current_band == BAND_UHF ? state->config->freq_offset_khz_uhf : state->config-> freq_offset_khz_vhf); /* in ISDB-T 1seg we shift tuning frequency */ if ((state->fe->dtv_property_cache.delivery_system == SYS_ISDBT && state->fe->dtv_property_cache.isdbt_sb_mode == 1 && state->fe->dtv_property_cache.isdbt_partial_reception == 0)) { const struct dib0090_low_if_offset_table *LUT_offset = state->config->low_if; u8 found_offset = 0; u32 margin_khz = 100; if (LUT_offset != NULL) { while (LUT_offset->RF_freq != 0xffff) { if (((state->rf_request > (LUT_offset->RF_freq - margin_khz)) && (state->rf_request < (LUT_offset->RF_freq + margin_khz))) && LUT_offset->std == state->fe->dtv_property_cache.delivery_system) { state->rf_request += LUT_offset->offset_khz; found_offset = 1; break; } LUT_offset++; } } if (found_offset == 0) state->rf_request += 400; } if (state->current_rf != state->rf_request || (state->current_standard != state->fe->dtv_property_cache.delivery_system)) { state->tuner_is_tuned = 0; state->current_rf = 0; state->current_standard = 0; tune = dib0090_tuning_table; if (state->identity.p1g) tune = dib0090_p1g_tuning_table; tmp = (state->identity.version >> 5) & 0x7; if (state->identity.in_soc) { if (state->config->force_cband_input) { /* Use the CBAND input for all band */ if (state->current_band & BAND_CBAND || state->current_band & BAND_FM || state->current_band & BAND_VHF || state->current_band & BAND_UHF) { state->current_band = BAND_CBAND; if (state->config->is_dib7090e) tune = dib0090_tuning_table_cband_7090e_sensitivity; else tune = dib0090_tuning_table_cband_7090; } } else { /* Use the CBAND input for all band under UHF */ if (state->current_band & BAND_CBAND || state->current_band & BAND_FM || state->current_band & BAND_VHF) { state->current_band = BAND_CBAND; if (state->config->is_dib7090e) tune = dib0090_tuning_table_cband_7090e_sensitivity; else tune = dib0090_tuning_table_cband_7090; } } } else if (tmp == 0x4 || tmp == 0x7) { /* CBAND tuner version for VHF */ if (state->current_band == BAND_FM || state->current_band == BAND_CBAND || state->current_band == BAND_VHF) { state->current_band = BAND_CBAND; /* Force CBAND */ tune = dib0090_tuning_table_fm_vhf_on_cband; if (state->identity.p1g) tune = dib0090_p1g_tuning_table_fm_vhf_on_cband; } } pll = dib0090_pll_table; if (state->identity.p1g) pll = dib0090_p1g_pll_table; /* Look for the interval */ while (state->rf_request > tune->max_freq) tune++; while (state->rf_request > pll->max_freq) pll++; state->current_tune_table_index = tune; state->current_pll_table_index = pll; dib0090_write_reg(state, 0x0b, 0xb800 | (tune->switch_trim)); VCOF_kHz = (pll->hfdiv * state->rf_request) * 2; FREF = state->config->io.clock_khz; if (state->config->fref_clock_ratio != 0) FREF /= state->config->fref_clock_ratio; FBDiv = (VCOF_kHz 
/ pll->topresc / FREF); Rest = (VCOF_kHz / pll->topresc) - FBDiv * FREF; if (Rest < LPF) Rest = 0; else if (Rest < 2 * LPF) Rest = 2 * LPF; else if (Rest > (FREF - LPF)) { Rest = 0; FBDiv += 1; } else if (Rest > (FREF - 2 * LPF)) Rest = FREF - 2 * LPF; Rest = (Rest * 6528) / (FREF / 10); state->rest = Rest; /* external loop filter, otherwise: * lo5 = (0 << 15) | (0 << 12) | (0 << 11) | (3 << 9) | (4 << 6) | (3 << 4) | 4; * lo6 = 0x0e34 */ if (Rest == 0) { if (pll->vco_band) lo5 = 0x049f; else lo5 = 0x041f; } else { if (pll->vco_band) lo5 = 0x049e; else if (state->config->analog_output) lo5 = 0x041d; else lo5 = 0x041c; } if (state->identity.p1g) { /* Bias is done automatically in P1G */ if (state->identity.in_soc) { if (state->identity.version == SOC_8090_P1G_11R1) lo5 = 0x46f; else lo5 = 0x42f; } else lo5 = 0x42c; } lo5 |= (pll->hfdiv_code << 11) | (pll->vco_band << 7); /* bit 15 is the split to the slave, we do not do it here */ if (!state->config->io.pll_int_loop_filt) { if (state->identity.in_soc) lo6 = 0xff98; else if (state->identity.p1g || (Rest == 0)) lo6 = 0xfff8; else lo6 = 0xff28; } else lo6 = (state->config->io.pll_int_loop_filt << 3); Den = 1; if (Rest > 0) { if (state->config->analog_output) lo6 |= (1 << 2) | 2; else { if (state->identity.in_soc) lo6 |= (1 << 2) | 2; else lo6 |= (1 << 2) | 2; } Den = 255; } dib0090_write_reg(state, 0x15, (u16) FBDiv); if (state->config->fref_clock_ratio != 0) dib0090_write_reg(state, 0x16, (Den << 8) | state->config->fref_clock_ratio); else dib0090_write_reg(state, 0x16, (Den << 8) | 1); dib0090_write_reg(state, 0x17, (u16) Rest); dib0090_write_reg(state, 0x19, lo5); dib0090_write_reg(state, 0x1c, lo6); lo6 = tune->tuner_enable; if (state->config->analog_output) lo6 = (lo6 & 0xff9f) | 0x2; dib0090_write_reg(state, 0x24, lo6 | EN_LO | state->config->use_pwm_agc * EN_CRYSTAL); } state->current_rf = state->rf_request; state->current_standard = state->fe->dtv_property_cache.delivery_system; ret = 20; state->calibrate = CAPTRIM_CAL; /* captrim serach now */ } else if (*tune_state == CT_TUNER_STEP_0) { /* Warning : because of captrim cal, if you change this step, change it also in _cal.c file because it is the step following captrim cal state machine */ const struct dib0090_wbd_slope *wbd = state->current_wbd_table; while (state->current_rf / 1000 > wbd->max_freq) wbd++; dib0090_write_reg(state, 0x1e, 0x07ff); dprintk("Final Captrim: %d", (u32) state->fcaptrim); dprintk("HFDIV code: %d", (u32) pll->hfdiv_code); dprintk("VCO = %d", (u32) pll->vco_band); dprintk("VCOF in kHz: %d ((%d*%d) << 1))", (u32) ((pll->hfdiv * state->rf_request) * 2), (u32) pll->hfdiv, (u32) state->rf_request); dprintk("REFDIV: %d, FREF: %d", (u32) 1, (u32) state->config->io.clock_khz); dprintk("FBDIV: %d, Rest: %d", (u32) dib0090_read_reg(state, 0x15), (u32) dib0090_read_reg(state, 0x17)); dprintk("Num: %d, Den: %d, SD: %d", (u32) dib0090_read_reg(state, 0x17), (u32) (dib0090_read_reg(state, 0x16) >> 8), (u32) dib0090_read_reg(state, 0x1c) & 0x3); #define WBD 0x781 /* 1 1 1 1 0000 0 0 1 */ c = 4; i = 3; if (wbd->wbd_gain != 0) c = wbd->wbd_gain; state->wbdmux = (c << 13) | (i << 11) | (WBD | (state->config->use_pwm_agc << 1)); dib0090_write_reg(state, 0x10, state->wbdmux); if ((tune->tuner_enable == EN_CAB) && state->identity.p1g) { dprintk("P1G : The cable band is selected and lna_tune = %d", tune->lna_tune); dib0090_write_reg(state, 0x09, tune->lna_bias); dib0090_write_reg(state, 0x0b, 0xb800 | (tune->lna_tune << 6) | (tune->switch_trim)); } else dib0090_write_reg(state, 
0x09, (tune->lna_tune << 5) | tune->lna_bias); dib0090_write_reg(state, 0x0c, tune->v2i); dib0090_write_reg(state, 0x0d, tune->mix); dib0090_write_reg(state, 0x0e, tune->load); *tune_state = CT_TUNER_STEP_1; } else if (*tune_state == CT_TUNER_STEP_1) { /* initialize the lt gain register */ state->rf_lt_def = 0x7c00; dib0090_set_bandwidth(state); state->tuner_is_tuned = 1; state->calibrate |= WBD_CAL; state->calibrate |= TEMP_CAL; *tune_state = CT_TUNER_STOP; } else ret = FE_CALLBACK_TIME_NEVER; return ret; } static int dib0090_release(struct dvb_frontend *fe) { kfree(fe->tuner_priv); fe->tuner_priv = NULL; return 0; } enum frontend_tune_state dib0090_get_tune_state(struct dvb_frontend *fe) { struct dib0090_state *state = fe->tuner_priv; return state->tune_state; } EXPORT_SYMBOL(dib0090_get_tune_state); int dib0090_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tune_state) { struct dib0090_state *state = fe->tuner_priv; state->tune_state = tune_state; return 0; } EXPORT_SYMBOL(dib0090_set_tune_state); static int dib0090_get_frequency(struct dvb_frontend *fe, u32 * frequency) { struct dib0090_state *state = fe->tuner_priv; *frequency = 1000 * state->current_rf; return 0; } static int dib0090_set_params(struct dvb_frontend *fe) { struct dib0090_state *state = fe->tuner_priv; u32 ret; state->tune_state = CT_TUNER_START; do { ret = dib0090_tune(fe); if (ret != FE_CALLBACK_TIME_NEVER) msleep(ret / 10); else break; } while (state->tune_state != CT_TUNER_STOP); return 0; } static const struct dvb_tuner_ops dib0090_ops = { .info = { .name = "DiBcom DiB0090", .frequency_min = 45000000, .frequency_max = 860000000, .frequency_step = 1000, }, .release = dib0090_release, .init = dib0090_wakeup, .sleep = dib0090_sleep, .set_params = dib0090_set_params, .get_frequency = dib0090_get_frequency, }; static const struct dvb_tuner_ops dib0090_fw_ops = { .info = { .name = "DiBcom DiB0090", .frequency_min = 45000000, .frequency_max = 860000000, .frequency_step = 1000, }, .release = dib0090_release, .init = NULL, .sleep = NULL, .set_params = NULL, .get_frequency = NULL, }; static const struct dib0090_wbd_slope dib0090_wbd_table_default[] = { {470, 0, 250, 0, 100, 4}, {860, 51, 866, 21, 375, 4}, {1700, 0, 800, 0, 850, 4}, {2900, 0, 250, 0, 100, 6}, {0xFFFF, 0, 0, 0, 0, 0}, }; struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config) { struct dib0090_state *st = kzalloc(sizeof(struct dib0090_state), GFP_KERNEL); if (st == NULL) return NULL; st->config = config; st->i2c = i2c; st->fe = fe; mutex_init(&st->i2c_buffer_lock); fe->tuner_priv = st; if (config->wbd == NULL) st->current_wbd_table = dib0090_wbd_table_default; else st->current_wbd_table = config->wbd; if (dib0090_reset(fe) != 0) goto free_mem; printk(KERN_INFO "DiB0090: successfully identified\n"); memcpy(&fe->ops.tuner_ops, &dib0090_ops, sizeof(struct dvb_tuner_ops)); return fe; free_mem: kfree(st); fe->tuner_priv = NULL; return NULL; } EXPORT_SYMBOL(dib0090_register); struct dvb_frontend *dib0090_fw_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config) { struct dib0090_fw_state *st = kzalloc(sizeof(struct dib0090_fw_state), GFP_KERNEL); if (st == NULL) return NULL; st->config = config; st->i2c = i2c; st->fe = fe; mutex_init(&st->i2c_buffer_lock); fe->tuner_priv = st; if (dib0090_fw_reset_digital(fe, st->config) != 0) goto free_mem; dprintk("DiB0090 FW: successfully identified"); memcpy(&fe->ops.tuner_ops, &dib0090_fw_ops, 
sizeof(struct dvb_tuner_ops)); return fe; free_mem: kfree(st); fe->tuner_priv = NULL; return NULL; } EXPORT_SYMBOL(dib0090_fw_register); MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>"); MODULE_AUTHOR("Olivier Grenie <olivier.grenie@dibcom.fr>"); MODULE_DESCRIPTION("Driver for the DiBcom 0090 base-band RF Tuner"); MODULE_LICENSE("GPL");
gpl-2.0
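The dib0090_set_params() loop near the end of the record above drives tuning as a cooperative state machine: each call to dib0090_tune() advances one state and returns how long the caller should sleep before the next step, until the machine reaches CT_TUNER_STOP. Below is a minimal user-space sketch of that pattern; the demo_* names are hypothetical stand-ins for the driver's state and step function, not the real API.

/*
 * Minimal user-space sketch (not part of the driver) of the cooperative
 * tuning state machine pattern: each step returns a wait time in ms, and
 * the caller loops until the machine reaches its STOP state.
 */
#include <stdio.h>
#include <unistd.h>

enum demo_tune_state { DEMO_START, DEMO_STEP_0, DEMO_STEP_1, DEMO_STOP };

/* One step: advance the state, return how long to wait before the next. */
static int demo_tune(enum demo_tune_state *state)
{
	switch (*state) {
	case DEMO_START:
		*state = DEMO_STEP_0;
		return 20;	/* e.g. wait for a PLL to settle */
	case DEMO_STEP_0:
		*state = DEMO_STEP_1;
		return 10;	/* e.g. wait for an ADC sample */
	case DEMO_STEP_1:
		*state = DEMO_STOP;
		return 0;
	default:
		return 0;
	}
}

int main(void)
{
	enum demo_tune_state state = DEMO_START;
	int wait_ms;

	do {
		wait_ms = demo_tune(&state);
		printf("state=%d, sleeping %d ms\n", (int)state, wait_ms);
		usleep((useconds_t)wait_ms * 1000);
	} while (state != DEMO_STOP);
	return 0;
}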
GustavoRD78/78Kernel-ZL-283
mm/sparse-vmemmap.c
5404
5999
/* * Virtual Memory Map support * * (C) 2007 sgi. Christoph Lameter. * * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn, * virt_to_page, page_address() to be implemented as a base offset * calculation without memory access. * * However, virtual mappings need a page table and TLBs. Many Linux * architectures already map their physical space using 1-1 mappings * via TLBs. For those arches the virtual memory map is essentially * for free if we use the same page size as the 1-1 mappings. In that * case the overhead consists of a few additional pages that are * allocated to create a view of memory for vmemmap. * * The architecture is expected to provide a vmemmap_populate() function * to instantiate the mapping. */ #include <linux/mm.h> #include <linux/mmzone.h> #include <linux/bootmem.h> #include <linux/highmem.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/vmalloc.h> #include <linux/sched.h> #include <asm/dma.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> /* * Allocate a block of memory to be used to back the virtual memory map * or to back the page tables that are used to create the mapping. * Uses the main allocators if they are available, else bootmem. */ static void * __init_refok __earlyonly_bootmem_alloc(int node, unsigned long size, unsigned long align, unsigned long goal) { return __alloc_bootmem_node_high(NODE_DATA(node), size, align, goal); } static void *vmemmap_buf; static void *vmemmap_buf_end; void * __meminit vmemmap_alloc_block(unsigned long size, int node) { /* If the main allocator is up use that, fallback to bootmem. */ if (slab_is_available()) { struct page *page; if (node_state(node, N_HIGH_MEMORY)) page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, get_order(size)); else page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(size)); if (page) return page_address(page); return NULL; } else return __earlyonly_bootmem_alloc(node, size, size, __pa(MAX_DMA_ADDRESS)); } /* need to make sure size is all the same during early stage */ void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node) { void *ptr; if (!vmemmap_buf) return vmemmap_alloc_block(size, node); /* take the from buf */ ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size); if (ptr + size > vmemmap_buf_end) return vmemmap_alloc_block(size, node); vmemmap_buf = ptr + size; return ptr; } void __meminit vmemmap_verify(pte_t *pte, int node, unsigned long start, unsigned long end) { unsigned long pfn = pte_pfn(*pte); int actual_node = early_pfn_to_nid(pfn); if (node_distance(actual_node, node) > LOCAL_DISTANCE) printk(KERN_WARNING "[%lx-%lx] potential offnode " "page_structs\n", start, end - 1); } pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node) { pte_t *pte = pte_offset_kernel(pmd, addr); if (pte_none(*pte)) { pte_t entry; void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node); if (!p) return NULL; entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL); set_pte_at(&init_mm, addr, pte, entry); } return pte; } pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node) { pmd_t *pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) { void *p = vmemmap_alloc_block(PAGE_SIZE, node); if (!p) return NULL; pmd_populate_kernel(&init_mm, pmd, p); } return pmd; } pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node) { pud_t *pud = pud_offset(pgd, addr); if (pud_none(*pud)) { void *p = vmemmap_alloc_block(PAGE_SIZE, node); if (!p) return NULL; pud_populate(&init_mm, pud, p); } return pud; } pgd_t * 
__meminit vmemmap_pgd_populate(unsigned long addr, int node) { pgd_t *pgd = pgd_offset_k(addr); if (pgd_none(*pgd)) { void *p = vmemmap_alloc_block(PAGE_SIZE, node); if (!p) return NULL; pgd_populate(&init_mm, pgd, p); } return pgd; } int __meminit vmemmap_populate_basepages(struct page *start_page, unsigned long size, int node) { unsigned long addr = (unsigned long)start_page; unsigned long end = (unsigned long)(start_page + size); pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; for (; addr < end; addr += PAGE_SIZE) { pgd = vmemmap_pgd_populate(addr, node); if (!pgd) return -ENOMEM; pud = vmemmap_pud_populate(pgd, addr, node); if (!pud) return -ENOMEM; pmd = vmemmap_pmd_populate(pud, addr, node); if (!pmd) return -ENOMEM; pte = vmemmap_pte_populate(pmd, addr, node); if (!pte) return -ENOMEM; vmemmap_verify(pte, node, addr, addr + PAGE_SIZE); } return 0; } struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid) { struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION); int error = vmemmap_populate(map, PAGES_PER_SECTION, nid); if (error) return NULL; return map; } void __init sparse_mem_maps_populate_node(struct page **map_map, unsigned long pnum_begin, unsigned long pnum_end, unsigned long map_count, int nodeid) { unsigned long pnum; unsigned long size = sizeof(struct page) * PAGES_PER_SECTION; void *vmemmap_buf_start; size = ALIGN(size, PMD_SIZE); vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count, PMD_SIZE, __pa(MAX_DMA_ADDRESS)); if (vmemmap_buf_start) { vmemmap_buf = vmemmap_buf_start; vmemmap_buf_end = vmemmap_buf_start + size * map_count; } for (pnum = pnum_begin; pnum < pnum_end; pnum++) { struct mem_section *ms; if (!present_section_nr(pnum)) continue; map_map[pnum] = sparse_mem_map_populate(pnum, nodeid); if (map_map[pnum]) continue; ms = __nr_to_section(pnum); printk(KERN_ERR "%s: sparsemem memory map backing failed " "some memory will not be available.\n", __func__); ms->section_mem_map = 0; } if (vmemmap_buf_start) { /* need to free left buf */ free_bootmem(__pa(vmemmap_buf), vmemmap_buf_end - vmemmap_buf); vmemmap_buf = NULL; vmemmap_buf_end = NULL; } }
gpl-2.0
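vmemmap_alloc_block_buf() in the record above carves size-aligned blocks out of a single preallocated buffer and only falls back to the general allocator once the buffer is exhausted. The standalone sketch below shows that aligned bump-allocator pattern; the demo_* names and the malloc() fallback are illustrative assumptions, and like the kernel caller it assumes power-of-two block sizes.

/*
 * User-space sketch (not kernel code) of the aligned bump-allocator
 * pattern: round the cursor up to the block size, hand out the block,
 * and fall back to a general allocator when the buffer runs out.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* 'a' must be a power of two (the kernel passes page-sized blocks). */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

static char backing[4096];
static char *buf = backing;
static char *buf_end = backing + sizeof(backing);

static void *demo_alloc_block_buf(size_t size)
{
	char *ptr = (char *)ALIGN_UP((uintptr_t)buf, size);

	if (ptr + size > buf_end)
		return malloc(size);	/* buffer exhausted: fall back */
	buf = ptr + size;		/* bump the cursor past this block */
	return ptr;
}

int main(void)
{
	void *a = demo_alloc_block_buf(64);
	void *b = demo_alloc_block_buf(64);

	printf("a=%p b=%p (64-byte aligned, carved from one buffer)\n", a, b);
	return 0;
}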
CatcherXue/linux
drivers/isdn/divert/divert_procfs.c
7708
9035
/* $Id: divert_procfs.c,v 1.11.6.2 2001/09/23 22:24:36 kai Exp $ * * Filesystem handling for the diversion supplementary services. * * Copyright 1998 by Werner Cornelius (werner@isdn4linux.de) * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/module.h> #include <linux/poll.h> #include <linux/slab.h> #ifdef CONFIG_PROC_FS #include <linux/proc_fs.h> #else #include <linux/fs.h> #endif #include <linux/sched.h> #include <linux/isdnif.h> #include <net/net_namespace.h> #include <linux/mutex.h> #include "isdn_divert.h" /*********************************/ /* Variables for interface queue */ /*********************************/ ulong if_used = 0; /* number of interface users */ static DEFINE_MUTEX(isdn_divert_mutex); static struct divert_info *divert_info_head = NULL; /* head of queue */ static struct divert_info *divert_info_tail = NULL; /* pointer to last entry */ static DEFINE_SPINLOCK(divert_info_lock);/* lock for queue */ static wait_queue_head_t rd_queue; /*********************************/ /* put an info buffer into queue */ /*********************************/ void put_info_buffer(char *cp) { struct divert_info *ib; unsigned long flags; if (if_used <= 0) return; if (!cp) return; if (!*cp) return; if (!(ib = kmalloc(sizeof(struct divert_info) + strlen(cp), GFP_ATOMIC))) return; /* no memory */ strcpy(ib->info_start, cp); /* set output string */ ib->next = NULL; spin_lock_irqsave(&divert_info_lock, flags); ib->usage_cnt = if_used; if (!divert_info_head) divert_info_head = ib; /* new head */ else divert_info_tail->next = ib; /* follows existing messages */ divert_info_tail = ib; /* new tail */ /* delete old entrys */ while (divert_info_head->next) { if ((divert_info_head->usage_cnt <= 0) && (divert_info_head->next->usage_cnt <= 0)) { ib = divert_info_head; divert_info_head = divert_info_head->next; kfree(ib); } else break; } /* divert_info_head->next */ spin_unlock_irqrestore(&divert_info_lock, flags); wake_up_interruptible(&(rd_queue)); } /* put_info_buffer */ #ifdef CONFIG_PROC_FS /**********************************/ /* deflection device read routine */ /**********************************/ static ssize_t isdn_divert_read(struct file *file, char __user *buf, size_t count, loff_t *off) { struct divert_info *inf; int len; if (!*((struct divert_info **) file->private_data)) { if (file->f_flags & O_NONBLOCK) return -EAGAIN; interruptible_sleep_on(&(rd_queue)); } if (!(inf = *((struct divert_info **) file->private_data))) return (0); inf->usage_cnt--; /* new usage count */ file->private_data = &inf->next; /* next structure */ if ((len = strlen(inf->info_start)) <= count) { if (copy_to_user(buf, inf->info_start, len)) return -EFAULT; *off += len; return (len); } return (0); } /* isdn_divert_read */ /**********************************/ /* deflection device write routine */ /**********************************/ static ssize_t isdn_divert_write(struct file *file, const char __user *buf, size_t count, loff_t *off) { return (-ENODEV); } /* isdn_divert_write */ /***************************************/ /* select routines for various kernels */ /***************************************/ static unsigned int isdn_divert_poll(struct file *file, poll_table *wait) { unsigned int mask = 0; poll_wait(file, &(rd_queue), wait); /* mask = POLLOUT | POLLWRNORM; */ if (*((struct divert_info **) file->private_data)) { mask |= POLLIN | POLLRDNORM; } return mask; } /* isdn_divert_poll */ /****************/ /* Open 
routine */ /****************/ static int isdn_divert_open(struct inode *ino, struct file *filep) { unsigned long flags; spin_lock_irqsave(&divert_info_lock, flags); if_used++; if (divert_info_head) filep->private_data = &(divert_info_tail->next); else filep->private_data = &divert_info_head; spin_unlock_irqrestore(&divert_info_lock, flags); /* start_divert(); */ return nonseekable_open(ino, filep); } /* isdn_divert_open */ /*******************/ /* close routine */ /*******************/ static int isdn_divert_close(struct inode *ino, struct file *filep) { struct divert_info *inf; unsigned long flags; spin_lock_irqsave(&divert_info_lock, flags); if_used--; inf = *((struct divert_info **) filep->private_data); while (inf) { inf->usage_cnt--; inf = inf->next; } if (if_used <= 0) while (divert_info_head) { inf = divert_info_head; divert_info_head = divert_info_head->next; kfree(inf); } spin_unlock_irqrestore(&divert_info_lock, flags); return (0); } /* isdn_divert_close */ /*********/ /* IOCTL */ /*********/ static int isdn_divert_ioctl_unlocked(struct file *file, uint cmd, ulong arg) { divert_ioctl dioctl; int i; unsigned long flags; divert_rule *rulep; char *cp; if (copy_from_user(&dioctl, (void __user *) arg, sizeof(dioctl))) return -EFAULT; switch (cmd) { case IIOCGETVER: dioctl.drv_version = DIVERT_IIOC_VERSION; /* set version */ break; case IIOCGETDRV: if ((dioctl.getid.drvid = divert_if.name_to_drv(dioctl.getid.drvnam)) < 0) return (-EINVAL); break; case IIOCGETNAM: cp = divert_if.drv_to_name(dioctl.getid.drvid); if (!cp) return (-EINVAL); if (!*cp) return (-EINVAL); strcpy(dioctl.getid.drvnam, cp); break; case IIOCGETRULE: if (!(rulep = getruleptr(dioctl.getsetrule.ruleidx))) return (-EINVAL); dioctl.getsetrule.rule = *rulep; /* copy data */ break; case IIOCMODRULE: if (!(rulep = getruleptr(dioctl.getsetrule.ruleidx))) return (-EINVAL); spin_lock_irqsave(&divert_lock, flags); *rulep = dioctl.getsetrule.rule; /* copy data */ spin_unlock_irqrestore(&divert_lock, flags); return (0); /* no copy required */ break; case IIOCINSRULE: return (insertrule(dioctl.getsetrule.ruleidx, &dioctl.getsetrule.rule)); break; case IIOCDELRULE: return (deleterule(dioctl.getsetrule.ruleidx)); break; case IIOCDODFACT: return (deflect_extern_action(dioctl.fwd_ctrl.subcmd, dioctl.fwd_ctrl.callid, dioctl.fwd_ctrl.to_nr)); case IIOCDOCFACT: case IIOCDOCFDIS: case IIOCDOCFINT: if (!divert_if.drv_to_name(dioctl.cf_ctrl.drvid)) return (-EINVAL); /* invalid driver */ if (strnlen(dioctl.cf_ctrl.msn, sizeof(dioctl.cf_ctrl.msn)) == sizeof(dioctl.cf_ctrl.msn)) return -EINVAL; if (strnlen(dioctl.cf_ctrl.fwd_nr, sizeof(dioctl.cf_ctrl.fwd_nr)) == sizeof(dioctl.cf_ctrl.fwd_nr)) return -EINVAL; if ((i = cf_command(dioctl.cf_ctrl.drvid, (cmd == IIOCDOCFACT) ? 1 : (cmd == IIOCDOCFDIS) ? 0 : 2, dioctl.cf_ctrl.cfproc, dioctl.cf_ctrl.msn, dioctl.cf_ctrl.service, dioctl.cf_ctrl.fwd_nr, &dioctl.cf_ctrl.procid))) return (i); break; default: return (-EINVAL); } /* switch cmd */ return copy_to_user((void __user *)arg, &dioctl, sizeof(dioctl)) ? 
-EFAULT : 0; } /* isdn_divert_ioctl */ static long isdn_divert_ioctl(struct file *file, uint cmd, ulong arg) { long ret; mutex_lock(&isdn_divert_mutex); ret = isdn_divert_ioctl_unlocked(file, cmd, arg); mutex_unlock(&isdn_divert_mutex); return ret; } static const struct file_operations isdn_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = isdn_divert_read, .write = isdn_divert_write, .poll = isdn_divert_poll, .unlocked_ioctl = isdn_divert_ioctl, .open = isdn_divert_open, .release = isdn_divert_close, }; /****************************/ /* isdn subdir in /proc/net */ /****************************/ static struct proc_dir_entry *isdn_proc_entry = NULL; static struct proc_dir_entry *isdn_divert_entry = NULL; #endif /* CONFIG_PROC_FS */ /***************************************************************************/ /* divert_dev_init must be called before the proc filesystem may be used */ /***************************************************************************/ int divert_dev_init(void) { init_waitqueue_head(&rd_queue); #ifdef CONFIG_PROC_FS isdn_proc_entry = proc_mkdir("isdn", init_net.proc_net); if (!isdn_proc_entry) return (-1); isdn_divert_entry = proc_create("divert", S_IFREG | S_IRUGO, isdn_proc_entry, &isdn_fops); if (!isdn_divert_entry) { remove_proc_entry("isdn", init_net.proc_net); return (-1); } #endif /* CONFIG_PROC_FS */ return (0); } /* divert_dev_init */ /***************************************************************************/ /* divert_dev_deinit must be called before leaving isdn when included as */ /* a module. */ /***************************************************************************/ int divert_dev_deinit(void) { #ifdef CONFIG_PROC_FS remove_proc_entry("divert", isdn_proc_entry); remove_proc_entry("isdn", init_net.proc_net); #endif /* CONFIG_PROC_FS */ return (0); } /* divert_dev_deinit */
gpl-2.0
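put_info_buffer() in the record above queues each message with a usage count equal to the number of open readers, and head entries are reaped once every reader has consumed them. Here is a simplified user-space sketch of that reader-counted FIFO; the demo_* names are hypothetical, and unlike the driver (which keeps a per-file cursor into the list) this sketch folds reading and reaping into one helper.

/*
 * User-space sketch (demo only) of a reader-counted FIFO: every queued
 * entry records how many readers still need it, and the head entry is
 * freed when its count drops to zero.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_info {
	struct demo_info *next;
	int usage_cnt;		/* readers that still need this entry */
	char text[32];
};

static struct demo_info *head, *tail;
static int readers = 2;		/* number of open readers, like if_used */

static void demo_put(const char *msg)
{
	struct demo_info *ib = calloc(1, sizeof(*ib));

	if (!ib)
		return;		/* no memory: silently drop, like the driver */
	snprintf(ib->text, sizeof(ib->text), "%s", msg);
	ib->usage_cnt = readers;
	if (tail)
		tail->next = ib;
	else
		head = ib;
	tail = ib;
}

static void demo_read_one(void)
{
	struct demo_info *ib = head;

	if (!ib)
		return;
	printf("read: %s (still needed by %d reader(s))\n",
	       ib->text, ib->usage_cnt - 1);
	if (--ib->usage_cnt <= 0) {	/* last reader: reap the head */
		head = ib->next;
		if (!head)
			tail = NULL;
		free(ib);
	}
}

int main(void)
{
	demo_put("call diverted");
	demo_read_one();	/* reader 1 sees the entry */
	demo_read_one();	/* reader 2 sees it; entry is freed */
	return 0;
}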
nian0114/android_kernel_zte_n918st
drivers/net/wireless/libertas/cmdresp.c
8220
9356
/* * This file contains the handling of command * responses as well as events generated by firmware. */ #include <linux/hardirq.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/sched.h> #include <asm/unaligned.h> #include <net/cfg80211.h> #include "cfg.h" #include "cmd.h" /** * lbs_mac_event_disconnected - handles disconnect event. It * reports disconnect to upper layer, clean tx/rx packets, * reset link state etc. * * @priv: A pointer to struct lbs_private structure * * returns: n/a */ void lbs_mac_event_disconnected(struct lbs_private *priv) { if (priv->connect_status != LBS_CONNECTED) return; lbs_deb_enter(LBS_DEB_ASSOC); /* * Cisco AP sends EAP failure and de-auth in less than 0.5 ms. * It causes problem in the Supplicant */ msleep_interruptible(1000); if (priv->wdev->iftype == NL80211_IFTYPE_STATION) lbs_send_disconnect_notification(priv); /* report disconnect to upper layer */ netif_stop_queue(priv->dev); netif_carrier_off(priv->dev); /* Free Tx and Rx packets */ kfree_skb(priv->currenttxskb); priv->currenttxskb = NULL; priv->tx_pending_len = 0; priv->connect_status = LBS_DISCONNECTED; if (priv->psstate != PS_STATE_FULL_POWER) { /* make firmware to exit PS mode */ lbs_deb_cmd("disconnected, so exit PS mode\n"); lbs_set_ps_mode(priv, PS_MODE_ACTION_EXIT_PS, false); } lbs_deb_leave(LBS_DEB_ASSOC); } int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len) { uint16_t respcmd, curcmd; struct cmd_header *resp; int ret = 0; unsigned long flags; uint16_t result; lbs_deb_enter(LBS_DEB_HOST); mutex_lock(&priv->lock); spin_lock_irqsave(&priv->driver_lock, flags); if (!priv->cur_cmd) { lbs_deb_host("CMD_RESP: cur_cmd is NULL\n"); ret = -1; spin_unlock_irqrestore(&priv->driver_lock, flags); goto done; } resp = (void *)data; curcmd = le16_to_cpu(priv->cur_cmd->cmdbuf->command); respcmd = le16_to_cpu(resp->command); result = le16_to_cpu(resp->result); lbs_deb_cmd("CMD_RESP: response 0x%04x, seq %d, size %d\n", respcmd, le16_to_cpu(resp->seqnum), len); lbs_deb_hex(LBS_DEB_CMD, "CMD_RESP", (void *) resp, len); if (resp->seqnum != priv->cur_cmd->cmdbuf->seqnum) { netdev_info(priv->dev, "Received CMD_RESP with invalid sequence %d (expected %d)\n", le16_to_cpu(resp->seqnum), le16_to_cpu(priv->cur_cmd->cmdbuf->seqnum)); spin_unlock_irqrestore(&priv->driver_lock, flags); ret = -1; goto done; } if (respcmd != CMD_RET(curcmd) && respcmd != CMD_RET_802_11_ASSOCIATE && curcmd != CMD_802_11_ASSOCIATE) { netdev_info(priv->dev, "Invalid CMD_RESP %x to command %x!\n", respcmd, curcmd); spin_unlock_irqrestore(&priv->driver_lock, flags); ret = -1; goto done; } if (resp->result == cpu_to_le16(0x0004)) { /* 0x0004 means -EAGAIN. Drop the response, let it time out and be resubmitted */ netdev_info(priv->dev, "Firmware returns DEFER to command %x. Will let it time out...\n", le16_to_cpu(resp->command)); spin_unlock_irqrestore(&priv->driver_lock, flags); ret = -1; goto done; } /* Now we got response from FW, cancel the command timer */ del_timer(&priv->command_timer); priv->cmd_timed_out = 0; if (respcmd == CMD_RET(CMD_802_11_PS_MODE)) { struct cmd_ds_802_11_ps_mode *psmode = (void *) &resp[1]; u16 action = le16_to_cpu(psmode->action); lbs_deb_host( "CMD_RESP: PS_MODE cmd reply result 0x%x, action 0x%x\n", result, action); if (result) { lbs_deb_host("CMD_RESP: PS command failed with 0x%x\n", result); /* * We should not re-try enter-ps command in * ad-hoc mode. It takes place in * lbs_execute_next_command(). 
*/ if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR && action == PS_MODE_ACTION_ENTER_PS) priv->psmode = LBS802_11POWERMODECAM; } else if (action == PS_MODE_ACTION_ENTER_PS) { priv->needtowakeup = 0; priv->psstate = PS_STATE_AWAKE; lbs_deb_host("CMD_RESP: ENTER_PS command response\n"); if (priv->connect_status != LBS_CONNECTED) { /* * When Deauth Event received before Enter_PS command * response, We need to wake up the firmware. */ lbs_deb_host( "disconnected, invoking lbs_ps_wakeup\n"); spin_unlock_irqrestore(&priv->driver_lock, flags); mutex_unlock(&priv->lock); lbs_set_ps_mode(priv, PS_MODE_ACTION_EXIT_PS, false); mutex_lock(&priv->lock); spin_lock_irqsave(&priv->driver_lock, flags); } } else if (action == PS_MODE_ACTION_EXIT_PS) { priv->needtowakeup = 0; priv->psstate = PS_STATE_FULL_POWER; lbs_deb_host("CMD_RESP: EXIT_PS command response\n"); } else { lbs_deb_host("CMD_RESP: PS action 0x%X\n", action); } __lbs_complete_command(priv, priv->cur_cmd, result); spin_unlock_irqrestore(&priv->driver_lock, flags); ret = 0; goto done; } /* If the command is not successful, cleanup and return failure */ if ((result != 0 || !(respcmd & 0x8000))) { lbs_deb_host("CMD_RESP: error 0x%04x in command reply 0x%04x\n", result, respcmd); /* * Handling errors here */ switch (respcmd) { case CMD_RET(CMD_GET_HW_SPEC): case CMD_RET(CMD_802_11_RESET): lbs_deb_host("CMD_RESP: reset failed\n"); break; } __lbs_complete_command(priv, priv->cur_cmd, result); spin_unlock_irqrestore(&priv->driver_lock, flags); ret = -1; goto done; } spin_unlock_irqrestore(&priv->driver_lock, flags); if (priv->cur_cmd && priv->cur_cmd->callback) { ret = priv->cur_cmd->callback(priv, priv->cur_cmd->callback_arg, resp); } spin_lock_irqsave(&priv->driver_lock, flags); if (priv->cur_cmd) { /* Clean up and Put current command back to cmdfreeq */ __lbs_complete_command(priv, priv->cur_cmd, result); } spin_unlock_irqrestore(&priv->driver_lock, flags); done: mutex_unlock(&priv->lock); lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret); return ret; } int lbs_process_event(struct lbs_private *priv, u32 event) { int ret = 0; struct cmd_header cmd; lbs_deb_enter(LBS_DEB_CMD); switch (event) { case MACREG_INT_CODE_LINK_SENSED: lbs_deb_cmd("EVENT: link sensed\n"); break; case MACREG_INT_CODE_DEAUTHENTICATED: lbs_deb_cmd("EVENT: deauthenticated\n"); lbs_mac_event_disconnected(priv); break; case MACREG_INT_CODE_DISASSOCIATED: lbs_deb_cmd("EVENT: disassociated\n"); lbs_mac_event_disconnected(priv); break; case MACREG_INT_CODE_LINK_LOST_NO_SCAN: lbs_deb_cmd("EVENT: link lost\n"); lbs_mac_event_disconnected(priv); break; case MACREG_INT_CODE_PS_SLEEP: lbs_deb_cmd("EVENT: ps sleep\n"); /* handle unexpected PS SLEEP event */ if (priv->psstate == PS_STATE_FULL_POWER) { lbs_deb_cmd( "EVENT: in FULL POWER mode, ignoreing PS_SLEEP\n"); break; } priv->psstate = PS_STATE_PRE_SLEEP; lbs_ps_confirm_sleep(priv); break; case MACREG_INT_CODE_HOST_AWAKE: lbs_deb_cmd("EVENT: host awake\n"); if (priv->reset_deep_sleep_wakeup) priv->reset_deep_sleep_wakeup(priv); priv->is_deep_sleep = 0; lbs_cmd_async(priv, CMD_802_11_WAKEUP_CONFIRM, &cmd, sizeof(cmd)); priv->is_host_sleep_activated = 0; wake_up_interruptible(&priv->host_sleep_q); break; case MACREG_INT_CODE_DEEP_SLEEP_AWAKE: if (priv->reset_deep_sleep_wakeup) priv->reset_deep_sleep_wakeup(priv); lbs_deb_cmd("EVENT: ds awake\n"); priv->is_deep_sleep = 0; priv->wakeup_dev_required = 0; wake_up_interruptible(&priv->ds_awake_q); break; case MACREG_INT_CODE_PS_AWAKE: lbs_deb_cmd("EVENT: ps awake\n"); /* handle unexpected PS 
AWAKE event */ if (priv->psstate == PS_STATE_FULL_POWER) { lbs_deb_cmd( "EVENT: In FULL POWER mode - ignore PS AWAKE\n"); break; } priv->psstate = PS_STATE_AWAKE; if (priv->needtowakeup) { /* * wait for the command processing to finish * before resuming sending * priv->needtowakeup will be set to FALSE * in lbs_ps_wakeup() */ lbs_deb_cmd("waking up ...\n"); lbs_set_ps_mode(priv, PS_MODE_ACTION_EXIT_PS, false); } break; case MACREG_INT_CODE_MIC_ERR_UNICAST: lbs_deb_cmd("EVENT: UNICAST MIC ERROR\n"); lbs_send_mic_failureevent(priv, event); break; case MACREG_INT_CODE_MIC_ERR_MULTICAST: lbs_deb_cmd("EVENT: MULTICAST MIC ERROR\n"); lbs_send_mic_failureevent(priv, event); break; case MACREG_INT_CODE_MIB_CHANGED: lbs_deb_cmd("EVENT: MIB CHANGED\n"); break; case MACREG_INT_CODE_INIT_DONE: lbs_deb_cmd("EVENT: INIT DONE\n"); break; case MACREG_INT_CODE_ADHOC_BCN_LOST: lbs_deb_cmd("EVENT: ADHOC beacon lost\n"); break; case MACREG_INT_CODE_RSSI_LOW: netdev_alert(priv->dev, "EVENT: rssi low\n"); break; case MACREG_INT_CODE_SNR_LOW: netdev_alert(priv->dev, "EVENT: snr low\n"); break; case MACREG_INT_CODE_MAX_FAIL: netdev_alert(priv->dev, "EVENT: max fail\n"); break; case MACREG_INT_CODE_RSSI_HIGH: netdev_alert(priv->dev, "EVENT: rssi high\n"); break; case MACREG_INT_CODE_SNR_HIGH: netdev_alert(priv->dev, "EVENT: snr high\n"); break; case MACREG_INT_CODE_MESH_AUTO_STARTED: /* Ignore spurious autostart events */ netdev_info(priv->dev, "EVENT: MESH_AUTO_STARTED (ignoring)\n"); break; default: netdev_alert(priv->dev, "EVENT: unknown event id %d\n", event); break; } lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); return ret; }
gpl-2.0
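lbs_process_command_response() in the record above accepts a firmware reply only when its sequence number matches the outstanding command and its command code corresponds to the request code with the response bit set. The sketch below distills that validation; the 0x8000 response bit follows the CMD_RET() convention visible in the source, while the demo_* types and values are illustrative assumptions.

/*
 * Minimal sketch (demo, not the libertas driver) of command/response
 * matching: check the sequence number against the outstanding command,
 * then check that the reply code is the request code with the response
 * bit set, and finally report the firmware result.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_CMD_RET(c)	((uint16_t)(0x8000 | (c)))

struct demo_resp {
	uint16_t command;	/* reply code: request code | 0x8000 */
	uint16_t seqnum;	/* must match the outstanding command */
	uint16_t result;	/* 0 on success */
};

static int demo_check_resp(uint16_t cur_cmd, uint16_t cur_seq,
			   const struct demo_resp *r)
{
	if (r->seqnum != cur_seq) {
		printf("seq %u != expected %u: drop\n", r->seqnum, cur_seq);
		return -1;
	}
	if (r->command != DEMO_CMD_RET(cur_cmd)) {
		printf("cmd 0x%04x is not a reply to 0x%04x: drop\n",
		       r->command, cur_cmd);
		return -1;
	}
	return r->result ? -1 : 0;	/* non-zero result = firmware error */
}

int main(void)
{
	struct demo_resp ok    = { DEMO_CMD_RET(0x0021), 7, 0 };
	struct demo_resp stale = { DEMO_CMD_RET(0x0021), 6, 0 };

	printf("ok: %d\n", demo_check_resp(0x0021, 7, &ok));
	printf("stale: %d\n", demo_check_resp(0x0021, 7, &stale));
	return 0;
}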
chijure/android_kernel_huawei_y210
arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c
8732
64294
#include <linux/init.h> #include <linux/kernel.h> #include <linux/gpio.h> #include <cpu/sh7722.h> enum { PINMUX_RESERVED = 0, PINMUX_DATA_BEGIN, PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA, PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA, PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA, PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA, PTC7_DATA, PTC5_DATA, PTC4_DATA, PTC3_DATA, PTC2_DATA, PTC0_DATA, PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA, PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA, PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA, PTE1_DATA, PTE0_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA, PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA, PTG4_DATA, PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA, PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA, PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA, PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, PTJ1_DATA, PTJ0_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA, PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA, PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA, PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA, PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA, PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA, PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA, PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA, PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA, PTR4_DATA, PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA, PTS4_DATA, PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA, PTT4_DATA, PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA, PTU4_DATA, PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA, PTV4_DATA, PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA, PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA, PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA, PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA, PTZ5_DATA, PTZ4_DATA, PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA, PINMUX_DATA_END, PINMUX_INPUT_BEGIN, PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN, PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN, PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN, PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN, PTC7_IN, PTC5_IN, PTC4_IN, PTC3_IN, PTC2_IN, PTC0_IN, PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN, PTD3_IN, PTD2_IN, PTD1_IN, PTE7_IN, PTE6_IN, PTE5_IN, PTE4_IN, PTE1_IN, PTE0_IN, PTF6_IN, PTF5_IN, PTF4_IN, PTF3_IN, PTF2_IN, PTF1_IN, PTH6_IN, PTH5_IN, PTH1_IN, PTH0_IN, PTJ1_IN, PTJ0_IN, PTK6_IN, PTK5_IN, PTK4_IN, PTK3_IN, PTK2_IN, PTK0_IN, PTL7_IN, PTL6_IN, PTL5_IN, PTL4_IN, PTL3_IN, PTL2_IN, PTL1_IN, PTL0_IN, PTM7_IN, PTM6_IN, PTM5_IN, PTM4_IN, PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN, PTN7_IN, PTN6_IN, PTN5_IN, PTN4_IN, PTN3_IN, PTN2_IN, PTN1_IN, PTN0_IN, PTQ5_IN, PTQ4_IN, PTQ3_IN, PTQ2_IN, PTQ0_IN, PTR2_IN, PTS4_IN, PTS2_IN, PTS1_IN, PTT4_IN, PTT3_IN, PTT2_IN, PTT1_IN, PTU4_IN, PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN, PTV4_IN, PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN, PTW6_IN, PTW4_IN, PTW3_IN, PTW2_IN, PTW1_IN, PTW0_IN, PTX6_IN, PTX5_IN, PTX4_IN, PTX3_IN, PTX2_IN, PTX1_IN, PTX0_IN, PTY5_IN, PTY4_IN, PTY3_IN, PTY2_IN, PTY0_IN, PTZ5_IN, PTZ4_IN, PTZ3_IN, PTZ2_IN, PTZ1_IN, PINMUX_INPUT_END, PINMUX_INPUT_PULLDOWN_BEGIN, PTA7_IN_PD, PTA6_IN_PD, PTA5_IN_PD, PTA4_IN_PD, PTA3_IN_PD, PTA2_IN_PD, PTA1_IN_PD, PTA0_IN_PD, PTE7_IN_PD, PTE6_IN_PD, PTE5_IN_PD, PTE4_IN_PD, PTE1_IN_PD, PTE0_IN_PD, PTF6_IN_PD, PTF5_IN_PD, PTF4_IN_PD, PTF3_IN_PD, PTF2_IN_PD, PTF1_IN_PD, PTH6_IN_PD, PTH5_IN_PD, PTH1_IN_PD, PTH0_IN_PD, PTK6_IN_PD, PTK5_IN_PD, PTK4_IN_PD, PTK3_IN_PD, PTK2_IN_PD, PTK0_IN_PD, PTL7_IN_PD, PTL6_IN_PD, PTL5_IN_PD, PTL4_IN_PD, PTL3_IN_PD, PTL2_IN_PD, PTL1_IN_PD, PTL0_IN_PD, PTM7_IN_PD, PTM6_IN_PD, PTM5_IN_PD, PTM4_IN_PD, PTM3_IN_PD, PTM2_IN_PD, PTM1_IN_PD, PTM0_IN_PD, 
PTQ5_IN_PD, PTQ4_IN_PD, PTQ3_IN_PD, PTQ2_IN_PD, PTS4_IN_PD, PTS2_IN_PD, PTS1_IN_PD, PTT4_IN_PD, PTT3_IN_PD, PTT2_IN_PD, PTT1_IN_PD, PTU4_IN_PD, PTU3_IN_PD, PTU2_IN_PD, PTU1_IN_PD, PTU0_IN_PD, PTV4_IN_PD, PTV3_IN_PD, PTV2_IN_PD, PTV1_IN_PD, PTV0_IN_PD, PTW6_IN_PD, PTW4_IN_PD, PTW3_IN_PD, PTW2_IN_PD, PTW1_IN_PD, PTW0_IN_PD, PTX6_IN_PD, PTX5_IN_PD, PTX4_IN_PD, PTX3_IN_PD, PTX2_IN_PD, PTX1_IN_PD, PTX0_IN_PD, PINMUX_INPUT_PULLDOWN_END, PINMUX_INPUT_PULLUP_BEGIN, PTC7_IN_PU, PTC5_IN_PU, PTD7_IN_PU, PTD6_IN_PU, PTD5_IN_PU, PTD4_IN_PU, PTD3_IN_PU, PTD2_IN_PU, PTD1_IN_PU, PTJ1_IN_PU, PTJ0_IN_PU, PTQ0_IN_PU, PTR2_IN_PU, PTX6_IN_PU, PTY5_IN_PU, PTY4_IN_PU, PTY3_IN_PU, PTY2_IN_PU, PTY0_IN_PU, PTZ5_IN_PU, PTZ4_IN_PU, PTZ3_IN_PU, PTZ2_IN_PU, PTZ1_IN_PU, PINMUX_INPUT_PULLUP_END, PINMUX_OUTPUT_BEGIN, PTA7_OUT, PTA5_OUT, PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT, PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT, PTC4_OUT, PTC3_OUT, PTC2_OUT, PTC0_OUT, PTD6_OUT, PTD5_OUT, PTD4_OUT, PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT, PTE7_OUT, PTE6_OUT, PTE5_OUT, PTE4_OUT, PTE1_OUT, PTE0_OUT, PTF6_OUT, PTF5_OUT, PTF4_OUT, PTF3_OUT, PTF2_OUT, PTF0_OUT, PTG4_OUT, PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT, PTH7_OUT, PTH6_OUT, PTH5_OUT, PTH4_OUT, PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT, PTJ7_OUT, PTJ6_OUT, PTJ5_OUT, PTJ1_OUT, PTJ0_OUT, PTK6_OUT, PTK5_OUT, PTK4_OUT, PTK3_OUT, PTK1_OUT, PTK0_OUT, PTL7_OUT, PTL6_OUT, PTL5_OUT, PTL4_OUT, PTL3_OUT, PTL2_OUT, PTL1_OUT, PTL0_OUT, PTM7_OUT, PTM6_OUT, PTM5_OUT, PTM4_OUT, PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT, PTN7_OUT, PTN6_OUT, PTN5_OUT, PTN4_OUT, PTN3_OUT, PTN2_OUT, PTN1_OUT, PTN0_OUT, PTQ6_OUT, PTQ5_OUT, PTQ4_OUT, PTQ3_OUT, PTQ2_OUT, PTQ1_OUT, PTQ0_OUT, PTR4_OUT, PTR3_OUT, PTR1_OUT, PTR0_OUT, PTS3_OUT, PTS2_OUT, PTS0_OUT, PTT4_OUT, PTT3_OUT, PTT2_OUT, PTT0_OUT, PTU4_OUT, PTU3_OUT, PTU2_OUT, PTU0_OUT, PTV4_OUT, PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT, PTW5_OUT, PTW4_OUT, PTW3_OUT, PTW2_OUT, PTW1_OUT, PTW0_OUT, PTX6_OUT, PTX5_OUT, PTX4_OUT, PTX3_OUT, PTX2_OUT, PTX1_OUT, PTX0_OUT, PTY5_OUT, PTY4_OUT, PTY3_OUT, PTY2_OUT, PTY1_OUT, PTY0_OUT, PINMUX_OUTPUT_END, PINMUX_MARK_BEGIN, SCIF0_TXD_MARK, SCIF0_RXD_MARK, SCIF0_RTS_MARK, SCIF0_CTS_MARK, SCIF0_SCK_MARK, SCIF1_TXD_MARK, SCIF1_RXD_MARK, SCIF1_RTS_MARK, SCIF1_CTS_MARK, SCIF1_SCK_MARK, SCIF2_TXD_MARK, SCIF2_RXD_MARK, SCIF2_RTS_MARK, SCIF2_CTS_MARK, SCIF2_SCK_MARK, SIOTXD_MARK, SIORXD_MARK, SIOD_MARK, SIOSTRB0_MARK, SIOSTRB1_MARK, SIOSCK_MARK, SIOMCK_MARK, VIO_D15_MARK, VIO_D14_MARK, VIO_D13_MARK, VIO_D12_MARK, VIO_D11_MARK, VIO_D10_MARK, VIO_D9_MARK, VIO_D8_MARK, VIO_D7_MARK, VIO_D6_MARK, VIO_D5_MARK, VIO_D4_MARK, VIO_D3_MARK, VIO_D2_MARK, VIO_D1_MARK, VIO_D0_MARK, VIO_CLK_MARK, VIO_VD_MARK, VIO_HD_MARK, VIO_FLD_MARK, VIO_CKO_MARK, VIO_STEX_MARK, VIO_STEM_MARK, VIO_VD2_MARK, VIO_HD2_MARK, VIO_CLK2_MARK, LCDD23_MARK, LCDD22_MARK, LCDD21_MARK, LCDD20_MARK, LCDD19_MARK, LCDD18_MARK, LCDD17_MARK, LCDD16_MARK, LCDD15_MARK, LCDD14_MARK, LCDD13_MARK, LCDD12_MARK, LCDD11_MARK, LCDD10_MARK, LCDD9_MARK, LCDD8_MARK, LCDD7_MARK, LCDD6_MARK, LCDD5_MARK, LCDD4_MARK, LCDD3_MARK, LCDD2_MARK, LCDD1_MARK, LCDD0_MARK, LCDLCLK_MARK, LCDDON_MARK, LCDVCPWC_MARK, LCDVEPWC_MARK, LCDVSYN_MARK, LCDDCK_MARK, LCDHSYN_MARK, LCDDISP_MARK, LCDRS_MARK, LCDCS_MARK, LCDWR_MARK, LCDRD_MARK, LCDDON2_MARK, LCDVCPWC2_MARK, LCDVEPWC2_MARK, LCDVSYN2_MARK, LCDCS2_MARK, IOIS16_MARK, A25_MARK, A24_MARK, A23_MARK, A22_MARK, BS_MARK, CS6B_CE1B_MARK, WAIT_MARK, CS6A_CE2B_MARK, HPD63_MARK, HPD62_MARK, HPD61_MARK, HPD60_MARK, HPD59_MARK, HPD58_MARK, HPD57_MARK, HPD56_MARK, HPD55_MARK, HPD54_MARK, 
HPD53_MARK, HPD52_MARK, HPD51_MARK, HPD50_MARK, HPD49_MARK, HPD48_MARK, HPDQM7_MARK, HPDQM6_MARK, HPDQM5_MARK, HPDQM4_MARK, IRQ0_MARK, IRQ1_MARK, IRQ2_MARK, IRQ3_MARK, IRQ4_MARK, IRQ5_MARK, IRQ6_MARK, IRQ7_MARK, SDHICD_MARK, SDHIWP_MARK, SDHID3_MARK, SDHID2_MARK, SDHID1_MARK, SDHID0_MARK, SDHICMD_MARK, SDHICLK_MARK, SIUAOLR_MARK, SIUAOBT_MARK, SIUAISLD_MARK, SIUAILR_MARK, SIUAIBT_MARK, SIUAOSLD_MARK, SIUMCKA_MARK, SIUFCKA_MARK, SIUBOLR_MARK, SIUBOBT_MARK, SIUBISLD_MARK, SIUBILR_MARK, SIUBIBT_MARK, SIUBOSLD_MARK, SIUMCKB_MARK, SIUFCKB_MARK, AUDSYNC_MARK, AUDATA3_MARK, AUDATA2_MARK, AUDATA1_MARK, AUDATA0_MARK, DACK_MARK, DREQ0_MARK, DV_CLKI_MARK, DV_CLK_MARK, DV_HSYNC_MARK, DV_VSYNC_MARK, DV_D15_MARK, DV_D14_MARK, DV_D13_MARK, DV_D12_MARK, DV_D11_MARK, DV_D10_MARK, DV_D9_MARK, DV_D8_MARK, DV_D7_MARK, DV_D6_MARK, DV_D5_MARK, DV_D4_MARK, DV_D3_MARK, DV_D2_MARK, DV_D1_MARK, DV_D0_MARK, STATUS0_MARK, PDSTATUS_MARK, SIOF0_MCK_MARK, SIOF0_SCK_MARK, SIOF0_SYNC_MARK, SIOF0_SS1_MARK, SIOF0_SS2_MARK, SIOF0_TXD_MARK, SIOF0_RXD_MARK, SIOF1_MCK_MARK, SIOF1_SCK_MARK, SIOF1_SYNC_MARK, SIOF1_SS1_MARK, SIOF1_SS2_MARK, SIOF1_TXD_MARK, SIOF1_RXD_MARK, SIM_D_MARK, SIM_CLK_MARK, SIM_RST_MARK, TS_SDAT_MARK, TS_SCK_MARK, TS_SDEN_MARK, TS_SPSYNC_MARK, IRDA_IN_MARK, IRDA_OUT_MARK, TPUTO_MARK, FCE_MARK, NAF7_MARK, NAF6_MARK, NAF5_MARK, NAF4_MARK, NAF3_MARK, NAF2_MARK, NAF1_MARK, NAF0_MARK, FCDE_MARK, FOE_MARK, FSC_MARK, FWE_MARK, FRB_MARK, KEYIN0_MARK, KEYIN1_MARK, KEYIN2_MARK, KEYIN3_MARK, KEYIN4_MARK, KEYOUT0_MARK, KEYOUT1_MARK, KEYOUT2_MARK, KEYOUT3_MARK, KEYOUT4_IN6_MARK, KEYOUT5_IN5_MARK, PINMUX_MARK_END, PINMUX_FUNCTION_BEGIN, VIO_D7_SCIF1_SCK, VIO_D6_SCIF1_RXD, VIO_D5_SCIF1_TXD, VIO_D4, VIO_D3, VIO_D2, VIO_D1, VIO_D0_LCDLCLK, HPD55, HPD54, HPD53, HPD52, HPD51, HPD50, HPD49, HPD48, IOIS16, HPDQM7, HPDQM6, HPDQM5, HPDQM4, SDHICD, SDHIWP, SDHID3, IRQ2_SDHID2, SDHID1, SDHID0, SDHICMD, SDHICLK, A25, A24, A23, A22, IRQ5, IRQ4_BS, PTF6, SIOSCK_SIUBOBT, SIOSTRB1_SIUBOLR, SIOSTRB0_SIUBIBT, SIOD_SIUBILR, SIORXD_SIUBISLD, SIOTXD_SIUBOSLD, AUDSYNC, AUDATA3, AUDATA2, AUDATA1, AUDATA0, LCDVCPWC_LCDVCPWC2, LCDVSYN2_DACK, LCDVSYN, LCDDISP_LCDRS, LCDHSYN_LCDCS, LCDDON_LCDDON2, LCDD17_DV_HSYNC, LCDD16_DV_VSYNC, STATUS0, PDSTATUS, IRQ1, IRQ0, SIUAILR_SIOF1_SS2, SIUAIBT_SIOF1_SS1, SIUAOLR_SIOF1_SYNC, SIUAOBT_SIOF1_SCK, SIUAISLD_SIOF1_RXD, SIUAOSLD_SIOF1_TXD, PTK0, LCDD15_DV_D15, LCDD14_DV_D14, LCDD13_DV_D13, LCDD12_DV_D12, LCDD11_DV_D11, LCDD10_DV_D10, LCDD9_DV_D9, LCDD8_DV_D8, LCDD7_DV_D7, LCDD6_DV_D6, LCDD5_DV_D5, LCDD4_DV_D4, LCDD3_DV_D3, LCDD2_DV_D2, LCDD1_DV_D1, LCDD0_DV_D0, HPD63, HPD62, HPD61, HPD60, HPD59, HPD58, HPD57, HPD56, SIOF0_SS2_SIM_RST, SIOF0_SS1_TS_SPSYNC, SIOF0_SYNC_TS_SDEN, SIOF0_SCK_TS_SCK, PTQ2, PTQ1, PTQ0, LCDRD, CS6B_CE1B_LCDCS2, WAIT, LCDDCK_LCDWR, LCDVEPWC_LCDVEPWC2, SCIF0_CTS_SIUAISPD, SCIF0_RTS_SIUAOSPD, SCIF0_SCK_TPUTO, SCIF0_RXD, SCIF0_TXD, FOE_VIO_VD2, FWE, FSC, DREQ0, FCDE, NAF2_VIO_D10, NAF1_VIO_D9, NAF0_VIO_D8, FRB_VIO_CLK2, FCE_VIO_HD2, NAF7_VIO_D15, NAF6_VIO_D14, NAF5_VIO_D13, NAF4_VIO_D12, NAF3_VIO_D11, VIO_FLD_SCIF2_CTS, VIO_CKO_SCIF2_RTS, VIO_STEX_SCIF2_SCK, VIO_STEM_SCIF2_TXD, VIO_HD_SCIF2_RXD, VIO_VD_SCIF1_CTS, VIO_CLK_SCIF1_RTS, CS6A_CE2B, LCDD23, LCDD22, LCDD21, LCDD20, LCDD19_DV_CLKI, LCDD18_DV_CLK, KEYOUT5_IN5, KEYOUT4_IN6, KEYOUT3, KEYOUT2, KEYOUT1, KEYOUT0, KEYIN4_IRQ7, KEYIN3, KEYIN2, KEYIN1, KEYIN0_IRQ6, PSA15_KEYIN0, PSA15_IRQ6, PSA14_KEYIN4, PSA14_IRQ7, PSA9_IRQ4, PSA9_BS, PSA4_IRQ2, PSA4_SDHID2, PSB15_SIOTXD, PSB15_SIUBOSLD, PSB14_SIORXD, PSB14_SIUBISLD, PSB13_SIOD, 
PSB13_SIUBILR, PSB12_SIOSTRB0, PSB12_SIUBIBT, PSB11_SIOSTRB1, PSB11_SIUBOLR, PSB10_SIOSCK, PSB10_SIUBOBT, PSB9_SIOMCK, PSB9_SIUMCKB, PSB8_SIOF0_MCK, PSB8_IRQ3, PSB7_SIOF0_TXD, PSB7_IRDA_OUT, PSB6_SIOF0_RXD, PSB6_IRDA_IN, PSB5_SIOF0_SCK, PSB5_TS_SCK, PSB4_SIOF0_SYNC, PSB4_TS_SDEN, PSB3_SIOF0_SS1, PSB3_TS_SPSYNC, PSB2_SIOF0_SS2, PSB2_SIM_RST, PSB1_SIUMCKA, PSB1_SIOF1_MCK, PSB0_SIUAOSLD, PSB0_SIOF1_TXD, PSC15_SIUAISLD, PSC15_SIOF1_RXD, PSC14_SIUAOBT, PSC14_SIOF1_SCK, PSC13_SIUAOLR, PSC13_SIOF1_SYNC, PSC12_SIUAIBT, PSC12_SIOF1_SS1, PSC11_SIUAILR, PSC11_SIOF1_SS2, PSC0_NAF, PSC0_VIO, PSD13_VIO, PSD13_SCIF2, PSD12_VIO, PSD12_SCIF1, PSD11_VIO, PSD11_SCIF1, PSD10_VIO_D0, PSD10_LCDLCLK, PSD9_SIOMCK_SIUMCKB, PSD9_SIUFCKB, PSD8_SCIF0_SCK, PSD8_TPUTO, PSD7_SCIF0_RTS, PSD7_SIUAOSPD, PSD6_SCIF0_CTS, PSD6_SIUAISPD, PSD5_CS6B_CE1B, PSD5_LCDCS2, PSD3_LCDVEPWC_LCDVCPWC, PSD3_LCDVEPWC2_LCDVCPWC2, PSD2_LCDDON, PSD2_LCDDON2, PSD0_LCDD19_LCDD0, PSD0_DV, PSE15_SIOF0_MCK_IRQ3, PSE15_SIM_D, PSE14_SIOF0_TXD_IRDA_OUT, PSE14_SIM_CLK, PSE13_SIOF0_RXD_IRDA_IN, PSE13_TS_SDAT, PSE12_LCDVSYN2, PSE12_DACK, PSE11_SIUMCKA_SIOF1_MCK, PSE11_SIUFCKA, PSE3_FLCTL, PSE3_VIO, PSE2_NAF2, PSE2_VIO_D10, PSE1_NAF1, PSE1_VIO_D9, PSE0_NAF0, PSE0_VIO_D8, HIZA14_KEYSC, HIZA14_HIZ, HIZA10_NAF, HIZA10_HIZ, HIZA9_VIO, HIZA9_HIZ, HIZA8_LCDC, HIZA8_HIZ, HIZA7_LCDC, HIZA7_HIZ, HIZA6_LCDC, HIZA6_HIZ, HIZB4_SIUA, HIZB4_HIZ, HIZB1_VIO, HIZB1_HIZ, HIZB0_VIO, HIZB0_HIZ, HIZC15_IRQ7, HIZC15_HIZ, HIZC14_IRQ6, HIZC14_HIZ, HIZC13_IRQ5, HIZC13_HIZ, HIZC12_IRQ4, HIZC12_HIZ, HIZC11_IRQ3, HIZC11_HIZ, HIZC10_IRQ2, HIZC10_HIZ, HIZC9_IRQ1, HIZC9_HIZ, HIZC8_IRQ0, HIZC8_HIZ, MSELB9_VIO, MSELB9_VIO2, MSELB8_RGB, MSELB8_SYS, PINMUX_FUNCTION_END, }; static pinmux_enum_t pinmux_data[] = { /* PTA */ PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_IN_PD, PTA7_OUT), PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_IN_PD), PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_IN_PD, PTA5_OUT), PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_IN_PD), PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_IN_PD), PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_IN_PD), PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_IN_PD), PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_IN_PD), /* PTB */ PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT), PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT), PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT), PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT), PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT), PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT), PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT), PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT), /* PTC */ PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_IN_PU), PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_IN_PU), PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT), PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT), PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT), PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT), /* PTD */ PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_IN_PU), PINMUX_DATA(PTD6_DATA, PTD6_OUT, PTD6_IN, PTD6_IN_PU), PINMUX_DATA(PTD5_DATA, PTD5_OUT, PTD5_IN, PTD5_IN_PU), PINMUX_DATA(PTD4_DATA, PTD4_OUT, PTD4_IN, PTD4_IN_PU), PINMUX_DATA(PTD3_DATA, PTD3_OUT, PTD3_IN, PTD3_IN_PU), PINMUX_DATA(PTD2_DATA, PTD2_OUT, PTD2_IN, PTD2_IN_PU), PINMUX_DATA(PTD1_DATA, PTD1_OUT, PTD1_IN, PTD1_IN_PU), PINMUX_DATA(PTD0_DATA, PTD0_OUT), /* PTE */ PINMUX_DATA(PTE7_DATA, PTE7_OUT, PTE7_IN, PTE7_IN_PD), PINMUX_DATA(PTE6_DATA, PTE6_OUT, PTE6_IN, PTE6_IN_PD), PINMUX_DATA(PTE5_DATA, PTE5_OUT, PTE5_IN, PTE5_IN_PD), PINMUX_DATA(PTE4_DATA, PTE4_OUT, PTE4_IN, PTE4_IN_PD), PINMUX_DATA(PTE1_DATA, PTE1_OUT, PTE1_IN, PTE1_IN_PD), PINMUX_DATA(PTE0_DATA, PTE0_OUT, PTE0_IN, PTE0_IN_PD), /* PTF */ PINMUX_DATA(PTF6_DATA, 
PTF6_OUT, PTF6_IN, PTF6_IN_PD), PINMUX_DATA(PTF5_DATA, PTF5_OUT, PTF5_IN, PTF5_IN_PD), PINMUX_DATA(PTF4_DATA, PTF4_OUT, PTF4_IN, PTF4_IN_PD), PINMUX_DATA(PTF3_DATA, PTF3_OUT, PTF3_IN, PTF3_IN_PD), PINMUX_DATA(PTF2_DATA, PTF2_OUT, PTF2_IN, PTF2_IN_PD), PINMUX_DATA(PTF1_DATA, PTF1_IN, PTF1_IN_PD), PINMUX_DATA(PTF0_DATA, PTF0_OUT), /* PTG */ PINMUX_DATA(PTG4_DATA, PTG4_OUT), PINMUX_DATA(PTG3_DATA, PTG3_OUT), PINMUX_DATA(PTG2_DATA, PTG2_OUT), PINMUX_DATA(PTG1_DATA, PTG1_OUT), PINMUX_DATA(PTG0_DATA, PTG0_OUT), /* PTH */ PINMUX_DATA(PTH7_DATA, PTH7_OUT), PINMUX_DATA(PTH6_DATA, PTH6_OUT, PTH6_IN, PTH6_IN_PD), PINMUX_DATA(PTH5_DATA, PTH5_OUT, PTH5_IN, PTH5_IN_PD), PINMUX_DATA(PTH4_DATA, PTH4_OUT), PINMUX_DATA(PTH3_DATA, PTH3_OUT), PINMUX_DATA(PTH2_DATA, PTH2_OUT), PINMUX_DATA(PTH1_DATA, PTH1_OUT, PTH1_IN, PTH1_IN_PD), PINMUX_DATA(PTH0_DATA, PTH0_OUT, PTH0_IN, PTH0_IN_PD), /* PTJ */ PINMUX_DATA(PTJ7_DATA, PTJ7_OUT), PINMUX_DATA(PTJ6_DATA, PTJ6_OUT), PINMUX_DATA(PTJ5_DATA, PTJ5_OUT), PINMUX_DATA(PTJ1_DATA, PTJ1_OUT, PTJ1_IN, PTJ1_IN_PU), PINMUX_DATA(PTJ0_DATA, PTJ0_OUT, PTJ0_IN, PTJ0_IN_PU), /* PTK */ PINMUX_DATA(PTK6_DATA, PTK6_OUT, PTK6_IN, PTK6_IN_PD), PINMUX_DATA(PTK5_DATA, PTK5_OUT, PTK5_IN, PTK5_IN_PD), PINMUX_DATA(PTK4_DATA, PTK4_OUT, PTK4_IN, PTK4_IN_PD), PINMUX_DATA(PTK3_DATA, PTK3_OUT, PTK3_IN, PTK3_IN_PD), PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_IN_PD), PINMUX_DATA(PTK1_DATA, PTK1_OUT), PINMUX_DATA(PTK0_DATA, PTK0_OUT, PTK0_IN, PTK0_IN_PD), /* PTL */ PINMUX_DATA(PTL7_DATA, PTL7_OUT, PTL7_IN, PTL7_IN_PD), PINMUX_DATA(PTL6_DATA, PTL6_OUT, PTL6_IN, PTL6_IN_PD), PINMUX_DATA(PTL5_DATA, PTL5_OUT, PTL5_IN, PTL5_IN_PD), PINMUX_DATA(PTL4_DATA, PTL4_OUT, PTL4_IN, PTL4_IN_PD), PINMUX_DATA(PTL3_DATA, PTL3_OUT, PTL3_IN, PTL3_IN_PD), PINMUX_DATA(PTL2_DATA, PTL2_OUT, PTL2_IN, PTL2_IN_PD), PINMUX_DATA(PTL1_DATA, PTL1_OUT, PTL1_IN, PTL1_IN_PD), PINMUX_DATA(PTL0_DATA, PTL0_OUT, PTL0_IN, PTL0_IN_PD), /* PTM */ PINMUX_DATA(PTM7_DATA, PTM7_OUT, PTM7_IN, PTM7_IN_PD), PINMUX_DATA(PTM6_DATA, PTM6_OUT, PTM6_IN, PTM6_IN_PD), PINMUX_DATA(PTM5_DATA, PTM5_OUT, PTM5_IN, PTM5_IN_PD), PINMUX_DATA(PTM4_DATA, PTM4_OUT, PTM4_IN, PTM4_IN_PD), PINMUX_DATA(PTM3_DATA, PTM3_OUT, PTM3_IN, PTM3_IN_PD), PINMUX_DATA(PTM2_DATA, PTM2_OUT, PTM2_IN, PTM2_IN_PD), PINMUX_DATA(PTM1_DATA, PTM1_OUT, PTM1_IN, PTM1_IN_PD), PINMUX_DATA(PTM0_DATA, PTM0_OUT, PTM0_IN, PTM0_IN_PD), /* PTN */ PINMUX_DATA(PTN7_DATA, PTN7_OUT, PTN7_IN), PINMUX_DATA(PTN6_DATA, PTN6_OUT, PTN6_IN), PINMUX_DATA(PTN5_DATA, PTN5_OUT, PTN5_IN), PINMUX_DATA(PTN4_DATA, PTN4_OUT, PTN4_IN), PINMUX_DATA(PTN3_DATA, PTN3_OUT, PTN3_IN), PINMUX_DATA(PTN2_DATA, PTN2_OUT, PTN2_IN), PINMUX_DATA(PTN1_DATA, PTN1_OUT, PTN1_IN), PINMUX_DATA(PTN0_DATA, PTN0_OUT, PTN0_IN), /* PTQ */ PINMUX_DATA(PTQ6_DATA, PTQ6_OUT), PINMUX_DATA(PTQ5_DATA, PTQ5_OUT, PTQ5_IN, PTQ5_IN_PD), PINMUX_DATA(PTQ4_DATA, PTQ4_OUT, PTQ4_IN, PTQ4_IN_PD), PINMUX_DATA(PTQ3_DATA, PTQ3_OUT, PTQ3_IN, PTQ3_IN_PD), PINMUX_DATA(PTQ2_DATA, PTQ2_IN, PTQ2_IN_PD), PINMUX_DATA(PTQ1_DATA, PTQ1_OUT), PINMUX_DATA(PTQ0_DATA, PTQ0_OUT, PTQ0_IN, PTQ0_IN_PU), /* PTR */ PINMUX_DATA(PTR4_DATA, PTR4_OUT), PINMUX_DATA(PTR3_DATA, PTR3_OUT), PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_IN_PU), PINMUX_DATA(PTR1_DATA, PTR1_OUT), PINMUX_DATA(PTR0_DATA, PTR0_OUT), /* PTS */ PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_IN_PD), PINMUX_DATA(PTS3_DATA, PTS3_OUT), PINMUX_DATA(PTS2_DATA, PTS2_OUT, PTS2_IN, PTS2_IN_PD), PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_IN_PD), PINMUX_DATA(PTS0_DATA, PTS0_OUT), /* PTT */ PINMUX_DATA(PTT4_DATA, PTT4_OUT, PTT4_IN, PTT4_IN_PD), 
PINMUX_DATA(PTT3_DATA, PTT3_OUT, PTT3_IN, PTT3_IN_PD), PINMUX_DATA(PTT2_DATA, PTT2_OUT, PTT2_IN, PTT2_IN_PD), PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_IN_PD), PINMUX_DATA(PTT0_DATA, PTT0_OUT), /* PTU */ PINMUX_DATA(PTU4_DATA, PTU4_OUT, PTU4_IN, PTU4_IN_PD), PINMUX_DATA(PTU3_DATA, PTU3_OUT, PTU3_IN, PTU3_IN_PD), PINMUX_DATA(PTU2_DATA, PTU2_OUT, PTU2_IN, PTU2_IN_PD), PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_IN_PD), PINMUX_DATA(PTU0_DATA, PTU0_OUT, PTU0_IN, PTU0_IN_PD), /* PTV */ PINMUX_DATA(PTV4_DATA, PTV4_OUT, PTV4_IN, PTV4_IN_PD), PINMUX_DATA(PTV3_DATA, PTV3_OUT, PTV3_IN, PTV3_IN_PD), PINMUX_DATA(PTV2_DATA, PTV2_OUT, PTV2_IN, PTV2_IN_PD), PINMUX_DATA(PTV1_DATA, PTV1_OUT, PTV1_IN, PTV1_IN_PD), PINMUX_DATA(PTV0_DATA, PTV0_OUT, PTV0_IN, PTV0_IN_PD), /* PTW */ PINMUX_DATA(PTW6_DATA, PTW6_IN, PTW6_IN_PD), PINMUX_DATA(PTW5_DATA, PTW5_OUT), PINMUX_DATA(PTW4_DATA, PTW4_OUT, PTW4_IN, PTW4_IN_PD), PINMUX_DATA(PTW3_DATA, PTW3_OUT, PTW3_IN, PTW3_IN_PD), PINMUX_DATA(PTW2_DATA, PTW2_OUT, PTW2_IN, PTW2_IN_PD), PINMUX_DATA(PTW1_DATA, PTW1_OUT, PTW1_IN, PTW1_IN_PD), PINMUX_DATA(PTW0_DATA, PTW0_OUT, PTW0_IN, PTW0_IN_PD), /* PTX */ PINMUX_DATA(PTX6_DATA, PTX6_OUT, PTX6_IN, PTX6_IN_PD), PINMUX_DATA(PTX5_DATA, PTX5_OUT, PTX5_IN, PTX5_IN_PD), PINMUX_DATA(PTX4_DATA, PTX4_OUT, PTX4_IN, PTX4_IN_PD), PINMUX_DATA(PTX3_DATA, PTX3_OUT, PTX3_IN, PTX3_IN_PD), PINMUX_DATA(PTX2_DATA, PTX2_OUT, PTX2_IN, PTX2_IN_PD), PINMUX_DATA(PTX1_DATA, PTX1_OUT, PTX1_IN, PTX1_IN_PD), PINMUX_DATA(PTX0_DATA, PTX0_OUT, PTX0_IN, PTX0_IN_PD), /* PTY */ PINMUX_DATA(PTY5_DATA, PTY5_OUT, PTY5_IN, PTY5_IN_PU), PINMUX_DATA(PTY4_DATA, PTY4_OUT, PTY4_IN, PTY4_IN_PU), PINMUX_DATA(PTY3_DATA, PTY3_OUT, PTY3_IN, PTY3_IN_PU), PINMUX_DATA(PTY2_DATA, PTY2_OUT, PTY2_IN, PTY2_IN_PU), PINMUX_DATA(PTY1_DATA, PTY1_OUT), PINMUX_DATA(PTY0_DATA, PTY0_OUT, PTY0_IN, PTY0_IN_PU), /* PTZ */ PINMUX_DATA(PTZ5_DATA, PTZ5_IN, PTZ5_IN_PU), PINMUX_DATA(PTZ4_DATA, PTZ4_IN, PTZ4_IN_PU), PINMUX_DATA(PTZ3_DATA, PTZ3_IN, PTZ3_IN_PU), PINMUX_DATA(PTZ2_DATA, PTZ2_IN, PTZ2_IN_PU), PINMUX_DATA(PTZ1_DATA, PTZ1_IN, PTZ1_IN_PU), /* SCIF0 */ PINMUX_DATA(SCIF0_TXD_MARK, SCIF0_TXD), PINMUX_DATA(SCIF0_RXD_MARK, SCIF0_RXD), PINMUX_DATA(SCIF0_RTS_MARK, PSD7_SCIF0_RTS, SCIF0_RTS_SIUAOSPD), PINMUX_DATA(SCIF0_CTS_MARK, PSD6_SCIF0_CTS, SCIF0_CTS_SIUAISPD), PINMUX_DATA(SCIF0_SCK_MARK, PSD8_SCIF0_SCK, SCIF0_SCK_TPUTO), /* SCIF1 */ PINMUX_DATA(SCIF1_TXD_MARK, PSD11_SCIF1, VIO_D5_SCIF1_TXD), PINMUX_DATA(SCIF1_RXD_MARK, PSD11_SCIF1, VIO_D6_SCIF1_RXD), PINMUX_DATA(SCIF1_RTS_MARK, PSD12_SCIF1, VIO_CLK_SCIF1_RTS), PINMUX_DATA(SCIF1_CTS_MARK, PSD12_SCIF1, VIO_VD_SCIF1_CTS), PINMUX_DATA(SCIF1_SCK_MARK, PSD11_SCIF1, VIO_D7_SCIF1_SCK), /* SCIF2 */ PINMUX_DATA(SCIF2_TXD_MARK, PSD13_SCIF2, VIO_STEM_SCIF2_TXD), PINMUX_DATA(SCIF2_RXD_MARK, PSD13_SCIF2, VIO_HD_SCIF2_RXD), PINMUX_DATA(SCIF2_RTS_MARK, PSD13_SCIF2, VIO_CKO_SCIF2_RTS), PINMUX_DATA(SCIF2_CTS_MARK, PSD13_SCIF2, VIO_FLD_SCIF2_CTS), PINMUX_DATA(SCIF2_SCK_MARK, PSD13_SCIF2, VIO_STEX_SCIF2_SCK), /* SIO */ PINMUX_DATA(SIOTXD_MARK, PSB15_SIOTXD, SIOTXD_SIUBOSLD), PINMUX_DATA(SIORXD_MARK, PSB14_SIORXD, SIORXD_SIUBISLD), PINMUX_DATA(SIOD_MARK, PSB13_SIOD, SIOD_SIUBILR), PINMUX_DATA(SIOSTRB0_MARK, PSB12_SIOSTRB0, SIOSTRB0_SIUBIBT), PINMUX_DATA(SIOSTRB1_MARK, PSB11_SIOSTRB1, SIOSTRB1_SIUBOLR), PINMUX_DATA(SIOSCK_MARK, PSB10_SIOSCK, SIOSCK_SIUBOBT), PINMUX_DATA(SIOMCK_MARK, PSD9_SIOMCK_SIUMCKB, PSB9_SIOMCK, PTF6), /* CEU */ PINMUX_DATA(VIO_D15_MARK, PSC0_VIO, HIZA10_NAF, NAF7_VIO_D15), PINMUX_DATA(VIO_D14_MARK, PSC0_VIO, HIZA10_NAF, NAF6_VIO_D14), 
PINMUX_DATA(VIO_D13_MARK, PSC0_VIO, HIZA10_NAF, NAF5_VIO_D13), PINMUX_DATA(VIO_D12_MARK, PSC0_VIO, HIZA10_NAF, NAF4_VIO_D12), PINMUX_DATA(VIO_D11_MARK, PSC0_VIO, HIZA10_NAF, NAF3_VIO_D11), PINMUX_DATA(VIO_D10_MARK, PSE2_VIO_D10, HIZB0_VIO, NAF2_VIO_D10), PINMUX_DATA(VIO_D9_MARK, PSE1_VIO_D9, HIZB0_VIO, NAF1_VIO_D9), PINMUX_DATA(VIO_D8_MARK, PSE0_VIO_D8, HIZB0_VIO, NAF0_VIO_D8), PINMUX_DATA(VIO_D7_MARK, PSD11_VIO, VIO_D7_SCIF1_SCK), PINMUX_DATA(VIO_D6_MARK, PSD11_VIO, VIO_D6_SCIF1_RXD), PINMUX_DATA(VIO_D5_MARK, PSD11_VIO, VIO_D5_SCIF1_TXD), PINMUX_DATA(VIO_D4_MARK, VIO_D4), PINMUX_DATA(VIO_D3_MARK, VIO_D3), PINMUX_DATA(VIO_D2_MARK, VIO_D2), PINMUX_DATA(VIO_D1_MARK, VIO_D1), PINMUX_DATA(VIO_D0_MARK, PSD10_VIO_D0, VIO_D0_LCDLCLK), PINMUX_DATA(VIO_CLK_MARK, PSD12_VIO, MSELB9_VIO, VIO_CLK_SCIF1_RTS), PINMUX_DATA(VIO_VD_MARK, PSD12_VIO, MSELB9_VIO, VIO_VD_SCIF1_CTS), PINMUX_DATA(VIO_HD_MARK, PSD13_VIO, MSELB9_VIO, VIO_HD_SCIF2_RXD), PINMUX_DATA(VIO_FLD_MARK, PSD13_VIO, HIZA9_VIO, VIO_FLD_SCIF2_CTS), PINMUX_DATA(VIO_CKO_MARK, PSD13_VIO, HIZA9_VIO, VIO_CKO_SCIF2_RTS), PINMUX_DATA(VIO_STEX_MARK, PSD13_VIO, HIZA9_VIO, VIO_STEX_SCIF2_SCK), PINMUX_DATA(VIO_STEM_MARK, PSD13_VIO, HIZA9_VIO, VIO_STEM_SCIF2_TXD), PINMUX_DATA(VIO_VD2_MARK, PSE3_VIO, MSELB9_VIO2, HIZB0_VIO, FOE_VIO_VD2), PINMUX_DATA(VIO_HD2_MARK, PSE3_VIO, MSELB9_VIO2, HIZB1_VIO, FCE_VIO_HD2), PINMUX_DATA(VIO_CLK2_MARK, PSE3_VIO, MSELB9_VIO2, HIZB1_VIO, FRB_VIO_CLK2), /* LCDC */ PINMUX_DATA(LCDD23_MARK, HIZA8_LCDC, LCDD23), PINMUX_DATA(LCDD22_MARK, HIZA8_LCDC, LCDD22), PINMUX_DATA(LCDD21_MARK, HIZA8_LCDC, LCDD21), PINMUX_DATA(LCDD20_MARK, HIZA8_LCDC, LCDD20), PINMUX_DATA(LCDD19_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD19_DV_CLKI), PINMUX_DATA(LCDD18_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD18_DV_CLK), PINMUX_DATA(LCDD17_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD17_DV_HSYNC), PINMUX_DATA(LCDD16_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD16_DV_VSYNC), PINMUX_DATA(LCDD15_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD15_DV_D15), PINMUX_DATA(LCDD14_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD14_DV_D14), PINMUX_DATA(LCDD13_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD13_DV_D13), PINMUX_DATA(LCDD12_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD12_DV_D12), PINMUX_DATA(LCDD11_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD11_DV_D11), PINMUX_DATA(LCDD10_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD10_DV_D10), PINMUX_DATA(LCDD9_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD9_DV_D9), PINMUX_DATA(LCDD8_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD8_DV_D8), PINMUX_DATA(LCDD7_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD7_DV_D7), PINMUX_DATA(LCDD6_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD6_DV_D6), PINMUX_DATA(LCDD5_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD5_DV_D5), PINMUX_DATA(LCDD4_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD4_DV_D4), PINMUX_DATA(LCDD3_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD3_DV_D3), PINMUX_DATA(LCDD2_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD2_DV_D2), PINMUX_DATA(LCDD1_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD1_DV_D1), PINMUX_DATA(LCDD0_MARK, PSD0_LCDD19_LCDD0, HIZA8_LCDC, LCDD0_DV_D0), PINMUX_DATA(LCDLCLK_MARK, PSD10_LCDLCLK, VIO_D0_LCDLCLK), /* Main LCD */ PINMUX_DATA(LCDDON_MARK, PSD2_LCDDON, HIZA7_LCDC, LCDDON_LCDDON2), PINMUX_DATA(LCDVCPWC_MARK, PSD3_LCDVEPWC_LCDVCPWC, HIZA6_LCDC, LCDVCPWC_LCDVCPWC2), PINMUX_DATA(LCDVEPWC_MARK, PSD3_LCDVEPWC_LCDVCPWC, HIZA6_LCDC, LCDVEPWC_LCDVEPWC2), PINMUX_DATA(LCDVSYN_MARK, HIZA7_LCDC, LCDVSYN), /* Main LCD - RGB Mode */ PINMUX_DATA(LCDDCK_MARK, MSELB8_RGB, HIZA8_LCDC, LCDDCK_LCDWR), PINMUX_DATA(LCDHSYN_MARK, MSELB8_RGB, HIZA7_LCDC, 
LCDHSYN_LCDCS), PINMUX_DATA(LCDDISP_MARK, MSELB8_RGB, HIZA7_LCDC, LCDDISP_LCDRS), /* Main LCD - SYS Mode */ PINMUX_DATA(LCDRS_MARK, MSELB8_SYS, HIZA7_LCDC, LCDDISP_LCDRS), PINMUX_DATA(LCDCS_MARK, MSELB8_SYS, HIZA7_LCDC, LCDHSYN_LCDCS), PINMUX_DATA(LCDWR_MARK, MSELB8_SYS, HIZA8_LCDC, LCDDCK_LCDWR), PINMUX_DATA(LCDRD_MARK, HIZA7_LCDC, LCDRD), /* Sub LCD - SYS Mode */ PINMUX_DATA(LCDDON2_MARK, PSD2_LCDDON2, HIZA7_LCDC, LCDDON_LCDDON2), PINMUX_DATA(LCDVCPWC2_MARK, PSD3_LCDVEPWC2_LCDVCPWC2, HIZA6_LCDC, LCDVCPWC_LCDVCPWC2), PINMUX_DATA(LCDVEPWC2_MARK, PSD3_LCDVEPWC2_LCDVCPWC2, HIZA6_LCDC, LCDVEPWC_LCDVEPWC2), PINMUX_DATA(LCDVSYN2_MARK, PSE12_LCDVSYN2, HIZA8_LCDC, LCDVSYN2_DACK), PINMUX_DATA(LCDCS2_MARK, PSD5_LCDCS2, CS6B_CE1B_LCDCS2), /* BSC */ PINMUX_DATA(IOIS16_MARK, IOIS16), PINMUX_DATA(A25_MARK, A25), PINMUX_DATA(A24_MARK, A24), PINMUX_DATA(A23_MARK, A23), PINMUX_DATA(A22_MARK, A22), PINMUX_DATA(BS_MARK, PSA9_BS, IRQ4_BS), PINMUX_DATA(CS6B_CE1B_MARK, PSD5_CS6B_CE1B, CS6B_CE1B_LCDCS2), PINMUX_DATA(WAIT_MARK, WAIT), PINMUX_DATA(CS6A_CE2B_MARK, CS6A_CE2B), /* SBSC */ PINMUX_DATA(HPD63_MARK, HPD63), PINMUX_DATA(HPD62_MARK, HPD62), PINMUX_DATA(HPD61_MARK, HPD61), PINMUX_DATA(HPD60_MARK, HPD60), PINMUX_DATA(HPD59_MARK, HPD59), PINMUX_DATA(HPD58_MARK, HPD58), PINMUX_DATA(HPD57_MARK, HPD57), PINMUX_DATA(HPD56_MARK, HPD56), PINMUX_DATA(HPD55_MARK, HPD55), PINMUX_DATA(HPD54_MARK, HPD54), PINMUX_DATA(HPD53_MARK, HPD53), PINMUX_DATA(HPD52_MARK, HPD52), PINMUX_DATA(HPD51_MARK, HPD51), PINMUX_DATA(HPD50_MARK, HPD50), PINMUX_DATA(HPD49_MARK, HPD49), PINMUX_DATA(HPD48_MARK, HPD48), PINMUX_DATA(HPDQM7_MARK, HPDQM7), PINMUX_DATA(HPDQM6_MARK, HPDQM6), PINMUX_DATA(HPDQM5_MARK, HPDQM5), PINMUX_DATA(HPDQM4_MARK, HPDQM4), /* IRQ */ PINMUX_DATA(IRQ0_MARK, HIZC8_IRQ0, IRQ0), PINMUX_DATA(IRQ1_MARK, HIZC9_IRQ1, IRQ1), PINMUX_DATA(IRQ2_MARK, PSA4_IRQ2, HIZC10_IRQ2, IRQ2_SDHID2), PINMUX_DATA(IRQ3_MARK, PSE15_SIOF0_MCK_IRQ3, PSB8_IRQ3, HIZC11_IRQ3, PTQ0), PINMUX_DATA(IRQ4_MARK, PSA9_IRQ4, HIZC12_IRQ4, IRQ4_BS), PINMUX_DATA(IRQ5_MARK, HIZC13_IRQ5, IRQ5), PINMUX_DATA(IRQ6_MARK, PSA15_IRQ6, HIZC14_IRQ6, KEYIN0_IRQ6), PINMUX_DATA(IRQ7_MARK, PSA14_IRQ7, HIZC15_IRQ7, KEYIN4_IRQ7), /* SDHI */ PINMUX_DATA(SDHICD_MARK, SDHICD), PINMUX_DATA(SDHIWP_MARK, SDHIWP), PINMUX_DATA(SDHID3_MARK, SDHID3), PINMUX_DATA(SDHID2_MARK, PSA4_SDHID2, IRQ2_SDHID2), PINMUX_DATA(SDHID1_MARK, SDHID1), PINMUX_DATA(SDHID0_MARK, SDHID0), PINMUX_DATA(SDHICMD_MARK, SDHICMD), PINMUX_DATA(SDHICLK_MARK, SDHICLK), /* SIU - Port A */ PINMUX_DATA(SIUAOLR_MARK, PSC13_SIUAOLR, HIZB4_SIUA, SIUAOLR_SIOF1_SYNC), PINMUX_DATA(SIUAOBT_MARK, PSC14_SIUAOBT, HIZB4_SIUA, SIUAOBT_SIOF1_SCK), PINMUX_DATA(SIUAISLD_MARK, PSC15_SIUAISLD, HIZB4_SIUA, SIUAISLD_SIOF1_RXD), PINMUX_DATA(SIUAILR_MARK, PSC11_SIUAILR, HIZB4_SIUA, SIUAILR_SIOF1_SS2), PINMUX_DATA(SIUAIBT_MARK, PSC12_SIUAIBT, HIZB4_SIUA, SIUAIBT_SIOF1_SS1), PINMUX_DATA(SIUAOSLD_MARK, PSB0_SIUAOSLD, HIZB4_SIUA, SIUAOSLD_SIOF1_TXD), PINMUX_DATA(SIUMCKA_MARK, PSE11_SIUMCKA_SIOF1_MCK, HIZB4_SIUA, PSB1_SIUMCKA, PTK0), PINMUX_DATA(SIUFCKA_MARK, PSE11_SIUFCKA, HIZB4_SIUA, PTK0), /* SIU - Port B */ PINMUX_DATA(SIUBOLR_MARK, PSB11_SIUBOLR, SIOSTRB1_SIUBOLR), PINMUX_DATA(SIUBOBT_MARK, PSB10_SIUBOBT, SIOSCK_SIUBOBT), PINMUX_DATA(SIUBISLD_MARK, PSB14_SIUBISLD, SIORXD_SIUBISLD), PINMUX_DATA(SIUBILR_MARK, PSB13_SIUBILR, SIOD_SIUBILR), PINMUX_DATA(SIUBIBT_MARK, PSB12_SIUBIBT, SIOSTRB0_SIUBIBT), PINMUX_DATA(SIUBOSLD_MARK, PSB15_SIUBOSLD, SIOTXD_SIUBOSLD), PINMUX_DATA(SIUMCKB_MARK, PSD9_SIOMCK_SIUMCKB, PSB9_SIUMCKB, PTF6), 
PINMUX_DATA(SIUFCKB_MARK, PSD9_SIUFCKB, PTF6), /* AUD */ PINMUX_DATA(AUDSYNC_MARK, AUDSYNC), PINMUX_DATA(AUDATA3_MARK, AUDATA3), PINMUX_DATA(AUDATA2_MARK, AUDATA2), PINMUX_DATA(AUDATA1_MARK, AUDATA1), PINMUX_DATA(AUDATA0_MARK, AUDATA0), /* DMAC */ PINMUX_DATA(DACK_MARK, PSE12_DACK, LCDVSYN2_DACK), PINMUX_DATA(DREQ0_MARK, DREQ0), /* VOU */ PINMUX_DATA(DV_CLKI_MARK, PSD0_DV, LCDD19_DV_CLKI), PINMUX_DATA(DV_CLK_MARK, PSD0_DV, LCDD18_DV_CLK), PINMUX_DATA(DV_HSYNC_MARK, PSD0_DV, LCDD17_DV_HSYNC), PINMUX_DATA(DV_VSYNC_MARK, PSD0_DV, LCDD16_DV_VSYNC), PINMUX_DATA(DV_D15_MARK, PSD0_DV, LCDD15_DV_D15), PINMUX_DATA(DV_D14_MARK, PSD0_DV, LCDD14_DV_D14), PINMUX_DATA(DV_D13_MARK, PSD0_DV, LCDD13_DV_D13), PINMUX_DATA(DV_D12_MARK, PSD0_DV, LCDD12_DV_D12), PINMUX_DATA(DV_D11_MARK, PSD0_DV, LCDD11_DV_D11), PINMUX_DATA(DV_D10_MARK, PSD0_DV, LCDD10_DV_D10), PINMUX_DATA(DV_D9_MARK, PSD0_DV, LCDD9_DV_D9), PINMUX_DATA(DV_D8_MARK, PSD0_DV, LCDD8_DV_D8), PINMUX_DATA(DV_D7_MARK, PSD0_DV, LCDD7_DV_D7), PINMUX_DATA(DV_D6_MARK, PSD0_DV, LCDD6_DV_D6), PINMUX_DATA(DV_D5_MARK, PSD0_DV, LCDD5_DV_D5), PINMUX_DATA(DV_D4_MARK, PSD0_DV, LCDD4_DV_D4), PINMUX_DATA(DV_D3_MARK, PSD0_DV, LCDD3_DV_D3), PINMUX_DATA(DV_D2_MARK, PSD0_DV, LCDD2_DV_D2), PINMUX_DATA(DV_D1_MARK, PSD0_DV, LCDD1_DV_D1), PINMUX_DATA(DV_D0_MARK, PSD0_DV, LCDD0_DV_D0), /* CPG */ PINMUX_DATA(STATUS0_MARK, STATUS0), PINMUX_DATA(PDSTATUS_MARK, PDSTATUS), /* SIOF0 */ PINMUX_DATA(SIOF0_MCK_MARK, PSE15_SIOF0_MCK_IRQ3, PSB8_SIOF0_MCK, PTQ0), PINMUX_DATA(SIOF0_SCK_MARK, PSB5_SIOF0_SCK, SIOF0_SCK_TS_SCK), PINMUX_DATA(SIOF0_SYNC_MARK, PSB4_SIOF0_SYNC, SIOF0_SYNC_TS_SDEN), PINMUX_DATA(SIOF0_SS1_MARK, PSB3_SIOF0_SS1, SIOF0_SS1_TS_SPSYNC), PINMUX_DATA(SIOF0_SS2_MARK, PSB2_SIOF0_SS2, SIOF0_SS2_SIM_RST), PINMUX_DATA(SIOF0_TXD_MARK, PSE14_SIOF0_TXD_IRDA_OUT, PSB7_SIOF0_TXD, PTQ1), PINMUX_DATA(SIOF0_RXD_MARK, PSE13_SIOF0_RXD_IRDA_IN, PSB6_SIOF0_RXD, PTQ2), /* SIOF1 */ PINMUX_DATA(SIOF1_MCK_MARK, PSE11_SIUMCKA_SIOF1_MCK, PSB1_SIOF1_MCK, PTK0), PINMUX_DATA(SIOF1_SCK_MARK, PSC14_SIOF1_SCK, SIUAOBT_SIOF1_SCK), PINMUX_DATA(SIOF1_SYNC_MARK, PSC13_SIOF1_SYNC, SIUAOLR_SIOF1_SYNC), PINMUX_DATA(SIOF1_SS1_MARK, PSC12_SIOF1_SS1, SIUAIBT_SIOF1_SS1), PINMUX_DATA(SIOF1_SS2_MARK, PSC11_SIOF1_SS2, SIUAILR_SIOF1_SS2), PINMUX_DATA(SIOF1_TXD_MARK, PSB0_SIOF1_TXD, SIUAOSLD_SIOF1_TXD), PINMUX_DATA(SIOF1_RXD_MARK, PSC15_SIOF1_RXD, SIUAISLD_SIOF1_RXD), /* SIM */ PINMUX_DATA(SIM_D_MARK, PSE15_SIM_D, PTQ0), PINMUX_DATA(SIM_CLK_MARK, PSE14_SIM_CLK, PTQ1), PINMUX_DATA(SIM_RST_MARK, PSB2_SIM_RST, SIOF0_SS2_SIM_RST), /* TSIF */ PINMUX_DATA(TS_SDAT_MARK, PSE13_TS_SDAT, PTQ2), PINMUX_DATA(TS_SCK_MARK, PSB5_TS_SCK, SIOF0_SCK_TS_SCK), PINMUX_DATA(TS_SDEN_MARK, PSB4_TS_SDEN, SIOF0_SYNC_TS_SDEN), PINMUX_DATA(TS_SPSYNC_MARK, PSB3_TS_SPSYNC, SIOF0_SS1_TS_SPSYNC), /* IRDA */ PINMUX_DATA(IRDA_IN_MARK, PSE13_SIOF0_RXD_IRDA_IN, PSB6_IRDA_IN, PTQ2), PINMUX_DATA(IRDA_OUT_MARK, PSE14_SIOF0_TXD_IRDA_OUT, PSB7_IRDA_OUT, PTQ1), /* TPU */ PINMUX_DATA(TPUTO_MARK, PSD8_TPUTO, SCIF0_SCK_TPUTO), /* FLCTL */ PINMUX_DATA(FCE_MARK, PSE3_FLCTL, FCE_VIO_HD2), PINMUX_DATA(NAF7_MARK, PSC0_NAF, HIZA10_NAF, NAF7_VIO_D15), PINMUX_DATA(NAF6_MARK, PSC0_NAF, HIZA10_NAF, NAF6_VIO_D14), PINMUX_DATA(NAF5_MARK, PSC0_NAF, HIZA10_NAF, NAF5_VIO_D13), PINMUX_DATA(NAF4_MARK, PSC0_NAF, HIZA10_NAF, NAF4_VIO_D12), PINMUX_DATA(NAF3_MARK, PSC0_NAF, HIZA10_NAF, NAF3_VIO_D11), PINMUX_DATA(NAF2_MARK, PSE2_NAF2, HIZB0_VIO, NAF2_VIO_D10), PINMUX_DATA(NAF1_MARK, PSE1_NAF1, HIZB0_VIO, NAF1_VIO_D9), PINMUX_DATA(NAF0_MARK, PSE0_NAF0, HIZB0_VIO, NAF0_VIO_D8), 
PINMUX_DATA(FCDE_MARK, FCDE), PINMUX_DATA(FOE_MARK, PSE3_FLCTL, HIZB0_VIO, FOE_VIO_VD2), PINMUX_DATA(FSC_MARK, FSC), PINMUX_DATA(FWE_MARK, FWE), PINMUX_DATA(FRB_MARK, PSE3_FLCTL, FRB_VIO_CLK2), /* KEYSC */ PINMUX_DATA(KEYIN0_MARK, PSA15_KEYIN0, HIZC14_IRQ6, KEYIN0_IRQ6), PINMUX_DATA(KEYIN1_MARK, HIZA14_KEYSC, KEYIN1), PINMUX_DATA(KEYIN2_MARK, HIZA14_KEYSC, KEYIN2), PINMUX_DATA(KEYIN3_MARK, HIZA14_KEYSC, KEYIN3), PINMUX_DATA(KEYIN4_MARK, PSA14_KEYIN4, HIZC15_IRQ7, KEYIN4_IRQ7), PINMUX_DATA(KEYOUT0_MARK, HIZA14_KEYSC, KEYOUT0), PINMUX_DATA(KEYOUT1_MARK, HIZA14_KEYSC, KEYOUT1), PINMUX_DATA(KEYOUT2_MARK, HIZA14_KEYSC, KEYOUT2), PINMUX_DATA(KEYOUT3_MARK, HIZA14_KEYSC, KEYOUT3), PINMUX_DATA(KEYOUT4_IN6_MARK, HIZA14_KEYSC, KEYOUT4_IN6), PINMUX_DATA(KEYOUT5_IN5_MARK, HIZA14_KEYSC, KEYOUT5_IN5), }; static struct pinmux_gpio pinmux_gpios[] = { /* PTA */ PINMUX_GPIO(GPIO_PTA7, PTA7_DATA), PINMUX_GPIO(GPIO_PTA6, PTA6_DATA), PINMUX_GPIO(GPIO_PTA5, PTA5_DATA), PINMUX_GPIO(GPIO_PTA4, PTA4_DATA), PINMUX_GPIO(GPIO_PTA3, PTA3_DATA), PINMUX_GPIO(GPIO_PTA2, PTA2_DATA), PINMUX_GPIO(GPIO_PTA1, PTA1_DATA), PINMUX_GPIO(GPIO_PTA0, PTA0_DATA), /* PTB */ PINMUX_GPIO(GPIO_PTB7, PTB7_DATA), PINMUX_GPIO(GPIO_PTB6, PTB6_DATA), PINMUX_GPIO(GPIO_PTB5, PTB5_DATA), PINMUX_GPIO(GPIO_PTB4, PTB4_DATA), PINMUX_GPIO(GPIO_PTB3, PTB3_DATA), PINMUX_GPIO(GPIO_PTB2, PTB2_DATA), PINMUX_GPIO(GPIO_PTB1, PTB1_DATA), PINMUX_GPIO(GPIO_PTB0, PTB0_DATA), /* PTC */ PINMUX_GPIO(GPIO_PTC7, PTC7_DATA), PINMUX_GPIO(GPIO_PTC5, PTC5_DATA), PINMUX_GPIO(GPIO_PTC4, PTC4_DATA), PINMUX_GPIO(GPIO_PTC3, PTC3_DATA), PINMUX_GPIO(GPIO_PTC2, PTC2_DATA), PINMUX_GPIO(GPIO_PTC0, PTC0_DATA), /* PTD */ PINMUX_GPIO(GPIO_PTD7, PTD7_DATA), PINMUX_GPIO(GPIO_PTD6, PTD6_DATA), PINMUX_GPIO(GPIO_PTD5, PTD5_DATA), PINMUX_GPIO(GPIO_PTD4, PTD4_DATA), PINMUX_GPIO(GPIO_PTD3, PTD3_DATA), PINMUX_GPIO(GPIO_PTD2, PTD2_DATA), PINMUX_GPIO(GPIO_PTD1, PTD1_DATA), PINMUX_GPIO(GPIO_PTD0, PTD0_DATA), /* PTE */ PINMUX_GPIO(GPIO_PTE7, PTE7_DATA), PINMUX_GPIO(GPIO_PTE6, PTE6_DATA), PINMUX_GPIO(GPIO_PTE5, PTE5_DATA), PINMUX_GPIO(GPIO_PTE4, PTE4_DATA), PINMUX_GPIO(GPIO_PTE1, PTE1_DATA), PINMUX_GPIO(GPIO_PTE0, PTE0_DATA), /* PTF */ PINMUX_GPIO(GPIO_PTF6, PTF6_DATA), PINMUX_GPIO(GPIO_PTF5, PTF5_DATA), PINMUX_GPIO(GPIO_PTF4, PTF4_DATA), PINMUX_GPIO(GPIO_PTF3, PTF3_DATA), PINMUX_GPIO(GPIO_PTF2, PTF2_DATA), PINMUX_GPIO(GPIO_PTF1, PTF1_DATA), PINMUX_GPIO(GPIO_PTF0, PTF0_DATA), /* PTG */ PINMUX_GPIO(GPIO_PTG4, PTG4_DATA), PINMUX_GPIO(GPIO_PTG3, PTG3_DATA), PINMUX_GPIO(GPIO_PTG2, PTG2_DATA), PINMUX_GPIO(GPIO_PTG1, PTG1_DATA), PINMUX_GPIO(GPIO_PTG0, PTG0_DATA), /* PTH */ PINMUX_GPIO(GPIO_PTH7, PTH7_DATA), PINMUX_GPIO(GPIO_PTH6, PTH6_DATA), PINMUX_GPIO(GPIO_PTH5, PTH5_DATA), PINMUX_GPIO(GPIO_PTH4, PTH4_DATA), PINMUX_GPIO(GPIO_PTH3, PTH3_DATA), PINMUX_GPIO(GPIO_PTH2, PTH2_DATA), PINMUX_GPIO(GPIO_PTH1, PTH1_DATA), PINMUX_GPIO(GPIO_PTH0, PTH0_DATA), /* PTJ */ PINMUX_GPIO(GPIO_PTJ7, PTJ7_DATA), PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA), PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA), PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA), PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA), /* PTK */ PINMUX_GPIO(GPIO_PTK6, PTK6_DATA), PINMUX_GPIO(GPIO_PTK5, PTK5_DATA), PINMUX_GPIO(GPIO_PTK4, PTK4_DATA), PINMUX_GPIO(GPIO_PTK3, PTK3_DATA), PINMUX_GPIO(GPIO_PTK2, PTK2_DATA), PINMUX_GPIO(GPIO_PTK1, PTK1_DATA), PINMUX_GPIO(GPIO_PTK0, PTK0_DATA), /* PTL */ PINMUX_GPIO(GPIO_PTL7, PTL7_DATA), PINMUX_GPIO(GPIO_PTL6, PTL6_DATA), PINMUX_GPIO(GPIO_PTL5, PTL5_DATA), PINMUX_GPIO(GPIO_PTL4, PTL4_DATA), PINMUX_GPIO(GPIO_PTL3, PTL3_DATA), PINMUX_GPIO(GPIO_PTL2, PTL2_DATA), 
PINMUX_GPIO(GPIO_PTL1, PTL1_DATA), PINMUX_GPIO(GPIO_PTL0, PTL0_DATA), /* PTM */ PINMUX_GPIO(GPIO_PTM7, PTM7_DATA), PINMUX_GPIO(GPIO_PTM6, PTM6_DATA), PINMUX_GPIO(GPIO_PTM5, PTM5_DATA), PINMUX_GPIO(GPIO_PTM4, PTM4_DATA), PINMUX_GPIO(GPIO_PTM3, PTM3_DATA), PINMUX_GPIO(GPIO_PTM2, PTM2_DATA), PINMUX_GPIO(GPIO_PTM1, PTM1_DATA), PINMUX_GPIO(GPIO_PTM0, PTM0_DATA), /* PTN */ PINMUX_GPIO(GPIO_PTN7, PTN7_DATA), PINMUX_GPIO(GPIO_PTN6, PTN6_DATA), PINMUX_GPIO(GPIO_PTN5, PTN5_DATA), PINMUX_GPIO(GPIO_PTN4, PTN4_DATA), PINMUX_GPIO(GPIO_PTN3, PTN3_DATA), PINMUX_GPIO(GPIO_PTN2, PTN2_DATA), PINMUX_GPIO(GPIO_PTN1, PTN1_DATA), PINMUX_GPIO(GPIO_PTN0, PTN0_DATA), /* PTQ */ PINMUX_GPIO(GPIO_PTQ6, PTQ6_DATA), PINMUX_GPIO(GPIO_PTQ5, PTQ5_DATA), PINMUX_GPIO(GPIO_PTQ4, PTQ4_DATA), PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA), PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA), PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA), PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA), /* PTR */ PINMUX_GPIO(GPIO_PTR4, PTR4_DATA), PINMUX_GPIO(GPIO_PTR3, PTR3_DATA), PINMUX_GPIO(GPIO_PTR2, PTR2_DATA), PINMUX_GPIO(GPIO_PTR1, PTR1_DATA), PINMUX_GPIO(GPIO_PTR0, PTR0_DATA), /* PTS */ PINMUX_GPIO(GPIO_PTS4, PTS4_DATA), PINMUX_GPIO(GPIO_PTS3, PTS3_DATA), PINMUX_GPIO(GPIO_PTS2, PTS2_DATA), PINMUX_GPIO(GPIO_PTS1, PTS1_DATA), PINMUX_GPIO(GPIO_PTS0, PTS0_DATA), /* PTT */ PINMUX_GPIO(GPIO_PTT4, PTT4_DATA), PINMUX_GPIO(GPIO_PTT3, PTT3_DATA), PINMUX_GPIO(GPIO_PTT2, PTT2_DATA), PINMUX_GPIO(GPIO_PTT1, PTT1_DATA), PINMUX_GPIO(GPIO_PTT0, PTT0_DATA), /* PTU */ PINMUX_GPIO(GPIO_PTU4, PTU4_DATA), PINMUX_GPIO(GPIO_PTU3, PTU3_DATA), PINMUX_GPIO(GPIO_PTU2, PTU2_DATA), PINMUX_GPIO(GPIO_PTU1, PTU1_DATA), PINMUX_GPIO(GPIO_PTU0, PTU0_DATA), /* PTV */ PINMUX_GPIO(GPIO_PTV4, PTV4_DATA), PINMUX_GPIO(GPIO_PTV3, PTV3_DATA), PINMUX_GPIO(GPIO_PTV2, PTV2_DATA), PINMUX_GPIO(GPIO_PTV1, PTV1_DATA), PINMUX_GPIO(GPIO_PTV0, PTV0_DATA), /* PTW */ PINMUX_GPIO(GPIO_PTW6, PTW6_DATA), PINMUX_GPIO(GPIO_PTW5, PTW5_DATA), PINMUX_GPIO(GPIO_PTW4, PTW4_DATA), PINMUX_GPIO(GPIO_PTW3, PTW3_DATA), PINMUX_GPIO(GPIO_PTW2, PTW2_DATA), PINMUX_GPIO(GPIO_PTW1, PTW1_DATA), PINMUX_GPIO(GPIO_PTW0, PTW0_DATA), /* PTX */ PINMUX_GPIO(GPIO_PTX6, PTX6_DATA), PINMUX_GPIO(GPIO_PTX5, PTX5_DATA), PINMUX_GPIO(GPIO_PTX4, PTX4_DATA), PINMUX_GPIO(GPIO_PTX3, PTX3_DATA), PINMUX_GPIO(GPIO_PTX2, PTX2_DATA), PINMUX_GPIO(GPIO_PTX1, PTX1_DATA), PINMUX_GPIO(GPIO_PTX0, PTX0_DATA), /* PTY */ PINMUX_GPIO(GPIO_PTY5, PTY5_DATA), PINMUX_GPIO(GPIO_PTY4, PTY4_DATA), PINMUX_GPIO(GPIO_PTY3, PTY3_DATA), PINMUX_GPIO(GPIO_PTY2, PTY2_DATA), PINMUX_GPIO(GPIO_PTY1, PTY1_DATA), PINMUX_GPIO(GPIO_PTY0, PTY0_DATA), /* PTZ */ PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA), PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA), PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA), PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA), PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA), /* SCIF0 */ PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF0_RTS, SCIF0_RTS_MARK), PINMUX_GPIO(GPIO_FN_SCIF0_CTS, SCIF0_CTS_MARK), PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK), /* SCIF1 */ PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF1_RTS, SCIF1_RTS_MARK), PINMUX_GPIO(GPIO_FN_SCIF1_CTS, SCIF1_CTS_MARK), PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK), /* SCIF2 */ PINMUX_GPIO(GPIO_FN_SCIF2_TXD, SCIF2_TXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF2_RXD, SCIF2_RXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF2_RTS, SCIF2_RTS_MARK), PINMUX_GPIO(GPIO_FN_SCIF2_CTS, SCIF2_CTS_MARK), PINMUX_GPIO(GPIO_FN_SCIF2_SCK, SCIF2_SCK_MARK), /* SIO */ PINMUX_GPIO(GPIO_FN_SIOTXD, SIOTXD_MARK), 
PINMUX_GPIO(GPIO_FN_SIORXD, SIORXD_MARK), PINMUX_GPIO(GPIO_FN_SIOD, SIOD_MARK), PINMUX_GPIO(GPIO_FN_SIOSTRB0, SIOSTRB0_MARK), PINMUX_GPIO(GPIO_FN_SIOSTRB1, SIOSTRB1_MARK), PINMUX_GPIO(GPIO_FN_SIOSCK, SIOSCK_MARK), PINMUX_GPIO(GPIO_FN_SIOMCK, SIOMCK_MARK), /* CEU */ PINMUX_GPIO(GPIO_FN_VIO_D15, VIO_D15_MARK), PINMUX_GPIO(GPIO_FN_VIO_D14, VIO_D14_MARK), PINMUX_GPIO(GPIO_FN_VIO_D13, VIO_D13_MARK), PINMUX_GPIO(GPIO_FN_VIO_D12, VIO_D12_MARK), PINMUX_GPIO(GPIO_FN_VIO_D11, VIO_D11_MARK), PINMUX_GPIO(GPIO_FN_VIO_D10, VIO_D10_MARK), PINMUX_GPIO(GPIO_FN_VIO_D9, VIO_D9_MARK), PINMUX_GPIO(GPIO_FN_VIO_D8, VIO_D8_MARK), PINMUX_GPIO(GPIO_FN_VIO_D7, VIO_D7_MARK), PINMUX_GPIO(GPIO_FN_VIO_D6, VIO_D6_MARK), PINMUX_GPIO(GPIO_FN_VIO_D5, VIO_D5_MARK), PINMUX_GPIO(GPIO_FN_VIO_D4, VIO_D4_MARK), PINMUX_GPIO(GPIO_FN_VIO_D3, VIO_D3_MARK), PINMUX_GPIO(GPIO_FN_VIO_D2, VIO_D2_MARK), PINMUX_GPIO(GPIO_FN_VIO_D1, VIO_D1_MARK), PINMUX_GPIO(GPIO_FN_VIO_D0, VIO_D0_MARK), PINMUX_GPIO(GPIO_FN_VIO_CLK, VIO_CLK_MARK), PINMUX_GPIO(GPIO_FN_VIO_VD, VIO_VD_MARK), PINMUX_GPIO(GPIO_FN_VIO_HD, VIO_HD_MARK), PINMUX_GPIO(GPIO_FN_VIO_FLD, VIO_FLD_MARK), PINMUX_GPIO(GPIO_FN_VIO_CKO, VIO_CKO_MARK), PINMUX_GPIO(GPIO_FN_VIO_STEX, VIO_STEX_MARK), PINMUX_GPIO(GPIO_FN_VIO_STEM, VIO_STEM_MARK), PINMUX_GPIO(GPIO_FN_VIO_VD2, VIO_VD2_MARK), PINMUX_GPIO(GPIO_FN_VIO_HD2, VIO_HD2_MARK), PINMUX_GPIO(GPIO_FN_VIO_CLK2, VIO_CLK2_MARK), /* LCDC */ PINMUX_GPIO(GPIO_FN_LCDD23, LCDD23_MARK), PINMUX_GPIO(GPIO_FN_LCDD22, LCDD22_MARK), PINMUX_GPIO(GPIO_FN_LCDD21, LCDD21_MARK), PINMUX_GPIO(GPIO_FN_LCDD20, LCDD20_MARK), PINMUX_GPIO(GPIO_FN_LCDD19, LCDD19_MARK), PINMUX_GPIO(GPIO_FN_LCDD18, LCDD18_MARK), PINMUX_GPIO(GPIO_FN_LCDD17, LCDD17_MARK), PINMUX_GPIO(GPIO_FN_LCDD16, LCDD16_MARK), PINMUX_GPIO(GPIO_FN_LCDD15, LCDD15_MARK), PINMUX_GPIO(GPIO_FN_LCDD14, LCDD14_MARK), PINMUX_GPIO(GPIO_FN_LCDD13, LCDD13_MARK), PINMUX_GPIO(GPIO_FN_LCDD12, LCDD12_MARK), PINMUX_GPIO(GPIO_FN_LCDD11, LCDD11_MARK), PINMUX_GPIO(GPIO_FN_LCDD10, LCDD10_MARK), PINMUX_GPIO(GPIO_FN_LCDD9, LCDD9_MARK), PINMUX_GPIO(GPIO_FN_LCDD8, LCDD8_MARK), PINMUX_GPIO(GPIO_FN_LCDD7, LCDD7_MARK), PINMUX_GPIO(GPIO_FN_LCDD6, LCDD6_MARK), PINMUX_GPIO(GPIO_FN_LCDD5, LCDD5_MARK), PINMUX_GPIO(GPIO_FN_LCDD4, LCDD4_MARK), PINMUX_GPIO(GPIO_FN_LCDD3, LCDD3_MARK), PINMUX_GPIO(GPIO_FN_LCDD2, LCDD2_MARK), PINMUX_GPIO(GPIO_FN_LCDD1, LCDD1_MARK), PINMUX_GPIO(GPIO_FN_LCDD0, LCDD0_MARK), PINMUX_GPIO(GPIO_FN_LCDLCLK, LCDLCLK_MARK), /* Main LCD */ PINMUX_GPIO(GPIO_FN_LCDDON, LCDDON_MARK), PINMUX_GPIO(GPIO_FN_LCDVCPWC, LCDVCPWC_MARK), PINMUX_GPIO(GPIO_FN_LCDVEPWC, LCDVEPWC_MARK), PINMUX_GPIO(GPIO_FN_LCDVSYN, LCDVSYN_MARK), /* Main LCD - RGB Mode */ PINMUX_GPIO(GPIO_FN_LCDDCK, LCDDCK_MARK), PINMUX_GPIO(GPIO_FN_LCDHSYN, LCDHSYN_MARK), PINMUX_GPIO(GPIO_FN_LCDDISP, LCDDISP_MARK), /* Main LCD - SYS Mode */ PINMUX_GPIO(GPIO_FN_LCDRS, LCDRS_MARK), PINMUX_GPIO(GPIO_FN_LCDCS, LCDCS_MARK), PINMUX_GPIO(GPIO_FN_LCDWR, LCDWR_MARK), PINMUX_GPIO(GPIO_FN_LCDRD, LCDRD_MARK), /* Sub LCD - SYS Mode */ PINMUX_GPIO(GPIO_FN_LCDDON2, LCDDON2_MARK), PINMUX_GPIO(GPIO_FN_LCDVCPWC2, LCDVCPWC2_MARK), PINMUX_GPIO(GPIO_FN_LCDVEPWC2, LCDVEPWC2_MARK), PINMUX_GPIO(GPIO_FN_LCDVSYN2, LCDVSYN2_MARK), PINMUX_GPIO(GPIO_FN_LCDCS2, LCDCS2_MARK), /* BSC */ PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK), PINMUX_GPIO(GPIO_FN_A25, A25_MARK), PINMUX_GPIO(GPIO_FN_A24, A24_MARK), PINMUX_GPIO(GPIO_FN_A23, A23_MARK), PINMUX_GPIO(GPIO_FN_A22, A22_MARK), PINMUX_GPIO(GPIO_FN_BS, BS_MARK), PINMUX_GPIO(GPIO_FN_CS6B_CE1B, CS6B_CE1B_MARK), PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK), 
PINMUX_GPIO(GPIO_FN_CS6A_CE2B, CS6A_CE2B_MARK), /* SBSC */ PINMUX_GPIO(GPIO_FN_HPD63, HPD63_MARK), PINMUX_GPIO(GPIO_FN_HPD62, HPD62_MARK), PINMUX_GPIO(GPIO_FN_HPD61, HPD61_MARK), PINMUX_GPIO(GPIO_FN_HPD60, HPD60_MARK), PINMUX_GPIO(GPIO_FN_HPD59, HPD59_MARK), PINMUX_GPIO(GPIO_FN_HPD58, HPD58_MARK), PINMUX_GPIO(GPIO_FN_HPD57, HPD57_MARK), PINMUX_GPIO(GPIO_FN_HPD56, HPD56_MARK), PINMUX_GPIO(GPIO_FN_HPD55, HPD55_MARK), PINMUX_GPIO(GPIO_FN_HPD54, HPD54_MARK), PINMUX_GPIO(GPIO_FN_HPD53, HPD53_MARK), PINMUX_GPIO(GPIO_FN_HPD52, HPD52_MARK), PINMUX_GPIO(GPIO_FN_HPD51, HPD51_MARK), PINMUX_GPIO(GPIO_FN_HPD50, HPD50_MARK), PINMUX_GPIO(GPIO_FN_HPD49, HPD49_MARK), PINMUX_GPIO(GPIO_FN_HPD48, HPD48_MARK), PINMUX_GPIO(GPIO_FN_HPDQM7, HPDQM7_MARK), PINMUX_GPIO(GPIO_FN_HPDQM6, HPDQM6_MARK), PINMUX_GPIO(GPIO_FN_HPDQM5, HPDQM5_MARK), PINMUX_GPIO(GPIO_FN_HPDQM4, HPDQM4_MARK), /* IRQ */ PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK), PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK), PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK), PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK), PINMUX_GPIO(GPIO_FN_IRQ4, IRQ4_MARK), PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK), PINMUX_GPIO(GPIO_FN_IRQ6, IRQ6_MARK), PINMUX_GPIO(GPIO_FN_IRQ7, IRQ7_MARK), /* SDHI */ PINMUX_GPIO(GPIO_FN_SDHICD, SDHICD_MARK), PINMUX_GPIO(GPIO_FN_SDHIWP, SDHIWP_MARK), PINMUX_GPIO(GPIO_FN_SDHID3, SDHID3_MARK), PINMUX_GPIO(GPIO_FN_SDHID2, SDHID2_MARK), PINMUX_GPIO(GPIO_FN_SDHID1, SDHID1_MARK), PINMUX_GPIO(GPIO_FN_SDHID0, SDHID0_MARK), PINMUX_GPIO(GPIO_FN_SDHICMD, SDHICMD_MARK), PINMUX_GPIO(GPIO_FN_SDHICLK, SDHICLK_MARK), /* SIU - Port A */ PINMUX_GPIO(GPIO_FN_SIUAOLR, SIUAOLR_MARK), PINMUX_GPIO(GPIO_FN_SIUAOBT, SIUAOBT_MARK), PINMUX_GPIO(GPIO_FN_SIUAISLD, SIUAISLD_MARK), PINMUX_GPIO(GPIO_FN_SIUAILR, SIUAILR_MARK), PINMUX_GPIO(GPIO_FN_SIUAIBT, SIUAIBT_MARK), PINMUX_GPIO(GPIO_FN_SIUAOSLD, SIUAOSLD_MARK), PINMUX_GPIO(GPIO_FN_SIUMCKA, SIUMCKA_MARK), PINMUX_GPIO(GPIO_FN_SIUFCKA, SIUFCKA_MARK), /* SIU - Port B */ PINMUX_GPIO(GPIO_FN_SIUBOLR, SIUBOLR_MARK), PINMUX_GPIO(GPIO_FN_SIUBOBT, SIUBOBT_MARK), PINMUX_GPIO(GPIO_FN_SIUBISLD, SIUBISLD_MARK), PINMUX_GPIO(GPIO_FN_SIUBILR, SIUBILR_MARK), PINMUX_GPIO(GPIO_FN_SIUBIBT, SIUBIBT_MARK), PINMUX_GPIO(GPIO_FN_SIUBOSLD, SIUBOSLD_MARK), PINMUX_GPIO(GPIO_FN_SIUMCKB, SIUMCKB_MARK), PINMUX_GPIO(GPIO_FN_SIUFCKB, SIUFCKB_MARK), /* AUD */ PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK), PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK), PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK), PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK), PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK), /* DMAC */ PINMUX_GPIO(GPIO_FN_DACK, DACK_MARK), PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK), /* VOU */ PINMUX_GPIO(GPIO_FN_DV_CLKI, DV_CLKI_MARK), PINMUX_GPIO(GPIO_FN_DV_CLK, DV_CLK_MARK), PINMUX_GPIO(GPIO_FN_DV_HSYNC, DV_HSYNC_MARK), PINMUX_GPIO(GPIO_FN_DV_VSYNC, DV_VSYNC_MARK), PINMUX_GPIO(GPIO_FN_DV_D15, DV_D15_MARK), PINMUX_GPIO(GPIO_FN_DV_D14, DV_D14_MARK), PINMUX_GPIO(GPIO_FN_DV_D13, DV_D13_MARK), PINMUX_GPIO(GPIO_FN_DV_D12, DV_D12_MARK), PINMUX_GPIO(GPIO_FN_DV_D11, DV_D11_MARK), PINMUX_GPIO(GPIO_FN_DV_D10, DV_D10_MARK), PINMUX_GPIO(GPIO_FN_DV_D9, DV_D9_MARK), PINMUX_GPIO(GPIO_FN_DV_D8, DV_D8_MARK), PINMUX_GPIO(GPIO_FN_DV_D7, DV_D7_MARK), PINMUX_GPIO(GPIO_FN_DV_D6, DV_D6_MARK), PINMUX_GPIO(GPIO_FN_DV_D5, DV_D5_MARK), PINMUX_GPIO(GPIO_FN_DV_D4, DV_D4_MARK), PINMUX_GPIO(GPIO_FN_DV_D3, DV_D3_MARK), PINMUX_GPIO(GPIO_FN_DV_D2, DV_D2_MARK), PINMUX_GPIO(GPIO_FN_DV_D1, DV_D1_MARK), PINMUX_GPIO(GPIO_FN_DV_D0, DV_D0_MARK), /* CPG */ PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK), PINMUX_GPIO(GPIO_FN_PDSTATUS, PDSTATUS_MARK), /* SIOF0 */ 
PINMUX_GPIO(GPIO_FN_SIOF0_MCK, SIOF0_MCK_MARK), PINMUX_GPIO(GPIO_FN_SIOF0_SCK, SIOF0_SCK_MARK), PINMUX_GPIO(GPIO_FN_SIOF0_SYNC, SIOF0_SYNC_MARK), PINMUX_GPIO(GPIO_FN_SIOF0_SS1, SIOF0_SS1_MARK), PINMUX_GPIO(GPIO_FN_SIOF0_SS2, SIOF0_SS2_MARK), PINMUX_GPIO(GPIO_FN_SIOF0_TXD, SIOF0_TXD_MARK), PINMUX_GPIO(GPIO_FN_SIOF0_RXD, SIOF0_RXD_MARK), /* SIOF1 */ PINMUX_GPIO(GPIO_FN_SIOF1_MCK, SIOF1_MCK_MARK), PINMUX_GPIO(GPIO_FN_SIOF1_SCK, SIOF1_SCK_MARK), PINMUX_GPIO(GPIO_FN_SIOF1_SYNC, SIOF1_SYNC_MARK), PINMUX_GPIO(GPIO_FN_SIOF1_SS1, SIOF1_SS1_MARK), PINMUX_GPIO(GPIO_FN_SIOF1_SS2, SIOF1_SS2_MARK), PINMUX_GPIO(GPIO_FN_SIOF1_TXD, SIOF1_TXD_MARK), PINMUX_GPIO(GPIO_FN_SIOF1_RXD, SIOF1_RXD_MARK), /* SIM */ PINMUX_GPIO(GPIO_FN_SIM_D, SIM_D_MARK), PINMUX_GPIO(GPIO_FN_SIM_CLK, SIM_CLK_MARK), PINMUX_GPIO(GPIO_FN_SIM_RST, SIM_RST_MARK), /* TSIF */ PINMUX_GPIO(GPIO_FN_TS_SDAT, TS_SDAT_MARK), PINMUX_GPIO(GPIO_FN_TS_SCK, TS_SCK_MARK), PINMUX_GPIO(GPIO_FN_TS_SDEN, TS_SDEN_MARK), PINMUX_GPIO(GPIO_FN_TS_SPSYNC, TS_SPSYNC_MARK), /* IRDA */ PINMUX_GPIO(GPIO_FN_IRDA_IN, IRDA_IN_MARK), PINMUX_GPIO(GPIO_FN_IRDA_OUT, IRDA_OUT_MARK), /* TPU */ PINMUX_GPIO(GPIO_FN_TPUTO, TPUTO_MARK), /* FLCTL */ PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK), PINMUX_GPIO(GPIO_FN_NAF7, NAF7_MARK), PINMUX_GPIO(GPIO_FN_NAF6, NAF6_MARK), PINMUX_GPIO(GPIO_FN_NAF5, NAF5_MARK), PINMUX_GPIO(GPIO_FN_NAF4, NAF4_MARK), PINMUX_GPIO(GPIO_FN_NAF3, NAF3_MARK), PINMUX_GPIO(GPIO_FN_NAF2, NAF2_MARK), PINMUX_GPIO(GPIO_FN_NAF1, NAF1_MARK), PINMUX_GPIO(GPIO_FN_NAF0, NAF0_MARK), PINMUX_GPIO(GPIO_FN_FCDE, FCDE_MARK), PINMUX_GPIO(GPIO_FN_FOE, FOE_MARK), PINMUX_GPIO(GPIO_FN_FSC, FSC_MARK), PINMUX_GPIO(GPIO_FN_FWE, FWE_MARK), PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK), /* KEYSC */ PINMUX_GPIO(GPIO_FN_KEYIN0, KEYIN0_MARK), PINMUX_GPIO(GPIO_FN_KEYIN1, KEYIN1_MARK), PINMUX_GPIO(GPIO_FN_KEYIN2, KEYIN2_MARK), PINMUX_GPIO(GPIO_FN_KEYIN3, KEYIN3_MARK), PINMUX_GPIO(GPIO_FN_KEYIN4, KEYIN4_MARK), PINMUX_GPIO(GPIO_FN_KEYOUT0, KEYOUT0_MARK), PINMUX_GPIO(GPIO_FN_KEYOUT1, KEYOUT1_MARK), PINMUX_GPIO(GPIO_FN_KEYOUT2, KEYOUT2_MARK), PINMUX_GPIO(GPIO_FN_KEYOUT3, KEYOUT3_MARK), PINMUX_GPIO(GPIO_FN_KEYOUT4_IN6, KEYOUT4_IN6_MARK), PINMUX_GPIO(GPIO_FN_KEYOUT5_IN5, KEYOUT5_IN5_MARK), }; static struct pinmux_cfg_reg pinmux_config_regs[] = { { PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) { VIO_D7_SCIF1_SCK, PTA7_OUT, PTA7_IN_PD, PTA7_IN, VIO_D6_SCIF1_RXD, 0, PTA6_IN_PD, PTA6_IN, VIO_D5_SCIF1_TXD, PTA5_OUT, PTA5_IN_PD, PTA5_IN, VIO_D4, 0, PTA4_IN_PD, PTA4_IN, VIO_D3, 0, PTA3_IN_PD, PTA3_IN, VIO_D2, 0, PTA2_IN_PD, PTA2_IN, VIO_D1, 0, PTA1_IN_PD, PTA1_IN, VIO_D0_LCDLCLK, 0, PTA0_IN_PD, PTA0_IN } }, { PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) { HPD55, PTB7_OUT, 0, PTB7_IN, HPD54, PTB6_OUT, 0, PTB6_IN, HPD53, PTB5_OUT, 0, PTB5_IN, HPD52, PTB4_OUT, 0, PTB4_IN, HPD51, PTB3_OUT, 0, PTB3_IN, HPD50, PTB2_OUT, 0, PTB2_IN, HPD49, PTB1_OUT, 0, PTB1_IN, HPD48, PTB0_OUT, 0, PTB0_IN } }, { PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) { 0, 0, PTC7_IN_PU, PTC7_IN, 0, 0, 0, 0, IOIS16, 0, PTC5_IN_PU, PTC5_IN, HPDQM7, PTC4_OUT, 0, PTC4_IN, HPDQM6, PTC3_OUT, 0, PTC3_IN, HPDQM5, PTC2_OUT, 0, PTC2_IN, 0, 0, 0, 0, HPDQM4, PTC0_OUT, 0, PTC0_IN } }, { PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) { SDHICD, 0, PTD7_IN_PU, PTD7_IN, SDHIWP, PTD6_OUT, PTD6_IN_PU, PTD6_IN, SDHID3, PTD5_OUT, PTD5_IN_PU, PTD5_IN, IRQ2_SDHID2, PTD4_OUT, PTD4_IN_PU, PTD4_IN, SDHID1, PTD3_OUT, PTD3_IN_PU, PTD3_IN, SDHID0, PTD2_OUT, PTD2_IN_PU, PTD2_IN, SDHICMD, PTD1_OUT, PTD1_IN_PU, PTD1_IN, SDHICLK, PTD0_OUT, 0, 0 } }, { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) { A25, 
PTE7_OUT, PTE7_IN_PD, PTE7_IN, A24, PTE6_OUT, PTE6_IN_PD, PTE6_IN, A23, PTE5_OUT, PTE5_IN_PD, PTE5_IN, A22, PTE4_OUT, PTE4_IN_PD, PTE4_IN, 0, 0, 0, 0, 0, 0, 0, 0, IRQ5, PTE1_OUT, PTE1_IN_PD, PTE1_IN, IRQ4_BS, PTE0_OUT, PTE0_IN_PD, PTE0_IN } }, { PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) { 0, 0, 0, 0, PTF6, PTF6_OUT, PTF6_IN_PD, PTF6_IN, SIOSCK_SIUBOBT, PTF5_OUT, PTF5_IN_PD, PTF5_IN, SIOSTRB1_SIUBOLR, PTF4_OUT, PTF4_IN_PD, PTF4_IN, SIOSTRB0_SIUBIBT, PTF3_OUT, PTF3_IN_PD, PTF3_IN, SIOD_SIUBILR, PTF2_OUT, PTF2_IN_PD, PTF2_IN, SIORXD_SIUBISLD, 0, PTF1_IN_PD, PTF1_IN, SIOTXD_SIUBOSLD, PTF0_OUT, 0, 0 } }, { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, AUDSYNC, PTG4_OUT, 0, 0, AUDATA3, PTG3_OUT, 0, 0, AUDATA2, PTG2_OUT, 0, 0, AUDATA1, PTG1_OUT, 0, 0, AUDATA0, PTG0_OUT, 0, 0 } }, { PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) { LCDVCPWC_LCDVCPWC2, PTH7_OUT, 0, 0, LCDVSYN2_DACK, PTH6_OUT, PTH6_IN_PD, PTH6_IN, LCDVSYN, PTH5_OUT, PTH5_IN_PD, PTH5_IN, LCDDISP_LCDRS, PTH4_OUT, 0, 0, LCDHSYN_LCDCS, PTH3_OUT, 0, 0, LCDDON_LCDDON2, PTH2_OUT, 0, 0, LCDD17_DV_HSYNC, PTH1_OUT, PTH1_IN_PD, PTH1_IN, LCDD16_DV_VSYNC, PTH0_OUT, PTH0_IN_PD, PTH0_IN } }, { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) { STATUS0, PTJ7_OUT, 0, 0, 0, PTJ6_OUT, 0, 0, PDSTATUS, PTJ5_OUT, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, IRQ1, PTJ1_OUT, PTJ1_IN_PU, PTJ1_IN, IRQ0, PTJ0_OUT, PTJ0_IN_PU, PTJ0_IN } }, { PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) { 0, 0, 0, 0, SIUAILR_SIOF1_SS2, PTK6_OUT, PTK6_IN_PD, PTK6_IN, SIUAIBT_SIOF1_SS1, PTK5_OUT, PTK5_IN_PD, PTK5_IN, SIUAOLR_SIOF1_SYNC, PTK4_OUT, PTK4_IN_PD, PTK4_IN, SIUAOBT_SIOF1_SCK, PTK3_OUT, PTK3_IN_PD, PTK3_IN, SIUAISLD_SIOF1_RXD, 0, PTK2_IN_PD, PTK2_IN, SIUAOSLD_SIOF1_TXD, PTK1_OUT, 0, 0, PTK0, PTK0_OUT, PTK0_IN_PD, PTK0_IN } }, { PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) { LCDD15_DV_D15, PTL7_OUT, PTL7_IN_PD, PTL7_IN, LCDD14_DV_D14, PTL6_OUT, PTL6_IN_PD, PTL6_IN, LCDD13_DV_D13, PTL5_OUT, PTL5_IN_PD, PTL5_IN, LCDD12_DV_D12, PTL4_OUT, PTL4_IN_PD, PTL4_IN, LCDD11_DV_D11, PTL3_OUT, PTL3_IN_PD, PTL3_IN, LCDD10_DV_D10, PTL2_OUT, PTL2_IN_PD, PTL2_IN, LCDD9_DV_D9, PTL1_OUT, PTL1_IN_PD, PTL1_IN, LCDD8_DV_D8, PTL0_OUT, PTL0_IN_PD, PTL0_IN } }, { PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) { LCDD7_DV_D7, PTM7_OUT, PTM7_IN_PD, PTM7_IN, LCDD6_DV_D6, PTM6_OUT, PTM6_IN_PD, PTM6_IN, LCDD5_DV_D5, PTM5_OUT, PTM5_IN_PD, PTM5_IN, LCDD4_DV_D4, PTM4_OUT, PTM4_IN_PD, PTM4_IN, LCDD3_DV_D3, PTM3_OUT, PTM3_IN_PD, PTM3_IN, LCDD2_DV_D2, PTM2_OUT, PTM2_IN_PD, PTM2_IN, LCDD1_DV_D1, PTM1_OUT, PTM1_IN_PD, PTM1_IN, LCDD0_DV_D0, PTM0_OUT, PTM0_IN_PD, PTM0_IN } }, { PINMUX_CFG_REG("PNCR", 0xa4050118, 16, 2) { HPD63, PTN7_OUT, 0, PTN7_IN, HPD62, PTN6_OUT, 0, PTN6_IN, HPD61, PTN5_OUT, 0, PTN5_IN, HPD60, PTN4_OUT, 0, PTN4_IN, HPD59, PTN3_OUT, 0, PTN3_IN, HPD58, PTN2_OUT, 0, PTN2_IN, HPD57, PTN1_OUT, 0, PTN1_IN, HPD56, PTN0_OUT, 0, PTN0_IN } }, { PINMUX_CFG_REG("PQCR", 0xa405011a, 16, 2) { 0, 0, 0, 0, SIOF0_SS2_SIM_RST, PTQ6_OUT, 0, 0, SIOF0_SS1_TS_SPSYNC, PTQ5_OUT, PTQ5_IN_PD, PTQ5_IN, SIOF0_SYNC_TS_SDEN, PTQ4_OUT, PTQ4_IN_PD, PTQ4_IN, SIOF0_SCK_TS_SCK, PTQ3_OUT, PTQ3_IN_PD, PTQ3_IN, PTQ2, 0, PTQ2_IN_PD, PTQ2_IN, PTQ1, PTQ1_OUT, 0, 0, PTQ0, PTQ0_OUT, PTQ0_IN_PU, PTQ0_IN } }, { PINMUX_CFG_REG("PRCR", 0xa405011c, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, LCDRD, PTR4_OUT, 0, 0, CS6B_CE1B_LCDCS2, PTR3_OUT, 0, 0, WAIT, 0, PTR2_IN_PU, PTR2_IN, LCDDCK_LCDWR, PTR1_OUT, 0, 0, LCDVEPWC_LCDVEPWC2, PTR0_OUT, 0, 0 } }, { PINMUX_CFG_REG("PSCR", 0xa405011e, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
SCIF0_CTS_SIUAISPD, 0, PTS4_IN_PD, PTS4_IN, SCIF0_RTS_SIUAOSPD, PTS3_OUT, 0, 0, SCIF0_SCK_TPUTO, PTS2_OUT, PTS2_IN_PD, PTS2_IN, SCIF0_RXD, 0, PTS1_IN_PD, PTS1_IN, SCIF0_TXD, PTS0_OUT, 0, 0 } }, { PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, FOE_VIO_VD2, PTT4_OUT, PTT4_IN_PD, PTT4_IN, FWE, PTT3_OUT, PTT3_IN_PD, PTT3_IN, FSC, PTT2_OUT, PTT2_IN_PD, PTT2_IN, DREQ0, 0, PTT1_IN_PD, PTT1_IN, FCDE, PTT0_OUT, 0, 0 } }, { PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NAF2_VIO_D10, PTU4_OUT, PTU4_IN_PD, PTU4_IN, NAF1_VIO_D9, PTU3_OUT, PTU3_IN_PD, PTU3_IN, NAF0_VIO_D8, PTU2_OUT, PTU2_IN_PD, PTU2_IN, FRB_VIO_CLK2, 0, PTU1_IN_PD, PTU1_IN, FCE_VIO_HD2, PTU0_OUT, PTU0_IN_PD, PTU0_IN } }, { PINMUX_CFG_REG("PVCR", 0xa4050144, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NAF7_VIO_D15, PTV4_OUT, PTV4_IN_PD, PTV4_IN, NAF6_VIO_D14, PTV3_OUT, PTV3_IN_PD, PTV3_IN, NAF5_VIO_D13, PTV2_OUT, PTV2_IN_PD, PTV2_IN, NAF4_VIO_D12, PTV1_OUT, PTV1_IN_PD, PTV1_IN, NAF3_VIO_D11, PTV0_OUT, PTV0_IN_PD, PTV0_IN } }, { PINMUX_CFG_REG("PWCR", 0xa4050146, 16, 2) { 0, 0, 0, 0, VIO_FLD_SCIF2_CTS, 0, PTW6_IN_PD, PTW6_IN, VIO_CKO_SCIF2_RTS, PTW5_OUT, 0, 0, VIO_STEX_SCIF2_SCK, PTW4_OUT, PTW4_IN_PD, PTW4_IN, VIO_STEM_SCIF2_TXD, PTW3_OUT, PTW3_IN_PD, PTW3_IN, VIO_HD_SCIF2_RXD, PTW2_OUT, PTW2_IN_PD, PTW2_IN, VIO_VD_SCIF1_CTS, PTW1_OUT, PTW1_IN_PD, PTW1_IN, VIO_CLK_SCIF1_RTS, PTW0_OUT, PTW0_IN_PD, PTW0_IN } }, { PINMUX_CFG_REG("PXCR", 0xa4050148, 16, 2) { 0, 0, 0, 0, CS6A_CE2B, PTX6_OUT, PTX6_IN_PU, PTX6_IN, LCDD23, PTX5_OUT, PTX5_IN_PD, PTX5_IN, LCDD22, PTX4_OUT, PTX4_IN_PD, PTX4_IN, LCDD21, PTX3_OUT, PTX3_IN_PD, PTX3_IN, LCDD20, PTX2_OUT, PTX2_IN_PD, PTX2_IN, LCDD19_DV_CLKI, PTX1_OUT, PTX1_IN_PD, PTX1_IN, LCDD18_DV_CLK, PTX0_OUT, PTX0_IN_PD, PTX0_IN } }, { PINMUX_CFG_REG("PYCR", 0xa405014a, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, KEYOUT5_IN5, PTY5_OUT, PTY5_IN_PU, PTY5_IN, KEYOUT4_IN6, PTY4_OUT, PTY4_IN_PU, PTY4_IN, KEYOUT3, PTY3_OUT, PTY3_IN_PU, PTY3_IN, KEYOUT2, PTY2_OUT, PTY2_IN_PU, PTY2_IN, KEYOUT1, PTY1_OUT, 0, 0, KEYOUT0, PTY0_OUT, PTY0_IN_PU, PTY0_IN } }, { PINMUX_CFG_REG("PZCR", 0xa405014c, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, KEYIN4_IRQ7, 0, PTZ5_IN_PU, PTZ5_IN, KEYIN3, 0, PTZ4_IN_PU, PTZ4_IN, KEYIN2, 0, PTZ3_IN_PU, PTZ3_IN, KEYIN1, 0, PTZ2_IN_PU, PTZ2_IN, KEYIN0_IRQ6, 0, PTZ1_IN_PU, PTZ1_IN, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 1) { PSA15_KEYIN0, PSA15_IRQ6, PSA14_KEYIN4, PSA14_IRQ7, 0, 0, 0, 0, 0, 0, 0, 0, PSA9_IRQ4, PSA9_BS, 0, 0, 0, 0, 0, 0, 0, 0, PSA4_IRQ2, PSA4_SDHID2, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PSELB", 0xa4050150, 16, 1) { PSB15_SIOTXD, PSB15_SIUBOSLD, PSB14_SIORXD, PSB14_SIUBISLD, PSB13_SIOD, PSB13_SIUBILR, PSB12_SIOSTRB0, PSB12_SIUBIBT, PSB11_SIOSTRB1, PSB11_SIUBOLR, PSB10_SIOSCK, PSB10_SIUBOBT, PSB9_SIOMCK, PSB9_SIUMCKB, PSB8_SIOF0_MCK, PSB8_IRQ3, PSB7_SIOF0_TXD, PSB7_IRDA_OUT, PSB6_SIOF0_RXD, PSB6_IRDA_IN, PSB5_SIOF0_SCK, PSB5_TS_SCK, PSB4_SIOF0_SYNC, PSB4_TS_SDEN, PSB3_SIOF0_SS1, PSB3_TS_SPSYNC, PSB2_SIOF0_SS2, PSB2_SIM_RST, PSB1_SIUMCKA, PSB1_SIOF1_MCK, PSB0_SIUAOSLD, PSB0_SIOF1_TXD } }, { PINMUX_CFG_REG("PSELC", 0xa4050152, 16, 1) { PSC15_SIUAISLD, PSC15_SIOF1_RXD, PSC14_SIUAOBT, PSC14_SIOF1_SCK, PSC13_SIUAOLR, PSC13_SIOF1_SYNC, PSC12_SIUAIBT, PSC12_SIOF1_SS1, PSC11_SIUAILR, PSC11_SIOF1_SS2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PSC0_NAF, PSC0_VIO } }, { PINMUX_CFG_REG("PSELD", 0xa4050154, 16, 1) { 0, 0, 0, 0, PSD13_VIO, PSD13_SCIF2, PSD12_VIO, PSD12_SCIF1, PSD11_VIO, PSD11_SCIF1, PSD10_VIO_D0, 
PSD10_LCDLCLK, PSD9_SIOMCK_SIUMCKB, PSD9_SIUFCKB, PSD8_SCIF0_SCK, PSD8_TPUTO, PSD7_SCIF0_RTS, PSD7_SIUAOSPD, PSD6_SCIF0_CTS, PSD6_SIUAISPD, PSD5_CS6B_CE1B, PSD5_LCDCS2, 0, 0, PSD3_LCDVEPWC_LCDVCPWC, PSD3_LCDVEPWC2_LCDVCPWC2, PSD2_LCDDON, PSD2_LCDDON2, 0, 0, PSD0_LCDD19_LCDD0, PSD0_DV } }, { PINMUX_CFG_REG("PSELE", 0xa4050156, 16, 1) { PSE15_SIOF0_MCK_IRQ3, PSE15_SIM_D, PSE14_SIOF0_TXD_IRDA_OUT, PSE14_SIM_CLK, PSE13_SIOF0_RXD_IRDA_IN, PSE13_TS_SDAT, PSE12_LCDVSYN2, PSE12_DACK, PSE11_SIUMCKA_SIOF1_MCK, PSE11_SIUFCKA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PSE3_FLCTL, PSE3_VIO, PSE2_NAF2, PSE2_VIO_D10, PSE1_NAF1, PSE1_VIO_D9, PSE0_NAF0, PSE0_VIO_D8 } }, { PINMUX_CFG_REG("HIZCRA", 0xa4050158, 16, 1) { 0, 0, HIZA14_KEYSC, HIZA14_HIZ, 0, 0, 0, 0, 0, 0, HIZA10_NAF, HIZA10_HIZ, HIZA9_VIO, HIZA9_HIZ, HIZA8_LCDC, HIZA8_HIZ, HIZA7_LCDC, HIZA7_HIZ, HIZA6_LCDC, HIZA6_HIZ, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("HIZCRB", 0xa405015a, 16, 1) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, HIZB4_SIUA, HIZB4_HIZ, 0, 0, 0, 0, HIZB1_VIO, HIZB1_HIZ, HIZB0_VIO, HIZB0_HIZ } }, { PINMUX_CFG_REG("HIZCRC", 0xa405015c, 16, 1) { HIZC15_IRQ7, HIZC15_HIZ, HIZC14_IRQ6, HIZC14_HIZ, HIZC13_IRQ5, HIZC13_HIZ, HIZC12_IRQ4, HIZC12_HIZ, HIZC11_IRQ3, HIZC11_HIZ, HIZC10_IRQ2, HIZC10_HIZ, HIZC9_IRQ1, HIZC9_HIZ, HIZC8_IRQ0, HIZC8_HIZ, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("MSELCRB", 0xa4050182, 16, 1) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MSELB9_VIO, MSELB9_VIO2, MSELB8_RGB, MSELB8_SYS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, {} }; static struct pinmux_data_reg pinmux_data_regs[] = { { PINMUX_DATA_REG("PADR", 0xa4050120, 8) { PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA, PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA } }, { PINMUX_DATA_REG("PBDR", 0xa4050122, 8) { PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA, PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA } }, { PINMUX_DATA_REG("PCDR", 0xa4050124, 8) { PTC7_DATA, 0, PTC5_DATA, PTC4_DATA, PTC3_DATA, PTC2_DATA, 0, PTC0_DATA } }, { PINMUX_DATA_REG("PDDR", 0xa4050126, 8) { PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA, PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA } }, { PINMUX_DATA_REG("PEDR", 0xa4050128, 8) { PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA, 0, 0, PTE1_DATA, PTE0_DATA } }, { PINMUX_DATA_REG("PFDR", 0xa405012a, 8) { 0, PTF6_DATA, PTF5_DATA, PTF4_DATA, PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA } }, { PINMUX_DATA_REG("PGDR", 0xa405012c, 8) { 0, 0, 0, PTG4_DATA, PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA } }, { PINMUX_DATA_REG("PHDR", 0xa405012e, 8) { PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA, PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA } }, { PINMUX_DATA_REG("PJDR", 0xa4050130, 8) { PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, 0, 0, 0, PTJ1_DATA, PTJ0_DATA } }, { PINMUX_DATA_REG("PKDR", 0xa4050132, 8) { 0, PTK6_DATA, PTK5_DATA, PTK4_DATA, PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA } }, { PINMUX_DATA_REG("PLDR", 0xa4050134, 8) { PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA, PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA } }, { PINMUX_DATA_REG("PMDR", 0xa4050136, 8) { PTM7_DATA, PTM6_DATA, PTM5_DATA, PTM4_DATA, PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA } }, { PINMUX_DATA_REG("PNDR", 0xa4050138, 8) { PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA, PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA } }, { PINMUX_DATA_REG("PQDR", 0xa405013a, 8) { 0, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA, PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA } }, { PINMUX_DATA_REG("PRDR", 0xa405013c, 8) { 0, 0, 0, PTR4_DATA, PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA } }, { 
PINMUX_DATA_REG("PSDR", 0xa405013e, 8) { 0, 0, 0, PTS4_DATA, PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA } }, { PINMUX_DATA_REG("PTDR", 0xa4050160, 8) { 0, 0, 0, PTT4_DATA, PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA } }, { PINMUX_DATA_REG("PUDR", 0xa4050162, 8) { 0, 0, 0, PTU4_DATA, PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA } }, { PINMUX_DATA_REG("PVDR", 0xa4050164, 8) { 0, 0, 0, PTV4_DATA, PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA } }, { PINMUX_DATA_REG("PWDR", 0xa4050166, 8) { 0, PTW6_DATA, PTW5_DATA, PTW4_DATA, PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA } }, { PINMUX_DATA_REG("PXDR", 0xa4050168, 8) { 0, PTX6_DATA, PTX5_DATA, PTX4_DATA, PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA } }, { PINMUX_DATA_REG("PYDR", 0xa405016a, 8) { 0, PTY6_DATA, PTY5_DATA, PTY4_DATA, PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA } }, { PINMUX_DATA_REG("PZDR", 0xa405016c, 8) { 0, 0, PTZ5_DATA, PTZ4_DATA, PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA } }, { }, }; static struct pinmux_info sh7722_pinmux_info = { .name = "sh7722_pfc", .reserved_id = PINMUX_RESERVED, .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END }, .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END }, .input_pd = { PINMUX_INPUT_PULLDOWN_BEGIN, PINMUX_INPUT_PULLDOWN_END }, .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END }, .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END }, .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END }, .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END }, .first_gpio = GPIO_PTA7, .last_gpio = GPIO_FN_KEYOUT5_IN5, .gpios = pinmux_gpios, .cfg_regs = pinmux_config_regs, .data_regs = pinmux_data_regs, .gpio_data = pinmux_data, .gpio_data_size = ARRAY_SIZE(pinmux_data), }; static int __init plat_pinmux_setup(void) { return register_pinmux(&sh7722_pinmux_info); } arch_initcall(plat_pinmux_setup);
gpl-2.0
eoghan2t9/primou-kernel-IRONMAN
drivers/ide/ide-timings.c
13852
6661
/* * Copyright (c) 1999-2001 Vojtech Pavlik * Copyright (c) 2007-2008 Bartlomiej Zolnierkiewicz * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/kernel.h> #include <linux/ide.h> #include <linux/module.h> /* * PIO 0-6, SWDMA 0-2, MWDMA 0-4 and UDMA 0-6 timings (in nanoseconds). * These were taken from the ATA/ATAPI-6 standard, rev 0a, except for * PIO 5-6 and MWDMA 3-4, which are CompactFlash extensions, and UDMA6, * which is currently supported only by Maxtor drives. */ static struct ide_timing ide_timing[] = { { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 }, { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 }, { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 }, { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 }, { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 }, { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 }, { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 }, { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 }, { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 }, { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 }, { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 }, { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 }, { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 }, { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 }, { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 }, { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 }, { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 }, { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 }, { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 }, { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 }, { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 }, { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 }, { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, { 0xff } }; struct ide_timing *ide_timing_find_mode(u8 speed) { struct ide_timing *t; for (t = ide_timing; t->mode != speed; t++) if (t->mode == 0xff) return NULL; return t; } EXPORT_SYMBOL_GPL(ide_timing_find_mode); u16 ide_pio_cycle_time(ide_drive_t *drive, u8 pio) { u16 *id = drive->id; struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); u16 cycle = 0; if (id[ATA_ID_FIELD_VALID] & 2) { if (ata_id_has_iordy(drive->id)) cycle = id[ATA_ID_EIDE_PIO_IORDY]; else cycle = id[ATA_ID_EIDE_PIO]; /* conservative "downgrade" for all pre-ATA2 drives */ if (pio < 3 && cycle < t->cycle) cycle = 0; /* use standard timing */ /* Use the standard timing for the CF specific modes too */ if (pio > 4 && ata_id_is_cfa(id)) cycle = 0; } return cycle ? cycle : t->cycle; } EXPORT_SYMBOL_GPL(ide_pio_cycle_time); #define ENOUGH(v, unit) (((v) - 1) / (unit) + 1) #define EZ(v, unit) ((v) ? 
ENOUGH(v, unit) : 0) static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q, int T, int UT) { q->setup = EZ(t->setup * 1000, T); q->act8b = EZ(t->act8b * 1000, T); q->rec8b = EZ(t->rec8b * 1000, T); q->cyc8b = EZ(t->cyc8b * 1000, T); q->active = EZ(t->active * 1000, T); q->recover = EZ(t->recover * 1000, T); q->cycle = EZ(t->cycle * 1000, T); q->udma = EZ(t->udma * 1000, UT); } void ide_timing_merge(struct ide_timing *a, struct ide_timing *b, struct ide_timing *m, unsigned int what) { if (what & IDE_TIMING_SETUP) m->setup = max(a->setup, b->setup); if (what & IDE_TIMING_ACT8B) m->act8b = max(a->act8b, b->act8b); if (what & IDE_TIMING_REC8B) m->rec8b = max(a->rec8b, b->rec8b); if (what & IDE_TIMING_CYC8B) m->cyc8b = max(a->cyc8b, b->cyc8b); if (what & IDE_TIMING_ACTIVE) m->active = max(a->active, b->active); if (what & IDE_TIMING_RECOVER) m->recover = max(a->recover, b->recover); if (what & IDE_TIMING_CYCLE) m->cycle = max(a->cycle, b->cycle); if (what & IDE_TIMING_UDMA) m->udma = max(a->udma, b->udma); } EXPORT_SYMBOL_GPL(ide_timing_merge); int ide_timing_compute(ide_drive_t *drive, u8 speed, struct ide_timing *t, int T, int UT) { u16 *id = drive->id; struct ide_timing *s, p; /* * Find the mode. */ s = ide_timing_find_mode(speed); if (s == NULL) return -EINVAL; /* * Copy the timing from the table. */ *t = *s; /* * If the drive is an EIDE drive, it can tell us it needs extended * PIO/MWDMA cycle timing. */ if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ memset(&p, 0, sizeof(p)); if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) { if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO]; else if ((speed <= XFER_PIO_4) || (speed == XFER_PIO_5 && !ata_id_is_cfa(id))) p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY]; } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) p.cycle = id[ATA_ID_EIDE_DMA_MIN]; ide_timing_merge(&p, t, t, IDE_TIMING_CYCLE | IDE_TIMING_CYC8B); } /* * Convert the timing to bus clock counts. */ ide_timing_quantize(t, t, T, UT); /* * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, * S.M.A.R.T and some other commands. We have to ensure that the * DMA cycle timing is slower/equal than the current PIO timing. */ if (speed >= XFER_SW_DMA_0) { ide_timing_compute(drive, drive->pio_mode, &p, T, UT); ide_timing_merge(&p, t, t, IDE_TIMING_ALL); } /* * Lengthen active & recovery time so that cycle time is correct. */ if (t->act8b + t->rec8b < t->cyc8b) { t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2; t->rec8b = t->cyc8b - t->act8b; } if (t->active + t->recover < t->cycle) { t->active += (t->cycle - (t->active + t->recover)) / 2; t->recover = t->cycle - t->active; } return 0; } EXPORT_SYMBOL_GPL(ide_timing_compute);
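/*
 * Editor's addendum -- an illustrative worked example, not part of the
 * original file.  ENOUGH() is a ceiling division and EZ() additionally
 * keeps a zero field zero, so ide_timing_quantize() rounds each
 * nanosecond timing *up* to a whole number of bus clocks and never
 * clocks the interface faster than the mode allows.  The demo below
 * assumes the callers' convention that T and UT are bus clock periods
 * in picoseconds; the IDE_TIMINGS_DEMO guard is hypothetical.
 */
#ifdef IDE_TIMINGS_DEMO
static void ide_timings_quantize_demo(void)
{
	struct ide_timing q, *t = ide_timing_find_mode(XFER_PIO_0);

	/* 33 MHz bus: T = 30000 ps.  UT is unused for a PIO mode. */
	ide_timing_quantize(t, &q, 30000, 30000);

	/*
	 * PIO 0 has cycle = 600 ns, so q.cycle = ENOUGH(600000, 30000)
	 * = (600000 - 1) / 30000 + 1 = 20 clocks, while the udma field
	 * (0 for PIO modes) stays 0 thanks to EZ().
	 */
}
#endif /* IDE_TIMINGS_DEMO */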
gpl-2.0
jiangliu/linux
drivers/ide/ide-timings.c
13852
6661
/* * Copyright (c) 1999-2001 Vojtech Pavlik * Copyright (c) 2007-2008 Bartlomiej Zolnierkiewicz * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/kernel.h> #include <linux/ide.h> #include <linux/module.h> /* * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). * These were taken from ATA/ATAPI-6 standard, rev 0a, except * for PIO 5, which is a nonstandard extension and UDMA6, which * is currently supported only by Maxtor drives. */ static struct ide_timing ide_timing[] = { { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 }, { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 }, { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 }, { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 }, { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 }, { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 }, { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 }, { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 }, { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 }, { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 }, { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 }, { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 }, { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 }, { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 }, { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 }, { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 }, { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 }, { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 }, { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 }, { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 }, { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 }, { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 }, { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, { 0xff } }; struct ide_timing *ide_timing_find_mode(u8 speed) { struct ide_timing *t; for (t = ide_timing; t->mode != speed; t++) if (t->mode == 0xff) return NULL; return t; } EXPORT_SYMBOL_GPL(ide_timing_find_mode); u16 ide_pio_cycle_time(ide_drive_t *drive, u8 pio) { u16 *id = drive->id; struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); u16 cycle = 0; if (id[ATA_ID_FIELD_VALID] & 2) { if (ata_id_has_iordy(drive->id)) cycle = id[ATA_ID_EIDE_PIO_IORDY]; else cycle = id[ATA_ID_EIDE_PIO]; /* conservative "downgrade" for all pre-ATA2 drives */ if (pio < 3 && cycle < t->cycle) cycle = 0; /* use standard timing */ /* Use the standard timing for the CF specific modes too */ if (pio > 4 && ata_id_is_cfa(id)) cycle = 0; } return cycle ? cycle : t->cycle; } EXPORT_SYMBOL_GPL(ide_pio_cycle_time); #define ENOUGH(v, unit) (((v) - 1) / (unit) + 1) #define EZ(v, unit) ((v) ? 
ENOUGH(v, unit) : 0) static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q, int T, int UT) { q->setup = EZ(t->setup * 1000, T); q->act8b = EZ(t->act8b * 1000, T); q->rec8b = EZ(t->rec8b * 1000, T); q->cyc8b = EZ(t->cyc8b * 1000, T); q->active = EZ(t->active * 1000, T); q->recover = EZ(t->recover * 1000, T); q->cycle = EZ(t->cycle * 1000, T); q->udma = EZ(t->udma * 1000, UT); } void ide_timing_merge(struct ide_timing *a, struct ide_timing *b, struct ide_timing *m, unsigned int what) { if (what & IDE_TIMING_SETUP) m->setup = max(a->setup, b->setup); if (what & IDE_TIMING_ACT8B) m->act8b = max(a->act8b, b->act8b); if (what & IDE_TIMING_REC8B) m->rec8b = max(a->rec8b, b->rec8b); if (what & IDE_TIMING_CYC8B) m->cyc8b = max(a->cyc8b, b->cyc8b); if (what & IDE_TIMING_ACTIVE) m->active = max(a->active, b->active); if (what & IDE_TIMING_RECOVER) m->recover = max(a->recover, b->recover); if (what & IDE_TIMING_CYCLE) m->cycle = max(a->cycle, b->cycle); if (what & IDE_TIMING_UDMA) m->udma = max(a->udma, b->udma); } EXPORT_SYMBOL_GPL(ide_timing_merge); int ide_timing_compute(ide_drive_t *drive, u8 speed, struct ide_timing *t, int T, int UT) { u16 *id = drive->id; struct ide_timing *s, p; /* * Find the mode. */ s = ide_timing_find_mode(speed); if (s == NULL) return -EINVAL; /* * Copy the timing from the table. */ *t = *s; /* * If the drive is an EIDE drive, it can tell us it needs extended * PIO/MWDMA cycle timing. */ if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ memset(&p, 0, sizeof(p)); if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) { if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO]; else if ((speed <= XFER_PIO_4) || (speed == XFER_PIO_5 && !ata_id_is_cfa(id))) p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY]; } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) p.cycle = id[ATA_ID_EIDE_DMA_MIN]; ide_timing_merge(&p, t, t, IDE_TIMING_CYCLE | IDE_TIMING_CYC8B); } /* * Convert the timing to bus clock counts. */ ide_timing_quantize(t, t, T, UT); /* * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, * S.M.A.R.T. and some other commands. We have to ensure that the * DMA cycle timing is slower than or equal to the current PIO timing. */ if (speed >= XFER_SW_DMA_0) { ide_timing_compute(drive, drive->pio_mode, &p, T, UT); ide_timing_merge(&p, t, t, IDE_TIMING_ALL); } /* * Lengthen active & recovery time so that cycle time is correct. */ if (t->act8b + t->rec8b < t->cyc8b) { t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2; t->rec8b = t->cyc8b - t->act8b; } if (t->active + t->recover < t->cycle) { t->active += (t->cycle - (t->active + t->recover)) / 2; t->recover = t->cycle - t->active; } return 0; } EXPORT_SYMBOL_GPL(ide_timing_compute);
gpl-2.0
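Two steps of ide_timing_compute() above are easy to verify by hand: the EZ()/ENOUGH() macros round every nanosecond figure up to whole bus-clock ticks (never down, so quantization can only make a timing safer, and a zero stays zero to mean "unspecified"), and the final fixup stretches the active and recover counts until they fill the required cycle. A small self-contained sketch follows; the 30-unit clock period and the 65/25/150 sample timings are illustrative values of our own, not figures for any particular chipset:

#include <stdio.h>

/* Guarded ceiling division, as in ide-timings.c: 0 means "unspecified". */
#define ENOUGH(v, unit) (((v) - 1) / (unit) + 1)
#define EZ(v, unit)     ((v) ? ENOUGH(v, unit) : 0)

int main(void)
{
        int T = 30;                /* illustrative clock period */
        int active  = EZ(65, T);   /* ceil(65/30)  = 3 clocks */
        int recover = EZ(25, T);   /* ceil(25/30)  = 1 clock  */
        int cycle   = EZ(150, T);  /* ceil(150/30) = 5 clocks */

        /* Rounding left active + recover (4) short of cycle (5); split
         * the deficit as ide_timing_compute() does. Integer division
         * gives the odd leftover clock to recover. */
        if (active + recover < cycle) {
                active += (cycle - (active + recover)) / 2;
                recover = cycle - active;
        }
        /* Prints "active=3 recover=2 cycle=5". */
        printf("active=%d recover=%d cycle=%d\n", active, recover, cycle);
        return 0;
}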
akkufix/sola_jb_kernel
drivers/base/power/main.c
29
30069
/* * drivers/base/power/main.c - Where the driver meets power management. * * Copyright (c) 2003 Patrick Mochel * Copyright (c) 2003 Open Source Development Lab * Copyright (c) 2012 Sony Mobile Communications AB * * This file is released under the GPLv2 * * * The driver model core calls device_pm_add() when a device is registered. * This will initialize the embedded device_pm_info object in the device * and add it to the list of power-controlled devices. sysfs entries for * controlling device power management will also be added. * * A separate list is used for keeping track of power info, because the power * domain dependencies may differ from the ancestral dependencies that the * subsystem list maintains. */ #include <linux/device.h> #include <linux/kallsyms.h> #include <linux/mutex.h> #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/resume-trace.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/async.h> #include <linux/suspend.h> #include <linux/timer.h> #include "../base.h" #include "power.h" /* * The entries in the dpm_list list are in a depth first order, simply * because children are guaranteed to be discovered after parents, and * are inserted at the back of the list on discovery. * * Since device_pm_add() may be called with a device lock held, * we must never try to acquire a device lock while holding * dpm_list_mutex. */ LIST_HEAD(dpm_list); LIST_HEAD(dpm_prepared_list); LIST_HEAD(dpm_suspended_list); LIST_HEAD(dpm_noirq_list); static DEFINE_MUTEX(dpm_list_mtx); static pm_message_t pm_transition; static void dpm_drv_timeout(unsigned long data); static void __dpm_drv_timeout(unsigned long data); static void (*dpm_drv_timeout_fun)(unsigned long data) = __dpm_drv_timeout; static int async_error; /** * device_pm_init - Initialize the PM-related part of a device object. * @dev: Device object being initialized. */ void device_pm_init(struct device *dev) { dev->power.is_prepared = false; dev->power.is_suspended = false; init_completion(&dev->power.completion); complete_all(&dev->power.completion); dev->power.wakeup = NULL; spin_lock_init(&dev->power.lock); pm_runtime_init(dev); INIT_LIST_HEAD(&dev->power.entry); } /** * device_pm_lock - Lock the list of active devices used by the PM core. */ void device_pm_lock(void) { mutex_lock(&dpm_list_mtx); } /** * device_pm_unlock - Unlock the list of active devices used by the PM core. */ void device_pm_unlock(void) { mutex_unlock(&dpm_list_mtx); } /** * device_pm_add - Add a device to the PM core's list of active devices. * @dev: Device to add to the list. */ void device_pm_add(struct device *dev) { pr_debug("PM: Adding info for %s:%s\n", dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); mutex_lock(&dpm_list_mtx); if (dev->parent && dev->parent->power.is_prepared) dev_warn(dev, "parent %s should not be sleeping\n", dev_name(dev->parent)); list_add_tail(&dev->power.entry, &dpm_list); mutex_unlock(&dpm_list_mtx); } /** * device_pm_remove - Remove a device from the PM core's list of active devices. * @dev: Device to be removed from the list. */ void device_pm_remove(struct device *dev) { pr_debug("PM: Removing info for %s:%s\n", dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); complete_all(&dev->power.completion); mutex_lock(&dpm_list_mtx); list_del_init(&dev->power.entry); mutex_unlock(&dpm_list_mtx); device_wakeup_disable(dev); pm_runtime_remove(dev); } /** * device_pm_move_before - Move device in the PM core's list of active devices. * @deva: Device to move in dpm_list. 
* @devb: Device @deva should come before. */ void device_pm_move_before(struct device *deva, struct device *devb) { pr_debug("PM: Moving %s:%s before %s:%s\n", deva->bus ? deva->bus->name : "No Bus", dev_name(deva), devb->bus ? devb->bus->name : "No Bus", dev_name(devb)); /* Delete deva from dpm_list and reinsert before devb. */ list_move_tail(&deva->power.entry, &devb->power.entry); } /** * device_pm_move_after - Move device in the PM core's list of active devices. * @deva: Device to move in dpm_list. * @devb: Device @deva should come after. */ void device_pm_move_after(struct device *deva, struct device *devb) { pr_debug("PM: Moving %s:%s after %s:%s\n", deva->bus ? deva->bus->name : "No Bus", dev_name(deva), devb->bus ? devb->bus->name : "No Bus", dev_name(devb)); /* Delete deva from dpm_list and reinsert after devb. */ list_move(&deva->power.entry, &devb->power.entry); } /** * device_pm_move_last - Move device to end of the PM core's list of devices. * @dev: Device to move in dpm_list. */ void device_pm_move_last(struct device *dev) { pr_debug("PM: Moving %s:%s to end of list\n", dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); list_move_tail(&dev->power.entry, &dpm_list); } static ktime_t initcall_debug_start(struct device *dev) { ktime_t calltime = ktime_set(0, 0); if (initcall_debug) { pr_info("calling %s+ @ %i\n", dev_name(dev), task_pid_nr(current)); calltime = ktime_get(); } return calltime; } static void initcall_debug_report(struct device *dev, ktime_t calltime, int error) { ktime_t delta, rettime; if (initcall_debug) { rettime = ktime_get(); delta = ktime_sub(rettime, calltime); pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev), error, (unsigned long long)ktime_to_ns(delta) >> 10); } } /** * dpm_wait - Wait for a PM operation to complete. * @dev: Device to wait for. * @async: If unset, wait only if the device's power.async_suspend flag is set. */ static void dpm_wait(struct device *dev, bool async) { if (!dev) return; if (async || (pm_async_enabled && dev->power.async_suspend)) wait_for_completion(&dev->power.completion); } static int dpm_wait_fn(struct device *dev, void *async_ptr) { dpm_wait(dev, *((bool *)async_ptr)); return 0; } static void dpm_wait_for_children(struct device *dev, bool async) { device_for_each_child(dev, &async, dpm_wait_fn); } /** * pm_op - Execute the PM operation appropriate for given PM event. * @dev: Device to handle. * @ops: PM operations to choose from. * @state: PM transition of the system being carried out. 
*/ static int pm_op(struct device *dev, const struct dev_pm_ops *ops, pm_message_t state) { int error = 0; ktime_t calltime; calltime = initcall_debug_start(dev); switch (state.event) { #ifdef CONFIG_SUSPEND case PM_EVENT_SUSPEND: if (ops->suspend) { error = ops->suspend(dev); suspend_report_result(ops->suspend, error); } break; case PM_EVENT_RESUME: if (ops->resume) { error = ops->resume(dev); suspend_report_result(ops->resume, error); } break; #endif /* CONFIG_SUSPEND */ #ifdef CONFIG_HIBERNATE_CALLBACKS case PM_EVENT_FREEZE: case PM_EVENT_QUIESCE: if (ops->freeze) { error = ops->freeze(dev); suspend_report_result(ops->freeze, error); } break; case PM_EVENT_HIBERNATE: if (ops->poweroff) { error = ops->poweroff(dev); suspend_report_result(ops->poweroff, error); } break; case PM_EVENT_THAW: case PM_EVENT_RECOVER: if (ops->thaw) { error = ops->thaw(dev); suspend_report_result(ops->thaw, error); } break; case PM_EVENT_RESTORE: if (ops->restore) { error = ops->restore(dev); suspend_report_result(ops->restore, error); } break; #endif /* CONFIG_HIBERNATE_CALLBACKS */ default: error = -EINVAL; } initcall_debug_report(dev, calltime, error); return error; } /** * pm_noirq_op - Execute the PM operation appropriate for given PM event. * @dev: Device to handle. * @ops: PM operations to choose from. * @state: PM transition of the system being carried out. * * The driver of @dev will not receive interrupts while this function is being * executed. */ static int pm_noirq_op(struct device *dev, const struct dev_pm_ops *ops, pm_message_t state) { int error = 0; ktime_t calltime = ktime_set(0, 0), delta, rettime; if (initcall_debug) { pr_info("calling %s+ @ %i, parent: %s\n", dev_name(dev), task_pid_nr(current), dev->parent ? dev_name(dev->parent) : "none"); calltime = ktime_get(); } switch (state.event) { #ifdef CONFIG_SUSPEND case PM_EVENT_SUSPEND: if (ops->suspend_noirq) { error = ops->suspend_noirq(dev); suspend_report_result(ops->suspend_noirq, error); } break; case PM_EVENT_RESUME: if (ops->resume_noirq) { error = ops->resume_noirq(dev); suspend_report_result(ops->resume_noirq, error); } break; #endif /* CONFIG_SUSPEND */ #ifdef CONFIG_HIBERNATE_CALLBACKS case PM_EVENT_FREEZE: case PM_EVENT_QUIESCE: if (ops->freeze_noirq) { error = ops->freeze_noirq(dev); suspend_report_result(ops->freeze_noirq, error); } break; case PM_EVENT_HIBERNATE: if (ops->poweroff_noirq) { error = ops->poweroff_noirq(dev); suspend_report_result(ops->poweroff_noirq, error); } break; case PM_EVENT_THAW: case PM_EVENT_RECOVER: if (ops->thaw_noirq) { error = ops->thaw_noirq(dev); suspend_report_result(ops->thaw_noirq, error); } break; case PM_EVENT_RESTORE: if (ops->restore_noirq) { error = ops->restore_noirq(dev); suspend_report_result(ops->restore_noirq, error); } break; #endif /* CONFIG_HIBERNATE_CALLBACKS */ default: error = -EINVAL; } if (initcall_debug) { rettime = ktime_get(); delta = ktime_sub(rettime, calltime); printk("initcall %s_i+ returned %d after %Ld usecs\n", dev_name(dev), error, (unsigned long long)ktime_to_ns(delta) >> 10); } return error; } static char *pm_verb(int event) { switch (event) { case PM_EVENT_SUSPEND: return "suspend"; case PM_EVENT_RESUME: return "resume"; case PM_EVENT_FREEZE: return "freeze"; case PM_EVENT_QUIESCE: return "quiesce"; case PM_EVENT_HIBERNATE: return "hibernate"; case PM_EVENT_THAW: return "thaw"; case PM_EVENT_RESTORE: return "restore"; case PM_EVENT_RECOVER: return "recover"; default: return "(unknown PM event)"; } } static void pm_dev_dbg(struct device *dev, pm_message_t state, 
char *info) { dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event), ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ? ", may wakeup" : ""); } static void pm_dev_err(struct device *dev, pm_message_t state, char *info, int error) { printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n", dev_name(dev), pm_verb(state.event), info, error); } static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info) { ktime_t calltime; u64 usecs64; int usecs; calltime = ktime_get(); usecs64 = ktime_to_ns(ktime_sub(calltime, starttime)); do_div(usecs64, NSEC_PER_USEC); usecs = usecs64; if (usecs == 0) usecs = 1; pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n", info ?: "", info ? " " : "", pm_verb(state.event), usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); } /*------------------------- Resume routines -------------------------*/ /** * device_resume_noirq - Execute an "early resume" callback for given device. * @dev: Device to handle. * @state: PM transition of the system being carried out. * * The driver of @dev will not receive interrupts while this function is being * executed. */ static int device_resume_noirq(struct device *dev, pm_message_t state) { int error = 0; TRACE_DEVICE(dev); TRACE_RESUME(0); if (dev->pwr_domain) { pm_dev_dbg(dev, state, "EARLY power domain "); error = pm_noirq_op(dev, &dev->pwr_domain->ops, state); } else if (dev->type && dev->type->pm) { pm_dev_dbg(dev, state, "EARLY type "); error = pm_noirq_op(dev, dev->type->pm, state); } else if (dev->class && dev->class->pm) { pm_dev_dbg(dev, state, "EARLY class "); error = pm_noirq_op(dev, dev->class->pm, state); } else if (dev->bus && dev->bus->pm) { pm_dev_dbg(dev, state, "EARLY "); error = pm_noirq_op(dev, dev->bus->pm, state); } TRACE_RESUME(error); return error; } /** * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices. * @state: PM transition of the system being carried out. * * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and * enable device drivers to receive interrupts. */ void dpm_resume_noirq(pm_message_t state) { ktime_t starttime = ktime_get(); mutex_lock(&dpm_list_mtx); while (!list_empty(&dpm_noirq_list)) { struct device *dev = to_device(dpm_noirq_list.next); int error; get_device(dev); list_move_tail(&dev->power.entry, &dpm_suspended_list); mutex_unlock(&dpm_list_mtx); error = device_resume_noirq(dev, state); if (error) pm_dev_err(dev, state, " early", error); mutex_lock(&dpm_list_mtx); put_device(dev); } mutex_unlock(&dpm_list_mtx); dpm_show_time(starttime, state, "early"); resume_device_irqs(); } EXPORT_SYMBOL_GPL(dpm_resume_noirq); /** * legacy_resume - Execute a legacy (bus or class) resume callback for device. * @dev: Device to resume. * @cb: Resume callback to execute. */ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev)) { int error; ktime_t calltime; calltime = initcall_debug_start(dev); error = cb(dev); suspend_report_result(cb, error); initcall_debug_report(dev, calltime, error); return error; } /** * device_resume - Execute "resume" callbacks for given device. * @dev: Device to handle. * @state: PM transition of the system being carried out. * @async: If true, the device is being resumed asynchronously. */ static int device_resume(struct device *dev, pm_message_t state, bool async) { int error = 0; bool put = false; TRACE_DEVICE(dev); TRACE_RESUME(0); dpm_wait(dev->parent, async); device_lock(dev); /* * This is a fib. 
But we'll allow new children to be added below * a resumed device, even if the device hasn't been completed yet. */ dev->power.is_prepared = false; if (!dev->power.is_suspended) goto Unlock; pm_runtime_enable(dev); put = true; if (dev->pwr_domain) { pm_dev_dbg(dev, state, "power domain "); error = pm_op(dev, &dev->pwr_domain->ops, state); goto End; } if (dev->type && dev->type->pm) { pm_dev_dbg(dev, state, "type "); error = pm_op(dev, dev->type->pm, state); goto End; } if (dev->class) { if (dev->class->pm) { pm_dev_dbg(dev, state, "class "); error = pm_op(dev, dev->class->pm, state); goto End; } else if (dev->class->resume) { pm_dev_dbg(dev, state, "legacy class "); error = legacy_resume(dev, dev->class->resume); goto End; } } if (dev->bus) { if (dev->bus->pm) { pm_dev_dbg(dev, state, ""); error = pm_op(dev, dev->bus->pm, state); } else if (dev->bus->resume) { pm_dev_dbg(dev, state, "legacy "); error = legacy_resume(dev, dev->bus->resume); } } End: dev->power.is_suspended = false; Unlock: device_unlock(dev); complete_all(&dev->power.completion); TRACE_RESUME(error); if (put) pm_runtime_put_sync(dev); return error; } static void async_resume(void *data, async_cookie_t cookie) { struct device *dev = (struct device *)data; int error; error = device_resume(dev, pm_transition, true); if (error) pm_dev_err(dev, pm_transition, " async", error); put_device(dev); } static bool is_async(struct device *dev) { return dev->power.async_suspend && pm_async_enabled && !pm_trace_is_enabled(); } /** * dpm_drv_timeout - Driver suspend / resume watchdog handler * @data: struct device which timed out * * Called when a driver has timed out suspending or resuming. * There's not much we can do here to recover, so * BUG() out for a crash-dump * */ static void dpm_drv_timeout(unsigned long data) { dpm_drv_timeout_fun(data); } /** * Default dpm_drv_timeout handler. Change using device_pm_set_timout_handler. */ static void __dpm_drv_timeout(unsigned long data) { struct dpm_drv_wd_data *wd_data = (void *)data; struct device *dev = wd_data->dev; struct task_struct *tsk = wd_data->tsk; printk(KERN_EMERG "**** DPM device timeout: %s (%s)\n", dev_name(dev), (dev->driver ? dev->driver->name : "no driver")); printk(KERN_EMERG "dpm suspend stack:\n"); show_stack(tsk, NULL); BUG(); } /** * device_pm_set_timout_handler - Replace the current time-out handler * @new_fun: function to be used. Must have the same signature and behavior as * __dpm_drv_timeout. * * @retval: The previous handler. */ void (*device_pm_set_timout_handler(void (*new_fun)(unsigned long))) (unsigned long) { void (*old_fun)(unsigned long) = xchg(&dpm_drv_timeout_fun, new_fun); #ifdef CONFIG_KALLSYMS char sym[KSYM_SYMBOL_LEN]; sprint_symbol(sym, (unsigned long)new_fun); printk(KERN_NOTICE "DPM timeout function changed to [<%08lx>]: %s\n", (unsigned long)new_fun, sym); #else printk(KERN_NOTICE "DPM timeout function changed to [<%08lx>]\n", (unsigned long)new_fun); #endif return old_fun; } /** * dpm_resume - Execute "resume" callbacks for non-sysdev devices. * @state: PM transition of the system being carried out. * * Execute the appropriate "resume" callback for all devices whose status * indicates that they are suspended. 
*/ void dpm_resume(pm_message_t state) { struct device *dev; ktime_t starttime = ktime_get(); might_sleep(); mutex_lock(&dpm_list_mtx); pm_transition = state; async_error = 0; list_for_each_entry(dev, &dpm_suspended_list, power.entry) { INIT_COMPLETION(dev->power.completion); if (is_async(dev)) { get_device(dev); async_schedule(async_resume, dev); } } while (!list_empty(&dpm_suspended_list)) { dev = to_device(dpm_suspended_list.next); get_device(dev); if (!is_async(dev)) { int error; mutex_unlock(&dpm_list_mtx); error = device_resume(dev, state, false); if (error) pm_dev_err(dev, state, "", error); mutex_lock(&dpm_list_mtx); } if (!list_empty(&dev->power.entry)) list_move_tail(&dev->power.entry, &dpm_prepared_list); put_device(dev); } mutex_unlock(&dpm_list_mtx); async_synchronize_full(); dpm_show_time(starttime, state, NULL); } /** * device_complete - Complete a PM transition for given device. * @dev: Device to handle. * @state: PM transition of the system being carried out. */ static void device_complete(struct device *dev, pm_message_t state) { device_lock(dev); if (dev->pwr_domain) { pm_dev_dbg(dev, state, "completing power domain "); if (dev->pwr_domain->ops.complete) dev->pwr_domain->ops.complete(dev); } else if (dev->type && dev->type->pm) { pm_dev_dbg(dev, state, "completing type "); if (dev->type->pm->complete) dev->type->pm->complete(dev); } else if (dev->class && dev->class->pm) { pm_dev_dbg(dev, state, "completing class "); if (dev->class->pm->complete) dev->class->pm->complete(dev); } else if (dev->bus && dev->bus->pm) { pm_dev_dbg(dev, state, "completing "); if (dev->bus->pm->complete) dev->bus->pm->complete(dev); } device_unlock(dev); } /** * dpm_complete - Complete a PM transition for all non-sysdev devices. * @state: PM transition of the system being carried out. * * Execute the ->complete() callbacks for all devices whose PM status is not * DPM_ON (this allows new devices to be registered). */ void dpm_complete(pm_message_t state) { struct list_head list; might_sleep(); INIT_LIST_HEAD(&list); mutex_lock(&dpm_list_mtx); while (!list_empty(&dpm_prepared_list)) { struct device *dev = to_device(dpm_prepared_list.prev); get_device(dev); dev->power.is_prepared = false; list_move(&dev->power.entry, &list); mutex_unlock(&dpm_list_mtx); device_complete(dev, state); mutex_lock(&dpm_list_mtx); put_device(dev); } list_splice(&list, &dpm_list); mutex_unlock(&dpm_list_mtx); } /** * dpm_resume_end - Execute "resume" callbacks and complete system transition. * @state: PM transition of the system being carried out. * * Execute "resume" callbacks for all devices and complete the PM transition of * the system. */ void dpm_resume_end(pm_message_t state) { dpm_resume(state); dpm_complete(state); } EXPORT_SYMBOL_GPL(dpm_resume_end); /*------------------------- Suspend routines -------------------------*/ /** * resume_event - Return a "resume" message for given "suspend" sleep state. * @sleep_state: PM message representing a sleep state. * * Return a PM message representing the resume event corresponding to given * sleep state. */ static pm_message_t resume_event(pm_message_t sleep_state) { switch (sleep_state.event) { case PM_EVENT_SUSPEND: return PMSG_RESUME; case PM_EVENT_FREEZE: case PM_EVENT_QUIESCE: return PMSG_RECOVER; case PM_EVENT_HIBERNATE: return PMSG_RESTORE; } return PMSG_ON; } /** * device_suspend_noirq - Execute a "late suspend" callback for given device. * @dev: Device to handle. * @state: PM transition of the system being carried out. 
* * The driver of @dev will not receive interrupts while this function is being * executed. */ static int device_suspend_noirq(struct device *dev, pm_message_t state) { int error; if (dev->pwr_domain) { pm_dev_dbg(dev, state, "LATE power domain "); error = pm_noirq_op(dev, &dev->pwr_domain->ops, state); if (error) return error; } else if (dev->type && dev->type->pm) { pm_dev_dbg(dev, state, "LATE type "); error = pm_noirq_op(dev, dev->type->pm, state); if (error) return error; } else if (dev->class && dev->class->pm) { pm_dev_dbg(dev, state, "LATE class "); error = pm_noirq_op(dev, dev->class->pm, state); if (error) return error; } else if (dev->bus && dev->bus->pm) { pm_dev_dbg(dev, state, "LATE "); error = pm_noirq_op(dev, dev->bus->pm, state); if (error) return error; } return 0; } /** * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices. * @state: PM transition of the system being carried out. * * Prevent device drivers from receiving interrupts and call the "noirq" suspend * handlers for all non-sysdev devices. */ int dpm_suspend_noirq(pm_message_t state) { ktime_t starttime = ktime_get(); int error = 0; suspend_device_irqs(); mutex_lock(&dpm_list_mtx); while (!list_empty(&dpm_suspended_list)) { struct device *dev = to_device(dpm_suspended_list.prev); get_device(dev); mutex_unlock(&dpm_list_mtx); error = device_suspend_noirq(dev, state); mutex_lock(&dpm_list_mtx); if (error) { pm_dev_err(dev, state, " late", error); put_device(dev); break; } if (!list_empty(&dev->power.entry)) list_move(&dev->power.entry, &dpm_noirq_list); put_device(dev); } mutex_unlock(&dpm_list_mtx); if (error) dpm_resume_noirq(resume_event(state)); else dpm_show_time(starttime, state, "late"); return error; } EXPORT_SYMBOL_GPL(dpm_suspend_noirq); /** * legacy_suspend - Execute a legacy (bus or class) suspend callback for device. * @dev: Device to suspend. * @state: PM transition of the system being carried out. * @cb: Suspend callback to execute. */ static int legacy_suspend(struct device *dev, pm_message_t state, int (*cb)(struct device *dev, pm_message_t state)) { int error; ktime_t calltime; calltime = initcall_debug_start(dev); error = cb(dev, state); suspend_report_result(cb, error); initcall_debug_report(dev, calltime, error); return error; } /** * device_suspend - Execute "suspend" callbacks for given device. * @dev: Device to handle. * @state: PM transition of the system being carried out. * @async: If true, the device is being suspended asynchronously. 
*/ static int __device_suspend(struct device *dev, pm_message_t state, bool async) { int error = 0; struct timer_list timer; struct dpm_drv_wd_data data; dpm_wait_for_children(dev, async); data.dev = dev; data.tsk = get_current(); init_timer_on_stack(&timer); timer.expires = jiffies + HZ * 3; timer.function = dpm_drv_timeout; timer.data = (unsigned long)&data; add_timer(&timer); if (async_error) { del_timer_sync(&timer); destroy_timer_on_stack(&timer); return 0; } pm_runtime_get_noresume(dev); if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) pm_wakeup_event(dev, 0); if (pm_wakeup_pending()) { del_timer_sync(&timer); destroy_timer_on_stack(&timer); pm_runtime_put_sync(dev); async_error = -EBUSY; return 0; } device_lock(dev); if (dev->pwr_domain) { pm_dev_dbg(dev, state, "power domain "); error = pm_op(dev, &dev->pwr_domain->ops, state); goto End; } if (dev->type && dev->type->pm) { pm_dev_dbg(dev, state, "type "); error = pm_op(dev, dev->type->pm, state); goto End; } if (dev->class) { if (dev->class->pm) { pm_dev_dbg(dev, state, "class "); error = pm_op(dev, dev->class->pm, state); goto End; } else if (dev->class->suspend) { pm_dev_dbg(dev, state, "legacy class "); error = legacy_suspend(dev, state, dev->class->suspend); goto End; } } if (dev->bus) { if (dev->bus->pm) { pm_dev_dbg(dev, state, ""); error = pm_op(dev, dev->bus->pm, state); } else if (dev->bus->suspend) { pm_dev_dbg(dev, state, "legacy "); error = legacy_suspend(dev, state, dev->bus->suspend); } } End: dev->power.is_suspended = !error; device_unlock(dev); del_timer_sync(&timer); destroy_timer_on_stack(&timer); complete_all(&dev->power.completion); if (error) { pm_runtime_put_sync(dev); async_error = error; } else if (dev->power.is_suspended) { __pm_runtime_disable(dev, false); } return error; } static void async_suspend(void *data, async_cookie_t cookie) { struct device *dev = (struct device *)data; int error; error = __device_suspend(dev, pm_transition, true); if (error) pm_dev_err(dev, pm_transition, " async", error); put_device(dev); } static int device_suspend(struct device *dev) { INIT_COMPLETION(dev->power.completion); if (pm_async_enabled && dev->power.async_suspend) { get_device(dev); async_schedule(async_suspend, dev); return 0; } return __device_suspend(dev, pm_transition, false); } /** * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices. * @state: PM transition of the system being carried out. */ int dpm_suspend(pm_message_t state) { ktime_t starttime = ktime_get(); int error = 0; might_sleep(); mutex_lock(&dpm_list_mtx); pm_transition = state; async_error = 0; while (!list_empty(&dpm_prepared_list)) { struct device *dev = to_device(dpm_prepared_list.prev); get_device(dev); mutex_unlock(&dpm_list_mtx); error = device_suspend(dev); mutex_lock(&dpm_list_mtx); if (error) { pm_dev_err(dev, state, "", error); put_device(dev); break; } if (!list_empty(&dev->power.entry)) list_move(&dev->power.entry, &dpm_suspended_list); put_device(dev); if (async_error) break; } mutex_unlock(&dpm_list_mtx); async_synchronize_full(); if (!error) error = async_error; if (!error) dpm_show_time(starttime, state, NULL); return error; } /** * device_prepare - Prepare a device for system power transition. * @dev: Device to handle. * @state: PM transition of the system being carried out. * * Execute the ->prepare() callback(s) for given device. No new children of the * device may be registered after this function has returned. 
*/ static int device_prepare(struct device *dev, pm_message_t state) { int error = 0; device_lock(dev); if (dev->pwr_domain) { pm_dev_dbg(dev, state, "preparing power domain "); if (dev->pwr_domain->ops.prepare) error = dev->pwr_domain->ops.prepare(dev); suspend_report_result(dev->pwr_domain->ops.prepare, error); if (error) goto End; } else if (dev->type && dev->type->pm) { pm_dev_dbg(dev, state, "preparing type "); if (dev->type->pm->prepare) error = dev->type->pm->prepare(dev); suspend_report_result(dev->type->pm->prepare, error); if (error) goto End; } else if (dev->class && dev->class->pm) { pm_dev_dbg(dev, state, "preparing class "); if (dev->class->pm->prepare) error = dev->class->pm->prepare(dev); suspend_report_result(dev->class->pm->prepare, error); if (error) goto End; } else if (dev->bus && dev->bus->pm) { pm_dev_dbg(dev, state, "preparing "); if (dev->bus->pm->prepare) error = dev->bus->pm->prepare(dev); suspend_report_result(dev->bus->pm->prepare, error); } End: device_unlock(dev); return error; } /** * dpm_prepare - Prepare all non-sysdev devices for a system PM transition. * @state: PM transition of the system being carried out. * * Execute the ->prepare() callback(s) for all devices. */ int dpm_prepare(pm_message_t state) { int error = 0; might_sleep(); mutex_lock(&dpm_list_mtx); while (!list_empty(&dpm_list)) { struct device *dev = to_device(dpm_list.next); get_device(dev); mutex_unlock(&dpm_list_mtx); error = device_prepare(dev, state); mutex_lock(&dpm_list_mtx); if (error) { if (error == -EAGAIN) { put_device(dev); error = 0; continue; } printk(KERN_INFO "PM: Device %s not prepared " "for power transition: code %d\n", dev_name(dev), error); put_device(dev); break; } dev->power.is_prepared = true; if (!list_empty(&dev->power.entry)) list_move_tail(&dev->power.entry, &dpm_prepared_list); put_device(dev); } mutex_unlock(&dpm_list_mtx); return error; } /** * dpm_suspend_start - Prepare devices for PM transition and suspend them. * @state: PM transition of the system being carried out. * * Prepare all non-sysdev devices for system PM transition and execute "suspend" * callbacks for them. */ int dpm_suspend_start(pm_message_t state) { int error; error = dpm_prepare(state); if (!error) error = dpm_suspend(state); return error; } EXPORT_SYMBOL_GPL(dpm_suspend_start); void __suspend_report_result(const char *function, void *fn, int ret) { if (ret) printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret); } EXPORT_SYMBOL_GPL(__suspend_report_result); /** * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete. * @dev: Device to wait for. * @subordinate: Device that needs to wait for @dev. */ int device_pm_wait_for_dev(struct device *subordinate, struct device *dev) { dpm_wait(dev, subordinate->power.async_suspend); return async_error; } EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
gpl-2.0
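The async suspend/resume paths in main.c above order devices with per-device completions rather than locks: device_resume() first calls dpm_wait(dev->parent, ...), which blocks on the parent's power.completion, and every device signals complete_all() when its own callbacks finish, so asynchronously scheduled devices still resume parents before children. Below is a userspace analog of that handshake using POSIX threads; it is an illustrative sketch with names of our own, not the kernel's completion implementation:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct completion / complete_all() / wait_for_completion(). */
struct completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        bool done;
};

static void complete_all(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        c->done = true;                    /* latches: later waiters pass too */
        pthread_cond_broadcast(&c->cond);  /* wake every current waiter */
        pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        while (!c->done)
                pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
}

static struct completion parent_done = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false
};

static void *resume_child(void *arg)
{
        (void)arg;
        /* Like dpm_wait(dev->parent, true) in device_resume(): the child
         * blocks until the parent has signalled its completion. */
        wait_for_completion(&parent_done);
        printf("child resumed after parent\n");
        return NULL;
}

int main(void)
{
        pthread_t child;

        pthread_create(&child, NULL, resume_child, NULL);
        printf("parent resumed\n");
        complete_all(&parent_done);        /* as device_resume() does when done */
        pthread_join(child, NULL);
        return 0;
}

Because the completion latches (done stays set until re-initialized), a child that only starts waiting after its parent has already finished is not lost; INIT_COMPLETION()/complete_all() give each dpm_resume() pass the same property.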