Dataset schema (per record, in order):

repo_name: stringlengths (5 to 85)
path: stringlengths (3 to 252)
copies: stringlengths (1 to 5)
size: stringlengths (4 to 6)
content: stringlengths (922 to 999k)
license: stringclasses (15 values)
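As a minimal sketch of how a dump with this schema is typically consumed, assuming the records are published on the Hugging Face Hub and read with the Python "datasets" library (the dataset path "user/kernel-code" and the split name are hypothetical placeholders, not the real identifiers):

# Minimal loading sketch. "user/kernel-code" is a hypothetical placeholder.
from datasets import load_dataset

# Stream to avoid downloading every multi-kilobyte "content" field up front.
ds = load_dataset("user/kernel-code", split="train", streaming=True)

for row in ds:
    # Each row carries the six string columns described in the schema above.
    print(row["repo_name"], row["path"], row["copies"], row["size"], row["license"])
    break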
repo_name: hallovveen31/Magic
path: arch/ia64/kernel/cpufreq/acpi-cpufreq.c
copies: 7426
size: 10032
content:
/* * arch/ia64/kernel/cpufreq/acpi-cpufreq.c * This file provides the ACPI based P-state support. This * module works with generic cpufreq infrastructure. Most of * the code is based on i386 version * (arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c) * * Copyright (C) 2005 Intel Corp * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/init.h> #include <linux/cpufreq.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/pal.h> #include <linux/acpi.h> #include <acpi/processor.h> MODULE_AUTHOR("Venkatesh Pallipadi"); MODULE_DESCRIPTION("ACPI Processor P-States Driver"); MODULE_LICENSE("GPL"); struct cpufreq_acpi_io { struct acpi_processor_performance acpi_data; struct cpufreq_frequency_table *freq_table; unsigned int resume; }; static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS]; static struct cpufreq_driver acpi_cpufreq_driver; static int processor_set_pstate ( u32 value) { s64 retval; pr_debug("processor_set_pstate\n"); retval = ia64_pal_set_pstate((u64)value); if (retval) { pr_debug("Failed to set freq to 0x%x, with error 0x%lx\n", value, retval); return -ENODEV; } return (int)retval; } static int processor_get_pstate ( u32 *value) { u64 pstate_index = 0; s64 retval; pr_debug("processor_get_pstate\n"); retval = ia64_pal_get_pstate(&pstate_index, PAL_GET_PSTATE_TYPE_INSTANT); *value = (u32) pstate_index; if (retval) pr_debug("Failed to get current freq with " "error 0x%lx, idx 0x%x\n", retval, *value); return (int)retval; } /* To be used only after data->acpi_data is initialized */ static unsigned extract_clock ( struct cpufreq_acpi_io *data, unsigned value, unsigned int cpu) { unsigned long i; pr_debug("extract_clock\n"); for (i = 0; i < data->acpi_data.state_count; i++) { if (value == data->acpi_data.states[i].status) return data->acpi_data.states[i].core_frequency; } return data->acpi_data.states[i-1].core_frequency; } static unsigned int processor_get_freq ( struct cpufreq_acpi_io *data, unsigned int cpu) { int ret = 0; u32 value = 0; cpumask_t saved_mask; unsigned long clock_freq; pr_debug("processor_get_freq\n"); saved_mask = current->cpus_allowed; set_cpus_allowed_ptr(current, cpumask_of(cpu)); if (smp_processor_id() != cpu) goto migrate_end; /* processor_get_pstate gets the instantaneous frequency */ ret = processor_get_pstate(&value); if (ret) { set_cpus_allowed_ptr(current, &saved_mask); printk(KERN_WARNING "get performance failed with error %d\n", ret); ret = 0; goto migrate_end; } clock_freq = extract_clock(data, value, cpu); ret = (clock_freq*1000); migrate_end: set_cpus_allowed_ptr(current, &saved_mask); return ret; } static int processor_set_freq ( struct cpufreq_acpi_io *data, unsigned int cpu, int state) { int ret = 0; u32 value = 0; struct cpufreq_freqs cpufreq_freqs; cpumask_t saved_mask; int retval; pr_debug("processor_set_freq\n"); saved_mask = current->cpus_allowed; set_cpus_allowed_ptr(current, cpumask_of(cpu)); if (smp_processor_id() != cpu) { retval = -EAGAIN; goto migrate_end; } if (state == data->acpi_data.state) { if (unlikely(data->resume)) { pr_debug("Called after resume, resetting to P%d\n", state); data->resume = 0; } else { pr_debug("Already at target state (P%d)\n", state); retval = 0; goto migrate_end; } } pr_debug("Transitioning from P%d to P%d\n", data->acpi_data.state, state); /* cpufreq frequency struct */ cpufreq_freqs.cpu = cpu; cpufreq_freqs.old = 
data->freq_table[data->acpi_data.state].frequency; cpufreq_freqs.new = data->freq_table[state].frequency; /* notify cpufreq */ cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE); /* * First we write the target state's 'control' value to the * control_register. */ value = (u32) data->acpi_data.states[state].control; pr_debug("Transitioning to state: 0x%08x\n", value); ret = processor_set_pstate(value); if (ret) { unsigned int tmp = cpufreq_freqs.new; cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE); cpufreq_freqs.new = cpufreq_freqs.old; cpufreq_freqs.old = tmp; cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE); cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE); printk(KERN_WARNING "Transition failed with error %d\n", ret); retval = -ENODEV; goto migrate_end; } cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE); data->acpi_data.state = state; retval = 0; migrate_end: set_cpus_allowed_ptr(current, &saved_mask); return (retval); } static unsigned int acpi_cpufreq_get ( unsigned int cpu) { struct cpufreq_acpi_io *data = acpi_io_data[cpu]; pr_debug("acpi_cpufreq_get\n"); return processor_get_freq(data, cpu); } static int acpi_cpufreq_target ( struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; unsigned int next_state = 0; unsigned int result = 0; pr_debug("acpi_cpufreq_setpolicy\n"); result = cpufreq_frequency_table_target(policy, data->freq_table, target_freq, relation, &next_state); if (result) return (result); result = processor_set_freq(data, policy->cpu, next_state); return (result); } static int acpi_cpufreq_verify ( struct cpufreq_policy *policy) { unsigned int result = 0; struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; pr_debug("acpi_cpufreq_verify\n"); result = cpufreq_frequency_table_verify(policy, data->freq_table); return (result); } static int acpi_cpufreq_cpu_init ( struct cpufreq_policy *policy) { unsigned int i; unsigned int cpu = policy->cpu; struct cpufreq_acpi_io *data; unsigned int result = 0; pr_debug("acpi_cpufreq_cpu_init\n"); data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL); if (!data) return (-ENOMEM); acpi_io_data[cpu] = data; result = acpi_processor_register_performance(&data->acpi_data, cpu); if (result) goto err_free; /* capability check */ if (data->acpi_data.state_count <= 1) { pr_debug("No P-States\n"); result = -ENODEV; goto err_unreg; } if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) || (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { pr_debug("Unsupported address space [%d, %d]\n", (u32) (data->acpi_data.control_register.space_id), (u32) (data->acpi_data.status_register.space_id)); result = -ENODEV; goto err_unreg; } /* alloc freq_table */ data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (data->acpi_data.state_count + 1), GFP_KERNEL); if (!data->freq_table) { result = -ENOMEM; goto err_unreg; } /* detect transition latency */ policy->cpuinfo.transition_latency = 0; for (i=0; i<data->acpi_data.state_count; i++) { if ((data->acpi_data.states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency) { policy->cpuinfo.transition_latency = data->acpi_data.states[i].transition_latency * 1000; } } policy->cur = processor_get_freq(data, policy->cpu); /* table init */ for (i = 0; i <= data->acpi_data.state_count; i++) { data->freq_table[i].index = i; if (i < data->acpi_data.state_count) { 
data->freq_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000; } else { data->freq_table[i].frequency = CPUFREQ_TABLE_END; } } result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); if (result) { goto err_freqfree; } /* notify BIOS that we exist */ acpi_processor_notify_smm(THIS_MODULE); printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management " "activated.\n", cpu); for (i = 0; i < data->acpi_data.state_count; i++) pr_debug(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n", (i == data->acpi_data.state?'*':' '), i, (u32) data->acpi_data.states[i].core_frequency, (u32) data->acpi_data.states[i].power, (u32) data->acpi_data.states[i].transition_latency, (u32) data->acpi_data.states[i].bus_master_latency, (u32) data->acpi_data.states[i].status, (u32) data->acpi_data.states[i].control); cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu); /* the first call to ->target() should result in us actually * writing something to the appropriate registers. */ data->resume = 1; return (result); err_freqfree: kfree(data->freq_table); err_unreg: acpi_processor_unregister_performance(&data->acpi_data, cpu); err_free: kfree(data); acpi_io_data[cpu] = NULL; return (result); } static int acpi_cpufreq_cpu_exit ( struct cpufreq_policy *policy) { struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; pr_debug("acpi_cpufreq_cpu_exit\n"); if (data) { cpufreq_frequency_table_put_attr(policy->cpu); acpi_io_data[policy->cpu] = NULL; acpi_processor_unregister_performance(&data->acpi_data, policy->cpu); kfree(data); } return (0); } static struct freq_attr* acpi_cpufreq_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; static struct cpufreq_driver acpi_cpufreq_driver = { .verify = acpi_cpufreq_verify, .target = acpi_cpufreq_target, .get = acpi_cpufreq_get, .init = acpi_cpufreq_cpu_init, .exit = acpi_cpufreq_cpu_exit, .name = "acpi-cpufreq", .owner = THIS_MODULE, .attr = acpi_cpufreq_attr, }; static int __init acpi_cpufreq_init (void) { pr_debug("acpi_cpufreq_init\n"); return cpufreq_register_driver(&acpi_cpufreq_driver); } static void __exit acpi_cpufreq_exit (void) { pr_debug("acpi_cpufreq_exit\n"); cpufreq_unregister_driver(&acpi_cpufreq_driver); return; } late_initcall(acpi_cpufreq_init); module_exit(acpi_cpufreq_exit);
license: gpl-2.0
repo_name: android-armv7a-belalang-tempur/Android_SpeedKernel_3.4
path: arch/ia64/kernel/cpufreq/acpi-cpufreq.c
copies: 7426
size: 10032
content:
/* * arch/ia64/kernel/cpufreq/acpi-cpufreq.c * This file provides the ACPI based P-state support. This * module works with generic cpufreq infrastructure. Most of * the code is based on i386 version * (arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c) * * Copyright (C) 2005 Intel Corp * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/init.h> #include <linux/cpufreq.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/pal.h> #include <linux/acpi.h> #include <acpi/processor.h> MODULE_AUTHOR("Venkatesh Pallipadi"); MODULE_DESCRIPTION("ACPI Processor P-States Driver"); MODULE_LICENSE("GPL"); struct cpufreq_acpi_io { struct acpi_processor_performance acpi_data; struct cpufreq_frequency_table *freq_table; unsigned int resume; }; static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS]; static struct cpufreq_driver acpi_cpufreq_driver; static int processor_set_pstate ( u32 value) { s64 retval; pr_debug("processor_set_pstate\n"); retval = ia64_pal_set_pstate((u64)value); if (retval) { pr_debug("Failed to set freq to 0x%x, with error 0x%lx\n", value, retval); return -ENODEV; } return (int)retval; } static int processor_get_pstate ( u32 *value) { u64 pstate_index = 0; s64 retval; pr_debug("processor_get_pstate\n"); retval = ia64_pal_get_pstate(&pstate_index, PAL_GET_PSTATE_TYPE_INSTANT); *value = (u32) pstate_index; if (retval) pr_debug("Failed to get current freq with " "error 0x%lx, idx 0x%x\n", retval, *value); return (int)retval; } /* To be used only after data->acpi_data is initialized */ static unsigned extract_clock ( struct cpufreq_acpi_io *data, unsigned value, unsigned int cpu) { unsigned long i; pr_debug("extract_clock\n"); for (i = 0; i < data->acpi_data.state_count; i++) { if (value == data->acpi_data.states[i].status) return data->acpi_data.states[i].core_frequency; } return data->acpi_data.states[i-1].core_frequency; } static unsigned int processor_get_freq ( struct cpufreq_acpi_io *data, unsigned int cpu) { int ret = 0; u32 value = 0; cpumask_t saved_mask; unsigned long clock_freq; pr_debug("processor_get_freq\n"); saved_mask = current->cpus_allowed; set_cpus_allowed_ptr(current, cpumask_of(cpu)); if (smp_processor_id() != cpu) goto migrate_end; /* processor_get_pstate gets the instantaneous frequency */ ret = processor_get_pstate(&value); if (ret) { set_cpus_allowed_ptr(current, &saved_mask); printk(KERN_WARNING "get performance failed with error %d\n", ret); ret = 0; goto migrate_end; } clock_freq = extract_clock(data, value, cpu); ret = (clock_freq*1000); migrate_end: set_cpus_allowed_ptr(current, &saved_mask); return ret; } static int processor_set_freq ( struct cpufreq_acpi_io *data, unsigned int cpu, int state) { int ret = 0; u32 value = 0; struct cpufreq_freqs cpufreq_freqs; cpumask_t saved_mask; int retval; pr_debug("processor_set_freq\n"); saved_mask = current->cpus_allowed; set_cpus_allowed_ptr(current, cpumask_of(cpu)); if (smp_processor_id() != cpu) { retval = -EAGAIN; goto migrate_end; } if (state == data->acpi_data.state) { if (unlikely(data->resume)) { pr_debug("Called after resume, resetting to P%d\n", state); data->resume = 0; } else { pr_debug("Already at target state (P%d)\n", state); retval = 0; goto migrate_end; } } pr_debug("Transitioning from P%d to P%d\n", data->acpi_data.state, state); /* cpufreq frequency struct */ cpufreq_freqs.cpu = cpu; cpufreq_freqs.old = 
data->freq_table[data->acpi_data.state].frequency; cpufreq_freqs.new = data->freq_table[state].frequency; /* notify cpufreq */ cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE); /* * First we write the target state's 'control' value to the * control_register. */ value = (u32) data->acpi_data.states[state].control; pr_debug("Transitioning to state: 0x%08x\n", value); ret = processor_set_pstate(value); if (ret) { unsigned int tmp = cpufreq_freqs.new; cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE); cpufreq_freqs.new = cpufreq_freqs.old; cpufreq_freqs.old = tmp; cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE); cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE); printk(KERN_WARNING "Transition failed with error %d\n", ret); retval = -ENODEV; goto migrate_end; } cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE); data->acpi_data.state = state; retval = 0; migrate_end: set_cpus_allowed_ptr(current, &saved_mask); return (retval); } static unsigned int acpi_cpufreq_get ( unsigned int cpu) { struct cpufreq_acpi_io *data = acpi_io_data[cpu]; pr_debug("acpi_cpufreq_get\n"); return processor_get_freq(data, cpu); } static int acpi_cpufreq_target ( struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; unsigned int next_state = 0; unsigned int result = 0; pr_debug("acpi_cpufreq_setpolicy\n"); result = cpufreq_frequency_table_target(policy, data->freq_table, target_freq, relation, &next_state); if (result) return (result); result = processor_set_freq(data, policy->cpu, next_state); return (result); } static int acpi_cpufreq_verify ( struct cpufreq_policy *policy) { unsigned int result = 0; struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; pr_debug("acpi_cpufreq_verify\n"); result = cpufreq_frequency_table_verify(policy, data->freq_table); return (result); } static int acpi_cpufreq_cpu_init ( struct cpufreq_policy *policy) { unsigned int i; unsigned int cpu = policy->cpu; struct cpufreq_acpi_io *data; unsigned int result = 0; pr_debug("acpi_cpufreq_cpu_init\n"); data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL); if (!data) return (-ENOMEM); acpi_io_data[cpu] = data; result = acpi_processor_register_performance(&data->acpi_data, cpu); if (result) goto err_free; /* capability check */ if (data->acpi_data.state_count <= 1) { pr_debug("No P-States\n"); result = -ENODEV; goto err_unreg; } if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) || (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { pr_debug("Unsupported address space [%d, %d]\n", (u32) (data->acpi_data.control_register.space_id), (u32) (data->acpi_data.status_register.space_id)); result = -ENODEV; goto err_unreg; } /* alloc freq_table */ data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (data->acpi_data.state_count + 1), GFP_KERNEL); if (!data->freq_table) { result = -ENOMEM; goto err_unreg; } /* detect transition latency */ policy->cpuinfo.transition_latency = 0; for (i=0; i<data->acpi_data.state_count; i++) { if ((data->acpi_data.states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency) { policy->cpuinfo.transition_latency = data->acpi_data.states[i].transition_latency * 1000; } } policy->cur = processor_get_freq(data, policy->cpu); /* table init */ for (i = 0; i <= data->acpi_data.state_count; i++) { data->freq_table[i].index = i; if (i < data->acpi_data.state_count) { 
data->freq_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000; } else { data->freq_table[i].frequency = CPUFREQ_TABLE_END; } } result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); if (result) { goto err_freqfree; } /* notify BIOS that we exist */ acpi_processor_notify_smm(THIS_MODULE); printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management " "activated.\n", cpu); for (i = 0; i < data->acpi_data.state_count; i++) pr_debug(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n", (i == data->acpi_data.state?'*':' '), i, (u32) data->acpi_data.states[i].core_frequency, (u32) data->acpi_data.states[i].power, (u32) data->acpi_data.states[i].transition_latency, (u32) data->acpi_data.states[i].bus_master_latency, (u32) data->acpi_data.states[i].status, (u32) data->acpi_data.states[i].control); cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu); /* the first call to ->target() should result in us actually * writing something to the appropriate registers. */ data->resume = 1; return (result); err_freqfree: kfree(data->freq_table); err_unreg: acpi_processor_unregister_performance(&data->acpi_data, cpu); err_free: kfree(data); acpi_io_data[cpu] = NULL; return (result); } static int acpi_cpufreq_cpu_exit ( struct cpufreq_policy *policy) { struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; pr_debug("acpi_cpufreq_cpu_exit\n"); if (data) { cpufreq_frequency_table_put_attr(policy->cpu); acpi_io_data[policy->cpu] = NULL; acpi_processor_unregister_performance(&data->acpi_data, policy->cpu); kfree(data); } return (0); } static struct freq_attr* acpi_cpufreq_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; static struct cpufreq_driver acpi_cpufreq_driver = { .verify = acpi_cpufreq_verify, .target = acpi_cpufreq_target, .get = acpi_cpufreq_get, .init = acpi_cpufreq_cpu_init, .exit = acpi_cpufreq_cpu_exit, .name = "acpi-cpufreq", .owner = THIS_MODULE, .attr = acpi_cpufreq_attr, }; static int __init acpi_cpufreq_init (void) { pr_debug("acpi_cpufreq_init\n"); return cpufreq_register_driver(&acpi_cpufreq_driver); } static void __exit acpi_cpufreq_exit (void) { pr_debug("acpi_cpufreq_exit\n"); cpufreq_unregister_driver(&acpi_cpufreq_driver); return; } late_initcall(acpi_cpufreq_init); module_exit(acpi_cpufreq_exit);
license: gpl-2.0
repo_name: insvnx/android_kernel_htc_a11chl
path: fs/squashfs/decompressor.c
copies: 7682
size: 3039
content:
/*
 * Squashfs - a compressed read only filesystem for Linux
 *
 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
 * Phillip Lougher <phillip@squashfs.org.uk>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2,
 * or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * decompressor.c
 */

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "decompressor.h"
#include "squashfs.h"

/*
 * This file (and decompressor.h) implements a decompressor framework for
 * Squashfs, allowing multiple decompressors to be easily supported
 */

static const struct squashfs_decompressor squashfs_lzma_unsupported_comp_ops = {
        NULL, NULL, NULL, LZMA_COMPRESSION, "lzma", 0
};

#ifndef CONFIG_SQUASHFS_LZO
static const struct squashfs_decompressor squashfs_lzo_comp_ops = {
        NULL, NULL, NULL, LZO_COMPRESSION, "lzo", 0
};
#endif

#ifndef CONFIG_SQUASHFS_XZ
static const struct squashfs_decompressor squashfs_xz_comp_ops = {
        NULL, NULL, NULL, XZ_COMPRESSION, "xz", 0
};
#endif

#ifndef CONFIG_SQUASHFS_ZLIB
static const struct squashfs_decompressor squashfs_zlib_comp_ops = {
        NULL, NULL, NULL, ZLIB_COMPRESSION, "zlib", 0
};
#endif

static const struct squashfs_decompressor squashfs_unknown_comp_ops = {
        NULL, NULL, NULL, 0, "unknown", 0
};

static const struct squashfs_decompressor *decompressor[] = {
        &squashfs_zlib_comp_ops,
        &squashfs_lzo_comp_ops,
        &squashfs_xz_comp_ops,
        &squashfs_lzma_unsupported_comp_ops,
        &squashfs_unknown_comp_ops
};

const struct squashfs_decompressor *squashfs_lookup_decompressor(int id)
{
        int i;

        for (i = 0; decompressor[i]->id; i++)
                if (id == decompressor[i]->id)
                        break;

        return decompressor[i];
}

void *squashfs_decompressor_init(struct super_block *sb, unsigned short flags)
{
        struct squashfs_sb_info *msblk = sb->s_fs_info;
        void *strm, *buffer = NULL;
        int length = 0;

        /*
         * Read decompressor specific options from file system if present
         */
        if (SQUASHFS_COMP_OPTS(flags)) {
                buffer = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
                if (buffer == NULL)
                        return ERR_PTR(-ENOMEM);

                length = squashfs_read_data(sb, &buffer,
                        sizeof(struct squashfs_super_block), 0, NULL,
                        PAGE_CACHE_SIZE, 1);

                if (length < 0) {
                        strm = ERR_PTR(length);
                        goto finished;
                }
        }

        strm = msblk->decompressor->init(msblk, buffer, length);
finished:
        kfree(buffer);

        return strm;
}
license: gpl-2.0
repo_name: omnirom/android_kernel_oppo_r819
path: crypto/cast5.c
copies: 8194
size: 33810
content:
/* Kernel cryptographic api. * cast5.c - Cast5 cipher algorithm (rfc2144). * * Derived from GnuPG implementation of cast5. * * Major Changes. * Complete conformance to rfc2144. * Supports key size from 40 to 128 bits. * * Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc. * Copyright (C) 2003 Kartikey Mahendra Bhatt <kartik_me@hotmail.com>. * * This program is free software; you can redistribute it and/or modify it * under the terms of GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA */ #include <asm/byteorder.h> #include <linux/init.h> #include <linux/crypto.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #define CAST5_BLOCK_SIZE 8 #define CAST5_MIN_KEY_SIZE 5 #define CAST5_MAX_KEY_SIZE 16 struct cast5_ctx { u32 Km[16]; u8 Kr[16]; int rr; /* rr?number of rounds = 16:number of rounds = 12; (rfc 2144) */ }; static const u32 s1[256] = { 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949, 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e, 0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d, 0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0, 0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7, 0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935, 0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d, 0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50, 0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe, 0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3, 0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167, 0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291, 0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779, 0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2, 0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511, 0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d, 0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5, 0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324, 0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c, 0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc, 0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d, 0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96, 0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a, 0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d, 0xc69dff09, 
0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd, 0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6, 0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9, 0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872, 0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c, 0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e, 0xbd91e046, 0x9a56456e, 0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9, 0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf }; static const u32 s2[256] = { 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651, 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3, 0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 0xef944459, 0xba83ccb3, 0xe0c3cdfb, 0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806, 0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b, 0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359, 0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b, 0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c, 0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34, 0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb, 0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd, 0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860, 0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b, 0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304, 0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b, 0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf, 0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c, 0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13, 0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f, 0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6, 0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6, 0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58, 0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906, 0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d, 0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6, 0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4, 0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6, 0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f, 0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 
0x6c387e8a, 0x0ae6d249, 0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa, 0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9, 0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1 }; static const u32 s3[256] = { 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90, 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5, 0x11107d9f, 0x07647db9, 0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e, 0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240, 0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5, 0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b, 0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71, 0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04, 0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82, 0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15, 0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2, 0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176, 0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148, 0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc, 0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341, 0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e, 0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51, 0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f, 0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a, 0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b, 0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b, 0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5, 0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45, 0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536, 0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc, 0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0, 0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69, 0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2, 0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49, 0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d, 0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a, 0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783 }; static const u32 s4[256] = { 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1, 0x7e287aff, 
0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf, 0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15, 0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121, 0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25, 0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5, 0x2649abdf, 0xaea0c7f5, 0x36338cc1, 0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb, 0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5, 0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d, 0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6, 0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23, 0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003, 0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6, 0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119, 0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24, 0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a, 0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79, 0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df, 0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26, 0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab, 0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7, 0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417, 0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2, 0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2, 0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a, 0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919, 0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef, 0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876, 0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab, 0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04, 0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282, 0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2 }; static const u32 s5[256] = { 0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f, 0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a, 0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff, 0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02, 0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a, 0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 
0xd7e07156, 0x4e29fea7, 0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9, 0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981, 0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774, 0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655, 0x911e739a, 0x17af8975, 0x32c7911c, 0x89f89468, 0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2, 0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910, 0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1, 0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da, 0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049, 0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f, 0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba, 0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be, 0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3, 0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840, 0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4, 0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2, 0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7, 0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5, 0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e, 0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e, 0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801, 0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad, 0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0, 0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20, 0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8, 0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4 }; static const u32 s6[256] = { 0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac, 0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138, 0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367, 0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98, 0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072, 0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3, 0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd, 0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8, 0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9, 0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54, 0xceb2296f, 0x53c0843a, 0xfe893655, 
0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387, 0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc, 0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf, 0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf, 0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 0x7b6e27ff, 0xa8dc8af0, 0x7345c106, 0xf41e232f, 0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289, 0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950, 0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f, 0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b, 0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be, 0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13, 0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976, 0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0, 0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891, 0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da, 0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc, 0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084, 0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25, 0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121, 0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5, 0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd, 0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f }; static const u32 s7[256] = { 0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f, 0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de, 0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43, 0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19, 0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2, 0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516, 0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88, 0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816, 0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756, 0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a, 0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264, 0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688, 0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28, 0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3, 0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7, 
0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06, 0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033, 0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a, 0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 0x4eeb8476, 0x488dcf25, 0x36c9d566, 0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509, 0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962, 0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e, 0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c, 0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c, 0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285, 0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301, 0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be, 0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767, 0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647, 0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914, 0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c, 0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3 }; static const u32 sb8[256] = { 0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5, 0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc, 0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd, 0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d, 0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2, 0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862, 0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc, 0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c, 0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e, 0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039, 0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8, 0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42, 0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5, 0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472, 0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225, 0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c, 0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb, 0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054, 0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70, 0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 
0x79d130a4, 0x3486ebfb, 0x33d3cddc, 0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c, 0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3, 0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 0xb48a2465, 0x2eda7fa4, 0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101, 0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f, 0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e, 0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a, 0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c, 0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384, 0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c, 0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82, 0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e }; #define F1(D, m, r) ((I = ((m) + (D))), (I = rol32(I, (r))), \ (((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff])) #define F2(D, m, r) ((I = ((m) ^ (D))), (I = rol32(I, (r))), \ (((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff])) #define F3(D, m, r) ((I = ((m) - (D))), (I = rol32(I, (r))), \ (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff])) static void cast5_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) { struct cast5_ctx *c = crypto_tfm_ctx(tfm); const __be32 *src = (const __be32 *)inbuf; __be32 *dst = (__be32 *)outbuf; u32 l, r, t; u32 I; /* used by the Fx macros */ u32 *Km; u8 *Kr; Km = c->Km; Kr = c->Kr; /* (L0,R0) <-- (m1...m64). (Split the plaintext into left and * right 32-bit halves L0 = m1...m32 and R0 = m33...m64.) */ l = be32_to_cpu(src[0]); r = be32_to_cpu(src[1]); /* (16 rounds) for i from 1 to 16, compute Li and Ri as follows: * Li = Ri-1; * Ri = Li-1 ^ f(Ri-1,Kmi,Kri), where f is defined in Section 2.2 * Rounds 1, 4, 7, 10, 13, and 16 use f function Type 1. * Rounds 2, 5, 8, 11, and 14 use f function Type 2. * Rounds 3, 6, 9, 12, and 15 use f function Type 3. */ t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]); t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]); t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]); t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]); t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]); t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]); t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]); t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]); t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]); t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]); t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]); if (!(c->rr)) { t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]); t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]); t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]); t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]); } /* c1...c64 <-- (R16,L16). (Exchange final blocks L16, R16 and * concatenate to form the ciphertext.) 
*/ dst[0] = cpu_to_be32(r); dst[1] = cpu_to_be32(l); } static void cast5_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) { struct cast5_ctx *c = crypto_tfm_ctx(tfm); const __be32 *src = (const __be32 *)inbuf; __be32 *dst = (__be32 *)outbuf; u32 l, r, t; u32 I; u32 *Km; u8 *Kr; Km = c->Km; Kr = c->Kr; l = be32_to_cpu(src[0]); r = be32_to_cpu(src[1]); if (!(c->rr)) { t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]); t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]); t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]); t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]); } t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]); t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]); t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]); t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]); t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]); t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]); t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]); t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]); t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]); t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]); t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]); t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); dst[0] = cpu_to_be32(r); dst[1] = cpu_to_be32(l); } static void key_schedule(u32 *x, u32 *z, u32 *k) { #define xi(i) ((x[(i)/4] >> (8*(3-((i)%4)))) & 0xff) #define zi(i) ((z[(i)/4] >> (8*(3-((i)%4)))) & 0xff) z[0] = x[0] ^ s5[xi(13)] ^ s6[xi(15)] ^ s7[xi(12)] ^ sb8[xi(14)] ^ s7[xi(8)]; z[1] = x[2] ^ s5[zi(0)] ^ s6[zi(2)] ^ s7[zi(1)] ^ sb8[zi(3)] ^ sb8[xi(10)]; z[2] = x[3] ^ s5[zi(7)] ^ s6[zi(6)] ^ s7[zi(5)] ^ sb8[zi(4)] ^ s5[xi(9)]; z[3] = x[1] ^ s5[zi(10)] ^ s6[zi(9)] ^ s7[zi(11)] ^ sb8[zi(8)] ^ s6[xi(11)]; k[0] = s5[zi(8)] ^ s6[zi(9)] ^ s7[zi(7)] ^ sb8[zi(6)] ^ s5[zi(2)]; k[1] = s5[zi(10)] ^ s6[zi(11)] ^ s7[zi(5)] ^ sb8[zi(4)] ^ s6[zi(6)]; k[2] = s5[zi(12)] ^ s6[zi(13)] ^ s7[zi(3)] ^ sb8[zi(2)] ^ s7[zi(9)]; k[3] = s5[zi(14)] ^ s6[zi(15)] ^ s7[zi(1)] ^ sb8[zi(0)] ^ sb8[zi(12)]; x[0] = z[2] ^ s5[zi(5)] ^ s6[zi(7)] ^ s7[zi(4)] ^ sb8[zi(6)] ^ s7[zi(0)]; x[1] = z[0] ^ s5[xi(0)] ^ s6[xi(2)] ^ s7[xi(1)] ^ sb8[xi(3)] ^ sb8[zi(2)]; x[2] = z[1] ^ s5[xi(7)] ^ s6[xi(6)] ^ s7[xi(5)] ^ sb8[xi(4)] ^ s5[zi(1)]; x[3] = z[3] ^ s5[xi(10)] ^ s6[xi(9)] ^ s7[xi(11)] ^ sb8[xi(8)] ^ s6[zi(3)]; k[4] = s5[xi(3)] ^ s6[xi(2)] ^ s7[xi(12)] ^ sb8[xi(13)] ^ s5[xi(8)]; k[5] = s5[xi(1)] ^ s6[xi(0)] ^ s7[xi(14)] ^ sb8[xi(15)] ^ s6[xi(13)]; k[6] = s5[xi(7)] ^ s6[xi(6)] ^ s7[xi(8)] ^ sb8[xi(9)] ^ s7[xi(3)]; k[7] = s5[xi(5)] ^ s6[xi(4)] ^ s7[xi(10)] ^ sb8[xi(11)] ^ sb8[xi(7)]; z[0] = x[0] ^ s5[xi(13)] ^ s6[xi(15)] ^ s7[xi(12)] ^ sb8[xi(14)] ^ s7[xi(8)]; z[1] = x[2] ^ s5[zi(0)] ^ s6[zi(2)] ^ s7[zi(1)] ^ sb8[zi(3)] ^ sb8[xi(10)]; z[2] = x[3] ^ s5[zi(7)] ^ s6[zi(6)] ^ s7[zi(5)] ^ sb8[zi(4)] ^ s5[xi(9)]; z[3] = x[1] ^ s5[zi(10)] ^ s6[zi(9)] ^ s7[zi(11)] ^ sb8[zi(8)] ^ s6[xi(11)]; k[8] = s5[zi(3)] ^ s6[zi(2)] ^ s7[zi(12)] ^ sb8[zi(13)] ^ s5[zi(9)]; k[9] = s5[zi(1)] ^ s6[zi(0)] ^ s7[zi(14)] ^ sb8[zi(15)] ^ s6[zi(12)]; k[10] = s5[zi(7)] ^ s6[zi(6)] ^ s7[zi(8)] ^ sb8[zi(9)] ^ s7[zi(2)]; k[11] = s5[zi(5)] ^ s6[zi(4)] ^ s7[zi(10)] ^ sb8[zi(11)] ^ sb8[zi(6)]; x[0] = z[2] ^ s5[zi(5)] ^ s6[zi(7)] ^ s7[zi(4)] ^ sb8[zi(6)] ^ s7[zi(0)]; x[1] = z[0] ^ s5[xi(0)] ^ s6[xi(2)] ^ s7[xi(1)] ^ sb8[xi(3)] ^ sb8[zi(2)]; x[2] = z[1] ^ s5[xi(7)] ^ s6[xi(6)] ^ s7[xi(5)] ^ sb8[xi(4)] ^ s5[zi(1)]; x[3] = z[3] ^ s5[xi(10)] ^ s6[xi(9)] ^ s7[xi(11)] ^ sb8[xi(8)] ^ s6[zi(3)]; k[12] = s5[xi(8)] ^ s6[xi(9)] ^ s7[xi(7)] ^ sb8[xi(6)] ^ s5[xi(3)]; k[13] = s5[xi(10)] ^ s6[xi(11)] ^ s7[xi(5)] ^ sb8[xi(4)] ^ s6[xi(7)]; k[14] = s5[xi(12)] ^ s6[xi(13)] ^ s7[xi(3)] ^ sb8[xi(2)] ^ s7[xi(8)]; k[15] = 
s5[xi(14)] ^ s6[xi(15)] ^ s7[xi(1)] ^ sb8[xi(0)] ^ sb8[xi(13)]; #undef xi #undef zi } static int cast5_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned key_len) { struct cast5_ctx *c = crypto_tfm_ctx(tfm); int i; u32 x[4]; u32 z[4]; u32 k[16]; __be32 p_key[4]; c->rr = key_len <= 10 ? 1 : 0; memset(p_key, 0, 16); memcpy(p_key, key, key_len); x[0] = be32_to_cpu(p_key[0]); x[1] = be32_to_cpu(p_key[1]); x[2] = be32_to_cpu(p_key[2]); x[3] = be32_to_cpu(p_key[3]); key_schedule(x, z, k); for (i = 0; i < 16; i++) c->Km[i] = k[i]; key_schedule(x, z, k); for (i = 0; i < 16; i++) c->Kr[i] = k[i] & 0x1f; return 0; } static struct crypto_alg alg = { .cra_name = "cast5", .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = CAST5_BLOCK_SIZE, .cra_ctxsize = sizeof(struct cast5_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(alg.cra_list), .cra_u = { .cipher = { .cia_min_keysize = CAST5_MIN_KEY_SIZE, .cia_max_keysize = CAST5_MAX_KEY_SIZE, .cia_setkey = cast5_setkey, .cia_encrypt = cast5_encrypt, .cia_decrypt = cast5_decrypt } } }; static int __init cast5_mod_init(void) { return crypto_register_alg(&alg); } static void __exit cast5_mod_fini(void) { crypto_unregister_alg(&alg); } module_init(cast5_mod_init); module_exit(cast5_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Cast5 Cipher Algorithm");
license: gpl-2.0
repo_name: GeyerA/android_kernel_samsung_tuna
path: drivers/ide/opti621.c
copies: 9218
size: 4592
content:
/*
 * Copyright (C) 1996-1998 Linus Torvalds & authors (see below)
 */

/*
 * Authors:
 * Jaromir Koutek <miri@punknet.cz>,
 * Jan Harkes <jaharkes@cwi.nl>,
 * Mark Lord <mlord@pobox.com>
 * Some parts of code are from ali14xx.c and from rz1000.c.
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/ide.h>

#include <asm/io.h>

#define DRV_NAME "opti621"

#define READ_REG 0  /* index of Read cycle timing register */
#define WRITE_REG 1 /* index of Write cycle timing register */
#define CNTRL_REG 3 /* index of Control register */
#define STRAP_REG 5 /* index of Strap register */
#define MISC_REG 6  /* index of Miscellaneous register */

static int reg_base;

static DEFINE_SPINLOCK(opti621_lock);

/* Write value to register reg, base of register
 * is at reg_base (0x1f0 primary, 0x170 secondary,
 * if not changed by PCI configuration).
 * This is from setupvic.exe program.
 */
static void write_reg(u8 value, int reg)
{
        inw(reg_base + 1);
        inw(reg_base + 1);
        outb(3, reg_base + 2);
        outb(value, reg_base + reg);
        outb(0x83, reg_base + 2);
}

/* Read value from register reg, base of register
 * is at reg_base (0x1f0 primary, 0x170 secondary,
 * if not changed by PCI configuration).
 * This is from setupvic.exe program.
 */
static u8 read_reg(int reg)
{
        u8 ret = 0;

        inw(reg_base + 1);
        inw(reg_base + 1);
        outb(3, reg_base + 2);
        ret = inb(reg_base + reg);
        outb(0x83, reg_base + 2);

        return ret;
}

static void opti621_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
        ide_drive_t *pair = ide_get_pair_dev(drive);
        unsigned long flags;
        unsigned long mode = drive->pio_mode, pair_mode;
        const u8 pio = mode - XFER_PIO_0;
        u8 tim, misc, addr_pio = pio, clk;

        /* DRDY is default 2 (by OPTi Databook) */
        static const u8 addr_timings[2][5] = {
                { 0x20, 0x10, 0x00, 0x00, 0x00 },   /* 33 MHz */
                { 0x10, 0x10, 0x00, 0x00, 0x00 },   /* 25 MHz */
        };
        static const u8 data_rec_timings[2][5] = {
                { 0x5b, 0x45, 0x32, 0x21, 0x20 },   /* 33 MHz */
                { 0x48, 0x34, 0x21, 0x10, 0x10 }    /* 25 MHz */
        };

        ide_set_drivedata(drive, (void *)mode);

        if (pair) {
                pair_mode = (unsigned long)ide_get_drivedata(pair);
                if (pair_mode && pair_mode < mode)
                        addr_pio = pair_mode - XFER_PIO_0;
        }

        spin_lock_irqsave(&opti621_lock, flags);

        reg_base = hwif->io_ports.data_addr;

        /* allow Register-B */
        outb(0xc0, reg_base + CNTRL_REG);
        /* hmm, setupvic.exe does this ;-) */
        outb(0xff, reg_base + 5);
        /* if reads 0xff, adapter not exist? */
        (void)inb(reg_base + CNTRL_REG);
        /* if reads 0xc0, no interface exist? */
        read_reg(CNTRL_REG);

        /* check CLK speed */
        clk = read_reg(STRAP_REG) & 1;

        printk(KERN_INFO "%s: CLK = %d MHz\n", hwif->name, clk ? 25 : 33);

        tim = data_rec_timings[clk][pio];
        misc = addr_timings[clk][addr_pio];

        /* select Index-0/1 for Register-A/B */
        write_reg(drive->dn & 1, MISC_REG);
        /* set read cycle timings */
        write_reg(tim, READ_REG);
        /* set write cycle timings */
        write_reg(tim, WRITE_REG);

        /* use Register-A for drive 0 */
        /* use Register-B for drive 1 */
        write_reg(0x85, CNTRL_REG);

        /* set address setup, DRDY timings, */
        /* and read prefetch for both drives */
        write_reg(misc, MISC_REG);

        spin_unlock_irqrestore(&opti621_lock, flags);
}

static const struct ide_port_ops opti621_port_ops = {
        .set_pio_mode = opti621_set_pio_mode,
};

static const struct ide_port_info opti621_chipset __devinitdata = {
        .name = DRV_NAME,
        .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
        .port_ops = &opti621_port_ops,
        .host_flags = IDE_HFLAG_NO_DMA,
        .pio_mask = ATA_PIO4,
};

static int __devinit opti621_init_one(struct pci_dev *dev,
                                      const struct pci_device_id *id)
{
        return ide_pci_init_one(dev, &opti621_chipset, NULL);
}

static const struct pci_device_id opti621_pci_tbl[] = {
        { PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C621), 0 },
        { PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C825), 0 },
        { 0, },
};
MODULE_DEVICE_TABLE(pci, opti621_pci_tbl);

static struct pci_driver opti621_pci_driver = {
        .name = "Opti621_IDE",
        .id_table = opti621_pci_tbl,
        .probe = opti621_init_one,
        .remove = ide_pci_remove,
        .suspend = ide_pci_suspend,
        .resume = ide_pci_resume,
};

static int __init opti621_ide_init(void)
{
        return ide_pci_register_driver(&opti621_pci_driver);
}

static void __exit opti621_ide_exit(void)
{
        pci_unregister_driver(&opti621_pci_driver);
}

module_init(opti621_ide_init);
module_exit(opti621_ide_exit);

MODULE_AUTHOR("Jaromir Koutek, Jan Harkes, Mark Lord");
MODULE_DESCRIPTION("PCI driver module for Opti621 IDE");
MODULE_LICENSE("GPL");
gpl-2.0
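The timing setup in opti621_set_pio_mode() above has one subtlety: the read/write cycle timing follows the drive's own PIO mode, but the address-setup byte is shared, so it must be derated to the slower drive of a master/slave pair. A small host-side sketch of that table lookup, with the timing tables copied verbatim from the driver and the clk/pio inputs chosen arbitrarily:

#include <stdio.h>

static const unsigned char addr_timings[2][5] = {
	{ 0x20, 0x10, 0x00, 0x00, 0x00 },	/* 33 MHz */
	{ 0x10, 0x10, 0x00, 0x00, 0x00 },	/* 25 MHz */
};
static const unsigned char data_rec_timings[2][5] = {
	{ 0x5b, 0x45, 0x32, 0x21, 0x20 },	/* 33 MHz */
	{ 0x48, 0x34, 0x21, 0x10, 0x10 }	/* 25 MHz */
};

int main(void)
{
	int clk = 0;		/* strap bit: 0 = 33 MHz row, 1 = 25 MHz row */
	int pio = 4;		/* this drive's PIO mode */
	int pair_pio = 2;	/* slower drive on the same cable */
	int addr_pio = pair_pio < pio ? pair_pio : pio;

	printf("tim=0x%02x misc=0x%02x\n",
	       data_rec_timings[clk][pio], addr_timings[clk][addr_pio]);
	return 0;
}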
StarKissed/starkissed-kernel-ardbeg
arch/cris/arch-v10/lib/memset.c
27906
7459
/* A memset for CRIS. Copyright (C) 1999-2005 Axis Communications. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Neither the name of Axis Communications nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY AXIS COMMUNICATIONS AND ITS CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AXIS COMMUNICATIONS OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* FIXME: This file should really only be used for reference, as the result is somewhat depending on gcc generating what we expect rather than what we describe. An assembly file should be used instead. */ /* Note the multiple occurrence of the expression "12*4", including the asm. It is hard to get it into the asm in a good way. Thus better to expose the problem everywhere: no macro. */ /* Assuming one cycle per dword written or read (ok, not really true; the world is not ideal), and one cycle per instruction, then 43+3*(n/48-1) <= 24+24*(n/48-1) so n >= 45.7; n >= 0.9; we win on the first full 48-byte block to set. */ #define MEMSET_BY_BLOCK_THRESHOLD (1 * 48) /* No name ambiguities in this file. */ __asm__ (".syntax no_register_prefix"); void *memset(void *pdst, int c, unsigned int plen) { /* Now we want the parameters in special registers. Make sure the compiler does something usable with this. */ register char *return_dst __asm__ ("r10") = pdst; register int n __asm__ ("r12") = plen; register int lc __asm__ ("r11") = c; /* Most apps use memset sanely. Memsetting about 3..4 bytes or less get penalized here compared to the generic implementation. */ /* This is fragile performancewise at best. Check with newer GCC releases, if they compile cascaded "x |= x << 8" to sane code. */ __asm__("movu.b %0,r13 \n\ lslq 8,r13 \n\ move.b %0,r13 \n\ move.d r13,%0 \n\ lslq 16,r13 \n\ or.d r13,%0" : "=r" (lc) /* Inputs. */ : "0" (lc) /* Outputs. */ : "r13"); /* Trash. */ { register char *dst __asm__ ("r13") = pdst; if (((unsigned long) pdst & 3) != 0 /* Oops! n = 0 must be a valid call, regardless of alignment. */ && n >= 3) { if ((unsigned long) dst & 1) { *dst = (char) lc; n--; dst++; } if ((unsigned long) dst & 2) { *(short *) dst = lc; n -= 2; dst += 2; } } /* Decide which setting method to use. */ if (n >= MEMSET_BY_BLOCK_THRESHOLD) { /* It is not optimal to tell the compiler about clobbering any registers; that will move the saving/restoring of those registers to the function prologue/epilogue, and make non-block sizes suboptimal. */ __asm__ volatile ("\ ;; GCC does promise correct register allocations, but let's \n\ ;; make sure it keeps its promises. 
\n\ .ifnc %0-%1-%4,$r13-$r12-$r11 \n\ .error \"GCC reg alloc bug: %0-%1-%4 != $r13-$r12-$r11\" \n\ .endif \n\ \n\ ;; Save the registers we'll clobber in the movem process \n\ ;; on the stack. Don't mention them to gcc, it will only be \n\ ;; upset. \n\ subq 11*4,sp \n\ movem r10,[sp] \n\ \n\ move.d r11,r0 \n\ move.d r11,r1 \n\ move.d r11,r2 \n\ move.d r11,r3 \n\ move.d r11,r4 \n\ move.d r11,r5 \n\ move.d r11,r6 \n\ move.d r11,r7 \n\ move.d r11,r8 \n\ move.d r11,r9 \n\ move.d r11,r10 \n\ \n\ ;; Now we've got this: \n\ ;; r13 - dst \n\ ;; r12 - n \n\ \n\ ;; Update n for the first loop \n\ subq 12*4,r12 \n\ 0: \n\ " #ifdef __arch_common_v10_v32 /* Cater to branch offset difference between v32 and v10. We assume the branch below has an 8-bit offset. */ " setf\n" #endif " subq 12*4,r12 \n\ bge 0b \n\ movem r11,[r13+] \n\ \n\ ;; Compensate for last loop underflowing n. \n\ addq 12*4,r12 \n\ \n\ ;; Restore registers from stack. \n\ movem [sp+],r10" /* Outputs. */ : "=r" (dst), "=r" (n) /* Inputs. */ : "0" (dst), "1" (n), "r" (lc)); } /* An ad-hoc unroll, used for 4*12-1..16 bytes. */ while (n >= 16) { *(long *) dst = lc; dst += 4; *(long *) dst = lc; dst += 4; *(long *) dst = lc; dst += 4; *(long *) dst = lc; dst += 4; n -= 16; } switch (n) { case 0: break; case 1: *dst = (char) lc; break; case 2: *(short *) dst = (short) lc; break; case 3: *(short *) dst = (short) lc; dst += 2; *dst = (char) lc; break; case 4: *(long *) dst = lc; break; case 5: *(long *) dst = lc; dst += 4; *dst = (char) lc; break; case 6: *(long *) dst = lc; dst += 4; *(short *) dst = (short) lc; break; case 7: *(long *) dst = lc; dst += 4; *(short *) dst = (short) lc; dst += 2; *dst = (char) lc; break; case 8: *(long *) dst = lc; dst += 4; *(long *) dst = lc; break; case 9: *(long *) dst = lc; dst += 4; *(long *) dst = lc; dst += 4; *dst = (char) lc; break; case 10: *(long *) dst = lc; dst += 4; *(long *) dst = lc; dst += 4; *(short *) dst = (short) lc; break; case 11: *(long *) dst = lc; dst += 4; *(long *) dst = lc; dst += 4; *(short *) dst = (short) lc; dst += 2; *dst = (char) lc; break; case 12: *(long *) dst = lc; dst += 4; *(long *) dst = lc; dst += 4; *(long *) dst = lc; break; case 13: *(long *) dst = lc; dst += 4; *(long *) dst = lc; dst += 4; *(long *) dst = lc; dst += 4; *dst = (char) lc; break; case 14: *(long *) dst = lc; dst += 4; *(long *) dst = lc; dst += 4; *(long *) dst = lc; dst += 4; *(short *) dst = (short) lc; break; case 15: *(long *) dst = lc; dst += 4; *(long *) dst = lc; dst += 4; *(long *) dst = lc; dst += 4; *(short *) dst = (short) lc; dst += 2; *dst = (char) lc; break; } } return return_dst; }
gpl-2.0
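For reference, a portable C sketch of the strategy the CRIS assembly above implements: widen the fill byte to a 32-bit word with the cascaded shift-or, fix up the unaligned head with byte stores, then fill by whole words and finish the sub-word tail. This illustrates the approach only, not a drop-in replacement (the real routine stores 12 words per movem iteration and skips alignment when n < 3):

#include <stddef.h>
#include <stdint.h>

void *memset_sketch(void *pdst, int c, size_t n)
{
	unsigned char *dst = pdst;
	uint32_t lc = (unsigned char)c;

	lc |= lc << 8;		/* the cascaded "x |= x << 8" widening */
	lc |= lc << 16;

	/* align to a 4-byte boundary, mirroring the &1 / &2 head fixup */
	while (n > 0 && ((uintptr_t)dst & 3)) {
		*dst++ = (unsigned char)lc;
		n--;
	}
	while (n >= 4) {	/* whole-word stores */
		*(uint32_t *)dst = lc;
		dst += 4;
		n -= 4;
	}
	while (n--)		/* tail bytes */
		*dst++ = (unsigned char)lc;
	return pdst;
}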
teamfx/openjfx-8u-dev-rt
modules/web/src/main/native/Source/WebCore/html/forms/FileIconLoader.cpp
3
2005
/* * Copyright (C) 2007-2017 Apple Inc. All rights reserved. * Copyright (C) 2011 Google Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of Apple Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "config.h" #include "FileIconLoader.h" #include "Icon.h" namespace WebCore { void FileIconLoader::invalidate() { ASSERT(m_client); m_client = nullptr; } void FileIconLoader::iconLoaded(RefPtr<Icon>&& icon) { if (m_client) m_client->iconLoaded(WTFMove(icon)); } FileIconLoader::FileIconLoader(FileIconLoaderClient& client) : m_client(&client) { } }
gpl-2.0
Arkapravo/Player-3.0.2
server/drivers/planner/wavefront/plan_control.c
3
7533
/* * Player - One Hell of a Robot Server * Copyright (C) 2003 * Andrew Howard * Brian Gerkey * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <stdlib.h> #include <stdio.h> #include <math.h> #include <assert.h> #include "plan.h" #if !defined (M_PI) #include <libplayercommon/playercommon.h> #endif static double _plan_check_path(plan_t* plan, plan_cell_t* s, plan_cell_t* g); static double _angle_diff(double a, double b); int plan_check_done(plan_t* plan, double lx, double ly, double la, double gx, double gy, double ga, double goal_d, double goal_a) { double dt, da; dt = sqrt((gx-lx)*(gx-lx) + (gy-ly)*(gy-ly)); da = fabs(_angle_diff(ga,la)); if((dt < goal_d) && (da < goal_a)) return(1); else return(0); } int plan_compute_diffdrive_cmds(plan_t* plan, double* vx, double *va, int* rotate_dir, double lx, double ly, double la, double gx, double gy, double ga, double goal_d, double goal_a, double maxd, double dweight, double tvmin, double tvmax, double avmin, double avmax, double amin, double amax) { double cx, cy; double d,b,a,ad; //puts("*******plan_compute_diffdrive_cmds************"); // Are we at the goal? if(plan_check_done(plan,lx,ly,la,gx,gy,ga,goal_d,goal_a)) { *vx = 0.0; *va = 0.0; return(0); } // Are we on top of the goal? 
d = sqrt((gx-lx)*(gx-lx)+(gy-ly)*(gy-ly)); //printf("d: %.3f\n", d); if(d < goal_d) { ad = _angle_diff(ga,la); if(!*rotate_dir) { if(ad < 0) *rotate_dir = -1; else *rotate_dir = 1; } *vx = 0.0; *va = *rotate_dir * (avmin + (fabs(ad)/M_PI) * (avmax-avmin)); //printf("on top; vx:%.3f va: %.3f\n", *vx, *va); return(0); } *rotate_dir = 0; // We're away from the goal; compute velocities if(plan_get_carrot(plan, &cx, &cy, lx, ly, maxd, dweight) < 0.0) { //puts("no carrot"); return(-1); } d = sqrt((lx-cx)*(lx-cx) + (ly-cy)*(ly-cy)); b = atan2(cy - ly, cx - lx); a = amin + (d / maxd) * (amax-amin); //printf("a: %.3f\n", a*180.0/M_PI); ad = _angle_diff(b,la); //printf("ad: %.3f\n", ad*180.0/M_PI); if(fabs(ad) > a) *vx = 0.0; else *vx = tvmin + (d / maxd) * (tvmax-tvmin); *va = avmin + (fabs(ad)/M_PI) * (avmax-avmin); if(ad < 0) *va = -*va; //printf("away; vx:%.3f va: %.3f\n", *vx, *va); return(0); } double plan_get_carrot(plan_t* plan, double* px, double* py, double lx, double ly, double maxdist, double distweight) { plan_cell_t* cell, *ncell; int li, lj; double dist, d; double cost, bestcost; char old_occ_state; float old_occ_dist; li = PLAN_GXWX(plan, lx); lj = PLAN_GYWY(plan, ly); cell = plan->cells + PLAN_INDEX(plan,li,lj); // Latch and clear the obstacle state for the cell I'm in cell = plan->cells + PLAN_INDEX(plan, li, lj); old_occ_state = cell->occ_state_dyn; old_occ_dist = cell->occ_dist_dyn; cell->occ_state_dyn = -1; cell->occ_dist_dyn = (float) (plan->max_radius); // Step back from maxdist, looking for the best carrot bestcost = -1.0; for(dist = maxdist; dist >= plan->scale; dist -= plan->scale) { // Find a point the required distance ahead, following the cost gradient d=plan->scale; for(ncell = cell; (ncell->plan_next && (d < dist)); ncell = ncell->plan_next, d+=plan->scale); // Check whether the straight-line path is clear if((cost = _plan_check_path(plan, cell, ncell)) < 0.0) { //printf("no path from (%d,%d) to (%d,%d)\n", //cell->ci, cell->cj, ncell->ci, ncell->cj); continue; } // Weight distance cost += distweight * (1.0/(dist*dist)); if((bestcost < 0.0) || (cost < bestcost)) { bestcost = cost; *px = PLAN_WXGX(plan,ncell->ci); *py = PLAN_WYGY(plan,ncell->cj); } } // Restore the obstacle state for the cell I'm in cell = plan->cells + PLAN_INDEX(plan, li, lj); cell->occ_state_dyn = old_occ_state; cell->occ_dist_dyn = old_occ_dist; return(bestcost); } static double _plan_check_path(plan_t* plan, plan_cell_t* s, plan_cell_t* g) { // Bresenham raytracing int x0,x1,y0,y1; int x,y; int xstep, ystep; char steep; int tmp; int deltax, deltay, error, deltaerr; int obscost=0; x0 = s->ci; y0 = s->cj; x1 = g->ci; y1 = g->cj; if(abs(y1-y0) > abs(x1-x0)) steep = 1; else steep = 0; if(steep) { tmp = x0; x0 = y0; y0 = tmp; tmp = x1; x1 = y1; y1 = tmp; } deltax = abs(x1-x0); deltay = abs(y1-y0); error = 0; deltaerr = deltay; x = x0; y = y0; if(x0 < x1) xstep = 1; else xstep = -1; if(y0 < y1) ystep = 1; else ystep = -1; if(steep) { if(plan->cells[PLAN_INDEX(plan,y,x)].occ_dist_dyn < plan->abs_min_radius) return -1; else if(plan->cells[PLAN_INDEX(plan,y,x)].occ_dist_dyn < plan->max_radius) obscost += (int) (plan->dist_penalty * (plan->max_radius - plan->cells[PLAN_INDEX(plan,y,x)].occ_dist_dyn)); } else { if(plan->cells[PLAN_INDEX(plan,x,y)].occ_dist_dyn < plan->abs_min_radius) return -1; else if(plan->cells[PLAN_INDEX(plan,x,y)].occ_dist_dyn < plan->max_radius) obscost += (int) (plan->dist_penalty * (plan->max_radius - plan->cells[PLAN_INDEX(plan,x,y)].occ_dist_dyn)); } while(x != (x1 + xstep * 1)) { x 
+= xstep; error += deltaerr; if(2*error >= deltax) { y += ystep; error -= deltax; } if(steep) { if(plan->cells[PLAN_INDEX(plan,y,x)].occ_dist_dyn < plan->abs_min_radius) return -1; else if(plan->cells[PLAN_INDEX(plan,y,x)].occ_dist_dyn < plan->max_radius) obscost += (int) (plan->dist_penalty * (plan->max_radius - plan->cells[PLAN_INDEX(plan,y,x)].occ_dist_dyn)); } else { if(plan->cells[PLAN_INDEX(plan,x,y)].occ_dist_dyn < plan->abs_min_radius) return -1; else if(plan->cells[PLAN_INDEX(plan,x,y)].occ_dist_dyn < plan->max_radius) obscost += (int) (plan->dist_penalty * (plan->max_radius - plan->cells[PLAN_INDEX(plan,x,y)].occ_dist_dyn)); } } return(obscost); } #define ANG_NORM(a) atan2(sin((a)),cos((a))) static double _angle_diff(double a, double b) { double d1, d2; a = ANG_NORM(a); b = ANG_NORM(b); d1 = a-b; d2 = 2*M_PI - fabs(d1); if(d1 > 0) d2 *= -1.0; if(fabs(d1) < fabs(d2)) return(d1); else return(d2); }
gpl-2.0
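The _angle_diff() helper above is the core of the goal-heading logic: normalize both angles into (-pi, pi], then return the signed difference of smallest magnitude so the wrap at +/-pi is handled. A standalone copy with a worked example (170 deg vs -170 deg gives -20 deg, not 340 deg):

#include <math.h>
#include <stdio.h>

#define ANG_NORM(a) atan2(sin((a)), cos((a)))

static double angle_diff(double a, double b)
{
	double d1, d2;
	a = ANG_NORM(a);
	b = ANG_NORM(b);
	d1 = a - b;
	d2 = 2 * M_PI - fabs(d1);	/* the "other way around" */
	if (d1 > 0)
		d2 *= -1.0;
	return fabs(d1) < fabs(d2) ? d1 : d2;
}

int main(void)
{
	double a = 170.0 * M_PI / 180.0, b = -170.0 * M_PI / 180.0;
	printf("%.1f deg\n", angle_diff(a, b) * 180.0 / M_PI);	/* -20.0 */
	return 0;
}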
xl0/openinkpot-u-boot-n516
cpu/arm_cortexa8/omap3/mem.c
3
8349
/* * (C) Copyright 2008 * Texas Instruments, <www.ti.com> * * Author : * Manikandan Pillai <mani.pillai@ti.com> * * Initial Code from: * Richard Woodruff <r-woodruff2@ti.com> * Syed Mohammed Khasim <khasim@ti.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA */ #include <common.h> #include <asm/io.h> #include <asm/arch/mem.h> #include <asm/arch/sys_proto.h> #include <command.h> /* * Only One NAND allowed on board at a time. * The GPMC CS Base for the same */ unsigned int boot_flash_base; unsigned int boot_flash_off; unsigned int boot_flash_sec; unsigned int boot_flash_type; volatile unsigned int boot_flash_env_addr; #if defined(CONFIG_CMD_NAND) static u32 gpmc_m_nand[GPMC_MAX_REG] = { M_NAND_GPMC_CONFIG1, M_NAND_GPMC_CONFIG2, M_NAND_GPMC_CONFIG3, M_NAND_GPMC_CONFIG4, M_NAND_GPMC_CONFIG5, M_NAND_GPMC_CONFIG6, 0 }; gpmc_csx_t *nand_cs_base; gpmc_t *gpmc_cfg_base; #if defined(CONFIG_ENV_IS_IN_NAND) #define GPMC_CS 0 #else #define GPMC_CS 1 #endif #endif #if defined(CONFIG_CMD_ONENAND) static u32 gpmc_onenand[GPMC_MAX_REG] = { ONENAND_GPMC_CONFIG1, ONENAND_GPMC_CONFIG2, ONENAND_GPMC_CONFIG3, ONENAND_GPMC_CONFIG4, ONENAND_GPMC_CONFIG5, ONENAND_GPMC_CONFIG6, 0 }; gpmc_csx_t *onenand_cs_base; #if defined(CONFIG_ENV_IS_IN_ONENAND) #define GPMC_CS 0 #else #define GPMC_CS 1 #endif #endif static sdrc_t *sdrc_base = (sdrc_t *)OMAP34XX_SDRC_BASE; /************************************************************************** * make_cs1_contiguous() - for es2 and above remap cs1 behind cs0 to allow * command line mem=xyz use all memory with out discontinuous support * compiled in. Could do it at the ATAG, but there really is two banks... * Called as part of 2nd phase DDR init. **************************************************************************/ void make_cs1_contiguous(void) { u32 size, a_add_low, a_add_high; size = get_sdr_cs_size(CS0); size /= SZ_32M; /* find size to offset CS1 */ a_add_high = (size & 3) << 8; /* set up low field */ a_add_low = (size & 0x3C) >> 2; /* set up high field */ writel((a_add_high | a_add_low), &sdrc_base->cs_cfg); } /******************************************************** * mem_ok() - test used to see if timings are correct * for a part. Helps in guessing which part * we are currently using. 
*******************************************************/ u32 mem_ok(u32 cs) { u32 val1, val2, addr; u32 pattern = 0x12345678; addr = OMAP34XX_SDRC_CS0 + get_sdr_cs_offset(cs); writel(0x0, addr + 0x400); /* clear pos A */ writel(pattern, addr); /* pattern to pos B */ writel(0x0, addr + 4); /* remove pattern off the bus */ val1 = readl(addr + 0x400); /* get pos A value */ val2 = readl(addr); /* get val2 */ if ((val1 != 0) || (val2 != pattern)) /* see if pos A val changed */ return 0; else return 1; } /******************************************************** * sdrc_init() - init the sdrc chip selects CS0 and CS1 * - early init routines, called from flash or * SRAM. *******************************************************/ void sdrc_init(void) { /* only init up first bank here */ do_sdrc_init(CS0, EARLY_INIT); } /************************************************************************* * do_sdrc_init(): initialize the SDRAM for use. * -code sets up SDRAM basic SDRC timings for CS0 * -optimal settings can be placed here, or redone after i2c * inspection of board info * * - code called once in C-Stack only context for CS0 and a possible 2nd * time depending on memory configuration from stack+global context **************************************************************************/ void do_sdrc_init(u32 cs, u32 early) { sdrc_actim_t *sdrc_actim_base; if(cs) sdrc_actim_base = (sdrc_actim_t *)SDRC_ACTIM_CTRL1_BASE; else sdrc_actim_base = (sdrc_actim_t *)SDRC_ACTIM_CTRL0_BASE; if (early) { /* reset sdrc controller */ writel(SOFTRESET, &sdrc_base->sysconfig); wait_on_value(RESETDONE, RESETDONE, &sdrc_base->status, 12000000); writel(0, &sdrc_base->sysconfig); /* setup sdrc to ball mux */ writel(SDP_SDRC_SHARING, &sdrc_base->sharing); /* Disable Power Down of CKE cuz of 1 CKE on combo part */ writel(SRFRONRESET | PAGEPOLICY_HIGH, &sdrc_base->power); writel(ENADLL | DLLPHASE_90, &sdrc_base->dlla_ctrl); sdelay(0x20000); } writel(RASWIDTH_13BITS | CASWIDTH_10BITS | ADDRMUXLEGACY | RAMSIZE_128 | BANKALLOCATION | B32NOT16 | B32NOT16 | DEEPPD | DDR_SDRAM, &sdrc_base->cs[cs].mcfg); writel(ARCV | ARE_ARCV_1, &sdrc_base->cs[cs].rfr_ctrl); writel(V_ACTIMA_165, &sdrc_actim_base->ctrla); writel(V_ACTIMB_165, &sdrc_actim_base->ctrlb); writel(CMD_NOP, &sdrc_base ->cs[cs].manual); writel(CMD_PRECHARGE, &sdrc_base->cs[cs].manual); writel(CMD_AUTOREFRESH, &sdrc_base->cs[cs].manual); writel(CMD_AUTOREFRESH, &sdrc_base->cs[cs].manual); /* * CAS latency 3, Write Burst = Read Burst, Serial Mode, * Burst length = 4 */ writel(CASL3 | BURSTLENGTH4, &sdrc_base->cs[cs].mr); if (!mem_ok(cs)) writel(0, &sdrc_base->cs[cs].mcfg); } void enable_gpmc_config(u32 *gpmc_config, gpmc_csx_t *gpmc_cs_base, u32 base, u32 size) { writel(0, &gpmc_cs_base->config7); sdelay(1000); /* Delay for settling */ writel(gpmc_config[0], &gpmc_cs_base->config1); writel(gpmc_config[1], &gpmc_cs_base->config2); writel(gpmc_config[2], &gpmc_cs_base->config3); writel(gpmc_config[3], &gpmc_cs_base->config4); writel(gpmc_config[4], &gpmc_cs_base->config5); writel(gpmc_config[5], &gpmc_cs_base->config6); /* Enable the config */ writel((((size & 0xF) << 8) | ((base >> 24) & 0x3F) | (1 << 6)), &gpmc_cs_base->config7); sdelay(2000); } /***************************************************** * gpmc_init(): init gpmc bus * Init GPMC for x16, MuxMode (SDRAM in x32). * This code can only be executed from SRAM or SDRAM. 
*****************************************************/ void gpmc_init(void) { /* putting a blanket check on GPMC based on ZeBu for now */ u32 *gpmc_config = NULL; gpmc_t *gpmc_base = (gpmc_t *)GPMC_BASE; gpmc_csx_t *gpmc_cs_base = (gpmc_csx_t *)GPMC_CONFIG_CS0_BASE; u32 base = 0; u32 size = 0; u32 f_off = CONFIG_SYS_MONITOR_LEN; u32 f_sec = 0; u32 config = 0; /* global settings */ writel(0, &gpmc_base->irqenable); /* isr's sources masked */ writel(0, &gpmc_base->timeout_control);/* timeout disable */ config = readl(&gpmc_base->config); config &= (~0xf00); writel(config, &gpmc_base->config); /* * Disable the GPMC0 config set by ROM code * It conflicts with our MPDB (both at 0x08000000) */ writel(0, &gpmc_cs_base->config7); sdelay(1000); #if defined(CONFIG_CMD_NAND) /* CS 0 */ gpmc_config = gpmc_m_nand; gpmc_cfg_base = gpmc_base; nand_cs_base = (gpmc_csx_t *)(GPMC_CONFIG_CS0_BASE + (GPMC_CS * GPMC_CONFIG_WIDTH)); base = PISMO1_NAND_BASE; size = PISMO1_NAND_SIZE; enable_gpmc_config(gpmc_config, nand_cs_base, base, size); #if defined(CONFIG_ENV_IS_IN_NAND) f_off = SMNAND_ENV_OFFSET; f_sec = SZ_128K; /* env setup */ boot_flash_base = base; boot_flash_off = f_off; boot_flash_sec = f_sec; boot_flash_env_addr = f_off; #endif #endif #if defined(CONFIG_CMD_ONENAND) gpmc_config = gpmc_onenand; onenand_cs_base = (gpmc_csx_t *)(GPMC_CONFIG_CS0_BASE + (GPMC_CS * GPMC_CONFIG_WIDTH)); base = PISMO1_ONEN_BASE; size = PISMO1_ONEN_SIZE; enable_gpmc_config(gpmc_config, onenand_cs_base, base, size); #if defined(CONFIG_ENV_IS_IN_ONENAND) f_off = ONENAND_ENV_OFFSET; f_sec = SZ_128K; /* env setup */ boot_flash_base = base; boot_flash_off = f_off; boot_flash_sec = f_sec; boot_flash_env_addr = f_off; #endif #endif }
gpl-2.0
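mem_ok() above distinguishes a present, correctly sized bank from a dead or aliased one by writing a zero and a pattern at two offsets and reading both back; if the addresses alias, the pattern write lands on the zero as well. A host-side illustration of the same probe, with a plain array standing in for the SDRC chip-select window:

#include <stdint.h>
#include <stdio.h>

static uint32_t ram[0x200];		/* fake window, word indexed */

static int mem_ok_sim(void)
{
	const uint32_t pattern = 0x12345678;

	ram[0x400 / 4] = 0;		/* clear pos A */
	ram[0] = pattern;		/* pattern to pos B */
	ram[1] = 0;			/* dummy write takes the pattern off the bus */

	/* pos A must still be zero and pos B must hold the pattern */
	return ram[0x400 / 4] == 0 && ram[0] == pattern;
}

int main(void)
{
	printf("bank %s\n", mem_ok_sim() ? "ok" : "bad");
	return 0;
}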
gdarko/CProgrammingExercises
auditoriski/aud_4/zad_30.c
3
1183
/******************************************************************************/
/* Write a program that, from an unknown number of integers entered from the */
/* keyboard, determines the positions (the order numbers of entry) of the    */
/* two consecutive numbers that have the largest sum. The program terminates */
/* when two negative integers are entered one after the other.               */
/*                                                                            */
/* Example input: 3 7 -2 5 6 -1 -4                                            */
/*                                                                            */
/* Example output: 4 5                                                        */
/******************************************************************************/
#include <stdio.h>

int main() {
	int prev, curr;
	int pos = 1;                  /* order number of the previous entry */
	int best_sum = 0, best_pos = 0;

	if (scanf("%d", &prev) != 1)
		return 0;

	while (scanf("%d", &curr) == 1) {
		/* two consecutive negative entries end the input; the
		 * terminating pair itself is not considered */
		if (prev < 0 && curr < 0)
			break;
		if (best_pos == 0 || prev + curr > best_sum) {
			best_sum = prev + curr;
			best_pos = pos;   /* position of the first of the pair */
		}
		prev = curr;
		pos++;
	}

	if (best_pos > 0)
		printf("%d %d\n", best_pos, best_pos + 1);

	return 0;
}
gpl-2.0
jaredjones/TrinityCore
src/tools/map_extractor/System.cpp
3
38852
/* * Copyright (C) 2008-2015 TrinityCore <http://www.trinitycore.org/> * Copyright (C) 2005-2011 MaNGOS <http://getmangos.com/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ #define _CRT_SECURE_NO_DEPRECATE #include <cstdio> #include <deque> #include <list> #include <set> #include <cstdlib> #include <cstring> #ifdef _WIN32 #include "direct.h" #else #include <sys/stat.h> #include <unistd.h> #define ERROR_PATH_NOT_FOUND ERROR_FILE_NOT_FOUND #endif #include <boost/filesystem/path.hpp> #include <boost/filesystem/operations.hpp> #include "DBFilesClientList.h" #include "CascLib.h" #include "dbcfile.h" #include "adt.h" #include "wdt.h" #include <fcntl.h> #if defined( __GNUC__ ) #define _open open #define _close close #ifndef O_BINARY #define O_BINARY 0 #endif #else #include <io.h> #endif #ifdef O_LARGEFILE #define OPEN_FLAGS (O_RDONLY | O_BINARY | O_LARGEFILE) #else #define OPEN_FLAGS (O_RDONLY | O_BINARY) #endif namespace { const char* HumanReadableCASCError(int error) { switch (error) { case ERROR_SUCCESS: return "SUCCESS"; case ERROR_FILE_CORRUPT: return "FILE_CORRUPT"; case ERROR_CAN_NOT_COMPLETE: return "CAN_NOT_COMPLETE"; case ERROR_HANDLE_EOF: return "HANDLE_EOF"; case ERROR_NO_MORE_FILES: return "NO_MORE_FILES"; case ERROR_BAD_FORMAT: return "BAD_FORMAT"; case ERROR_INSUFFICIENT_BUFFER: return "INSUFFICIENT_BUFFER"; case ERROR_ALREADY_EXISTS: return "ALREADY_EXISTS"; case ERROR_DISK_FULL: return "DISK_FULL"; case ERROR_INVALID_PARAMETER: return "INVALID_PARAMETER"; case ERROR_NOT_SUPPORTED: return "NOT_SUPPORTED"; case ERROR_NOT_ENOUGH_MEMORY: return "NOT_ENOUGH_MEMORY"; case ERROR_INVALID_HANDLE: return "INVALID_HANDLE"; case ERROR_ACCESS_DENIED: return "ACCESS_DENIED"; case ERROR_FILE_NOT_FOUND: return "FILE_NOT_FOUND"; default: return "UNKNOWN"; } } } HANDLE CascStorage = NULL; typedef struct { char name[64]; uint32 id; } map_id; map_id *map_ids; uint16 *areas; uint16 *LiqType; #define MAX_PATH_LENGTH 128 char output_path[MAX_PATH_LENGTH] = "."; char input_path[MAX_PATH_LENGTH] = "."; uint32 maxAreaId = 0; // ************************************************** // Extractor options // ************************************************** enum Extract { EXTRACT_MAP = 1, EXTRACT_DBC = 2 }; // Select data for extract int CONF_extract = EXTRACT_MAP | EXTRACT_DBC; // This option allow limit minimum height to some value (Allow save some memory) bool CONF_allow_height_limit = true; float CONF_use_minHeight = -500.0f; // This option allow use float to int conversion bool CONF_allow_float_to_int = true; float CONF_float_to_int8_limit = 2.0f; // Max accuracy = val/256 float CONF_float_to_int16_limit = 2048.0f; // Max accuracy = val/65536 float CONF_flat_height_delta_limit = 0.005f; // If max - min less this value - surface is flat float CONF_flat_liquid_delta_limit = 0.001f; // If max - min less this value - liquid surface is flat uint32 CONF_Locale = 0; #define LOCALES_COUNT 17 char const* Locales[LOCALES_COUNT] = { 
"none", "enUS", "koKR", "unknown", "frFR", "deDE", "zhCN", "esES", "zhTW", "enGB", "enCN", "enTW", "esMX", "ruRU", "ptBR", "itIT", "ptPT" }; void CreateDir(std::string const& path) { if (chdir(path.c_str()) == 0) { chdir("../"); return; } #ifdef _WIN32 _mkdir(path.c_str()); #else mkdir(path.c_str(), S_IRWXU | S_IRWXG | S_IRWXO); // 0777 #endif } bool FileExists(TCHAR const* fileName) { int fp = _open(fileName, OPEN_FLAGS); if(fp != -1) { _close(fp); return true; } return false; } void Usage(char const* prg) { printf( "Usage:\n"\ "%s -[var] [value]\n"\ "-i set input path (max %d characters)\n"\ "-o set output path (max %d characters)\n"\ "-e extract only MAP(1)/DBC(2) - standard: both(3)\n"\ "-f height stored as int (less map size but lost some accuracy) 1 by default\n"\ "Example: %s -f 0 -i \"c:\\games\\game\"\n", prg, MAX_PATH_LENGTH - 1, MAX_PATH_LENGTH - 1, prg); exit(1); } void HandleArgs(int argc, char* arg[]) { for (int c = 1; c < argc; ++c) { // i - input path // o - output path // e - extract only MAP(1)/DBC(2) - standard both(3) // f - use float to int conversion // h - limit minimum height // b - target client build if (arg[c][0] != '-') Usage(arg[0]); switch (arg[c][1]) { case 'i': if (c + 1 < argc && strlen(arg[c + 1]) < MAX_PATH_LENGTH) // all ok { strncpy(input_path, arg[c++ + 1], MAX_PATH_LENGTH); input_path[MAX_PATH_LENGTH - 1] = '\0'; } else Usage(arg[0]); break; case 'o': if (c + 1 < argc && strlen(arg[c + 1]) < MAX_PATH_LENGTH) // all ok { strncpy(output_path, arg[c++ + 1], MAX_PATH_LENGTH); output_path[MAX_PATH_LENGTH - 1] = '\0'; } else Usage(arg[0]); break; case 'f': if (c + 1 < argc) // all ok CONF_allow_float_to_int = atoi(arg[c++ + 1])!=0; else Usage(arg[0]); break; case 'e': if (c + 1 < argc) // all ok { CONF_extract = atoi(arg[c++ + 1]); if (!(CONF_extract > 0 && CONF_extract < 4)) Usage(arg[0]); } else Usage(arg[0]); break; case 'l': if (c + 1 < argc) // all ok { for (uint32 i = 0; i < LOCALES_COUNT; ++i) if (!strcmp(arg[c + 1], Locales[i])) CONF_Locale = 1 << i; ++c; } else Usage(arg[0]); break; case 'h': Usage(arg[0]); break; default: break; } } } uint32 ReadBuild(int locale) { // include build info file also std::string filename = std::string("component.wow-") + Locales[locale] + ".txt"; //printf("Read %s file... ", filename.c_str()); HANDLE dbcFile; if (!CascOpenFile(CascStorage, filename.c_str(), CASC_LOCALE_ALL, 0, &dbcFile)) { printf("Locale %s not installed.\n", Locales[locale]); return 0; } char buff[512]; DWORD readBytes = 0; CascReadFile(dbcFile, buff, 512, &readBytes); if (!readBytes) { printf("Fatal error: Not found %s file!\n", filename.c_str()); exit(1); } std::string text = std::string(buff, readBytes); CascCloseFile(dbcFile); size_t pos = text.find("version=\""); size_t pos1 = pos + strlen("version=\""); size_t pos2 = text.find("\"", pos1); if (pos == text.npos || pos2 == text.npos || pos1 >= pos2) { printf("Fatal error: Invalid %s file format!\n", filename.c_str()); exit(1); } std::string build_str = text.substr(pos1,pos2-pos1); int build = atoi(build_str.c_str()); if (build <= 0) { printf("Fatal error: Invalid %s file format!\n", filename.c_str()); exit(1); } return build; } uint32 ReadMapDBC() { printf("Read Map.dbc file... "); HANDLE dbcFile; if (!CascOpenFile(CascStorage, "DBFilesClient\\Map.dbc", CASC_LOCALE_NONE, 0, &dbcFile)) { printf("Fatal error: Cannot find Map.dbc in archive! 
%s\n", HumanReadableCASCError(GetLastError())); exit(1); } DBCFile dbc(dbcFile); if (!dbc.open()) { printf("Fatal error: Invalid Map.dbc file format!\n"); exit(1); } size_t map_count = dbc.getRecordCount(); map_ids = new map_id[map_count]; for(uint32 x = 0; x < map_count; ++x) { map_ids[x].id = dbc.getRecord(x).getUInt(0); const char* map_name = dbc.getRecord(x).getString(1); size_t max_map_name_length = sizeof(map_ids[x].name); if (strlen(map_name) >= max_map_name_length) { printf("Fatal error: Map name too long!\n"); exit(1); } strncpy(map_ids[x].name, map_name, max_map_name_length); map_ids[x].name[max_map_name_length - 1] = '\0'; } CascCloseFile(dbcFile); printf("Done! (%u maps loaded)\n", uint32(map_count)); return map_count; } void ReadAreaTableDBC() { printf("Read AreaTable.dbc file..."); HANDLE dbcFile; if (!CascOpenFile(CascStorage, "DBFilesClient\\AreaTable.dbc", CASC_LOCALE_NONE, 0, &dbcFile)) { printf("Fatal error: Cannot find AreaTable.dbc in archive! %s\n", HumanReadableCASCError(GetLastError())); exit(1); } DBCFile dbc(dbcFile); if(!dbc.open()) { printf("Fatal error: Invalid AreaTable.dbc file format!\n"); exit(1); } size_t area_count = dbc.getRecordCount(); maxAreaId = dbc.getMaxId(); areas = new uint16[maxAreaId + 1]; memset(areas, 0xFF, sizeof(uint16) * (maxAreaId + 1)); for (uint32 x = 0; x < area_count; ++x) areas[dbc.getRecord(x).getUInt(0)] = dbc.getRecord(x).getUInt(3); CascCloseFile(dbcFile); printf("Done! (%u areas loaded)\n", uint32(area_count)); } void ReadLiquidTypeTableDBC() { printf("Read LiquidType.dbc file..."); HANDLE dbcFile; if (!CascOpenFile(CascStorage, "DBFilesClient\\LiquidType.dbc", CASC_LOCALE_NONE, 0, &dbcFile)) { printf("Fatal error: Cannot find LiquidType.dbc in archive! %s\n", HumanReadableCASCError(GetLastError())); exit(1); } DBCFile dbc(dbcFile); if(!dbc.open()) { printf("Fatal error: Invalid LiquidType.dbc file format!\n"); exit(1); } size_t liqTypeCount = dbc.getRecordCount(); size_t liqTypeMaxId = dbc.getMaxId(); LiqType = new uint16[liqTypeMaxId + 1]; memset(LiqType, 0xff, (liqTypeMaxId + 1) * sizeof(uint16)); for(uint32 x = 0; x < liqTypeCount; ++x) LiqType[dbc.getRecord(x).getUInt(0)] = dbc.getRecord(x).getUInt(3); CascCloseFile(dbcFile); printf("Done! 
(%u LiqTypes loaded)\n", (uint32)liqTypeCount); } // // Adt file convertor function and data // // Map file format data static char const* MAP_MAGIC = "MAPS"; static char const* MAP_VERSION_MAGIC = "v1.4"; static char const* MAP_AREA_MAGIC = "AREA"; static char const* MAP_HEIGHT_MAGIC = "MHGT"; static char const* MAP_LIQUID_MAGIC = "MLIQ"; struct map_fileheader { uint32 mapMagic; uint32 versionMagic; uint32 buildMagic; uint32 areaMapOffset; uint32 areaMapSize; uint32 heightMapOffset; uint32 heightMapSize; uint32 liquidMapOffset; uint32 liquidMapSize; uint32 holesOffset; uint32 holesSize; }; #define MAP_AREA_NO_AREA 0x0001 struct map_areaHeader { uint32 fourcc; uint16 flags; uint16 gridArea; }; #define MAP_HEIGHT_NO_HEIGHT 0x0001 #define MAP_HEIGHT_AS_INT16 0x0002 #define MAP_HEIGHT_AS_INT8 0x0004 struct map_heightHeader { uint32 fourcc; uint32 flags; float gridHeight; float gridMaxHeight; }; #define MAP_LIQUID_TYPE_NO_WATER 0x00 #define MAP_LIQUID_TYPE_WATER 0x01 #define MAP_LIQUID_TYPE_OCEAN 0x02 #define MAP_LIQUID_TYPE_MAGMA 0x04 #define MAP_LIQUID_TYPE_SLIME 0x08 #define MAP_LIQUID_TYPE_DARK_WATER 0x10 #define MAP_LIQUID_TYPE_WMO_WATER 0x20 #define MAP_LIQUID_NO_TYPE 0x0001 #define MAP_LIQUID_NO_HEIGHT 0x0002 struct map_liquidHeader { uint32 fourcc; uint16 flags; uint16 liquidType; uint8 offsetX; uint8 offsetY; uint8 width; uint8 height; float liquidLevel; }; float selectUInt8StepStore(float maxDiff) { return 255 / maxDiff; } float selectUInt16StepStore(float maxDiff) { return 65535 / maxDiff; } // Temporary grid data store uint16 area_flags[ADT_CELLS_PER_GRID][ADT_CELLS_PER_GRID]; float V8[ADT_GRID_SIZE][ADT_GRID_SIZE]; float V9[ADT_GRID_SIZE+1][ADT_GRID_SIZE+1]; uint16 uint16_V8[ADT_GRID_SIZE][ADT_GRID_SIZE]; uint16 uint16_V9[ADT_GRID_SIZE+1][ADT_GRID_SIZE+1]; uint8 uint8_V8[ADT_GRID_SIZE][ADT_GRID_SIZE]; uint8 uint8_V9[ADT_GRID_SIZE+1][ADT_GRID_SIZE+1]; uint16 liquid_entry[ADT_CELLS_PER_GRID][ADT_CELLS_PER_GRID]; uint8 liquid_flags[ADT_CELLS_PER_GRID][ADT_CELLS_PER_GRID]; bool liquid_show[ADT_GRID_SIZE][ADT_GRID_SIZE]; float liquid_height[ADT_GRID_SIZE+1][ADT_GRID_SIZE+1]; uint8 holes[ADT_CELLS_PER_GRID][ADT_CELLS_PER_GRID][8]; bool TransformToHighRes(uint16 holes, uint8 hiResHoles[8]) { for (uint8 i = 0; i < 8; i++) { for (uint8 j = 0; j < 8; j++) { int32 holeIdxL = (i / 2) * 4 + (j / 2); if (((holes >> holeIdxL) & 1) == 1) hiResHoles[i] |= (1 << j); } } return *((uint64*)hiResHoles) != 0; } bool ConvertADT(char *filename, char *filename2, int /*cell_y*/, int /*cell_x*/, uint32 build) { ChunkedFile adt; if (!adt.loadFile(CascStorage, filename)) return false; // Prepare map header map_fileheader map; map.mapMagic = *(uint32 const*)MAP_MAGIC; map.versionMagic = *(uint32 const*)MAP_VERSION_MAGIC; map.buildMagic = build; // Get area flags data memset(area_flags, 0xFF, sizeof(area_flags)); memset(V9, 0, sizeof(V9)); memset(V8, 0, sizeof(V8)); memset(liquid_show, 0, sizeof(liquid_show)); memset(liquid_flags, 0, sizeof(liquid_flags)); memset(liquid_entry, 0, sizeof(liquid_entry)); memset(holes, 0, sizeof(holes)); bool hasHoles = false; for (std::multimap<std::string, FileChunk*>::const_iterator itr = adt.chunks.lower_bound("MCNK"); itr != adt.chunks.upper_bound("MCNK"); ++itr) { adt_MCNK* mcnk = itr->second->As<adt_MCNK>(); // Area data if (mcnk->areaid <= maxAreaId && areas[mcnk->areaid] != 0xFFFF) area_flags[mcnk->iy][mcnk->ix] = areas[mcnk->areaid]; // Height // Height values for triangles stored in order: // 1 2 3 4 5 6 7 8 9 // 10 11 12 13 14 15 16 17 // 18 19 20 21 22 23 24 25 26 // 
27 28 29 30 31 32 33 34 // . . . . . . . . // For better get height values merge it to V9 and V8 map // V9 height map: // 1 2 3 4 5 6 7 8 9 // 18 19 20 21 22 23 24 25 26 // . . . . . . . . // V8 height map: // 10 11 12 13 14 15 16 17 // 27 28 29 30 31 32 33 34 // . . . . . . . . // Set map height as grid height for (int y = 0; y <= ADT_CELL_SIZE; y++) { int cy = mcnk->iy * ADT_CELL_SIZE + y; for (int x = 0; x <= ADT_CELL_SIZE; x++) { int cx = mcnk->ix * ADT_CELL_SIZE + x; V9[cy][cx] = mcnk->ypos; } } for (int y = 0; y < ADT_CELL_SIZE; y++) { int cy = mcnk->iy * ADT_CELL_SIZE + y; for (int x = 0; x < ADT_CELL_SIZE; x++) { int cx = mcnk->ix * ADT_CELL_SIZE + x; V8[cy][cx] = mcnk->ypos; } } // Get custom height if (FileChunk* chunk = itr->second->GetSubChunk("MCVT")) { adt_MCVT* mcvt = chunk->As<adt_MCVT>(); // get V9 height map for (int y = 0; y <= ADT_CELL_SIZE; y++) { int cy = mcnk->iy * ADT_CELL_SIZE + y; for (int x = 0; x <= ADT_CELL_SIZE; x++) { int cx = mcnk->ix * ADT_CELL_SIZE + x; V9[cy][cx] += mcvt->height_map[y*(ADT_CELL_SIZE * 2 + 1) + x]; } } // get V8 height map for (int y = 0; y < ADT_CELL_SIZE; y++) { int cy = mcnk->iy * ADT_CELL_SIZE + y; for (int x = 0; x < ADT_CELL_SIZE; x++) { int cx = mcnk->ix * ADT_CELL_SIZE + x; V8[cy][cx] += mcvt->height_map[y*(ADT_CELL_SIZE * 2 + 1) + ADT_CELL_SIZE + 1 + x]; } } } // Liquid data if (mcnk->sizeMCLQ > 8) { if (FileChunk* chunk = itr->second->GetSubChunk("MCLQ")) { adt_MCLQ* liquid = chunk->As<adt_MCLQ>(); int count = 0; for (int y = 0; y < ADT_CELL_SIZE; ++y) { int cy = mcnk->iy * ADT_CELL_SIZE + y; for (int x = 0; x < ADT_CELL_SIZE; ++x) { int cx = mcnk->ix * ADT_CELL_SIZE + x; if (liquid->flags[y][x] != 0x0F) { liquid_show[cy][cx] = true; if (liquid->flags[y][x] & (1 << 7)) liquid_flags[mcnk->iy][mcnk->ix] |= MAP_LIQUID_TYPE_DARK_WATER; ++count; } } } uint32 c_flag = mcnk->flags; if (c_flag & (1 << 2)) { liquid_entry[mcnk->iy][mcnk->ix] = 1; liquid_flags[mcnk->iy][mcnk->ix] |= MAP_LIQUID_TYPE_WATER; // water } if (c_flag & (1 << 3)) { liquid_entry[mcnk->iy][mcnk->ix] = 2; liquid_flags[mcnk->iy][mcnk->ix] |= MAP_LIQUID_TYPE_OCEAN; // ocean } if (c_flag & (1 << 4)) { liquid_entry[mcnk->iy][mcnk->ix] = 3; liquid_flags[mcnk->iy][mcnk->ix] |= MAP_LIQUID_TYPE_MAGMA; // magma/slime } if (!count && liquid_flags[mcnk->iy][mcnk->ix]) fprintf(stderr, "Wrong liquid detect in MCLQ chunk"); for (int y = 0; y <= ADT_CELL_SIZE; ++y) { int cy = mcnk->iy * ADT_CELL_SIZE + y; for (int x = 0; x <= ADT_CELL_SIZE; ++x) { int cx = mcnk->ix * ADT_CELL_SIZE + x; liquid_height[cy][cx] = liquid->liquid[y][x].height; } } } } // Hole data if (!(mcnk->flags & 0x10000)) { if (uint16 hole = mcnk->holes) if (TransformToHighRes(hole, holes[mcnk->iy][mcnk->ix])) hasHoles = true; } else { memcpy(holes[mcnk->iy][mcnk->ix], mcnk->union_5_3_0.HighResHoles, sizeof(uint64)); if (*((uint64*)holes[mcnk->iy][mcnk->ix]) != 0) hasHoles = true; } } // Get liquid map for grid (in WOTLK used MH2O chunk) if (FileChunk* chunk = adt.GetChunk("MH2O")) { adt_MH2O* h2o = chunk->As<adt_MH2O>(); for (int i = 0; i < ADT_CELLS_PER_GRID; i++) { for (int j = 0; j < ADT_CELLS_PER_GRID; j++) { adt_liquid_header *h = h2o->getLiquidData(i, j); if (!h) continue; int count = 0; uint64 show = h2o->getLiquidShowMap(h); for (int y = 0; y < h->height; y++) { int cy = i * ADT_CELL_SIZE + y + h->yOffset; for (int x = 0; x < h->width; x++) { int cx = j * ADT_CELL_SIZE + x + h->xOffset; if (show & 1) { liquid_show[cy][cx] = true; ++count; } show >>= 1; } } liquid_entry[i][j] = h->liquidType; switch 
(LiqType[h->liquidType]) { case LIQUID_TYPE_WATER: liquid_flags[i][j] |= MAP_LIQUID_TYPE_WATER; break; case LIQUID_TYPE_OCEAN: liquid_flags[i][j] |= MAP_LIQUID_TYPE_OCEAN; break; case LIQUID_TYPE_MAGMA: liquid_flags[i][j] |= MAP_LIQUID_TYPE_MAGMA; break; case LIQUID_TYPE_SLIME: liquid_flags[i][j] |= MAP_LIQUID_TYPE_SLIME; break; default: printf("\nCan't find Liquid type %u for map %s\nchunk %d,%d\n", h->liquidType, filename, i, j); break; } // Dark water detect if (LiqType[h->liquidType] == LIQUID_TYPE_OCEAN) { uint8* lm = h2o->getLiquidLightMap(h); if (!lm) liquid_flags[i][j] |= MAP_LIQUID_TYPE_DARK_WATER; } if (!count && liquid_flags[i][j]) printf("Wrong liquid detect in MH2O chunk"); float* height = h2o->getLiquidHeightMap(h); int pos = 0; for (int y = 0; y <= h->height; y++) { int cy = i * ADT_CELL_SIZE + y + h->yOffset; for (int x = 0; x <= h->width; x++) { int cx = j * ADT_CELL_SIZE + x + h->xOffset; if (height) liquid_height[cy][cx] = height[pos]; else liquid_height[cy][cx] = h->heightLevel1; pos++; } } } } } //============================================ // Try pack area data //============================================ bool fullAreaData = false; uint32 areaflag = area_flags[0][0]; for (int y=0;y<ADT_CELLS_PER_GRID;y++) { for(int x=0;x<ADT_CELLS_PER_GRID;x++) { if(area_flags[y][x]!=areaflag) { fullAreaData = true; break; } } } map.areaMapOffset = sizeof(map); map.areaMapSize = sizeof(map_areaHeader); map_areaHeader areaHeader; areaHeader.fourcc = *(uint32 const*)MAP_AREA_MAGIC; areaHeader.flags = 0; if (fullAreaData) { areaHeader.gridArea = 0; map.areaMapSize+=sizeof(area_flags); } else { areaHeader.flags |= MAP_AREA_NO_AREA; areaHeader.gridArea = (uint16)areaflag; } //============================================ // Try pack height data //============================================ float maxHeight = -20000; float minHeight = 20000; for (int y=0; y<ADT_GRID_SIZE; y++) { for(int x=0;x<ADT_GRID_SIZE;x++) { float h = V8[y][x]; if (maxHeight < h) maxHeight = h; if (minHeight > h) minHeight = h; } } for (int y=0; y<=ADT_GRID_SIZE; y++) { for(int x=0;x<=ADT_GRID_SIZE;x++) { float h = V9[y][x]; if (maxHeight < h) maxHeight = h; if (minHeight > h) minHeight = h; } } // Check for allow limit minimum height (not store height in deep ochean - allow save some memory) if (CONF_allow_height_limit && minHeight < CONF_use_minHeight) { for (int y=0; y<ADT_GRID_SIZE; y++) for(int x=0;x<ADT_GRID_SIZE;x++) if (V8[y][x] < CONF_use_minHeight) V8[y][x] = CONF_use_minHeight; for (int y=0; y<=ADT_GRID_SIZE; y++) for(int x=0;x<=ADT_GRID_SIZE;x++) if (V9[y][x] < CONF_use_minHeight) V9[y][x] = CONF_use_minHeight; if (minHeight < CONF_use_minHeight) minHeight = CONF_use_minHeight; if (maxHeight < CONF_use_minHeight) maxHeight = CONF_use_minHeight; } map.heightMapOffset = map.areaMapOffset + map.areaMapSize; map.heightMapSize = sizeof(map_heightHeader); map_heightHeader heightHeader; heightHeader.fourcc = *(uint32 const*)MAP_HEIGHT_MAGIC; heightHeader.flags = 0; heightHeader.gridHeight = minHeight; heightHeader.gridMaxHeight = maxHeight; if (maxHeight == minHeight) heightHeader.flags |= MAP_HEIGHT_NO_HEIGHT; // Not need store if flat surface if (CONF_allow_float_to_int && (maxHeight - minHeight) < CONF_flat_height_delta_limit) heightHeader.flags |= MAP_HEIGHT_NO_HEIGHT; // Try store as packed in uint16 or uint8 values if (!(heightHeader.flags & MAP_HEIGHT_NO_HEIGHT)) { float step = 0; // Try Store as uint values if (CONF_allow_float_to_int) { float diff = maxHeight - minHeight; if (diff < 
CONF_float_to_int8_limit) // As uint8 (max accuracy = CONF_float_to_int8_limit/256) { heightHeader.flags|=MAP_HEIGHT_AS_INT8; step = selectUInt8StepStore(diff); } else if (diff<CONF_float_to_int16_limit) // As uint16 (max accuracy = CONF_float_to_int16_limit/65536) { heightHeader.flags|=MAP_HEIGHT_AS_INT16; step = selectUInt16StepStore(diff); } } // Pack it to int values if need if (heightHeader.flags&MAP_HEIGHT_AS_INT8) { for (int y=0; y<ADT_GRID_SIZE; y++) for(int x=0;x<ADT_GRID_SIZE;x++) uint8_V8[y][x] = uint8((V8[y][x] - minHeight) * step + 0.5f); for (int y=0; y<=ADT_GRID_SIZE; y++) for(int x=0;x<=ADT_GRID_SIZE;x++) uint8_V9[y][x] = uint8((V9[y][x] - minHeight) * step + 0.5f); map.heightMapSize+= sizeof(uint8_V9) + sizeof(uint8_V8); } else if (heightHeader.flags&MAP_HEIGHT_AS_INT16) { for (int y=0; y<ADT_GRID_SIZE; y++) for(int x=0;x<ADT_GRID_SIZE;x++) uint16_V8[y][x] = uint16((V8[y][x] - minHeight) * step + 0.5f); for (int y=0; y<=ADT_GRID_SIZE; y++) for(int x=0;x<=ADT_GRID_SIZE;x++) uint16_V9[y][x] = uint16((V9[y][x] - minHeight) * step + 0.5f); map.heightMapSize+= sizeof(uint16_V9) + sizeof(uint16_V8); } else map.heightMapSize+= sizeof(V9) + sizeof(V8); } //============================================ // Pack liquid data //============================================ uint8 type = liquid_flags[0][0]; bool fullType = false; for (int y = 0; y < ADT_CELLS_PER_GRID; y++) { for (int x = 0; x < ADT_CELLS_PER_GRID; x++) { if (liquid_flags[y][x] != type) { fullType = true; y = ADT_CELLS_PER_GRID; break; } } } map_liquidHeader liquidHeader; // no water data (if all grid have 0 liquid type) if (type == 0 && !fullType) { // No liquid data map.liquidMapOffset = 0; map.liquidMapSize = 0; } else { int minX = 255, minY = 255; int maxX = 0, maxY = 0; maxHeight = -20000; minHeight = 20000; for (int y=0; y<ADT_GRID_SIZE; y++) { for(int x=0; x<ADT_GRID_SIZE; x++) { if (liquid_show[y][x]) { if (minX > x) minX = x; if (maxX < x) maxX = x; if (minY > y) minY = y; if (maxY < y) maxY = y; float h = liquid_height[y][x]; if (maxHeight < h) maxHeight = h; if (minHeight > h) minHeight = h; } else liquid_height[y][x] = CONF_use_minHeight; } } map.liquidMapOffset = map.heightMapOffset + map.heightMapSize; map.liquidMapSize = sizeof(map_liquidHeader); liquidHeader.fourcc = *(uint32 const*)MAP_LIQUID_MAGIC; liquidHeader.flags = 0; liquidHeader.liquidType = 0; liquidHeader.offsetX = minX; liquidHeader.offsetY = minY; liquidHeader.width = maxX - minX + 1 + 1; liquidHeader.height = maxY - minY + 1 + 1; liquidHeader.liquidLevel = minHeight; if (maxHeight == minHeight) liquidHeader.flags |= MAP_LIQUID_NO_HEIGHT; // Not need store if flat surface if (CONF_allow_float_to_int && (maxHeight - minHeight) < CONF_flat_liquid_delta_limit) liquidHeader.flags |= MAP_LIQUID_NO_HEIGHT; if (!fullType) liquidHeader.flags |= MAP_LIQUID_NO_TYPE; if (liquidHeader.flags & MAP_LIQUID_NO_TYPE) liquidHeader.liquidType = type; else map.liquidMapSize += sizeof(liquid_entry) + sizeof(liquid_flags); if (!(liquidHeader.flags & MAP_LIQUID_NO_HEIGHT)) map.liquidMapSize += sizeof(float)*liquidHeader.width*liquidHeader.height; } if (map.liquidMapOffset) map.holesOffset = map.liquidMapOffset + map.liquidMapSize; else map.holesOffset = map.heightMapOffset + map.heightMapSize; if (hasHoles) map.holesSize = sizeof(holes); else map.holesSize = 0; // Ok all data prepared - store it FILE* output = fopen(filename2, "wb"); if (!output) { printf("Can't create the output file '%s'\n", filename2); return false; } fwrite(&map, sizeof(map), 1, output); // Store 
area data fwrite(&areaHeader, sizeof(areaHeader), 1, output); if (!(areaHeader.flags&MAP_AREA_NO_AREA)) fwrite(area_flags, sizeof(area_flags), 1, output); // Store height data fwrite(&heightHeader, sizeof(heightHeader), 1, output); if (!(heightHeader.flags & MAP_HEIGHT_NO_HEIGHT)) { if (heightHeader.flags & MAP_HEIGHT_AS_INT16) { fwrite(uint16_V9, sizeof(uint16_V9), 1, output); fwrite(uint16_V8, sizeof(uint16_V8), 1, output); } else if (heightHeader.flags & MAP_HEIGHT_AS_INT8) { fwrite(uint8_V9, sizeof(uint8_V9), 1, output); fwrite(uint8_V8, sizeof(uint8_V8), 1, output); } else { fwrite(V9, sizeof(V9), 1, output); fwrite(V8, sizeof(V8), 1, output); } } // Store liquid data if need if (map.liquidMapOffset) { fwrite(&liquidHeader, sizeof(liquidHeader), 1, output); if (!(liquidHeader.flags & MAP_LIQUID_NO_TYPE)) { fwrite(liquid_entry, sizeof(liquid_entry), 1, output); fwrite(liquid_flags, sizeof(liquid_flags), 1, output); } if (!(liquidHeader.flags & MAP_LIQUID_NO_HEIGHT)) { for (int y = 0; y < liquidHeader.height; y++) fwrite(&liquid_height[y + liquidHeader.offsetY][liquidHeader.offsetX], sizeof(float), liquidHeader.width, output); } } // store hole data if (hasHoles) fwrite(holes, map.holesSize, 1, output); fclose(output); return true; } void ExtractWmos(ChunkedFile& file, std::set<std::string>& wmoList) { if (FileChunk* chunk = file.GetChunk("MWMO")) { file_MWMO* wmo = chunk->As<file_MWMO>(); if (wmo->size) { char* fileName = wmo->FileList; while (fileName < wmo->FileList + wmo->size) { wmoList.insert(fileName); fileName += strlen(fileName) + 1; } } } } void ExtractMaps(uint32 build) { char storagePath[1024]; char output_filename[1024]; printf("Extracting maps...\n"); uint32 map_count = ReadMapDBC(); ReadAreaTableDBC(); ReadLiquidTypeTableDBC(); std::string path = output_path; path += "/maps/"; CreateDir(path); std::set<std::string> wmoList; printf("Convert map files\n"); for (uint32 z = 0; z < map_count; ++z) { printf("Extract %s (%d/%u) \n", map_ids[z].name, z+1, map_count); // Loadup map grid data sprintf(storagePath, "World\\Maps\\%s\\%s.wdt", map_ids[z].name, map_ids[z].name); ChunkedFile wdt; if (!wdt.loadFile(CascStorage, storagePath, false)) continue; ExtractWmos(wdt, wmoList); FileChunk* chunk = wdt.GetChunk("MAIN"); for (uint32 y = 0; y < WDT_MAP_SIZE; ++y) { for (uint32 x = 0; x < WDT_MAP_SIZE; ++x) { if (!(chunk->As<wdt_MAIN>()->adt_list[y][x].flag & 0x1)) continue; sprintf(storagePath, "World\\Maps\\%s\\%s_%u_%u.adt", map_ids[z].name, map_ids[z].name, x, y); sprintf(output_filename, "%s/maps/%03u%02u%02u.map", output_path, map_ids[z].id, y, x); ConvertADT(storagePath, output_filename, y, x, build); sprintf(storagePath, "World\\Maps\\%s\\%s_%u_%u_obj0.adt", map_ids[z].name, map_ids[z].name, x, y); ChunkedFile adtObj; if (adtObj.loadFile(CascStorage, storagePath, false)) ExtractWmos(adtObj, wmoList); } // draw progress bar printf("Processing........................%d%%\r", (100 * (y+1)) / WDT_MAP_SIZE); } } if (!wmoList.empty()) { if (FILE* wmoListFile = fopen("wmo_list.txt", "w")) { for (std::string const& wmo : wmoList) fprintf(wmoListFile, "%s\n", wmo.c_str()); fclose(wmoListFile); } } printf("\n"); delete[] areas; delete[] map_ids; } bool ExtractFile(HANDLE fileInArchive, char const* filename) { FILE* output = fopen(filename, "wb"); if(!output) { printf("Can't create the output file '%s'\n", filename); return false; } char buffer[0x10000]; DWORD readBytes = 1; while (readBytes > 0) { CascReadFile(fileInArchive, buffer, sizeof(buffer), &readBytes); if (readBytes > 0) 
fwrite(buffer, 1, readBytes, output); } fclose(output); return true; } void ExtractDBFilesClient(int l) { printf("Extracting dbc/db2 files...\n"); std::string outputPath = output_path; outputPath += "/dbc/"; CreateDir(outputPath); outputPath += Locales[l]; outputPath += "/"; CreateDir(outputPath); uint32 index = 0; uint32 count = 0; char const* fileName = DBFilesClientList[index]; HANDLE dbcFile; while (fileName) { std::string filename = fileName; if (CascOpenFile(CascStorage, (filename = (filename + ".db2")).c_str(), 1 << l, 0, &dbcFile) || CascOpenFile(CascStorage, (filename = (filename.substr(0, filename.length() - 4) + ".dbc")).c_str(), 1 << l, 0, &dbcFile)) { filename = outputPath + filename.substr(filename.rfind('\\') + 1); if (!FileExists(filename.c_str())) if (ExtractFile(dbcFile, filename.c_str())) ++count; CascCloseFile(dbcFile); } else printf("Unable to open file %s in the archive for locale %s: %s\n", fileName, Locales[l], HumanReadableCASCError(GetLastError())); fileName = DBFilesClientList[++index]; } printf("Extracted %u files\n\n", count); } bool OpenCascStorage() { try { boost::filesystem::path const storage_dir(boost::filesystem::canonical(input_path) / "Data"); if (!CascOpenStorage(storage_dir.string().c_str(), 0, &CascStorage)) { printf("error opening casc storage '%s': %s\n", storage_dir.string().c_str(), HumanReadableCASCError(GetLastError())); return false; } printf("opened casc storage '%s'\n", storage_dir.string().c_str()); return true; } catch (boost::filesystem::filesystem_error& error) { printf("error opening casc storage : %s\n", error.what()); return false; } } int main(int argc, char * arg[]) { printf("Map & DBC Extractor\n"); printf("===================\n"); HandleArgs(argc, arg); int FirstLocale = -1; uint32 build = 0; if (!OpenCascStorage()) { return 1; } for (int i = 0; i < LOCALES_COUNT; ++i) { if (CONF_Locale && !(CONF_Locale & (1 << i))) continue; if ((CONF_extract & EXTRACT_DBC) == 0) { FirstLocale = i; build = ReadBuild(i); if (!build) continue; printf("Detected client build: %u\n\n", build); break; } //Extract DBC files uint32 tempBuild = ReadBuild(i); if (!tempBuild) continue; printf("Detected client build %u for locale %s\n\n", tempBuild, Locales[i]); ExtractDBFilesClient(i); if (FirstLocale < 0) { FirstLocale = i; build = tempBuild; } } if (FirstLocale < 0) { printf("No locales detected\n"); return 0; } if (CONF_extract & EXTRACT_MAP) { printf("Using locale: %s\n", Locales[FirstLocale]); ExtractMaps(build); } CascCloseStorage(CascStorage); return 0; }
gpl-2.0
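The height packing in ConvertADT() above stores each cell as an unsigned offset from minHeight scaled by step = 255/diff or 65535/diff, trading a worst-case decode error of roughly diff/255 (or diff/65535) for a much smaller map file. A round-trip sketch of the int16 case with made-up heights:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	float minHeight = -10.0f, maxHeight = 90.0f;
	float diff = maxHeight - minHeight;	/* < 2048, so int16 packing applies */
	float step = 65535 / diff;		/* selectUInt16StepStore() */
	float h = 37.3f;

	/* encode with +0.5f rounding, as the extractor does */
	uint16_t packed = (uint16_t)((h - minHeight) * step + 0.5f);
	/* decode the way a map loader would */
	float unpacked = minHeight + packed / step;

	printf("h=%f packed=%u unpacked=%f err=%g\n",
	       h, (unsigned)packed, unpacked, (double)(unpacked - h));
	return 0;
}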
pwithnall/totem
src/plugins/screenshot/screenshot-filename-builder.c
3
8432
/* screenshot-filename-builder.c - Builds a filename suitable for a screenshot * * Copyright (C) 2008, 2011 Cosimo Cecchi <cosimoc@gnome.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA * * 28th June 2012: Bastien Nocera: Add exception clause. * See license_change file for details. */ #include <config.h> #include <gio/gio.h> #include <glib/gi18n.h> #include <pwd.h> #include <string.h> #include "screenshot-filename-builder.h" typedef enum { TEST_SAVED_DIR = 0, TEST_DEFAULT, TEST_FALLBACK, NUM_TESTS } TestType; typedef struct { char *base_paths[NUM_TESTS]; char *screenshot_origin; int iteration; TestType type; GSimpleAsyncResult *async_result; } AsyncExistenceJob; /* Taken from gnome-vfs-utils.c */ static char * expand_initial_tilde (const char *path) { char *slash_after_user_name, *user_name; struct passwd *passwd_file_entry; if (path[1] == '/' || path[1] == '\0') { return g_build_filename (g_get_home_dir (), &path[1], NULL); } slash_after_user_name = strchr (&path[1], '/'); if (slash_after_user_name == NULL) { user_name = g_strdup (&path[1]); } else { user_name = g_strndup (&path[1], slash_after_user_name - &path[1]); } passwd_file_entry = getpwnam (user_name); g_free (user_name); if (passwd_file_entry == NULL || passwd_file_entry->pw_dir == NULL) { return g_strdup (path); } return g_strconcat (passwd_file_entry->pw_dir, slash_after_user_name, NULL); } static gchar * get_fallback_screenshot_dir (void) { return g_strdup (g_get_home_dir ()); } static gchar * get_default_screenshot_dir (void) { return g_strdup (g_get_user_special_dir (G_USER_DIRECTORY_PICTURES)); } static gchar * sanitize_save_directory (const gchar *save_dir) { gchar *retval = g_strdup (save_dir); if (save_dir == NULL) return NULL; if (save_dir[0] == '~') { char *tmp = expand_initial_tilde (save_dir); g_free (retval); retval = tmp; } else if (strstr (save_dir, "://") != NULL) { GFile *file; g_free (retval); file = g_file_new_for_uri (save_dir); retval = g_file_get_path (file); g_object_unref (file); } return retval; } static char * build_path (AsyncExistenceJob *job) { const gchar *base_path; char *retval, *file_name; char *origin; base_path = job->base_paths[job->type]; if (base_path == NULL || base_path[0] == '\0') return NULL; if (job->screenshot_origin == NULL) { GDateTime *d; d = g_date_time_new_now_local (); origin = g_date_time_format (d, "%Y-%m-%d %H:%M:%S"); g_date_time_unref (d); } else origin = g_strdup (job->screenshot_origin); if (job->iteration == 0) { /* translators: this is the name of the file that gets made up * with the screenshot if the entire screen is taken */ file_name = g_strdup_printf (_("Screenshot from %s.png"), origin); } else { /* translators: this is the name of the file that gets * made up with the screenshot if the entire screen is * taken */ file_name = g_strdup_printf (_("Screenshot from %s - %d.png"), origin, job->iteration); } retval = 
g_build_filename (base_path, file_name, NULL); g_free (file_name); g_free (origin); return retval; } static void async_existence_job_free (AsyncExistenceJob *job) { gint idx; for (idx = 0; idx < NUM_TESTS; idx++) g_free (job->base_paths[idx]); g_free (job->screenshot_origin); g_clear_object (&job->async_result); g_slice_free (AsyncExistenceJob, job); } static gboolean prepare_next_cycle (AsyncExistenceJob *job) { gboolean res = FALSE; if (job->type != (NUM_TESTS - 1)) { (job->type)++; job->iteration = 0; res = TRUE; } return res; } static gboolean try_check_file (GIOSchedulerJob *io_job, GCancellable *cancellable, gpointer data) { AsyncExistenceJob *job = data; GFile *file; GFileInfo *info; GError *error; char *path, *retval; retry: error = NULL; path = build_path (job); if (path == NULL) { (job->type)++; goto retry; } file = g_file_new_for_path (path); info = g_file_query_info (file, G_FILE_ATTRIBUTE_STANDARD_TYPE, G_FILE_QUERY_INFO_NONE, cancellable, &error); if (info != NULL) { /* file already exists, iterate again */ g_object_unref (info); g_object_unref (file); g_free (path); (job->iteration)++; goto retry; } else { /* see the error to check whether the location is not accessible * or the file does not exist. */ if (error->code == G_IO_ERROR_NOT_FOUND) { GFile *parent; /* if the parent directory doesn't exist as well, forget the saved * directory and treat this as a generic error. */ parent = g_file_get_parent (file); if (!g_file_query_exists (parent, NULL)) { if (!prepare_next_cycle (job)) { retval = NULL; g_object_unref (parent); goto out; } g_object_unref (file); g_object_unref (parent); goto retry; } else { retval = path; g_object_unref (parent); goto out; } } else { /* another kind of error, assume this location is not * accessible. */ g_free (path); if (prepare_next_cycle (job)) { g_error_free (error); g_object_unref (file); goto retry; } else { retval = NULL; goto out; } } } out: g_error_free (error); g_object_unref (file); g_simple_async_result_set_op_res_gpointer (job->async_result, retval, NULL); if (retval == NULL) g_simple_async_result_set_error (job->async_result, G_IO_ERROR, G_IO_ERROR_FAILED, "%s", "Failed to find a valid place to save"); g_simple_async_result_complete_in_idle (job->async_result); async_existence_job_free (job); return FALSE; } void screenshot_build_filename_async (const char *save_dir, const char *screenshot_origin, GAsyncReadyCallback callback, gpointer user_data) { AsyncExistenceJob *job; job = g_slice_new0 (AsyncExistenceJob); job->base_paths[TEST_SAVED_DIR] = sanitize_save_directory (save_dir); job->base_paths[TEST_DEFAULT] = get_default_screenshot_dir (); job->base_paths[TEST_FALLBACK] = get_fallback_screenshot_dir (); job->iteration = 0; job->type = TEST_SAVED_DIR; job->screenshot_origin = g_strdup (screenshot_origin); job->async_result = g_simple_async_result_new (NULL, callback, user_data, screenshot_build_filename_async); g_io_scheduler_push_job (try_check_file, job, NULL, G_PRIORITY_DEFAULT, NULL); } gchar * screenshot_build_filename_finish (GAsyncResult *result, GError **error) { if (g_simple_async_result_propagate_error (G_SIMPLE_ASYNC_RESULT (result), error)) return NULL; return g_simple_async_result_get_op_res_gpointer (G_SIMPLE_ASYNC_RESULT (result)); }
gpl-2.0
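The file above follows the standard GIO async/finish pairing: screenshot_build_filename_async() schedules the existence probe on the GIOScheduler, and screenshot_build_filename_finish() either returns the chosen path or propagates the error set in try_check_file(). A minimal sketch of a caller, assuming a running GLib main loop; the on_filename_ready name and the main-loop scaffolding are illustrative, not part of the file above:

#include <gio/gio.h>
#include <stdio.h>
#include "screenshot-filename-builder.h"

static GMainLoop *loop;

/* Hypothetical callback: collect the generated path, then quit the loop. */
static void
on_filename_ready (GObject *source, GAsyncResult *result, gpointer user_data)
{
  GError *error = NULL;
  gchar *path = screenshot_build_filename_finish (result, &error);

  if (path == NULL)
    fprintf (stderr, "No writable location found: %s\n", error->message);
  else
    printf ("Screenshot will be saved to %s\n", path);

  g_free (path);
  g_clear_error (&error);
  g_main_loop_quit (loop);
}

int
main (void)
{
  loop = g_main_loop_new (NULL, FALSE);
  /* NULL save_dir: the job skips TEST_SAVED_DIR and falls through
   * TEST_DEFAULT (XDG Pictures) to TEST_FALLBACK ($HOME). */
  screenshot_build_filename_async (NULL, NULL, on_filename_ready, NULL);
  g_main_loop_run (loop);
  g_main_loop_unref (loop);
  return 0;
}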
dai-dao/Grounded-Language-Learning-in-Pytorch
engine/code/qcommon/vm.c
3
21548
/* =========================================================================== Copyright (C) 1999-2005 Id Software, Inc., 2016 Google Inc. This file is part of Quake III Arena source code. Quake III Arena source code is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Quake III Arena source code is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Quake III Arena source code; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA =========================================================================== */ // vm.c -- virtual machine /* intermix code and data symbol table a dll has one imported function: VM_SystemCall and one exported function: Perform */ #include "vm_local.h" vm_t *currentVM = NULL; vm_t *lastVM = NULL; int vm_debugLevel; // used by Com_Error to get rid of running vm's before longjmp static int forced_unload; #define MAX_VM 3 vm_t vmTable[MAX_VM]; void VM_VmInfo_f( void ); void VM_VmProfile_f( void ); #if 0 // 64bit! // converts a VM pointer to a C pointer and // checks to make sure that the range is acceptable void *VM_VM2C( vmptr_t p, int length ) { return (void *)p; } #endif void VM_Debug( int level ) { vm_debugLevel = level; } /* ============== VM_Init ============== */ void VM_Init( void ) { Cvar_Get( "vm_cgame", "2", CVAR_ARCHIVE ); // !@# SHIP WITH SET TO 2 Cvar_Get( "vm_game", "2", CVAR_ARCHIVE ); // !@# SHIP WITH SET TO 2 Cvar_Get( "vm_ui", "2", CVAR_ARCHIVE ); // !@# SHIP WITH SET TO 2 Cmd_AddCommand ("vmprofile", VM_VmProfile_f ); Cmd_AddCommand ("vminfo", VM_VmInfo_f ); Com_Memset( vmTable, 0, sizeof( vmTable ) ); } /* =============== VM_ValueToSymbol Assumes a program counter value =============== */ const char *VM_ValueToSymbol( vm_t *vm, int value ) { vmSymbol_t *sym; static char text[MAX_TOKEN_CHARS]; sym = vm->symbols; if ( !sym ) { return "NO SYMBOLS"; } // find the symbol while ( sym->next && sym->next->symValue <= value ) { sym = sym->next; } if ( value == sym->symValue ) { return sym->symName; } Com_sprintf( text, sizeof( text ), "%s+%i", sym->symName, value - sym->symValue ); return text; } /* =============== VM_ValueToFunctionSymbol For profiling, find the symbol behind this value =============== */ vmSymbol_t *VM_ValueToFunctionSymbol( vm_t *vm, int value ) { vmSymbol_t *sym; static vmSymbol_t nullSym; sym = vm->symbols; if ( !sym ) { return &nullSym; } while ( sym->next && sym->next->symValue <= value ) { sym = sym->next; } return sym; } /* =============== VM_SymbolToValue =============== */ int VM_SymbolToValue( vm_t *vm, const char *symbol ) { vmSymbol_t *sym; for ( sym = vm->symbols ; sym ; sym = sym->next ) { if ( !strcmp( symbol, sym->symName ) ) { return sym->symValue; } } return 0; } /* ===================== VM_SymbolForCompiledPointer ===================== */ #if 0 // 64bit! 
const char *VM_SymbolForCompiledPointer( vm_t *vm, void *code ) { int i; if ( code < (void *)vm->codeBase ) { return "Before code block"; } if ( code >= (void *)(vm->codeBase + vm->codeLength) ) { return "After code block"; } // find which original instruction it is after for ( i = 0 ; i < vm->codeLength ; i++ ) { if ( (void *)vm->instructionPointers[i] > code ) { break; } } i--; // now look up the bytecode instruction pointer return VM_ValueToSymbol( vm, i ); } #endif /* =============== ParseHex =============== */ int ParseHex( const char *text ) { int value; int c; value = 0; while ( ( c = *text++ ) != 0 ) { if ( c >= '0' && c <= '9' ) { value = value * 16 + c - '0'; continue; } if ( c >= 'a' && c <= 'f' ) { value = value * 16 + 10 + c - 'a'; continue; } if ( c >= 'A' && c <= 'F' ) { value = value * 16 + 10 + c - 'A'; continue; } } return value; } /* =============== VM_LoadSymbols =============== */ void VM_LoadSymbols( vm_t *vm ) { union { char *c; void *v; } mapfile; char *text_p, *token; char name[MAX_QPATH]; char symbols[MAX_QPATH]; vmSymbol_t **prev, *sym; int count; int value; int chars; int segment; int numInstructions; // don't load symbols if not developer if ( !com_developer->integer ) { return; } COM_StripExtension(vm->name, name, sizeof(name)); Com_sprintf( symbols, sizeof( symbols ), "vm/%s.map", name ); FS_ReadFile( symbols, &mapfile.v ); if ( !mapfile.c ) { Com_Printf( "Couldn't load symbol file: %s\n", symbols ); return; } numInstructions = vm->instructionCount; // parse the symbols text_p = mapfile.c; prev = &vm->symbols; count = 0; while ( 1 ) { token = COM_Parse( &text_p ); if ( !token[0] ) { break; } segment = ParseHex( token ); if ( segment ) { COM_Parse( &text_p ); COM_Parse( &text_p ); continue; // only load code segment values } token = COM_Parse( &text_p ); if ( !token[0] ) { Com_Printf( "WARNING: incomplete line at end of file\n" ); break; } value = ParseHex( token ); token = COM_Parse( &text_p ); if ( !token[0] ) { Com_Printf( "WARNING: incomplete line at end of file\n" ); break; } chars = strlen( token ); sym = Hunk_Alloc( sizeof( *sym ) + chars, h_high ); *prev = sym; prev = &sym->next; sym->next = NULL; // convert value from an instruction number to a code offset if ( value >= 0 && value < numInstructions ) { value = vm->instructionPointers[value]; } sym->symValue = value; Q_strncpyz( sym->symName, token, chars + 1 ); count++; } vm->numSymbols = count; Com_Printf( "%i symbols parsed from %s\n", count, symbols ); FS_FreeFile( mapfile.v ); } /* ============ VM_DllSyscall Dlls will call this directly rcg010206 The horror; the horror. The syscall mechanism relies on stack manipulation to get its args. This is likely due to C's inability to pass "..." parameters to a function in one clean chunk. On PowerPC Linux, these parameters are not necessarily passed on the stack, so while (&arg[0] == arg) is true, (&arg[1] == 2nd function parameter) is not necessarily accurate, as arg's value might have been stored to the stack or other piece of scratch memory to give it a valid address, but the next parameter might still be sitting in a register. Quake's syscall system also assumes that the stack grows downward, and that any needed types can be squeezed, safely, into a signed int. This hack below copies all needed values for an argument to an array in memory, so that Quake can get the correct values. This can also be used on systems where the stack grows upwards, as the presumably standard and safe stdargs.h macros are used. 
As for having enough space in a signed int for your datatypes, well, it might be better to wait for DOOM 3 before you start porting. :) The original code, while probably still inherently dangerous, seems to work well enough for the platforms it already works on. Rather than add the performance hit for those platforms, the original code is still in use there. For speed, we just grab 15 arguments, and don't worry about exactly how many the syscall actually needs; the extra is thrown away. ============ */ intptr_t QDECL VM_DllSyscall( intptr_t arg, ... ) { #if !id386 || defined __clang__ // rcg010206 - see commentary above intptr_t args[MAX_VMSYSCALL_ARGS]; int i; va_list ap; args[0] = arg; va_start(ap, arg); for (i = 1; i < ARRAY_LEN (args); i++) args[i] = va_arg(ap, intptr_t); va_end(ap); return currentVM->systemCall( args ); #else // original id code return currentVM->systemCall( &arg ); #endif } /* ================= VM_LoadQVM Load a .qvm file ================= */ vmHeader_t *VM_LoadQVM( vm_t *vm, qboolean alloc, qboolean unpure) { int dataLength; int i; char filename[MAX_QPATH]; union { vmHeader_t *h; void *v; } header; // load the image Com_sprintf( filename, sizeof(filename), "vm/%s.qvm", vm->name ); Com_Printf( "Loading vm file %s...\n", filename ); FS_ReadFileDir(filename, vm->searchPath, unpure, &header.v); if ( !header.h ) { Com_Printf( "Failed.\n" ); VM_Free( vm ); Com_Printf(S_COLOR_YELLOW "Warning: Couldn't open VM file %s\n", filename); return NULL; } // show where the qvm was loaded from FS_Which(filename, vm->searchPath); if( LittleLong( header.h->vmMagic ) == VM_MAGIC_VER2 ) { Com_Printf( "...which has vmMagic VM_MAGIC_VER2\n" ); // byte swap the header for ( i = 0 ; i < sizeof( vmHeader_t ) / 4 ; i++ ) { ((int *)header.h)[i] = LittleLong( ((int *)header.h)[i] ); } // validate if ( header.h->jtrgLength < 0 || header.h->bssLength < 0 || header.h->dataLength < 0 || header.h->litLength < 0 || header.h->codeLength <= 0 ) { VM_Free(vm); FS_FreeFile(header.v); Com_Printf(S_COLOR_YELLOW "Warning: %s has bad header\n", filename); return NULL; } } else if( LittleLong( header.h->vmMagic ) == VM_MAGIC ) { // byte swap the header // sizeof( vmHeader_t ) - sizeof( int ) is the 1.32b vm header size for ( i = 0 ; i < ( sizeof( vmHeader_t ) - sizeof( int ) ) / 4 ; i++ ) { ((int *)header.h)[i] = LittleLong( ((int *)header.h)[i] ); } // validate if ( header.h->bssLength < 0 || header.h->dataLength < 0 || header.h->litLength < 0 || header.h->codeLength <= 0 ) { VM_Free(vm); FS_FreeFile(header.v); Com_Printf(S_COLOR_YELLOW "Warning: %s has bad header\n", filename); return NULL; } } else { VM_Free( vm ); FS_FreeFile(header.v); Com_Printf(S_COLOR_YELLOW "Warning: %s does not have a recognisable " "magic number in its header\n", filename); return NULL; } // round up to next power of 2 so all data operations can // be mask protected dataLength = header.h->dataLength + header.h->litLength + header.h->bssLength; for ( i = 0 ; dataLength > ( 1 << i ) ; i++ ) { } dataLength = 1 << i; if(alloc) { // allocate zero filled space for initialized and uninitialized data vm->dataBase = Hunk_Alloc(dataLength, h_high); vm->dataMask = dataLength - 1; } else { // clear the data, but make sure we're not clearing more than allocated if(vm->dataMask + 1 != dataLength) { VM_Free(vm); FS_FreeFile(header.v); Com_Printf(S_COLOR_YELLOW "Warning: Data region size of %s not matching after " "VM_Restart()\n", filename); return NULL; } Com_Memset(vm->dataBase, 0, dataLength); } // copy the initialized data Com_Memcpy( 
vm->dataBase, (byte *)header.h + header.h->dataOffset, header.h->dataLength + header.h->litLength ); // byte swap the longs for ( i = 0 ; i < header.h->dataLength ; i += 4 ) { *(int *)(vm->dataBase + i) = LittleLong( *(int *)(vm->dataBase + i ) ); } if(header.h->vmMagic == VM_MAGIC_VER2) { int previousNumJumpTableTargets = vm->numJumpTableTargets; header.h->jtrgLength &= ~0x03; vm->numJumpTableTargets = header.h->jtrgLength >> 2; Com_Printf("Loading %d jump table targets\n", vm->numJumpTableTargets); if(alloc) { vm->jumpTableTargets = Hunk_Alloc(header.h->jtrgLength, h_high); } else { if(vm->numJumpTableTargets != previousNumJumpTableTargets) { VM_Free(vm); FS_FreeFile(header.v); Com_Printf(S_COLOR_YELLOW "Warning: Jump table size of %s not matching after " "VM_Restart()\n", filename); return NULL; } Com_Memset(vm->jumpTableTargets, 0, header.h->jtrgLength); } Com_Memcpy(vm->jumpTableTargets, (byte *) header.h + header.h->dataOffset + header.h->dataLength + header.h->litLength, header.h->jtrgLength); // byte swap the longs for ( i = 0 ; i < header.h->jtrgLength ; i += 4 ) { *(int *)(vm->jumpTableTargets + i) = LittleLong( *(int *)(vm->jumpTableTargets + i ) ); } } return header.h; } /* ================= VM_Restart Reload the data, but leave everything else in place This allows a server to do a map_restart without changing memory allocation We need to make sure that servers can access unpure QVMs (not contained in any pak) even if the client is pure, so take "unpure" as argument. ================= */ vm_t *VM_Restart(vm_t *vm, qboolean unpure) { vmHeader_t *header; // DLL's can't be restarted in place if ( vm->dllHandle ) { char name[MAX_QPATH]; intptr_t (*systemCall)( intptr_t *parms ); systemCall = vm->systemCall; Q_strncpyz( name, vm->name, sizeof( name ) ); VM_Free( vm ); vm = VM_Create( name, systemCall, VMI_NATIVE ); return vm; } // load the image Com_Printf("VM_Restart()\n"); if(!(header = VM_LoadQVM(vm, qfalse, unpure))) { Com_Error(ERR_DROP, "VM_Restart failed"); return NULL; } // free the original file FS_FreeFile(header); return vm; } /* ================ VM_Create If image ends in .qvm it will be interpreted, otherwise it will attempt to load as a system dll ================ */ vm_t *VM_Create( const char *module, intptr_t (*systemCalls)(intptr_t *), vmInterpret_t interpret ) { vm_t *vm; vmHeader_t *header; int i, remaining, retval; char filename[MAX_OSPATH]; void *startSearch = NULL; if ( !module || !module[0] || !systemCalls ) { Com_Error( ERR_FATAL, "VM_Create: bad parms" ); } remaining = Hunk_MemoryRemaining(); // see if we already have the VM for ( i = 0 ; i < MAX_VM ; i++ ) { if (!Q_stricmp(vmTable[i].name, module)) { vm = &vmTable[i]; return vm; } } // find a free vm for ( i = 0 ; i < MAX_VM ; i++ ) { if ( !vmTable[i].name[0] ) { break; } } if ( i == MAX_VM ) { Com_Error( ERR_FATAL, "VM_Create: no free vm_t" ); } vm = &vmTable[i]; Q_strncpyz(vm->name, module, sizeof(vm->name)); do { retval = FS_FindVM(&startSearch, filename, sizeof(filename), module, (interpret == VMI_NATIVE)); if(retval == VMI_NATIVE) { Com_Printf("Try loading dll file %s\n", filename); vm->dllHandle = Sys_LoadGameDll(filename, &vm->entryPoint, VM_DllSyscall); if(vm->dllHandle) { vm->systemCall = systemCalls; return vm; } Com_Printf("Failed loading dll, trying next\n"); } else if(retval == VMI_COMPILED) { vm->searchPath = startSearch; if((header = VM_LoadQVM(vm, qtrue, qfalse))) break; // VM_Free overwrites the name on failed load Q_strncpyz(vm->name, module, sizeof(vm->name)); } } while(retval >= 0); 
if(retval < 0) return NULL; vm->systemCall = systemCalls; // allocate space for the jump targets, which will be filled in by the compile/prep functions vm->instructionCount = header->instructionCount; vm->instructionPointers = Hunk_Alloc(vm->instructionCount * sizeof(*vm->instructionPointers), h_high); // copy or compile the instructions vm->codeLength = header->codeLength; vm->compiled = qfalse; #ifdef NO_VM_COMPILED if(interpret >= VMI_COMPILED) { Com_Printf("Architecture doesn't have a bytecode compiler, using interpreter\n"); interpret = VMI_BYTECODE; } #else if(interpret != VMI_BYTECODE) { vm->compiled = qtrue; VM_Compile( vm, header ); } #endif // VM_Compile may have reset vm->compiled if compilation failed if (!vm->compiled) { VM_PrepareInterpreter( vm, header ); } // free the original file FS_FreeFile( header ); // load the map file VM_LoadSymbols( vm ); // the stack is implicitly at the end of the image vm->programStack = vm->dataMask + 1; vm->stackBottom = vm->programStack - PROGRAM_STACK_SIZE; Com_Printf("%s loaded in %d bytes on the hunk\n", module, remaining - Hunk_MemoryRemaining()); return vm; } /* ============== VM_Free ============== */ void VM_Free( vm_t *vm ) { if(!vm) { return; } if(vm->callLevel) { if(!forced_unload) { Com_Error( ERR_FATAL, "VM_Free(%s) on running vm", vm->name ); return; } else { Com_Printf( "forcefully unloading %s vm\n", vm->name ); } } if(vm->destroy) vm->destroy(vm); if ( vm->dllHandle ) { Sys_UnloadDll( vm->dllHandle ); Com_Memset( vm, 0, sizeof( *vm ) ); } #if 0 // now automatically freed by hunk if ( vm->codeBase ) { Z_Free( vm->codeBase ); } if ( vm->dataBase ) { Z_Free( vm->dataBase ); } if ( vm->instructionPointers ) { Z_Free( vm->instructionPointers ); } #endif Com_Memset( vm, 0, sizeof( *vm ) ); currentVM = NULL; lastVM = NULL; } void VM_Clear(void) { int i; for (i=0;i<MAX_VM; i++) { VM_Free(&vmTable[i]); } } void VM_Forced_Unload_Start(void) { forced_unload = 1; } void VM_Forced_Unload_Done(void) { forced_unload = 0; } void *VM_ArgPtr( intptr_t intValue ) { if ( !intValue ) { return NULL; } // currentVM is missing on reconnect if ( currentVM==NULL ) return NULL; if ( currentVM->entryPoint ) { return (void *)(currentVM->dataBase + intValue); } else { return (void *)(currentVM->dataBase + (intValue & currentVM->dataMask)); } } void *VM_ExplicitArgPtr( vm_t *vm, intptr_t intValue ) { if ( !intValue ) { return NULL; } // currentVM is missing on reconnect here as well? if ( currentVM==NULL ) return NULL; // if ( vm->entryPoint ) { return (void *)(vm->dataBase + intValue); } else { return (void *)(vm->dataBase + (intValue & vm->dataMask)); } } /* ============== VM_Call Upon a system call, the stack will look like: sp+32 parm1 sp+28 parm0 sp+24 return value sp+20 return address sp+16 local1 sp+14 local0 sp+12 arg1 sp+8 arg0 sp+4 return stack sp return address An interpreted function will immediately execute an OP_ENTER instruction, which will subtract space for locals from sp ============== */ intptr_t QDECL VM_Call_Impl( vm_t *vm, int(*args)[13] ) { vm_t *oldVM; intptr_t r; if(!vm || !vm->name[0]) Com_Error(ERR_FATAL, "VM_Call with NULL vm"); oldVM = currentVM; currentVM = vm; lastVM = vm; if ( vm_debugLevel ) { Com_Printf( "VM_Call( %d )\n", (*args)[0] ); } ++vm->callLevel; // if we have a dll loaded, call it directly if ( vm->entryPoint ) { //rcg010207 - see dissertation at top of VM_DllSyscall() in this file. 
r = vm->entryPoint( (*args)[0], (*args)[1], (*args)[2], (*args)[3], (*args)[4], (*args)[5], (*args)[6], (*args)[7], (*args)[8], (*args)[9], (*args)[10], (*args)[11], (*args)[12]); } else { #ifndef NO_VM_COMPILED if ( vm->compiled ) r = VM_CallCompiled( vm, *args ); else #endif r = VM_CallInterpreted( vm, *args ); } --vm->callLevel; if ( oldVM != NULL ) currentVM = oldVM; return r; } //================================================================= static int QDECL VM_ProfileSort( const void *a, const void *b ) { vmSymbol_t *sa, *sb; sa = *(vmSymbol_t **)a; sb = *(vmSymbol_t **)b; if ( sa->profileCount < sb->profileCount ) { return -1; } if ( sa->profileCount > sb->profileCount ) { return 1; } return 0; } /* ============== VM_VmProfile_f ============== */ void VM_VmProfile_f( void ) { vm_t *vm; vmSymbol_t **sorted, *sym; int i; double total; if ( !lastVM ) { return; } vm = lastVM; if ( !vm->numSymbols ) { return; } sorted = Z_Malloc( vm->numSymbols * sizeof( *sorted ) ); sorted[0] = vm->symbols; total = sorted[0]->profileCount; for ( i = 1 ; i < vm->numSymbols ; i++ ) { sorted[i] = sorted[i-1]->next; total += sorted[i]->profileCount; } qsort( sorted, vm->numSymbols, sizeof( *sorted ), VM_ProfileSort ); for ( i = 0 ; i < vm->numSymbols ; i++ ) { int perc; sym = sorted[i]; perc = 100 * (float) sym->profileCount / total; Com_Printf( "%2i%% %9i %s\n", perc, sym->profileCount, sym->symName ); sym->profileCount = 0; } Com_Printf(" %9.0f total\n", total ); Z_Free( sorted ); } /* ============== VM_VmInfo_f ============== */ void VM_VmInfo_f( void ) { vm_t *vm; int i; Com_Printf( "Registered virtual machines:\n" ); for ( i = 0 ; i < MAX_VM ; i++ ) { vm = &vmTable[i]; if ( !vm->name[0] ) { break; } Com_Printf( "%s : ", vm->name ); if ( vm->dllHandle ) { Com_Printf( "native\n" ); continue; } if ( vm->compiled ) { Com_Printf( "compiled on load\n" ); } else { Com_Printf( "interpreted\n" ); } Com_Printf( " code length : %7i\n", vm->codeLength ); Com_Printf( " table length: %7i\n", vm->instructionCount*4 ); Com_Printf( " data length : %7i\n", vm->dataMask + 1 ); } } /* =============== VM_LogSyscalls Insert calls to this while debugging the vm compiler =============== */ void VM_LogSyscalls( int *args ) { static int callnum; static FILE *f; if ( !f ) { f = fopen("syscalls.log", "w" ); } callnum++; fprintf(f, "%i: %p (%i) = %i %i %i %i\n", callnum, (void*)(args - (int *)currentVM->dataBase), args[0], args[1], args[2], args[3], args[4] ); } /* ================= VM_BlockCopy Executes a block copy operation within currentVM data space ================= */ void VM_BlockCopy(unsigned int dest, unsigned int src, size_t n) { unsigned int dataMask = currentVM->dataMask; if ((dest & dataMask) != dest || (src & dataMask) != src || ((dest + n) & dataMask) != dest + n || ((src + n) & dataMask) != src + n) { Com_Error(ERR_DROP, "OP_BLOCK_COPY out of range!"); } Com_Memcpy(currentVM->dataBase + dest, currentVM->dataBase + src, n); }
gpl-2.0
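One detail worth calling out in VM_LoadQVM() above: the data segment is rounded up to a power of two precisely so that every later access can be clamped with a single AND against vm->dataMask, which is exactly what VM_ArgPtr() and VM_BlockCopy() do. A self-contained sketch of that rounding-and-masking trick; the helper name and sample values are illustrative, not engine code:

#include <stdio.h>

/* Round len up to the next power of two, mirroring the loop in
 * VM_LoadQVM, so that (addr & mask) always lands inside the segment. */
static unsigned round_up_pow2(unsigned len) {
	unsigned i;
	for (i = 0; len > (1u << i); i++) {
	}
	return 1u << i;
}

int main(void) {
	unsigned dataLength = round_up_pow2(300000);	/* rounds to 524288 */
	unsigned dataMask   = dataLength - 1;
	unsigned addr       = 600000;	/* an out-of-range VM pointer */

	/* Mask protection: a wild address wraps back into the segment
	 * instead of touching engine memory (it may still read the wrong
	 * VM data, but it cannot corrupt the host process). */
	printf("len=%u mask=0x%x clamped=%u\n",
	       dataLength, dataMask, addr & dataMask);
	return 0;
}

The trade-off is up to twice the memory for the data segment in exchange for branch-free bounds clamping on every VM load and store.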
summerpulse/openjdk7
jdk/src/share/demo/jvmti/hprof/hprof_init.c
3
77146
/* * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * - Neither the name of Oracle nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * This source code is provided to illustrate the usage of a given feature * or technique and has been deliberately simplified. Additional steps * required for a production-quality application, such as security checks, * input validation and proper error handling, might not be present in * this sample code. */ /* Main source file, the basic JVMTI connection/startup code. */ #include "hprof.h" #include "java_crw_demo.h" /* * This file contains all the startup logic (Agent_Onload) and * connection to the JVMTI interface. * All JVMTI Event callbacks are in this file. * All setting of global data (gdata) is done here. * Options are parsed here. * Option help messages are here. * Termination handled here (VM_DEATH) and shutdown (Agent_OnUnload). * Spawning of the cpu sample loop thread and listener thread is done here. * * Use of private 'static' data has been limited, most shared static data * should be found in the GlobalData structure pointed to by gdata * (see hprof.h). * */ /* The default output filenames. */ #define DEFAULT_TXT_SUFFIX ".txt" #define DEFAULT_OUTPUTFILE "java.hprof" #define DEFAULT_OUTPUTTEMP "java.hprof.temp" /* The only global variable, defined by this library */ GlobalData *gdata; /* Experimental options */ #define EXPERIMENT_NO_EARLY_HOOK 0x1 /* Default trace depth */ #define DEFAULT_TRACE_DEPTH 4 /* Default sample interval */ #define DEFAULT_SAMPLE_INTERVAL 10 /* Default cutoff */ #define DEFAULT_CUTOFF_POINT 0.0001 /* Stringize macros for help. */ #define _TO_STR(a) #a #define TO_STR(a) _TO_STR(a) /* Macros to surround callback code (non-VM_DEATH callbacks). * Note that this just keeps a count of the non-VM_DEATH callbacks that * are currently active, it does not prevent these callbacks from * operating in parallel. It's the VM_DEATH callback that will wait * for all these callbacks to either complete and block, or just block. * We need to hold back these threads so they don't die during the final * VM_DEATH processing. 
* If the VM_DEATH callback is active in the beginning, then this callback * just blocks to prevent further execution of the thread. * If the VM_DEATH callback is active at the end, then this callback * will notify the VM_DEATH callback if it's the last one. * In all cases, the last thing they do is Enter/Exit the monitor * gdata->callbackBlock, which will block this callback if VM_DEATH * is running. * * WARNING: Do not 'return' or 'goto' out of the BEGIN_CALLBACK/END_CALLBACK * block, this will mess up the count. */ #define BEGIN_CALLBACK() \ { /* BEGIN OF CALLBACK */ \ jboolean bypass; \ rawMonitorEnter(gdata->callbackLock); \ if (gdata->vm_death_callback_active) { \ /* VM_DEATH is active, we will bypass the CALLBACK CODE */ \ bypass = JNI_TRUE; \ rawMonitorExit(gdata->callbackLock); \ /* Bypassed CALLBACKS block here until VM_DEATH done */ \ rawMonitorEnter(gdata->callbackBlock); \ rawMonitorExit(gdata->callbackBlock); \ } else { \ /* We will be executing the CALLBACK CODE in this case */ \ gdata->active_callbacks++; \ bypass = JNI_FALSE; \ rawMonitorExit(gdata->callbackLock); \ } \ if ( !bypass ) { \ /* BODY OF CALLBACK CODE (with no callback locks held) */ #define END_CALLBACK() /* Part of bypass if body */ \ rawMonitorEnter(gdata->callbackLock); \ gdata->active_callbacks--; \ /* If VM_DEATH is active, and last one, send notify. */ \ if (gdata->vm_death_callback_active) { \ if (gdata->active_callbacks == 0) { \ rawMonitorNotifyAll(gdata->callbackLock); \ } \ } \ rawMonitorExit(gdata->callbackLock); \ /* Non-Bypassed CALLBACKS block here until VM_DEATH done */ \ rawMonitorEnter(gdata->callbackBlock); \ rawMonitorExit(gdata->callbackBlock); \ } \ } /* END OF CALLBACK */ /* Forward declarations */ static void set_callbacks(jboolean on); /* ------------------------------------------------------------------- */ /* Global data initialization */ /* Get initialized global data area */ static GlobalData * get_gdata(void) { static GlobalData data; /* Create initial default values */ (void)memset(&data, 0, sizeof(GlobalData)); data.fd = -1; /* Non-zero file or socket. 
*/ data.heap_fd = -1; /* For heap=dump, see hprof_io */ data.check_fd = -1; /* For heap=dump, see hprof_io */ data.max_trace_depth = DEFAULT_TRACE_DEPTH; data.prof_trace_depth = DEFAULT_TRACE_DEPTH; data.sample_interval = DEFAULT_SAMPLE_INTERVAL; data.lineno_in_traces = JNI_TRUE; data.output_format = 'a'; /* 'b' for binary */ data.cutoff_point = DEFAULT_CUTOFF_POINT; data.dump_on_exit = JNI_TRUE; data.gc_start_time = -1L; #ifdef DEBUG data.debug = JNI_TRUE; data.coredump = JNI_TRUE; #endif data.micro_state_accounting = JNI_FALSE; data.force_output = JNI_TRUE; data.verbose = JNI_TRUE; data.primfields = JNI_TRUE; data.primarrays = JNI_TRUE; data.table_serial_number_start = 1; data.class_serial_number_start = 100000; data.thread_serial_number_start = 200000; data.trace_serial_number_start = 300000; data.object_serial_number_start = 400000; data.frame_serial_number_start = 500000; data.gref_serial_number_start = 1; data.table_serial_number_counter = data.table_serial_number_start; data.class_serial_number_counter = data.class_serial_number_start; data.thread_serial_number_counter = data.thread_serial_number_start; data.trace_serial_number_counter = data.trace_serial_number_start; data.object_serial_number_counter = data.object_serial_number_start; data.frame_serial_number_counter = data.frame_serial_number_start; data.gref_serial_number_counter = data.gref_serial_number_start; data.unknown_thread_serial_num = data.thread_serial_number_counter++; return &data; } /* ------------------------------------------------------------------- */ /* Error handler callback for the java_crw_demo (classfile read write) functions. */ static void my_crw_fatal_error_handler(const char * msg, const char *file, int line) { char errmsg[256]; (void)md_snprintf(errmsg, sizeof(errmsg), "%s [%s:%d]", msg, file, line); errmsg[sizeof(errmsg)-1] = 0; HPROF_ERROR(JNI_TRUE, errmsg); } static void list_all_tables(void) { string_list(); class_list(); frame_list(); site_list(); object_list(); trace_list(); monitor_list(); tls_list(); loader_list(); } /* ------------------------------------------------------------------- */ /* Option Parsing support */ /** * Socket connection */ /* * Return a socket connect()ed to a "hostname" that is * accept()ing heap profile data on "port." Return a value <= 0 if * such a connection can't be made. */ static int connect_to_socket(char *hostname, unsigned short port) { int fd; if (port == 0 || port > 65535) { HPROF_ERROR(JNI_FALSE, "invalid port number"); return -1; } if (hostname == NULL) { HPROF_ERROR(JNI_FALSE, "hostname is NULL"); return -1; } /* create a socket */ fd = md_connect(hostname, port); return fd; } /* Accept a filename, and adjust the name so that it is unique for this PID */ static void make_unique_filename(char **filename) { int fd; /* Find a file that doesn't exist */ fd = md_open(*filename); if ( fd >= 0 ) { int pid; char *new_name; char *old_name; char *prefix; char suffix[5]; int new_len; /* Close the file. 
*/ md_close(fd); /* Make filename name.PID[.txt] */ pid = md_getpid(); old_name = *filename; new_len = (int)strlen(old_name)+64; new_name = HPROF_MALLOC(new_len); prefix = old_name; suffix[0] = 0; /* Look for .txt suffix if not binary output */ if (gdata->output_format != 'b') { char *dot; char *format_suffix; format_suffix = DEFAULT_TXT_SUFFIX; (void)strcpy(suffix, format_suffix); dot = strrchr(old_name, '.'); if ( dot != NULL ) { int i; int slen; int match; slen = (int)strlen(format_suffix); match = 1; for ( i = 0; i < slen; i++ ) { if ( dot[i]==0 || tolower(format_suffix[i]) != tolower(dot[i]) ) { match = 0; break; } } if ( match ) { (void)strcpy(suffix, dot); *dot = 0; /* truncates prefix and old_name */ } } } /* Construct the name */ (void)md_snprintf(new_name, new_len, "%s.%d%s", prefix, pid, suffix); *filename = new_name; HPROF_FREE(old_name); /* Odds are with Windows, this file may not be so unique. */ (void)remove(gdata->output_filename); } } static int get_tok(char **src, char *buf, int buflen, int sep) { int len; char *p; buf[0] = 0; if ( **src == 0 ) { return 0; } p = strchr(*src, sep); if ( p==NULL ) { len = (int)strlen(*src); p = (*src) + len; } else { /*LINTED*/ len = (int)(p - (*src)); } if ( (len+1) > buflen ) { return 0; } (void)memcpy(buf, *src, len); buf[len] = 0; if ( *p != 0 && *p == sep ) { (*src) = p+1; } else { (*src) = p; } return len; } static jboolean setBinarySwitch(char **src, jboolean *ptr) { char buf[80]; if (!get_tok(src, buf, (int)sizeof(buf), ',')) { return JNI_FALSE; } if (strcmp(buf, "y") == 0) { *ptr = JNI_TRUE; } else if (strcmp(buf, "n") == 0) { *ptr = JNI_FALSE; } else { return JNI_FALSE; } return JNI_TRUE; } static void print_usage(void) { (void)fprintf(stdout, "\n" " HPROF: Heap and CPU Profiling Agent (JVMTI Demonstration Code)\n" "\n" AGENTNAME " usage: java " AGENTLIB "=[help]|[<option>=<value>, ...]\n" "\n" "Option Name and Value Description Default\n" "--------------------- ----------- -------\n" "heap=dump|sites|all heap profiling all\n" "cpu=samples|times|old CPU usage off\n" "monitor=y|n monitor contention n\n" "format=a|b text(txt) or binary output a\n" "file=<file> write data to file " DEFAULT_OUTPUTFILE "[{" DEFAULT_TXT_SUFFIX "}]\n" "net=<host>:<port> send data over a socket off\n" "depth=<size> stack trace depth " TO_STR(DEFAULT_TRACE_DEPTH) "\n" "interval=<ms> sample interval in ms " TO_STR(DEFAULT_SAMPLE_INTERVAL) "\n" "cutoff=<value> output cutoff point " TO_STR(DEFAULT_CUTOFF_POINT) "\n" "lineno=y|n line number in traces? y\n" "thread=y|n thread in traces? n\n" "doe=y|n dump on exit? 
y\n" "msa=y|n Solaris micro state accounting n\n" "force=y|n force output to <file> y\n" "verbose=y|n print messages about dumps y\n" "\n" "Obsolete Options\n" "----------------\n" "gc_okay=y|n\n" #ifdef DEBUG "\n" "DEBUG Option Description Default\n" "------------ ----------- -------\n" "primfields=y|n include primitive field values y\n" "primarrays=y|n include primitive array values y\n" "debugflags=MASK Various debug flags 0\n" " 0x01 Report refs in and of unprepared classes\n" "logflags=MASK Logging to stderr 0\n" " " TO_STR(LOG_DUMP_MISC) " Misc logging\n" " " TO_STR(LOG_DUMP_LISTS) " Dump out the tables\n" " " TO_STR(LOG_CHECK_BINARY) " Verify & dump format=b\n" "coredump=y|n Core dump on fatal n\n" "errorexit=y|n Exit on any error n\n" "pause=y|n Pause on onload & echo PID n\n" "debug=y|n Turn on all debug checking n\n" "X=MASK Internal use only 0\n" "\n" "Environment Variables\n" "---------------------\n" "_JAVA_HPROF_OPTIONS\n" " Options can be added externally via this environment variable.\n" " Anything contained in it will get a comma prepended to it (if needed),\n" " then it will be added to the end of the options supplied via the\n" " " XRUN " or " AGENTLIB " command line option.\n" #endif "\n" "Examples\n" "--------\n" " - Get sample cpu information every 20 millisec, with a stack depth of 3:\n" " java " AGENTLIB "=cpu=samples,interval=20,depth=3 classname\n" " - Get heap usage information based on the allocation sites:\n" " java " AGENTLIB "=heap=sites classname\n" #ifdef DEBUG " - Using the external option addition with csh, log details on all runs:\n" " setenv _JAVA_HPROF_OPTIONS \"logflags=0xC\"\n" " java " AGENTLIB "=cpu=samples classname\n" " is the same as:\n" " java " AGENTLIB "=cpu=samples,logflags=0xC classname\n" #endif "\n" "Notes\n" "-----\n" " - The option format=b cannot be used with monitor=y.\n" " - The option format=b cannot be used with cpu=old|times.\n" " - Use of the " XRUN " interface can still be used, e.g.\n" " java " XRUN ":[help]|[<option>=<value>, ...]\n" " will behave exactly the same as:\n" " java " AGENTLIB "=[help]|[<option>=<value>, ...]\n" #ifdef DEBUG " - The debug options and environment variables are available with both java\n" " and java_g versions.\n" #endif "\n" "Warnings\n" "--------\n" " - This is demonstration code for the JVMTI interface and use of BCI,\n" " it is not an official product or formal part of the JDK.\n" " - The " XRUN " interface will be removed in a future release.\n" " - The option format=b is considered experimental, this format may change\n" " in a future release.\n" #ifdef DEBUG " - The obsolete options may be completely removed in a future release.\n" " - The debug options and environment variables are not considered public\n" " interfaces and can change or be removed with any type of update of\n" " " AGENTNAME ", including patches.\n" #endif ); } static void option_error(char *description) { char errmsg[FILENAME_MAX+80]; (void)md_snprintf(errmsg, sizeof(errmsg), "%s option error: %s (%s)", AGENTNAME, description, gdata->options); errmsg[sizeof(errmsg)-1] = 0; HPROF_ERROR(JNI_FALSE, errmsg); error_exit_process(1); } static void parse_options(char *command_line_options) { int file_or_net_option_seen = JNI_FALSE; char *all_options; char *extra_options; char *options; char *default_filename; int ulen; if (command_line_options == 0) command_line_options = ""; if ((strcmp(command_line_options, "help")) == 0) { print_usage(); error_exit_process(0); } extra_options = getenv("_JAVA_HPROF_OPTIONS"); if ( extra_options == 
NULL ) { extra_options = ""; } all_options = HPROF_MALLOC((int)strlen(command_line_options) + (int)strlen(extra_options) + 2); gdata->options = all_options; (void)strcpy(all_options, command_line_options); if ( extra_options[0] != 0 ) { if ( all_options[0] != 0 ) { (void)strcat(all_options, ","); } (void)strcat(all_options, extra_options); } options = all_options; LOG2("parse_options()", all_options); while (*options) { char option[16]; char suboption[FILENAME_MAX+1]; char *endptr; if (!get_tok(&options, option, (int)sizeof(option), '=')) { option_error("general syntax error parsing options"); } if (strcmp(option, "file") == 0) { if ( file_or_net_option_seen ) { option_error("file or net options should only appear once"); } if (!get_tok(&options, suboption, (int)sizeof(suboption), ',')) { option_error("syntax error parsing file=filename"); } gdata->utf8_output_filename = HPROF_MALLOC((int)strlen(suboption)+1); (void)strcpy(gdata->utf8_output_filename, suboption); file_or_net_option_seen = JNI_TRUE; } else if (strcmp(option, "net") == 0) { char port_number[16]; if (file_or_net_option_seen ) { option_error("file or net options should only appear once"); } if (!get_tok(&options, suboption, (int)sizeof(suboption), ':')) { option_error("net option missing ':'"); } if (!get_tok(&options, port_number, (int)sizeof(port_number), ',')) { option_error("net option missing port"); } gdata->net_hostname = HPROF_MALLOC((int)strlen(suboption)+1); (void)strcpy(gdata->net_hostname, suboption); gdata->net_port = (int)strtol(port_number, NULL, 10); file_or_net_option_seen = JNI_TRUE; } else if (strcmp(option, "format") == 0) { if (!get_tok(&options, suboption, (int)sizeof(suboption), ',')) { option_error("syntax error parsing format=a|b"); } if (strcmp(suboption, "a") == 0) { gdata->output_format = 'a'; } else if (strcmp(suboption, "b") == 0) { gdata->output_format = 'b'; } else { option_error("format option value must be a|b"); } } else if (strcmp(option, "depth") == 0) { if (!get_tok(&options, suboption, (int)sizeof(suboption), ',')) { option_error("syntax error parsing depth=DECIMAL"); } gdata->max_trace_depth = (int)strtol(suboption, &endptr, 10); if ((endptr != NULL && *endptr != 0) || gdata->max_trace_depth < 0) { option_error("depth option value must be decimal and >= 0"); } gdata->prof_trace_depth = gdata->max_trace_depth; } else if (strcmp(option, "interval") == 0) { if (!get_tok(&options, suboption, (int)sizeof(suboption), ',')) { option_error("syntax error parsing interval=DECIMAL"); } gdata->sample_interval = (int)strtol(suboption, &endptr, 10); if ((endptr != NULL && *endptr != 0) || gdata->sample_interval <= 0) { option_error("interval option value must be decimal and > 0"); } } else if (strcmp(option, "cutoff") == 0) { if (!get_tok(&options, suboption, (int)sizeof(suboption), ',')) { option_error("syntax error parsing cutoff=DOUBLE"); } gdata->cutoff_point = strtod(suboption, &endptr); if ((endptr != NULL && *endptr != 0) || gdata->cutoff_point < 0) { option_error("cutoff option value must be floating point and >= 0"); } } else if (strcmp(option, "cpu") == 0) { if (!get_tok(&options, suboption, (int)sizeof(suboption), ',')) { option_error("syntax error parsing cpu=y|samples|times|old"); } if ((strcmp(suboption, "samples") == 0) || (strcmp(suboption, "y") == 0)) { gdata->cpu_sampling = JNI_TRUE; } else if (strcmp(suboption, "times") == 0) { gdata->cpu_timing = JNI_TRUE; gdata->old_timing_format = JNI_FALSE; } else if (strcmp(suboption, "old") == 0) { gdata->cpu_timing = JNI_TRUE; 
gdata->old_timing_format = JNI_TRUE; } else { option_error("cpu option value must be y|samples|times|old"); } } else if (strcmp(option, "heap") == 0) { if (!get_tok(&options, suboption, (int)sizeof(suboption), ',')) { option_error("syntax error parsing heap=dump|sites|all"); } if (strcmp(suboption, "dump") == 0) { gdata->heap_dump = JNI_TRUE; } else if (strcmp(suboption, "sites") == 0) { gdata->alloc_sites = JNI_TRUE; } else if (strcmp(suboption, "all") == 0) { gdata->heap_dump = JNI_TRUE; gdata->alloc_sites = JNI_TRUE; } else { option_error("heap option value must be dump|sites|all"); } } else if( strcmp(option,"lineno") == 0) { if ( !setBinarySwitch(&options, &(gdata->lineno_in_traces)) ) { option_error("lineno option value must be y|n"); } } else if( strcmp(option,"thread") == 0) { if ( !setBinarySwitch(&options, &(gdata->thread_in_traces)) ) { option_error("thread option value must be y|n"); } } else if( strcmp(option,"doe") == 0) { if ( !setBinarySwitch(&options, &(gdata->dump_on_exit)) ) { option_error("doe option value must be y|n"); } } else if( strcmp(option,"msa") == 0) { if ( !setBinarySwitch(&options, &(gdata->micro_state_accounting)) ) { option_error("msa option value must be y|n"); } } else if( strcmp(option,"force") == 0) { if ( !setBinarySwitch(&options, &(gdata->force_output)) ) { option_error("force option value must be y|n"); } } else if( strcmp(option,"verbose") == 0) { if ( !setBinarySwitch(&options, &(gdata->verbose)) ) { option_error("verbose option value must be y|n"); } } else if( strcmp(option,"primfields") == 0) { if ( !setBinarySwitch(&options, &(gdata->primfields)) ) { option_error("primfields option value must be y|n"); } } else if( strcmp(option,"primarrays") == 0) { if ( !setBinarySwitch(&options, &(gdata->primarrays)) ) { option_error("primarrays option value must be y|n"); } } else if( strcmp(option,"monitor") == 0) { if ( !setBinarySwitch(&options, &(gdata->monitor_tracing)) ) { option_error("monitor option value must be y|n"); } } else if( strcmp(option,"gc_okay") == 0) { if ( !setBinarySwitch(&options, &(gdata->gc_okay)) ) { option_error("gc_okay option value must be y|n"); } } else if (strcmp(option, "logflags") == 0) { if (!get_tok(&options, suboption, (int)sizeof(suboption), ',')) { option_error("logflags option value must be numeric"); } gdata->logflags = (int)strtol(suboption, NULL, 0); } else if (strcmp(option, "debugflags") == 0) { if (!get_tok(&options, suboption, (int)sizeof(suboption), ',')) { option_error("debugflags option value must be numeric"); } gdata->debugflags = (int)strtol(suboption, NULL, 0); } else if (strcmp(option, "coredump") == 0) { if ( !setBinarySwitch(&options, &(gdata->coredump)) ) { option_error("coredump option value must be y|n"); } } else if (strcmp(option, "exitpause") == 0) { option_error("The exitpause option was removed, use -XX:OnError='cmd %%p'"); } else if (strcmp(option, "errorexit") == 0) { if ( !setBinarySwitch(&options, &(gdata->errorexit)) ) { option_error("errorexit option value must be y|n"); } } else if (strcmp(option, "pause") == 0) { if ( !setBinarySwitch(&options, &(gdata->pause)) ) { option_error("pause option value must be y|n"); } } else if (strcmp(option, "debug") == 0) { if ( !setBinarySwitch(&options, &(gdata->debug)) ) { option_error("debug option value must be y|n"); } } else if (strcmp(option, "precrash") == 0) { option_error("The precrash option was removed, use -XX:OnError='precrash -p %%p'"); } else if (strcmp(option, "X") == 0) { if (!get_tok(&options, suboption, (int)sizeof(suboption), 
',')) { option_error("X option value must be numeric"); } gdata->experiment = (int)strtol(suboption, NULL, 0); } else { char errmsg[80]; (void)strcpy(errmsg, "Unknown option: "); (void)strcat(errmsg, option); option_error(errmsg); } } if (gdata->output_format == 'b') { if (gdata->cpu_timing) { option_error("cpu=times|old is not supported with format=b"); } if (gdata->monitor_tracing) { option_error("monitor=y is not supported with format=b"); } } if (gdata->old_timing_format) { gdata->prof_trace_depth = 2; } if (gdata->output_format == 'b') { default_filename = DEFAULT_OUTPUTFILE; } else { default_filename = DEFAULT_OUTPUTFILE DEFAULT_TXT_SUFFIX; } if (!file_or_net_option_seen) { gdata->utf8_output_filename = HPROF_MALLOC((int)strlen(default_filename)+1); (void)strcpy(gdata->utf8_output_filename, default_filename); } if ( gdata->utf8_output_filename != NULL ) { /* UTF-8 to platform encoding (fill in gdata->output_filename) */ ulen = (int)strlen(gdata->utf8_output_filename); gdata->output_filename = (char*)HPROF_MALLOC(ulen*3+3); #ifdef SKIP_NPT (void)strcpy(gdata->output_filename, gdata->utf8_output_filename); #else (void)(gdata->npt->utf8ToPlatform) (gdata->npt->utf, (jbyte*)gdata->utf8_output_filename, ulen, gdata->output_filename, ulen*3+3); #endif } /* By default we turn on gdata->alloc_sites and gdata->heap_dump */ if ( !gdata->cpu_timing && !gdata->cpu_sampling && !gdata->monitor_tracing && !gdata->alloc_sites && !gdata->heap_dump) { gdata->heap_dump = JNI_TRUE; gdata->alloc_sites = JNI_TRUE; } if ( gdata->alloc_sites || gdata->heap_dump ) { gdata->obj_watch = JNI_TRUE; } if ( gdata->obj_watch || gdata->cpu_timing ) { gdata->bci = JNI_TRUE; } /* Create files & sockets needed */ if (gdata->heap_dump) { char *base; int len; /* Get a fast tempfile for the heap information */ base = gdata->output_filename; if ( base==NULL ) { base = default_filename; } len = (int)strlen(base); gdata->heapfilename = HPROF_MALLOC(len + 5); (void)strcpy(gdata->heapfilename, base); (void)strcat(gdata->heapfilename, ".TMP"); make_unique_filename(&(gdata->heapfilename)); (void)remove(gdata->heapfilename); if (gdata->output_format == 'b') { if ( gdata->logflags & LOG_CHECK_BINARY ) { char * check_suffix; check_suffix = ".check" DEFAULT_TXT_SUFFIX; gdata->checkfilename = HPROF_MALLOC((int)strlen(default_filename)+ (int)strlen(check_suffix)+1); (void)strcpy(gdata->checkfilename, default_filename); (void)strcat(gdata->checkfilename, check_suffix); (void)remove(gdata->checkfilename); gdata->check_fd = md_creat(gdata->checkfilename); } if ( gdata->debug ) { gdata->logflags |= LOG_CHECK_BINARY; } gdata->heap_fd = md_creat_binary(gdata->heapfilename); } else { gdata->heap_fd = md_creat(gdata->heapfilename); } if ( gdata->heap_fd < 0 ) { char errmsg[FILENAME_MAX+80]; (void)md_snprintf(errmsg, sizeof(errmsg), "can't create temp heap file: %s", gdata->heapfilename); errmsg[sizeof(errmsg)-1] = 0; HPROF_ERROR(JNI_TRUE, errmsg); } } if ( gdata->net_port > 0 ) { LOG2("Agent_OnLoad", "Connecting to socket"); gdata->fd = connect_to_socket(gdata->net_hostname, (unsigned short)gdata->net_port); if (gdata->fd <= 0) { char errmsg[120]; (void)md_snprintf(errmsg, sizeof(errmsg), "can't connect to %s:%u", gdata->net_hostname, gdata->net_port); errmsg[sizeof(errmsg)-1] = 0; HPROF_ERROR(JNI_FALSE, errmsg); error_exit_process(1); } gdata->socket = JNI_TRUE; } else { /* If going out to a file, obey the force=y|n option */ if ( !gdata->force_output ) { make_unique_filename(&(gdata->output_filename)); } /* Make doubly sure this file does 
NOT exist */ (void)remove(gdata->output_filename); /* Create the file */ if (gdata->output_format == 'b') { gdata->fd = md_creat_binary(gdata->output_filename); } else { gdata->fd = md_creat(gdata->output_filename); } if (gdata->fd < 0) { char errmsg[FILENAME_MAX+80]; (void)md_snprintf(errmsg, sizeof(errmsg), "can't create profile file: %s", gdata->output_filename); errmsg[sizeof(errmsg)-1] = 0; HPROF_ERROR(JNI_FALSE, errmsg); error_exit_process(1); } } } /* ------------------------------------------------------------------- */ /* Data reset and dump functions */ static void reset_all_data(void) { if (gdata->cpu_sampling || gdata->cpu_timing || gdata->monitor_tracing) { rawMonitorEnter(gdata->data_access_lock); } if (gdata->cpu_sampling || gdata->cpu_timing) { trace_clear_cost(); } if (gdata->monitor_tracing) { monitor_clear(); } if (gdata->cpu_sampling || gdata->cpu_timing || gdata->monitor_tracing) { rawMonitorExit(gdata->data_access_lock); } } static void reset_class_load_status(JNIEnv *env, jthread thread); static void dump_all_data(JNIEnv *env) { verbose_message("Dumping"); if (gdata->monitor_tracing) { verbose_message(" contended monitor usage ..."); tls_dump_monitor_state(env); monitor_write_contended_time(env, gdata->cutoff_point); } if (gdata->heap_dump) { verbose_message(" Java heap ..."); /* Update the class table */ reset_class_load_status(env, NULL); site_heapdump(env); } if (gdata->alloc_sites) { verbose_message(" allocation sites ..."); site_write(env, 0, gdata->cutoff_point); } if (gdata->cpu_sampling) { verbose_message(" CPU usage by sampling running threads ..."); trace_output_cost(env, gdata->cutoff_point); } if (gdata->cpu_timing) { if (!gdata->old_timing_format) { verbose_message(" CPU usage by timing methods ..."); trace_output_cost(env, gdata->cutoff_point); } else { verbose_message(" CPU usage in old prof format ..."); trace_output_cost_in_prof_format(env); } } reset_all_data(); io_flush(); verbose_message(" done.\n"); } /* ------------------------------------------------------------------- */ /* Dealing with class load and unload status */ static void reset_class_load_status(JNIEnv *env, jthread thread) { WITH_LOCAL_REFS(env, 1) { jint class_count; jclass *classes; jint i; /* Get all classes from JVMTI, make sure they are in the class table. */ getLoadedClasses(&classes, &class_count); /* We don't know if the class list has changed really, so we * guess by the class count changing. Don't want to do * a bunch of work on classes when it's unnecessary. * I assume that even though we have global references on the * jclass object that the class is still considered unloaded. * (e.g. GC of jclass isn't required for it to be included * in the unloaded list, or not in the load list) * [Note: Use of Weak references was a performance problem.] */ if ( class_count != gdata->class_count ) { rawMonitorEnter(gdata->data_access_lock); { /* Unmark the classes in the load list */ class_all_status_remove(CLASS_IN_LOAD_LIST); /* Pretend like it was a class load event */ for ( i = 0 ; i < class_count ; i++ ) { jobject loader; loader = getClassLoader(classes[i]); event_class_load(env, thread, classes[i], loader); } /* Process the classes that have been unloaded */ class_do_unloads(env); } rawMonitorExit(gdata->data_access_lock); } /* Free the space and save the count. 
*/ jvmtiDeallocate(classes); gdata->class_count = class_count; } END_WITH_LOCAL_REFS; } /* A GC or Death event has happened, so do some cleanup */ static void object_free_cleanup(JNIEnv *env, jboolean force_class_table_reset) { Stack *stack; /* Then we process the ObjectFreeStack */ rawMonitorEnter(gdata->object_free_lock); { stack = gdata->object_free_stack; gdata->object_free_stack = NULL; /* Will trigger new stack */ } rawMonitorExit(gdata->object_free_lock); /* Notice we just grabbed the stack of freed objects so * any object free events will create a new stack. */ if ( stack != NULL ) { int count; int i; count = stack_depth(stack); /* If we saw something freed in this GC */ if ( count > 0 ) { for ( i = 0 ; i < count ; i++ ) { ObjectIndex object_index; jlong tag; tag = *(jlong*)stack_element(stack,i); object_index = tag_extract(tag); (void)object_free(object_index); } /* We reset the class load status (only do this once) */ reset_class_load_status(env, NULL); force_class_table_reset = JNI_FALSE; } /* Just terminate this stack object */ stack_term(stack); } /* We reset the class load status if we haven't and need to */ if ( force_class_table_reset ) { reset_class_load_status(env, NULL); } } /* Main function for thread that watches for GC finish events */ static void JNICALL gc_finish_watcher(jvmtiEnv *jvmti, JNIEnv *env, void *p) { jboolean active; active = JNI_TRUE; /* Indicate the watcher thread is active */ rawMonitorEnter(gdata->gc_finish_lock); { gdata->gc_finish_active = JNI_TRUE; } rawMonitorExit(gdata->gc_finish_lock); /* Loop while active */ while ( active ) { jboolean do_cleanup; do_cleanup = JNI_FALSE; rawMonitorEnter(gdata->gc_finish_lock); { /* Don't wait if VM_DEATH wants us to quit */ if ( gdata->gc_finish_stop_request ) { /* Time to terminate */ active = JNI_FALSE; } else { /* Wait for notification to do cleanup, or terminate */ rawMonitorWait(gdata->gc_finish_lock, 0); /* After wait, check to see if VM_DEATH wants us to quit */ if ( gdata->gc_finish_stop_request ) { /* Time to terminate */ active = JNI_FALSE; } } if ( active && gdata->gc_finish > 0 ) { /* Time to cleanup, reset count and prepare for cleanup */ gdata->gc_finish = 0; do_cleanup = JNI_TRUE; } } rawMonitorExit(gdata->gc_finish_lock); /* Do the cleanup if requested outside gc_finish_lock */ if ( do_cleanup ) { /* Free up all freed objects, don't force class table reset * We cannot let the VM_DEATH complete while we are doing * this cleanup. So if during this, VM_DEATH happens, * the VM_DEATH callback should block waiting for this * loop to terminate, and send a notification to the * VM_DEATH thread. */ object_free_cleanup(env, JNI_FALSE); /* Cleanup the tls table where the Thread objects were GC'd */ tls_garbage_collect(env); } } /* Falling out means VM_DEATH is happening, we need to notify VM_DEATH * that we are done doing the cleanup. VM_DEATH is waiting on this * notify. 
*/ rawMonitorEnter(gdata->gc_finish_lock); { gdata->gc_finish_active = JNI_FALSE; rawMonitorNotifyAll(gdata->gc_finish_lock); } rawMonitorExit(gdata->gc_finish_lock); } /* ------------------------------------------------------------------- */ /* JVMTI Event callback functions */ static void setup_event_mode(jboolean onload_set_only, jvmtiEventMode state) { if ( onload_set_only ) { setEventNotificationMode(state, JVMTI_EVENT_VM_INIT, NULL); setEventNotificationMode(state, JVMTI_EVENT_VM_DEATH, NULL); if (gdata->bci) { setEventNotificationMode(state, JVMTI_EVENT_CLASS_FILE_LOAD_HOOK, NULL); } } else { /* Enable all other JVMTI events of interest now. */ setEventNotificationMode(state, JVMTI_EVENT_THREAD_START, NULL); setEventNotificationMode(state, JVMTI_EVENT_THREAD_END, NULL); setEventNotificationMode(state, JVMTI_EVENT_CLASS_LOAD, NULL); setEventNotificationMode(state, JVMTI_EVENT_CLASS_PREPARE, NULL); setEventNotificationMode(state, JVMTI_EVENT_DATA_DUMP_REQUEST, NULL); if (gdata->cpu_timing) { setEventNotificationMode(state, JVMTI_EVENT_EXCEPTION_CATCH, NULL); } if (gdata->monitor_tracing) { setEventNotificationMode(state, JVMTI_EVENT_MONITOR_WAIT, NULL); setEventNotificationMode(state, JVMTI_EVENT_MONITOR_WAITED, NULL); setEventNotificationMode(state, JVMTI_EVENT_MONITOR_CONTENDED_ENTER, NULL); setEventNotificationMode(state, JVMTI_EVENT_MONITOR_CONTENDED_ENTERED, NULL); } if (gdata->obj_watch) { setEventNotificationMode(state, JVMTI_EVENT_OBJECT_FREE, NULL); } setEventNotificationMode(state, JVMTI_EVENT_GARBAGE_COLLECTION_START, NULL); setEventNotificationMode(state, JVMTI_EVENT_GARBAGE_COLLECTION_FINISH, NULL); } } /* JVMTI_EVENT_VM_INIT */ static void JNICALL cbVMInit(jvmtiEnv *jvmti, JNIEnv *env, jthread thread) { rawMonitorEnter(gdata->data_access_lock); { LoaderIndex loader_index; ClassIndex cnum; TlsIndex tls_index; gdata->jvm_initializing = JNI_TRUE; /* Header to use in heap dumps */ gdata->header = "JAVA PROFILE 1.0.1"; gdata->segmented = JNI_FALSE; if (gdata->output_format == 'b') { /* We need JNI here to call in and get the current maximum memory */ gdata->maxMemory = getMaxMemory(env); gdata->maxHeapSegment = (jlong)2000000000; /* More than 2Gig triggers segments and 1.0.2 */ if ( gdata->maxMemory >= gdata->maxHeapSegment ) { gdata->header = "JAVA PROFILE 1.0.2"; gdata->segmented = JNI_TRUE; /* 1.0.2 */ } } /* We write the initial header after the VM initializes now * because we needed to use JNI to get maxMemory and determine if * a 1.0.1 or a 1.0.2 header will be used. * This used to be done in Agent_OnLoad. 
*/ io_write_file_header(); LOG("cbVMInit begin"); /* Create a system loader entry first */ loader_index = loader_find_or_create(NULL,NULL); /* Find the thread jclass (does JNI calls) */ gdata->thread_cnum = class_find_or_create("Ljava/lang/Thread;", loader_index); class_add_status(gdata->thread_cnum, CLASS_SYSTEM); /* Issue fake system thread start */ tls_index = tls_find_or_create(env, thread); /* Setup the Tracker class (should be first class in table) */ tracker_setup_class(); /* Find selected system classes to keep track of */ gdata->system_class_size = 0; cnum = class_find_or_create("Ljava/lang/Object;", loader_index); gdata->system_trace_index = tls_get_trace(tls_index, env, gdata->max_trace_depth, JNI_FALSE); gdata->system_object_site_index = site_find_or_create( cnum, gdata->system_trace_index); /* Used to ID HPROF generated items */ gdata->hprof_trace_index = tls_get_trace(tls_index, env, gdata->max_trace_depth, JNI_FALSE); gdata->hprof_site_index = site_find_or_create( cnum, gdata->hprof_trace_index); if ( gdata->logflags & LOG_DUMP_LISTS ) { list_all_tables(); } /* Prime the class table */ reset_class_load_status(env, thread); /* Find the tracker jclass and jmethodID's (does JNI calls) */ if ( gdata->bci ) { tracker_setup_methods(env); } /* Start any agent threads (does JNI, JVMTI, and Java calls) */ /* Thread to watch for gc_finish events */ rawMonitorEnter(gdata->gc_finish_lock); { createAgentThread(env, "HPROF gc_finish watcher", &gc_finish_watcher); } rawMonitorExit(gdata->gc_finish_lock); /* Start up listener thread if we need it */ if ( gdata->socket ) { listener_init(env); } /* Start up cpu sampling thread if we need it */ if ( gdata->cpu_sampling ) { /* Note: this could also get started later (see cpu) */ cpu_sample_init(env); } /* Setup event modes */ setup_event_mode(JNI_FALSE, JVMTI_ENABLE); /* Engage tracking (sets Java Tracker field so injections call into * agent library). */ if ( gdata->bci ) { tracker_engage(env); } /* Indicate the VM is initialized now */ gdata->jvm_initialized = JNI_TRUE; gdata->jvm_initializing = JNI_FALSE; LOG("cbVMInit end"); } rawMonitorExit(gdata->data_access_lock); } /* JVMTI_EVENT_VM_DEATH */ static void JNICALL cbVMDeath(jvmtiEnv *jvmti, JNIEnv *env) { /* * Use local flag to minimize gdata->dump_lock hold time. */ jboolean need_to_dump = JNI_FALSE; LOG("cbVMDeath"); /* Shutdown thread watching gc_finish, outside CALLBACK locks. * We need to make sure the watcher thread is done doing any cleanup * work before we continue here. */ rawMonitorEnter(gdata->gc_finish_lock); { /* Notify watcher thread to finish up, it will send * another notify when done. If the watcher thread is busy * cleaning up, it will detect gc_finish_stop_request when it's done. * Then it sets gc_finish_active to JNI_FALSE and will notify us. * If the watcher thread is waiting to be notified, then the * notification wakes it up. * We do not want to do the VM_DEATH while the gc_finish * watcher thread is in the middle of a cleanup. */ gdata->gc_finish_stop_request = JNI_TRUE; rawMonitorNotifyAll(gdata->gc_finish_lock); /* Wait for the gc_finish watcher thread to notify us it's done */ while ( gdata->gc_finish_active ) { rawMonitorWait(gdata->gc_finish_lock,0); } } rawMonitorExit(gdata->gc_finish_lock); /* The gc_finish watcher thread should be done now, or done shortly. */ /* BEGIN_CALLBACK/END_CALLBACK handling. */ /* The callbackBlock prevents any active callbacks from returning * back to the VM, and also blocks all new callbacks. 
* We want to prevent any threads from premature death, so * that we don't have to worry about that during thread queries * in this final dump process. */ rawMonitorEnter(gdata->callbackBlock); { /* We need to wait for all callbacks actively executing to block * on exit, and new ones will block on entry. * The BEGIN_CALLBACK/END_CALLBACK macros keep track of callbacks * that are active. * Once the last active callback is done, it will notify this * thread and block. */ rawMonitorEnter(gdata->callbackLock); { /* Turn off native calls */ if ( gdata->bci ) { tracker_disengage(env); } gdata->vm_death_callback_active = JNI_TRUE; while (gdata->active_callbacks > 0) { rawMonitorWait(gdata->callbackLock, 0); } } rawMonitorExit(gdata->callbackLock); /* Now we know that no threads will die on us, being blocked * on some event callback, at a minimum ThreadEnd. */ /* Make some basic checks. */ rawMonitorEnter(gdata->data_access_lock); { if ( gdata->jvm_initializing ) { HPROF_ERROR(JNI_TRUE, "VM Death during VM Init"); return; } if ( !gdata->jvm_initialized ) { HPROF_ERROR(JNI_TRUE, "VM Death before VM Init"); return; } if (gdata->jvm_shut_down) { HPROF_ERROR(JNI_TRUE, "VM Death more than once?"); return; } } rawMonitorExit(gdata->data_access_lock); /* Shutdown the cpu loop thread */ if ( gdata->cpu_sampling ) { cpu_sample_term(env); } /* Time to dump the final data */ rawMonitorEnter(gdata->dump_lock); { gdata->jvm_shut_down = JNI_TRUE; if (!gdata->dump_in_process) { need_to_dump = JNI_TRUE; gdata->dump_in_process = JNI_TRUE; /* * Setting gdata->dump_in_process will cause cpu sampling to pause * (if we are sampling). We don't resume sampling after the * dump_all_data() call below because the VM is shutting * down. */ } } rawMonitorExit(gdata->dump_lock); /* Dump everything if we need to */ if (gdata->dump_on_exit && need_to_dump) { dump_all_data(env); } /* Disable all events and callbacks now, all of them. * NOTE: It's important that this be done after the dump, as * it prevents other threads from messing up the data * because they will block on ThreadStart and ThreadEnd * events due to the CALLBACK block.
*/ set_callbacks(JNI_FALSE); setup_event_mode(JNI_FALSE, JVMTI_DISABLE); setup_event_mode(JNI_TRUE, JVMTI_DISABLE); /* Write tail of file */ io_write_file_footer(); } rawMonitorExit(gdata->callbackBlock); /* Shutdown the listener thread and socket, or flush I/O buffers */ if (gdata->socket) { listener_term(env); } else { io_flush(); } /* Close the file descriptors down */ if ( gdata->fd >= 0 ) { (void)md_close(gdata->fd); gdata->fd = -1; if ( gdata->logflags & LOG_CHECK_BINARY ) { if (gdata->output_format == 'b' && gdata->output_filename != NULL) { check_binary_file(gdata->output_filename); } } } if ( gdata->heap_fd >= 0 ) { (void)md_close(gdata->heap_fd); gdata->heap_fd = -1; } if ( gdata->check_fd >= 0 ) { (void)md_close(gdata->check_fd); gdata->check_fd = -1; } /* Remove the temporary heap file */ if (gdata->heap_dump) { (void)remove(gdata->heapfilename); } /* If logging, dump the tables */ if ( gdata->logflags & LOG_DUMP_LISTS ) { list_all_tables(); } /* Make sure all global references are deleted */ class_delete_global_references(env); loader_delete_global_references(env); tls_delete_global_references(env); } /* JVMTI_EVENT_THREAD_START */ static void JNICALL cbThreadStart(jvmtiEnv *jvmti, JNIEnv *env, jthread thread) { LOG3("cbThreadStart", "thread is", (int)(long)(ptrdiff_t)thread); BEGIN_CALLBACK() { event_thread_start(env, thread); } END_CALLBACK(); } /* JVMTI_EVENT_THREAD_END */ static void JNICALL cbThreadEnd(jvmtiEnv *jvmti, JNIEnv *env, jthread thread) { LOG3("cbThreadEnd", "thread is", (int)(long)(ptrdiff_t)thread); BEGIN_CALLBACK() { event_thread_end(env, thread); } END_CALLBACK(); } /* JVMTI_EVENT_CLASS_FILE_LOAD_HOOK */ static void JNICALL cbClassFileLoadHook(jvmtiEnv *jvmti_env, JNIEnv* env, jclass class_being_redefined, jobject loader, const char* name, jobject protection_domain, jint class_data_len, const unsigned char* class_data, jint* new_class_data_len, unsigned char** new_class_data) { /* WARNING: This will be called before VM_INIT. */ LOG2("cbClassFileLoadHook:",(name==NULL?"Unknown":name)); if (!gdata->bci) { return; } BEGIN_CALLBACK() { rawMonitorEnter(gdata->data_access_lock); { const char *classname; if ( gdata->bci_counter == 0 ) { /* Prime the system classes */ class_prime_system_classes(); } gdata->bci_counter++; *new_class_data_len = 0; *new_class_data = NULL; /* Name could be NULL */ if ( name == NULL ) { classname = ((JavaCrwDemoClassname) (gdata->java_crw_demo_classname_function)) (class_data, class_data_len, &my_crw_fatal_error_handler); if ( classname == NULL ) { HPROF_ERROR(JNI_TRUE, "No classname in classfile"); } } else { classname = strdup(name); if ( classname == NULL ) { HPROF_ERROR(JNI_TRUE, "Ran out of malloc() space"); } } /* The tracker class itself? 
*/ if ( strcmp(classname, TRACKER_CLASS_NAME) != 0 ) { ClassIndex cnum; int system_class; unsigned char * new_image; long new_length; int len; char *signature; LoaderIndex loader_index; LOG2("cbClassFileLoadHook injecting class" , classname); /* Define a unique class number for this class */ len = (int)strlen(classname); signature = HPROF_MALLOC(len+3); signature[0] = JVM_SIGNATURE_CLASS; (void)memcpy(signature+1, classname, len); signature[len+1] = JVM_SIGNATURE_ENDCLASS; signature[len+2] = 0; loader_index = loader_find_or_create(env,loader); if ( class_being_redefined != NULL ) { cnum = class_find_or_create(signature, loader_index); } else { cnum = class_create(signature, loader_index); } HPROF_FREE(signature); signature = NULL; /* Make sure class doesn't get unloaded by accident */ class_add_status(cnum, CLASS_IN_LOAD_LIST); /* Is it a system class? */ system_class = 0; if ( (!gdata->jvm_initialized) && (!gdata->jvm_initializing) && ( ( class_get_status(cnum) & CLASS_SYSTEM) != 0 || gdata->bci_counter < 8 ) ) { system_class = 1; LOG2(classname, " is a system class"); } new_image = NULL; new_length = 0; /* Call the class file reader/write demo code */ ((JavaCrwDemo)(gdata->java_crw_demo_function))( cnum, classname, class_data, class_data_len, system_class, TRACKER_CLASS_NAME, TRACKER_CLASS_SIG, (gdata->cpu_timing)?TRACKER_CALL_NAME:NULL, (gdata->cpu_timing)?TRACKER_CALL_SIG:NULL, (gdata->cpu_timing)?TRACKER_RETURN_NAME:NULL, (gdata->cpu_timing)?TRACKER_RETURN_SIG:NULL, (gdata->obj_watch)?TRACKER_OBJECT_INIT_NAME:NULL, (gdata->obj_watch)?TRACKER_OBJECT_INIT_SIG:NULL, (gdata->obj_watch)?TRACKER_NEWARRAY_NAME:NULL, (gdata->obj_watch)?TRACKER_NEWARRAY_SIG:NULL, &new_image, &new_length, &my_crw_fatal_error_handler, &class_set_methods); if ( new_length > 0 ) { unsigned char *jvmti_space; LOG2("cbClassFileLoadHook DID inject this class", classname); jvmti_space = (unsigned char *)jvmtiAllocate((jint)new_length); (void)memcpy((void*)jvmti_space, (void*)new_image, (int)new_length); *new_class_data_len = (jint)new_length; *new_class_data = jvmti_space; /* VM will deallocate */ } else { LOG2("cbClassFileLoadHook DID NOT inject this class", classname); *new_class_data_len = 0; *new_class_data = NULL; } if ( new_image != NULL ) { (void)free((void*)new_image); /* Free malloc() space with free() */ } } (void)free((void*)classname); } rawMonitorExit(gdata->data_access_lock); } END_CALLBACK(); } /* JVMTI_EVENT_CLASS_LOAD */ static void JNICALL cbClassLoad(jvmtiEnv *jvmti, JNIEnv *env, jthread thread, jclass klass) { /* WARNING: This MAY be called before VM_INIT. */ LOG("cbClassLoad"); BEGIN_CALLBACK() { rawMonitorEnter(gdata->data_access_lock); { WITH_LOCAL_REFS(env, 1) { jobject loader; loader = getClassLoader(klass); event_class_load(env, thread, klass, loader); } END_WITH_LOCAL_REFS; } rawMonitorExit(gdata->data_access_lock); } END_CALLBACK(); } /* JVMTI_EVENT_CLASS_PREPARE */ static void JNICALL cbClassPrepare(jvmtiEnv *jvmti, JNIEnv *env, jthread thread, jclass klass) { /* WARNING: This will be called before VM_INIT. 
*/ LOG("cbClassPrepare"); BEGIN_CALLBACK() { rawMonitorEnter(gdata->data_access_lock); { WITH_LOCAL_REFS(env, 1) { jobject loader; loader = NULL; loader = getClassLoader(klass); event_class_prepare(env, thread, klass, loader); } END_WITH_LOCAL_REFS; } rawMonitorExit(gdata->data_access_lock); } END_CALLBACK(); } /* JVMTI_EVENT_DATA_DUMP_REQUEST */ static void JNICALL cbDataDumpRequest(jvmtiEnv *jvmti) { jboolean need_to_dump; LOG("cbDataDumpRequest"); BEGIN_CALLBACK() { need_to_dump = JNI_FALSE; rawMonitorEnter(gdata->dump_lock); { if (!gdata->dump_in_process) { need_to_dump = JNI_TRUE; gdata->dump_in_process = JNI_TRUE; } } rawMonitorExit(gdata->dump_lock); if (need_to_dump) { dump_all_data(getEnv()); rawMonitorEnter(gdata->dump_lock); { gdata->dump_in_process = JNI_FALSE; } rawMonitorExit(gdata->dump_lock); if (gdata->cpu_sampling && !gdata->jvm_shut_down) { cpu_sample_on(NULL, 0); /* resume sampling */ } } } END_CALLBACK(); } /* JVMTI_EVENT_EXCEPTION_CATCH */ static void JNICALL cbExceptionCatch(jvmtiEnv *jvmti, JNIEnv* env, jthread thread, jmethodID method, jlocation location, jobject exception) { LOG("cbExceptionCatch"); BEGIN_CALLBACK() { event_exception_catch(env, thread, method, location, exception); } END_CALLBACK(); } /* JVMTI_EVENT_MONITOR_WAIT */ static void JNICALL cbMonitorWait(jvmtiEnv *jvmti, JNIEnv* env, jthread thread, jobject object, jlong timeout) { LOG("cbMonitorWait"); BEGIN_CALLBACK() { monitor_wait_event(env, thread, object, timeout); } END_CALLBACK(); } /* JVMTI_EVENT_MONITOR_WAITED */ static void JNICALL cbMonitorWaited(jvmtiEnv *jvmti, JNIEnv* env, jthread thread, jobject object, jboolean timed_out) { LOG("cbMonitorWaited"); BEGIN_CALLBACK() { monitor_waited_event(env, thread, object, timed_out); } END_CALLBACK(); } /* JVMTI_EVENT_MONITOR_CONTENDED_ENTER */ static void JNICALL cbMonitorContendedEnter(jvmtiEnv *jvmti, JNIEnv* env, jthread thread, jobject object) { LOG("cbMonitorContendedEnter"); BEGIN_CALLBACK() { monitor_contended_enter_event(env, thread, object); } END_CALLBACK(); } /* JVMTI_EVENT_MONITOR_CONTENDED_ENTERED */ static void JNICALL cbMonitorContendedEntered(jvmtiEnv *jvmti, JNIEnv* env, jthread thread, jobject object) { LOG("cbMonitorContendedEntered"); BEGIN_CALLBACK() { monitor_contended_entered_event(env, thread, object); } END_CALLBACK(); } /* JVMTI_EVENT_GARBAGE_COLLECTION_START */ static void JNICALL cbGarbageCollectionStart(jvmtiEnv *jvmti) { LOG("cbGarbageCollectionStart"); /* Only calls to Allocate, Deallocate, RawMonitorEnter & RawMonitorExit * are allowed here (see the JVMTI Spec). */ gdata->gc_start_time = md_get_timemillis(); } /* JVMTI_EVENT_GARBAGE_COLLECTION_FINISH */ static void JNICALL cbGarbageCollectionFinish(jvmtiEnv *jvmti) { LOG("cbGarbageCollectionFinish"); /* Only calls to Allocate, Deallocate, RawMonitorEnter & RawMonitorExit * are allowed here (see the JVMTI Spec). */ if ( gdata->gc_start_time != -1L ) { gdata->time_in_gc += (md_get_timemillis() - gdata->gc_start_time); gdata->gc_start_time = -1L; } /* Increment gc_finish counter, notify watcher thread */ rawMonitorEnter(gdata->gc_finish_lock); { /* If VM_DEATH is trying to shut it down, don't do anything at all. * Never send notify if VM_DEATH wants the watcher thread to quit. 
*/ if ( gdata->gc_finish_active ) { gdata->gc_finish++; rawMonitorNotifyAll(gdata->gc_finish_lock); } } rawMonitorExit(gdata->gc_finish_lock); } /* JVMTI_EVENT_OBJECT_FREE */ static void JNICALL cbObjectFree(jvmtiEnv *jvmti, jlong tag) { LOG3("cbObjectFree", "tag", (int)tag); /* Only calls to Allocate, Deallocate, RawMonitorEnter & RawMonitorExit * are allowed here (see the JVMTI Spec). */ HPROF_ASSERT(tag!=(jlong)0); rawMonitorEnter(gdata->object_free_lock); { if ( !gdata->jvm_shut_down ) { Stack *stack; stack = gdata->object_free_stack; if ( stack == NULL ) { gdata->object_free_stack = stack_init(512, 512, sizeof(jlong)); stack = gdata->object_free_stack; } stack_push(stack, (void*)&tag); } } rawMonitorExit(gdata->object_free_lock); } static void set_callbacks(jboolean on) { jvmtiEventCallbacks callbacks; (void)memset(&callbacks,0,sizeof(callbacks)); if ( ! on ) { setEventCallbacks(&callbacks); return; } /* JVMTI_EVENT_VM_INIT */ callbacks.VMInit = &cbVMInit; /* JVMTI_EVENT_VM_DEATH */ callbacks.VMDeath = &cbVMDeath; /* JVMTI_EVENT_THREAD_START */ callbacks.ThreadStart = &cbThreadStart; /* JVMTI_EVENT_THREAD_END */ callbacks.ThreadEnd = &cbThreadEnd; /* JVMTI_EVENT_CLASS_FILE_LOAD_HOOK */ callbacks.ClassFileLoadHook = &cbClassFileLoadHook; /* JVMTI_EVENT_CLASS_LOAD */ callbacks.ClassLoad = &cbClassLoad; /* JVMTI_EVENT_CLASS_PREPARE */ callbacks.ClassPrepare = &cbClassPrepare; /* JVMTI_EVENT_DATA_DUMP_REQUEST */ callbacks.DataDumpRequest = &cbDataDumpRequest; /* JVMTI_EVENT_EXCEPTION_CATCH */ callbacks.ExceptionCatch = &cbExceptionCatch; /* JVMTI_EVENT_MONITOR_WAIT */ callbacks.MonitorWait = &cbMonitorWait; /* JVMTI_EVENT_MONITOR_WAITED */ callbacks.MonitorWaited = &cbMonitorWaited; /* JVMTI_EVENT_MONITOR_CONTENDED_ENTER */ callbacks.MonitorContendedEnter = &cbMonitorContendedEnter; /* JVMTI_EVENT_MONITOR_CONTENDED_ENTERED */ callbacks.MonitorContendedEntered = &cbMonitorContendedEntered; /* JVMTI_EVENT_GARBAGE_COLLECTION_START */ callbacks.GarbageCollectionStart = &cbGarbageCollectionStart; /* JVMTI_EVENT_GARBAGE_COLLECTION_FINISH */ callbacks.GarbageCollectionFinish = &cbGarbageCollectionFinish; /* JVMTI_EVENT_OBJECT_FREE */ callbacks.ObjectFree = &cbObjectFree; setEventCallbacks(&callbacks); } static void getCapabilities(void) { jvmtiCapabilities needed_capabilities; jvmtiCapabilities potential_capabilities; /* Fill in ones that we must have */ (void)memset(&needed_capabilities,0,sizeof(needed_capabilities)); needed_capabilities.can_generate_garbage_collection_events = 1; needed_capabilities.can_tag_objects = 1; if (gdata->bci) { needed_capabilities.can_generate_all_class_hook_events = 1; } if (gdata->obj_watch) { needed_capabilities.can_generate_object_free_events = 1; } if (gdata->cpu_timing || gdata->cpu_sampling) { #if 0 /* Not needed until we call JVMTI for CpuTime */ needed_capabilities.can_get_thread_cpu_time = 1; needed_capabilities.can_get_current_thread_cpu_time = 1; #endif needed_capabilities.can_generate_exception_events = 1; } if (gdata->monitor_tracing) { #if 0 /* Not needed until we call JVMTI for CpuTime */ needed_capabilities.can_get_thread_cpu_time = 1; needed_capabilities.can_get_current_thread_cpu_time = 1; #endif needed_capabilities.can_get_owned_monitor_info = 1; needed_capabilities.can_get_current_contended_monitor = 1; needed_capabilities.can_get_monitor_info = 1; needed_capabilities.can_generate_monitor_events = 1; } /* Get potential capabilities */ getPotentialCapabilities(&potential_capabilities); /* Some capabilities would be nicer to have */ 
needed_capabilities.can_get_source_file_name = potential_capabilities.can_get_source_file_name; needed_capabilities.can_get_line_numbers = potential_capabilities.can_get_line_numbers; /* Add the capabilities */ addCapabilities(&needed_capabilities); } /* Dynamic library loading */ static void * load_library(char *name) { char lname[FILENAME_MAX+1]; char err_buf[256+FILENAME_MAX+1]; char *boot_path; void *handle; handle = NULL; /* The library may be located in different ways, try both, but * if it comes from outside the SDK/jre it isn't ours. */ getSystemProperty("sun.boot.library.path", &boot_path); md_build_library_name(lname, FILENAME_MAX, boot_path, name); jvmtiDeallocate(boot_path); handle = md_load_library(lname, err_buf, (int)sizeof(err_buf)); if ( handle == NULL ) { /* This may be necessary on Windows. */ md_build_library_name(lname, FILENAME_MAX, "", name); handle = md_load_library(lname, err_buf, (int)sizeof(err_buf)); if ( handle == NULL ) { HPROF_ERROR(JNI_TRUE, err_buf); } } return handle; } /* Lookup dynamic function pointer in shared library */ static void * lookup_library_symbol(void *library, char **symbols, int nsymbols) { void *addr; int i; addr = NULL; for( i = 0 ; i < nsymbols; i++ ) { addr = md_find_library_entry(library, symbols[i]); if ( addr != NULL ) { break; } } if ( addr == NULL ) { char errmsg[256]; (void)md_snprintf(errmsg, sizeof(errmsg), "Cannot find library symbol '%s'", symbols[0]); HPROF_ERROR(JNI_TRUE, errmsg); } return addr; } /* ------------------------------------------------------------------- */ /* The OnLoad interface */ JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM *vm, char *options, void *reserved) { char *boot_path = NULL; char npt_lib[JVM_MAXPATHLEN]; /* See if it's already loaded */ if ( gdata!=NULL && gdata->isLoaded==JNI_TRUE ) { HPROF_ERROR(JNI_TRUE, "Cannot load this JVM TI agent twice, check your java command line for duplicate hprof options."); return JNI_ERR; } gdata = get_gdata(); gdata->isLoaded = JNI_TRUE; error_setup(); LOG2("Agent_OnLoad", "gdata setup"); gdata->jvm = vm; /* Get the JVMTI environment */ getJvmti(); #ifndef SKIP_NPT getSystemProperty("sun.boot.library.path", &boot_path); /* Load in NPT library for character conversions */ md_build_library_name(npt_lib, sizeof(npt_lib), boot_path, NPT_LIBNAME); jvmtiDeallocate(boot_path); NPT_INITIALIZE(npt_lib, &(gdata->npt), NPT_VERSION, NULL); if ( gdata->npt == NULL ) { HPROF_ERROR(JNI_TRUE, "Cannot load npt library"); } gdata->npt->utf = (gdata->npt->utfInitialize)(NULL); if ( gdata->npt->utf == NULL ) { HPROF_ERROR(JNI_TRUE, "Cannot initialize npt utf functions"); } #endif /* Lock needed to protect debug_malloc() code, which is not MT safe */ #ifdef DEBUG gdata->debug_malloc_lock = createRawMonitor("HPROF debug_malloc lock"); #endif parse_options(options); LOG2("Agent_OnLoad", "Has jvmtiEnv and options parsed"); /* Initialize machine dependent code (micro state accounting) */ md_init(); string_init(); /* Table index values look like: 0x10000000 */ class_init(); /* Table index values look like: 0x20000000 */ tls_init(); /* Table index values look like: 0x30000000 */ trace_init(); /* Table index values look like: 0x40000000 */ object_init(); /* Table index values look like: 0x50000000 */ site_init(); /* Table index values look like: 0x60000000 */ frame_init(); /* Table index values look like: 0x70000000 */ monitor_init(); /* Table index values look like: 0x80000000 */ loader_init(); /* Table index values look like: 0x90000000 */ LOG2("Agent_OnLoad", "Tables initialized"); if ( 
gdata->pause ) { error_do_pause(); } getCapabilities(); /* Set the JVMTI callback functions (do this only once)*/ set_callbacks(JNI_TRUE); /* Create basic locks */ gdata->dump_lock = createRawMonitor("HPROF dump lock"); gdata->data_access_lock = createRawMonitor("HPROF data access lock"); gdata->callbackLock = createRawMonitor("HPROF callback lock"); gdata->callbackBlock = createRawMonitor("HPROF callback block"); gdata->object_free_lock = createRawMonitor("HPROF object free lock"); gdata->gc_finish_lock = createRawMonitor("HPROF gc_finish lock"); /* Set Onload events mode. */ setup_event_mode(JNI_TRUE, JVMTI_ENABLE); LOG2("Agent_OnLoad", "JVMTI capabilities, callbacks and initial notifications setup"); /* Used in VM_DEATH to wait for callbacks to complete */ gdata->jvm_initializing = JNI_FALSE; gdata->jvm_initialized = JNI_FALSE; gdata->vm_death_callback_active = JNI_FALSE; gdata->active_callbacks = 0; /* Write the header information */ io_setup(); /* We sample the start time now so that the time increments can be * placed in the various heap dump segments in micro seconds. */ gdata->micro_sec_ticks = md_get_microsecs(); /* Load java_crw_demo library and find function "java_crw_demo" */ if ( gdata->bci ) { /* Load the library or get the handle to it */ gdata->java_crw_demo_library = load_library("java_crw_demo"); { /* "java_crw_demo" */ static char *symbols[] = JAVA_CRW_DEMO_SYMBOLS; gdata->java_crw_demo_function = lookup_library_symbol(gdata->java_crw_demo_library, symbols, (int)(sizeof(symbols)/sizeof(char*))); } { /* "java_crw_demo_classname" */ static char *symbols[] = JAVA_CRW_DEMO_CLASSNAME_SYMBOLS; gdata->java_crw_demo_classname_function = lookup_library_symbol(gdata->java_crw_demo_library, symbols, (int)(sizeof(symbols)/sizeof(char*))); } } return JNI_OK; } JNIEXPORT void JNICALL Agent_OnUnload(JavaVM *vm) { Stack *stack; LOG("Agent_OnUnload"); gdata->isLoaded = JNI_FALSE; stack = gdata->object_free_stack; gdata->object_free_stack = NULL; if ( stack != NULL ) { stack_term(stack); } io_cleanup(); loader_cleanup(); tls_cleanup(); monitor_cleanup(); trace_cleanup(); site_cleanup(); object_cleanup(); frame_cleanup(); class_cleanup(); string_cleanup(); /* Deallocate any memory in gdata */ if ( gdata->net_hostname != NULL ) { HPROF_FREE(gdata->net_hostname); } if ( gdata->utf8_output_filename != NULL ) { HPROF_FREE(gdata->utf8_output_filename); } if ( gdata->output_filename != NULL ) { HPROF_FREE(gdata->output_filename); } if ( gdata->heapfilename != NULL ) { HPROF_FREE(gdata->heapfilename); } if ( gdata->checkfilename != NULL ) { HPROF_FREE(gdata->checkfilename); } if ( gdata->options != NULL ) { HPROF_FREE(gdata->options); } /* Verify all allocated memory has been taken care of. */ malloc_police(); /* Cleanup is hard to do when other threads might still be running * so we skip destroying some raw monitors which still might be in use * and we skip disposal of the jvmtiEnv* which might still be needed. * Only raw monitors that could be held by other threads are left * alone. 
So we explicitly do NOT do this: * destroyRawMonitor(gdata->callbackLock); * destroyRawMonitor(gdata->callbackBlock); * destroyRawMonitor(gdata->gc_finish_lock); * destroyRawMonitor(gdata->object_free_lock); * destroyRawMonitor(gdata->listener_loop_lock); * destroyRawMonitor(gdata->cpu_loop_lock); * disposeEnvironment(); * gdata->jvmti = NULL; */ /* Destroy basic locks */ destroyRawMonitor(gdata->dump_lock); gdata->dump_lock = NULL; destroyRawMonitor(gdata->data_access_lock); gdata->data_access_lock = NULL; if ( gdata->cpu_sample_lock != NULL ) { destroyRawMonitor(gdata->cpu_sample_lock); gdata->cpu_sample_lock = NULL; } #ifdef DEBUG destroyRawMonitor(gdata->debug_malloc_lock); gdata->debug_malloc_lock = NULL; #endif /* Unload java_crw_demo library */ if ( gdata->bci && gdata->java_crw_demo_library != NULL ) { md_unload_library(gdata->java_crw_demo_library); gdata->java_crw_demo_library = NULL; } /* You would think you could clear out gdata and set it to NULL, but * turns out that isn't a good idea. Some of the threads could be * blocked inside the CALLBACK*() macros, where they got blocked up * waiting for the VM_DEATH callback to complete. They only have * some raw monitor actions to do, but they need access to gdata to do it. * So do not do this: * (void)memset(gdata, 0, sizeof(GlobalData)); * gdata = NULL; */ }
gpl-2.0
d9magai/freeciv
utility/netfile.c
3
8534
/*********************************************************************** Freeciv - Copyright (C) 1996 - A Kjeldberg, L Gregersen, P Unold This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. ***********************************************************************/ #ifdef HAVE_CONFIG_H #include <fc_config.h> #endif #include <curl/curl.h> #ifdef FREECIV_MSWINDOWS #include <windows.h> #endif /* utility */ #include "fcintl.h" #include "ioz.h" #include "mem.h" #include "rand.h" #include "registry.h" #include "netfile.h" struct netfile_post { struct curl_httppost *first; struct curl_httppost *last; }; typedef size_t (*netfile_write_cb)(char *ptr, size_t size, size_t nmemb, void *userdata); static char error_buf_curl[CURL_ERROR_SIZE]; /********************************************************************** Set handle to usable state. ***********************************************************************/ static CURL *netfile_init_handle(void) { /* Consecutive transfers can use same handle for better performance */ static CURL *handle = NULL; if (handle == NULL) { handle = curl_easy_init(); } else { curl_easy_reset(handle); } error_buf_curl[0] = '\0'; curl_easy_setopt(handle, CURLOPT_ERRORBUFFER, error_buf_curl); return handle; } /********************************************************************** curl write callback to store received file to memory. ***********************************************************************/ static size_t netfile_memwrite_cb(char *ptr, size_t size, size_t nmemb, void *userdata) { struct netfile_write_cb_data *data = (struct netfile_write_cb_data *)userdata; if (size > 0) { data->mem = fc_realloc(data->mem, data->size + size * nmemb); memcpy(data->mem + data->size, ptr, size * nmemb); data->size += size * nmemb; } return size * nmemb; } /********************************************************************** Fetch file from given URL to given file stream. This is core function of netfile module. ***********************************************************************/ static bool netfile_download_file_core(const char *URL, FILE *fp, struct netfile_write_cb_data *mem_data, nf_errmsg cb, void *data) { CURLcode curlret; struct curl_slist *headers = NULL; static CURL *handle; bool ret = TRUE; handle = netfile_init_handle(); headers = curl_slist_append(headers,"User-Agent: Freeciv/" VERSION_STRING); curl_easy_setopt(handle, CURLOPT_URL, URL); if (mem_data != NULL) { mem_data->mem = NULL; mem_data->size = 0; curl_easy_setopt(handle, CURLOPT_WRITEFUNCTION, netfile_memwrite_cb); curl_easy_setopt(handle, CURLOPT_WRITEDATA, mem_data); } else { curl_easy_setopt(handle, CURLOPT_WRITEDATA, fp); } curl_easy_setopt(handle, CURLOPT_HTTPHEADER, headers); curl_easy_setopt(handle, CURLOPT_FAILONERROR, 1); curlret = curl_easy_perform(handle); curl_slist_free_all(headers); if (curlret != CURLE_OK) { if (cb != NULL) { char buf[2048 + CURL_ERROR_SIZE]; fc_snprintf(buf, sizeof(buf), /* TRANS: first %s is URL, second is Curl error message * (not in Freeciv translation domain) */ _("Failed to fetch %s: %s"), URL, error_buf_curl[0] != '\0' ? 
error_buf_curl : curl_easy_strerror(curlret)); cb(buf, data); } ret = FALSE; } return ret; } /********************************************************************** Fetch section file from net ***********************************************************************/ struct section_file *netfile_get_section_file(const char *URL, nf_errmsg cb, void *data) { bool success; struct section_file *out = NULL; struct netfile_write_cb_data mem_data; fz_FILE *file; success = netfile_download_file_core(URL, NULL, &mem_data, cb, data); if (success) { file = fz_from_memory(mem_data.mem, mem_data.size, TRUE); out = secfile_from_stream(file, TRUE); } return out; } /********************************************************************** Fetch file from given URL and save as given filename. ***********************************************************************/ bool netfile_download_file(const char *URL, const char *filename, nf_errmsg cb, void *data) { bool success; FILE *fp; fp = fc_fopen(filename, "w+b"); if (fp == NULL) { if (cb != NULL) { char buf[2048]; fc_snprintf(buf, sizeof(buf), _("Could not open %s for writing"), filename); cb(buf, data); } return FALSE; } success = netfile_download_file_core(URL, fp, NULL, cb, data); fclose(fp); return success; } /********************************************************************** Allocate netfile_post ***********************************************************************/ struct netfile_post *netfile_start_post(void) { return fc_calloc(1, sizeof(struct netfile_post)); } /********************************************************************** Add one entry to netfile post form ***********************************************************************/ void netfile_add_form_str(struct netfile_post *post, const char *name, const char *val) { curl_formadd(&post->first, &post->last, CURLFORM_COPYNAME, name, CURLFORM_COPYCONTENTS, val, CURLFORM_END); } /********************************************************************** Add one integer entry to netfile post form ***********************************************************************/ void netfile_add_form_int(struct netfile_post *post, const char *name, const int val) { char buf[50]; fc_snprintf(buf, sizeof(buf), "%d", val); netfile_add_form_str(post, name, buf); } /********************************************************************** Free netfile_post resources ***********************************************************************/ void netfile_close_post(struct netfile_post *post) { curl_formfree(post->first); FC_FREE(post); } /********************************************************************** Dummy write callback used only to make sure curl's default write function does not get used as we don't want reply to stdout ***********************************************************************/ static size_t dummy_write(void *buffer, size_t size, size_t nmemb, void *userp) { return size * nmemb; } /********************************************************************** Send HTTP POST ***********************************************************************/ bool netfile_send_post(const char *URL, struct netfile_post *post, FILE *reply_fp, struct netfile_write_cb_data *mem_data, const char *addr) { CURLcode curlret; long http_resp; struct curl_slist *headers = NULL; static CURL *handle; handle = netfile_init_handle(); headers = curl_slist_append(headers,"User-Agent: Freeciv/" VERSION_STRING); curl_easy_setopt(handle, CURLOPT_URL, URL); curl_easy_setopt(handle, CURLOPT_HTTPPOST, post->first); if (mem_data != NULL) { 
mem_data->mem = NULL; mem_data->size = 0; curl_easy_setopt(handle, CURLOPT_WRITEFUNCTION, netfile_memwrite_cb); curl_easy_setopt(handle, CURLOPT_WRITEDATA, mem_data); } else if (reply_fp == NULL) { curl_easy_setopt(handle, CURLOPT_WRITEFUNCTION, dummy_write); } else { curl_easy_setopt(handle, CURLOPT_WRITEDATA, reply_fp); } if (addr != NULL) { curl_easy_setopt(handle, CURLOPT_INTERFACE, addr); } curl_easy_setopt(handle, CURLOPT_HTTPHEADER, headers); curlret = curl_easy_perform(handle); curl_slist_free_all(headers); if (curlret != CURLE_OK) { return FALSE; } curl_easy_getinfo(handle, CURLINFO_RESPONSE_CODE, &http_resp); if (http_resp != 200) { return FALSE; } return TRUE; }
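
/* Usage sketch, kept out of the build with #if 0: how a caller might
 * drive netfile_download_file() with an error callback. The helper name
 * log_to_stderr and the URL are illustrative assumptions, not part of
 * this module; <stdio.h> is pulled in via curl/curl.h. */
#if 0
static void log_to_stderr(const char *errmsg, void *data)
{
  /* nf_errmsg callback: just echo the message. */
  fprintf(stderr, "netfile: %s\n", errmsg);
}

static bool download_example(void)
{
  /* Returns FALSE (after invoking the callback) on any curl or I/O error. */
  return netfile_download_file("http://example.com/file.dat",
                               "file.dat", log_to_stderr, NULL);
}
#endif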
gpl-2.0
zhongweiy/rtems
cpukit/libfs/src/dosfs/msdos_statvfs.c
3
1799
/** * @file msdos_statvfs.c * * @brief Obtain MS-DOS filesystem information * @ingroup libfs_msdos MSDOS FileSystem */ /* * Copyright (c) 2013 Andrey Mozzhuhin * Copyright (c) 2013 Vitaly Belov * * The license and distribution terms for this file may be * found in the file LICENSE in this distribution or at * http://www.rtems.com/license/LICENSE. */ #include "fat.h" #include "fat_fat_operations.h" #include "msdos.h" int msdos_statvfs(const rtems_filesystem_location_info_t *root_loc, struct statvfs *sb) { msdos_fs_info_t *fs_info = root_loc->mt_entry->fs_info; fat_vol_t *vol = &fs_info->fat.vol; rtems_status_code sc = RTEMS_SUCCESSFUL; sc = rtems_semaphore_obtain(fs_info->vol_sema, RTEMS_WAIT, MSDOS_VOLUME_SEMAPHORE_TIMEOUT); if (sc != RTEMS_SUCCESSFUL) rtems_set_errno_and_return_minus_one(EIO); sb->f_bsize = FAT_SECTOR512_SIZE; sb->f_frsize = vol->bpc; sb->f_blocks = vol->data_cls; sb->f_bfree = 0; sb->f_bavail = 0; sb->f_files = 0; // FAT doesn't store inodes sb->f_ffree = 0; sb->f_favail = 0; sb->f_flag = 0; sb->f_namemax = MSDOS_NAME_MAX_LNF_LEN; if (vol->free_cls == FAT_UNDEFINED_VALUE) { int rc; uint32_t cur_cl = 2; uint32_t value = 0; uint32_t data_cls_val = vol->data_cls + 2; for (; cur_cl < data_cls_val; ++cur_cl) { rc = fat_get_fat_cluster(&fs_info->fat, cur_cl, &value); if (rc != RC_OK) { rtems_semaphore_release(fs_info->vol_sema); return rc; } if (value == FAT_GENFAT_FREE) { sb->f_bfree++; sb->f_bavail++; } } } else { sb->f_bfree = vol->free_cls; sb->f_bavail = vol->free_cls; } rtems_semaphore_release(fs_info->vol_sema); return RC_OK; }
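
/* Interpretation sketch, kept out of the build with #if 0: the block
 * counts filled in above are in units of f_frsize (bytes per cluster),
 * so free space in bytes is f_bfree * f_frsize. The helper below is an
 * illustrative assumption, not part of RTEMS; it relies on <stdint.h>. */
#if 0
#include <stdint.h>

static uint64_t msdos_free_bytes(const struct statvfs *sb)
{
  /* Widen before multiplying to avoid overflow on large volumes. */
  return (uint64_t) sb->f_bfree * (uint64_t) sb->f_frsize;
}
#endif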
gpl-2.0
MoKee/android_kernel_lge_msm8992
drivers/input/touchscreen/touch_synaptics.c
3
213196
/* Touch_synaptics.c * * Copyright (C) 2013 LGE. * * Author: yehan.ahn@lge.com, hyesung.shin@lge.com * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/err.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/regulator/machine.h> #include <linux/async.h> #include <linux/atomic.h> #include <linux/gpio.h> #include <linux/file.h> /*for file access*/ #include <linux/syscalls.h> /*for file access*/ #include <linux/uaccess.h> /*for file access*/ #include <linux/workqueue.h> #include <linux/wakelock.h> #include <linux/interrupt.h> #include <linux/input/lge_touch_core.h> #include <linux/input/touch_synaptics.h> #include <linux/firmware.h> #include "./DS5/RefCode_F54.h" #include <soc/qcom/lge/board_lge.h> /* RMI4 spec from 511-000405-01 Rev.D * Function Purpose See page * $01 RMI Device Control 45 * $1A 0-D capacitive button sensors 61 * $05 Image Reporting 68 * $07 Image Reporting 75 * $08 BIST 82 * $09 BIST 87 * $11 2-D TouchPad sensors 93 * $19 0-D capacitive button sensors 141 * $30 GPIO/LEDs 148 * $31 LEDs 162 * $34 Flash Memory Management 163 * $36 Auxiliary ADC 174 * $54 Test Reporting 176 */ struct i2c_client *ds4_i2c_client; static char power_state; static char *productcode_parse(unsigned char *product); static int get_ic_info(struct synaptics_ts_data *ts); static int read_page_description_table(struct i2c_client *client); static int get_type_bootloader(struct i2c_client *client); static void set_param_incoming_call(struct i2c_client *client, int call_state); static void synaptics_change_sleepmode(struct i2c_client *client); static void synaptics_toggle_swipe(struct i2c_client *client); static int synaptics_ts_im_test(struct i2c_client *client); static int synaptics_ts_adc_test(struct i2c_client *client); static int synaptics_ts_lpwg_adc_test(struct i2c_client *client); int f54_window_crack_check_mode = 0; int f54_window_crack; int after_crack_check = 0; static unsigned long im_sum; static unsigned long cns_sum; static unsigned long cid_im_sum; static unsigned long freq_scan_im_sum; static u16 im_aver; static u16 cns_aver; static u16 cid_im_aver; static u16 freq_scan_im_aver; static unsigned int cnt; u8 int_mask_cust; int is_sensing; bool lpwg_by_lcd_notifier; bool ghost_do_not_reset; int sp_link_touch; int incoming_call_state = 0; /*static int ts_suspend = 0; int thermal_status = 0; extern int touch_thermal_mode;*/ /* Register Map & Register bit mask * - Please check "One time" this map before using this device driver */ /* RMI_DEVICE_CONTROL */ /* Manufacturer ID */ #define MANUFACTURER_ID_REG (ts->f01.dsc.query_base) /* CUSTOMER_FAMILY QUERY */ #define CUSTOMER_FAMILY_REG (ts->f01.dsc.query_base + 2) /* FW revision */ #define FW_REVISION_REG (ts->f01.dsc.query_base + 3) /* Product ID */ #define PRODUCT_ID_REG (ts->f01.dsc.query_base + 11) #define DEVICE_COMMAND_REG (ts->f01.dsc.command_base) /* Device Control */ #define DEVICE_CONTROL_REG (ts->f01.dsc.control_base) /* sleep mode : go to doze mode after 500 ms */ #define DEVICE_CONTROL_NORMAL_OP 0x00 /* sleep mode : go to sleep */ #define DEVICE_CONTROL_SLEEP 0x01 
/* sleep mode : go to sleep. no-recalibration */ #define DEVICE_CONTROL_SLEEP_NO_RECAL 0x02 #define DEVICE_CONTROL_NOSLEEP 0x04 #define DEVICE_CHARGER_CONNECTED 0x20 #define DEVICE_CONTROL_CONFIGURED 0x80 /* Device Command */ #define DEVICE_COMMAND_RESET 0x01 /* Interrupt Enable 0 */ #define INTERRUPT_ENABLE_REG (ts->f01.dsc.control_base + 1) /* Doze Interval : unit 10ms */ #define DOZE_INTERVAL_REG (ts->f01.dsc.control_base + 2) #define DOZE_WAKEUP_THRESHOLD_REG (ts->f01.dsc.control_base + 3) /* Device Status */ #define DEVICE_STATUS_REG (ts->f01.dsc.data_base) #define DEVICE_FAILURE_MASK 0x03 #define DEVICE_CRC_ERROR_MASK 0x04 #define DEVICE_STATUS_FLASH_PROG 0x40 #define DEVICE_STATUS_UNCONFIGURED 0x80 /* Interrupt Status */ #define INTERRUPT_STATUS_REG (ts->f01.dsc.data_base + 1) #define INTERRUPT_MASK_FLASH 0x01 #define INTERRUPT_MASK_STATUS 0x02 #define INTERRUPT_MASK_ABS0 0x04 #define INTERRUPT_MASK_BUTTON 0x10 #define INTERRUPT_MASK_CUSTOM 0x40 /* TOUCHPAD_SENSORS */ #define FINGER_COMMAND_REG (ts->f12.dsc.command_base) #define MOTION_SUPPRESSION (ts->f12.dsc.control_base + 5) /* ts->f12_reg.ctrl[20] */ #define GLOVED_FINGER_MASK 0x20 /* Finger State */ #define OBJECT_TYPE_AND_STATUS_REG (ts->f12.dsc.data_base) #define OBJECT_ATTENTION_REG (ts->f12.dsc.data_base + 2) /* Finger Data Register */ #define FINGER_DATA_REG_START (ts->f12.dsc.data_base) #define REG_OBJECT_TYPE_AND_STATUS 0 #define REG_X_LSB 1 #define REG_X_MSB 2 #define REG_Y_LSB 3 #define REG_Y_MSB 4 #define REG_Z 5 #define REG_WX 6 #define REG_WY 7 #define MAXIMUM_XY_COORDINATE_REG (ts->f12.dsc.control_base) /* ANALOG_CONTROL */ #define ANALOG_COMMAND_REG (ts->f54.dsc.command_base) #define ANALOG_CONTROL_REG (ts->f54.dsc.control_base) #define SATURATION_CAP_LSB_REG (ts->f54.dsc.control_base + 1) #define SATURATION_CAP_MSB_REG (ts->f54.dsc.control_base + 2) #define THERMAL_UPDATE_INTERVAL_REG 0x2F /* 1-page */ /* FLASH_MEMORY_MANAGEMENT */ /* Flash Control */ #define FLASH_CONFIG_ID_REG (ts->f34.dsc.control_base) #define FLASH_CONTROL_REG (ts->f34.dsc.data_base + 2) #define FLASH_STATUS_REG (ts->f34.dsc.data_base + 3) #define FLASH_STATUS_MASK 0xFF /* Page number */ #define COMMON_PAGE (ts->f01.page) #define FINGER_PAGE (ts->f12.page) #define ANALOG_PAGE (ts->f54.page) #define FLASH_PAGE (ts->f34.page) #define DEFAULT_PAGE 0x00 #define LPWG_PAGE (ts->f51.page) /* Others */ #define LPWG_STATUS_REG (ts->f51.dsc.data_base) #define LPWG_DATA_REG (ts->f51.dsc.data_base + 1) #define LPWG_TAPCOUNT_REG (ts->f51.dsc.control_base) #define LPWG_MIN_INTERTAP_REG (ts->f51.dsc.control_base + 1) #define LPWG_MAX_INTERTAP_REG (ts->f51.dsc.control_base + 2) #define LPWG_TOUCH_SLOP_REG (ts->f51.dsc.control_base + 3) #define LPWG_TAP_DISTANCE_REG (ts->f51.dsc.control_base + 4) #define LPWG_INTERRUPT_DELAY_REG (ts->f51.dsc.control_base + 6) #define LPWG_BLKSIZ 7 /* 4-page */ #define LPWG_TAPCOUNT_REG2 (LPWG_TAPCOUNT_REG + LPWG_BLKSIZ) #define LPWG_MIN_INTERTAP_REG2 (LPWG_MIN_INTERTAP_REG + LPWG_BLKSIZ) #define LPWG_MAX_INTERTAP_REG2 (LPWG_MAX_INTERTAP_REG + LPWG_BLKSIZ) #define LPWG_TOUCH_SLOP_REG2 (LPWG_TOUCH_SLOP_REG + LPWG_BLKSIZ) #define LPWG_TAP_DISTANCE_REG2 (LPWG_TAP_DISTANCE_REG + LPWG_BLKSIZ) #define LPWG_INTERRUPT_DELAY_REG2 (LPWG_INTERRUPT_DELAY_REG + LPWG_BLKSIZ) #define LPWG_PARTIAL_REG (LPWG_INTERRUPT_DELAY_REG2 + 35) #define MISC_HOST_CONTROL_REG \ (ts->f51.dsc.control_base + 7 + LPWG_BLKSIZ) /* finger_amplitude(0x80) = 0.5 */ #define THERMAL_HIGH_FINGER_AMPLITUDE 0x60 #define LPWG_HAS_DEBUG_MODULE (ts->f51.dsc.query_base + 
4) #define LPWG_MAX_BUFFER 10 /* LPWG Control Value */ #define REPORT_MODE_CTRL 1 #define TCI_ENABLE_CTRL 2 #define TAP_COUNT_CTRL 3 #define MIN_INTERTAP_CTRL 4 #define MAX_INTERTAP_CTRL 5 #define TOUCH_SLOP_CTRL 6 #define TAP_DISTANCE_CTRL 7 #define INTERRUPT_DELAY_CTRL 8 #define PARTIAL_LPWG_ON 9 #define TCI_ENABLE_CTRL2 22 #define TAP_COUNT_CTRL2 23 #define MIN_INTERTAP_CTRL2 24 #define MAX_INTERTAP_CTRL2 25 #define TOUCH_SLOP_CTRL2 26 #define TAP_DISTANCE_CTRL2 27 #define INTERRUPT_DELAY_CTRL2 28 /* Palm / Hover */ #define PALM_TYPE 3 #define HOVER_TYPE 5 #define MAX_PRESSURE 255 #define I2C_DELAY 50 #define UEVENT_DELAY 200 #define REBASE_DELAY 100 #define CAP_DIFF_MAX 500 #define CAP_MIN_MAX_DIFF 1000 #define KNOCKON_DELAY 68 /* 700ms */ #define KNOCKCODE_DELAY 20 /* 200ms */ /* F/W calibration */ #define CALIBRATION_STATUS_REG (ts->f54.dsc.data_base + 14) #define CALIBRATION_FLAGS_REG (ts->f54.dsc.control_base + 35) #define F54_FIFO_INDEX_LSB (ts->f54.dsc.data_base + 1) #define F54_FIFO_INDEX_MSB (ts->f54.dsc.data_base + 2) #define F54_REPORT_DATA (ts->f54.dsc.data_base + 3) #define MAX_CAL_DATA_SIZE (32*18*2) #define MAX_ND_CAL_DATA_SIZE (32*2*2) #define MAX_DETAIL_SIZE (32*18) #define MAX_COARSE_SIZE (32*18) #define MAX_FINE_SIZE (32*18) #define MAX_ND_DETAIL_SIZE (32*2) #define MAX_ND_COARSE_SIZE (32*2) #define MAX_ND_FINE_SIZE (32*2) #define MAX_CAL_LOG_SIZE (MAX_CAL_DATA_SIZE*20) /* Get user-finger-data from register. */ #define TS_POSITION(_msb_reg, _lsb_reg) \ (((u16)((_msb_reg << 8) & 0xFF00) | (u16)((_lsb_reg) & 0xFF))) #define TS_SNTS_GET_ORIENTATION(_width_y, _width_x) \ (((_width_y - _width_x) > 0) ? 0 : 1) #define TS_SNTS_GET_PRESSURE(_pressure) \ _pressure #define jitter_abs(x) (x > 0 ? x : -x) #define GET_OBJECT_REPORT_INFO(_reg, _type) \ (((_reg) & ((u8)(1 << (_type)))) >> (_type)) #define GET_HIGH_U8_FROM_U16(_u16_data) \ ((u8)(((_u16_data) & 0xFF00) >> 8)) #define GET_LOW_U8_FROM_U16(_u16_data) \ ((u8)((_u16_data) & 0x00FF)) #define GET_U16_FROM_U8(_u8_hi_data, _u8_lo_data) \ ((u16)(((_u8_hi_data) << 8) | (_u8_lo_data))) static int ref_chk_enable; static int raw_cap_file_exist; static bool touch_wake_test; unsigned int touch_wake_count; #define TOUCH_WAKE_COUNTER_LOG_PATH "/mnt/sdcard/wake_cnt.txt" static enum error_type synaptics_ts_ic_ctrl(struct i2c_client *client, u8 code, u32 value, u32 *ret); static int set_doze_param(struct synaptics_ts_data *ts, int value); static bool need_scan_pdt = true; bool is_product(struct synaptics_ts_data *ts, const char *product_id, size_t len) { return strncmp(ts->fw_info.product_id, product_id, len) ? false : true; } bool is_img_product(struct synaptics_ts_data *ts, const char *product_id, size_t len) { return strncmp(ts->fw_info.img_product_id, product_id, len) ? 
false : true; } void write_firmware_version_log(struct synaptics_ts_data *ts) { #define LOGSIZ 448 char *version_string = NULL; int ret = 0; int rc = 0; version_string = kzalloc(LOGSIZ * sizeof(char), GFP_KERNEL); if (mfts_mode) { mutex_lock(&ts->pdata->thread_lock); read_page_description_table(ts->client); rc = get_ic_info(ts); mutex_unlock(&ts->pdata->thread_lock); if (rc < 0) { ret += snprintf(version_string + ret, LOGSIZ - ret, "-1\n"); ret += snprintf(version_string + ret, LOGSIZ - ret, "Read Fail Touch IC Info\n"); return; } } ret += snprintf(version_string + ret, LOGSIZ - ret, "===== Firmware Info =====\n"); if (ts->fw_info.version[0] > 0x50) { ret += snprintf(version_string + ret, LOGSIZ - ret, "ic_version[%s]\n", ts->fw_info.version); } else { ret += snprintf(version_string + ret, LOGSIZ - ret, "version : v%d.%02d\n", ((ts->fw_info.version[3] & 0x80) >> 7), (ts->fw_info.version[3] & 0x7F)); } ret += snprintf(version_string + ret, LOGSIZ - ret, "IC_product_id[%s]\n", ts->fw_info.product_id); if (is_product(ts, "PLG349", 6)) { ret += snprintf(version_string + ret, LOGSIZ - ret, "Touch IC : s3528\n"); } else if (is_product(ts, "s3320", 5) || is_product(ts, "PLG446", 6) || is_product(ts, "PLG468", 6)) { ret += snprintf(version_string + ret, LOGSIZ - ret, "Touch IC : s3320\n"); } else { ret += snprintf(version_string + ret, LOGSIZ - ret, "Touch Product ID read error\n"); } ret += snprintf(version_string + ret, LOGSIZ - ret, "=========================\n\n"); write_log(NULL, version_string); msleep(30); kfree(version_string); return; } /* wrapper function for i2c communication - except defalut page * if you have to select page for reading or writing, * then using this wrapper function */ int synaptics_ts_set_page(struct i2c_client *client, u8 page) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); DO_SAFE(touch_i2c_write_byte(ts->client, PAGE_SELECT_REG, page), error); ts->curr_page = page; return 0; error: TOUCH_E("%s, %d : read page failed\n", __func__, __LINE__); return -EPERM; } int synaptics_ts_page_data_read(struct i2c_client *client, u8 page, u8 reg, int size, u8 *data) { DO_SAFE(synaptics_ts_set_page(client, page), error); DO_SAFE(touch_i2c_read(client, reg, size, data), error); DO_SAFE(synaptics_ts_set_page(client, DEFAULT_PAGE), error); return 0; error: TOUCH_E("%s, %d : read page failed\n", __func__, __LINE__); return -EPERM; } int synaptics_ts_page_data_write(struct i2c_client *client, u8 page, u8 reg, int size, u8 *data) { DO_SAFE(synaptics_ts_set_page(client, page), error); DO_SAFE(touch_i2c_write(client, reg, size, data), error); DO_SAFE(synaptics_ts_set_page(client, DEFAULT_PAGE), error); return 0; error: TOUCH_E("%s, %d : write page failed\n", __func__, __LINE__); return -EPERM; } int synaptics_ts_page_data_write_byte(struct i2c_client *client, u8 page, u8 reg, u8 data) { DO_SAFE(synaptics_ts_set_page(client, page), error); DO_SAFE(touch_i2c_write_byte(client, reg, data), error); DO_SAFE(synaptics_ts_set_page(client, DEFAULT_PAGE), error); return 0; error: TOUCH_E("%s, %d : write page byte failed\n", __func__, __LINE__); return -EPERM; } const char *f_str[] = { "ERROR", "DISTANCE_INTER_TAP", "DISTANCE_TOUCHSLOP", "TIMEOUT_INTER_TAP", "MULTI_FINGER", "DELAY_TIME" }; static int print_tci_debug_result(struct synaptics_ts_data *ts, int num) { u8 count = 0; u8 index = 0; u8 buf = 0; u8 i = 0; u8 addr = 0; u8 offset = num ? 
LPWG_MAX_BUFFER + 2 : 0; DO_SAFE(synaptics_ts_page_data_read(ts->client, LPWG_PAGE, ts->f51_reg.lpwg_fail_count_reg + offset, 1, &count), error); DO_SAFE(synaptics_ts_page_data_read(ts->client, LPWG_PAGE, ts->f51_reg.lpwg_fail_index_reg + offset, 1, &index), error); for (i = 1; i <= count ; i++) { addr = ts->f51_reg.lpwg_fail_reason_reg + offset + ((index + LPWG_MAX_BUFFER - i) % LPWG_MAX_BUFFER); DO_SAFE(synaptics_ts_page_data_read(ts->client, LPWG_PAGE, addr, 1, &buf), error); TOUCH_D(DEBUG_BASE_INFO, "TCI(%d)-Fail[%d/%d]: %s\n", num, count - i + 1, count, (buf > 0 && buf < 6) ? f_str[buf] : f_str[0]); if (i == LPWG_MAX_BUFFER) break; } return 0; error: return -EPERM; } #define SWIPE_F_STR_SIZE 8 static const char *swipe_f_str[SWIPE_F_STR_SIZE] = { "SUCCESS", "FINGER_RELEASED", "MULTIPLE_FINGERS", "TOO_FAST", "TOO_SLOW", "OUT_OF_AREA", "RATIO_EXECESS", "UNKNOWN" }; static int print_swipe_fail_reason(struct synaptics_ts_data *ts) { struct swipe_data *swp = &ts->swipe; u8 buf = 0; u8 direction = 0; u8 fail_reason = 0; if (mfts_mode && !ts->pdata->role->mfts_lpwg) { TOUCH_E("do not print swipe fail reason - mfts\n"); return -EPERM; } else { TOUCH_E("%s, %d : swipe fail reason\n", __func__, __LINE__); } if (swp->support_swipe == NO_SUPPORT_SWIPE) { TOUCH_E("support_swipe:0x%02X\n", swp->support_swipe); return -EPERM; } synaptics_ts_page_data_read(ts->client, LPWG_PAGE, swp->fail_reason_reg, 1, &buf); if (swp->support_swipe & SUPPORT_SWIPE_DOWN) { direction = SWIPE_DIRECTION_DOWN; fail_reason = buf; } if (swp->support_swipe & SUPPORT_SWIPE_UP) { direction = buf & 0x03; fail_reason = (buf & 0xfc) >> 2; } if (fail_reason >= SWIPE_F_STR_SIZE) fail_reason = SWIPE_F_STR_SIZE - 1; TOUCH_I("last swipe_%s fail reason:%d(%s)\n", direction ? "up" : "down", fail_reason, swipe_f_str[fail_reason]); return 0; } /** * Knock on * * Type Value * * 1 WakeUp_gesture_only=1 / Normal=0 * 2 TCI enable=1 / disable=0 * 3 Tap Count * 4 Min InterTap * 5 Max InterTap * 6 Touch Slop * 7 Tap Distance * 8 Interrupt Delay */ static int tci_control(struct synaptics_ts_data *ts, int type, u8 value) { struct i2c_client *client = ts->client; u8 buffer[3] = {0}; u8 data = 0; switch (type) { case REPORT_MODE_CTRL: DO_SAFE(touch_i2c_read(ts->client, INTERRUPT_ENABLE_REG, 1, &data), error); if (value) data &= ~INTERRUPT_MASK_ABS0; else data |= INTERRUPT_MASK_ABS0; DO_SAFE(touch_i2c_write_byte(ts->client, INTERRUPT_ENABLE_REG, data), error); if (value) { buffer[0] = ts->min_finger_amplitude; buffer[1] = ts->min_finger_amplitude; } else { buffer[0] = ts->default_finger_amplitude; buffer[1] = ts->default_small_finger_amplitude; } TOUCH_I( "finger_amplitude(finger:0x%02X, small_finger:0x%02X)\n", buffer[0], buffer[1]); DO_SAFE(touch_i2c_write(client, ts->f12_reg.ctrl[15], 2, buffer), error); DO_SAFE(touch_i2c_read(client, ts->f12_reg.ctrl[20], 3, buffer), error); buffer[2] = (buffer[2] & 0xfc) | (value ? 
0x2 : 0x0); DO_SAFE(touch_i2c_write(client, ts->f12_reg.ctrl[20], 3, buffer), error); break; case TCI_ENABLE_CTRL: DO_SAFE(synaptics_ts_page_data_read(client, LPWG_PAGE, ts->f51_reg.lpwg_tapcount_reg, 1, buffer), error); buffer[0] = (buffer[0] & 0xfe) | (value & 0x1); DO_SAFE(synaptics_ts_page_data_write(client, LPWG_PAGE, ts->f51_reg.lpwg_tapcount_reg, 1, buffer), error); break; case TCI_ENABLE_CTRL2: DO_SAFE(synaptics_ts_page_data_read(client, LPWG_PAGE, ts->f51_reg.lpwg_tapcount_reg2, 1, buffer), error); buffer[0] = (buffer[0] & 0xfe) | (value & 0x1); DO_SAFE(synaptics_ts_page_data_write(client, LPWG_PAGE, ts->f51_reg.lpwg_tapcount_reg2, 1, buffer), error); break; case TAP_COUNT_CTRL: DO_SAFE(synaptics_ts_page_data_read(client, LPWG_PAGE, ts->f51_reg.lpwg_tapcount_reg, 1, buffer), error); buffer[0] = ((value << 3) & 0xf8) | (buffer[0] & 0x7); DO_SAFE(synaptics_ts_page_data_write(client, LPWG_PAGE, ts->f51_reg.lpwg_tapcount_reg, 1, buffer), error); break; case TAP_COUNT_CTRL2: DO_SAFE(synaptics_ts_page_data_read(client, LPWG_PAGE, ts->f51_reg.lpwg_tapcount_reg2, 1, buffer), error); buffer[0] = ((value << 3) & 0xf8) | (buffer[0] & 0x7); DO_SAFE(synaptics_ts_page_data_write(client, LPWG_PAGE, ts->f51_reg.lpwg_tapcount_reg2, 1, buffer), error); break; case MIN_INTERTAP_CTRL: DO_SAFE(synaptics_ts_page_data_write_byte(client, LPWG_PAGE, ts->f51_reg.lpwg_min_intertap_reg, value), error); break; case MIN_INTERTAP_CTRL2: DO_SAFE(synaptics_ts_page_data_write_byte(client, LPWG_PAGE, ts->f51_reg.lpwg_min_intertap_reg2, value), error); break; case MAX_INTERTAP_CTRL: DO_SAFE(synaptics_ts_page_data_write_byte(client, LPWG_PAGE, ts->f51_reg.lpwg_max_intertap_reg, value), error); break; case MAX_INTERTAP_CTRL2: DO_SAFE(synaptics_ts_page_data_write_byte(client, LPWG_PAGE, ts->f51_reg.lpwg_max_intertap_reg2, value), error); break; case TOUCH_SLOP_CTRL: DO_SAFE(synaptics_ts_page_data_write_byte(client, LPWG_PAGE, ts->f51_reg.lpwg_touch_slop_reg, value), error); break; case TOUCH_SLOP_CTRL2: DO_SAFE(synaptics_ts_page_data_write_byte(client, LPWG_PAGE, ts->f51_reg.lpwg_touch_slop_reg2, value), error); break; case TAP_DISTANCE_CTRL: DO_SAFE(synaptics_ts_page_data_write_byte(client, LPWG_PAGE, ts->f51_reg.lpwg_tap_distance_reg, value), error); break; case TAP_DISTANCE_CTRL2: DO_SAFE(synaptics_ts_page_data_write_byte(client, LPWG_PAGE, ts->f51_reg.lpwg_tap_distance_reg2, value), error); break; case INTERRUPT_DELAY_CTRL: DO_SAFE(synaptics_ts_page_data_write_byte(client, LPWG_PAGE, ts->f51_reg.lpwg_interrupt_delay_reg, value ? (buffer[0] = (KNOCKON_DELAY << 1) | 0x1) : (buffer[0] = 0)), error); break; case INTERRUPT_DELAY_CTRL2: if (ts->lpwg_ctrl.has_lpwg_overtap_module) { DO_SAFE(synaptics_ts_page_data_write_byte(client, LPWG_PAGE, ts->f51_reg.lpwg_interrupt_delay_reg2, buffer[0] = (KNOCKCODE_DELAY << 1) | 0x1), error); } else { DO_SAFE(synaptics_ts_page_data_write_byte(client, LPWG_PAGE, ts->f51_reg.lpwg_interrupt_delay_reg2, value), error); } break; case PARTIAL_LPWG_ON: if (is_product(ts, "PLG468", 6)) { DO_SAFE(synaptics_ts_page_data_read(client, LPWG_PAGE, ts->f51_reg.lpwg_partial_reg, 1, buffer), error); TOUCH_I("%s: partial lpwg, prev:0x%02X, next:0x%02X (value:%d)\n", __func__, buffer[0], value ? 
(buffer[0] & 0xfc) | value : (buffer[0] & 0xfe) | value, value); if (value) buffer[0] = (buffer[0] & 0xfc) | value; else buffer[0] = (buffer[0] & 0xfe) | value; DO_SAFE(synaptics_ts_page_data_write(client, LPWG_PAGE, ts->f51_reg.lpwg_partial_reg, 1, buffer), error); } else { DO_SAFE(synaptics_ts_page_data_write_byte(client, LPWG_PAGE, ts->f51_reg.lpwg_partial_reg, value), error); } break; default: break; } return 0; error: TOUCH_E("%s, %d : tci control failed\n", __func__, __LINE__); return -EPERM; } static int swipe_down_enable(struct synaptics_ts_data *ts) { struct i2c_client *client = ts->client; struct swipe_data *swp = &ts->swipe; struct swipe_ctrl_info *down = &swp->down; u8 buf[2] = {0}; TOUCH_I("%s: (swipe_mode:0x%02X)\n", __func__, swp->swipe_mode); if (swp->swipe_mode & SWIPE_DOWN_BIT) { DO_SAFE(synaptics_ts_set_page(client, LPWG_PAGE), error); DO_SAFE(touch_i2c_read(client, swp->enable_reg, 1, buf), error); DO_SAFE(touch_i2c_write_byte(client, swp->enable_reg, buf[0] | down->enable_mask), error); DO_SAFE(touch_i2c_write_byte(client, down->min_distance_reg, down->min_distance), error); DO_SAFE(touch_i2c_write_byte(client, down->ratio_thres_reg, down->ratio_thres), error); DO_SAFE(touch_i2c_write_byte(client, down->ratio_chk_period_reg, down->ratio_chk_period), error); DO_SAFE(touch_i2c_write_byte(client, down->ratio_chk_min_distance_reg, down->ratio_chk_min_distance), error); buf[0] = GET_LOW_U8_FROM_U16(down->min_time_thres); buf[1] = GET_HIGH_U8_FROM_U16(down->min_time_thres); DO_SAFE(touch_i2c_write(client, down->min_time_thres_reg, 2, buf), error); buf[0] = GET_LOW_U8_FROM_U16(down->max_time_thres); buf[1] = GET_HIGH_U8_FROM_U16(down->max_time_thres); DO_SAFE(touch_i2c_write(client, down->max_time_thres_reg, 2, buf), error); if (swp->support_swipe & SUPPORT_SWIPE_UP) { buf[0] = GET_LOW_U8_FROM_U16(down->active_area_x0); buf[1] = GET_HIGH_U8_FROM_U16(down->active_area_x0); DO_SAFE(touch_i2c_write(client, down->active_area_x0_reg, 2, buf), error); buf[0] = GET_LOW_U8_FROM_U16(down->active_area_y0); buf[1] = GET_HIGH_U8_FROM_U16(down->active_area_y0); DO_SAFE(touch_i2c_write(client, down->active_area_y0_reg, 2, buf), error); buf[0] = GET_LOW_U8_FROM_U16(down->active_area_x1); buf[1] = GET_HIGH_U8_FROM_U16(down->active_area_x1); DO_SAFE(touch_i2c_write(client, down->active_area_x1_reg, 2, buf), error); buf[0] = GET_LOW_U8_FROM_U16(down->active_area_y1); buf[1] = GET_HIGH_U8_FROM_U16(down->active_area_y1); DO_SAFE(touch_i2c_write(client, down->active_area_y1_reg, 2, buf), error); } DO_SAFE(synaptics_ts_set_page(client, DEFAULT_PAGE), error); } else { TOUCH_I("swipe_down is not used.\n"); } return 0; error: synaptics_ts_set_page(client, DEFAULT_PAGE); TOUCH_E("%s failed\n", __func__); return -EPERM; } static int swipe_up_enable(struct synaptics_ts_data *ts) { struct i2c_client *client = ts->client; struct swipe_data *swp = &ts->swipe; struct swipe_ctrl_info *up = &swp->up; u8 buf[2] = {0}; TOUCH_I("%s: (swipe_mode:0x%02X)\n", __func__, swp->swipe_mode); if (swp->swipe_mode & SWIPE_UP_BIT) { DO_SAFE(synaptics_ts_set_page(client, LPWG_PAGE), error); DO_SAFE(touch_i2c_read(client, swp->enable_reg, 1, buf), error); DO_SAFE(touch_i2c_write_byte(client, swp->enable_reg, buf[0] | up->enable_mask), error); DO_SAFE(touch_i2c_write_byte(client, up->min_distance_reg, up->min_distance), error); DO_SAFE(touch_i2c_write_byte(client, up->ratio_thres_reg, up->ratio_thres), error); DO_SAFE(touch_i2c_write_byte(client, up->ratio_chk_period_reg, up->ratio_chk_period), error); 
DO_SAFE(touch_i2c_write_byte(client, up->ratio_chk_min_distance_reg, up->ratio_chk_min_distance), error); buf[0] = GET_LOW_U8_FROM_U16(up->min_time_thres); buf[1] = GET_HIGH_U8_FROM_U16(up->min_time_thres); DO_SAFE(touch_i2c_write(client, up->min_time_thres_reg, 2, buf), error); buf[0] = GET_LOW_U8_FROM_U16(up->max_time_thres); buf[1] = GET_HIGH_U8_FROM_U16(up->max_time_thres); DO_SAFE(touch_i2c_write(client, up->max_time_thres_reg, 2, buf), error); buf[0] = GET_LOW_U8_FROM_U16(up->active_area_x0); buf[1] = GET_HIGH_U8_FROM_U16(up->active_area_x0); DO_SAFE(touch_i2c_write(client, up->active_area_x0_reg, 2, buf), error); buf[0] = GET_LOW_U8_FROM_U16(up->active_area_y0); buf[1] = GET_HIGH_U8_FROM_U16(up->active_area_y0); DO_SAFE(touch_i2c_write(client, up->active_area_y0_reg, 2, buf), error); buf[0] = GET_LOW_U8_FROM_U16(up->active_area_x1); buf[1] = GET_HIGH_U8_FROM_U16(up->active_area_x1); DO_SAFE(touch_i2c_write(client, up->active_area_x1_reg, 2, buf), error); buf[0] = GET_LOW_U8_FROM_U16(up->active_area_y1); buf[1] = GET_HIGH_U8_FROM_U16(up->active_area_y1); DO_SAFE(touch_i2c_write(client, up->active_area_y1_reg, 2, buf), error); DO_SAFE(synaptics_ts_set_page(client, DEFAULT_PAGE), error); } else { TOUCH_I("swipe_up is not used.\n"); } return 0; error: synaptics_ts_set_page(client, DEFAULT_PAGE); TOUCH_E("%s failed\n", __func__); return -EPERM; } static int swipe_enable(struct synaptics_ts_data *ts) { struct swipe_data *swp = &ts->swipe; if (swp->support_swipe & SUPPORT_SWIPE_DOWN) swipe_down_enable(ts); if (swp->support_swipe & SUPPORT_SWIPE_UP) swipe_up_enable(ts); return 0; } static int swipe_down_disable(struct synaptics_ts_data *ts) { struct i2c_client *client = ts->client; struct swipe_data *swp = &ts->swipe; struct swipe_ctrl_info *down = &swp->down; u8 buf[1] = {0}; TOUCH_I("%s: (swipe_mode:0x%02X)\n", __func__, swp->swipe_mode); DO_SAFE(synaptics_ts_set_page(client, LPWG_PAGE), error); DO_SAFE(touch_i2c_read(client, swp->enable_reg, 1, buf), error); DO_SAFE(touch_i2c_write_byte(client, swp->enable_reg, buf[0] & ~(down->enable_mask)), error); DO_SAFE(synaptics_ts_set_page(client, DEFAULT_PAGE), error); return 0; error: synaptics_ts_set_page(client, DEFAULT_PAGE); TOUCH_E("%s failed\n", __func__); return -EPERM; } static int swipe_up_disable(struct synaptics_ts_data *ts) { struct i2c_client *client = ts->client; struct swipe_data *swp = &ts->swipe; struct swipe_ctrl_info *up = &swp->up; u8 buf[1] = {0}; TOUCH_I("%s: (swipe_mode:0x%02X)\n", __func__, swp->swipe_mode); DO_SAFE(synaptics_ts_set_page(client, LPWG_PAGE), error); DO_SAFE(touch_i2c_read(client, swp->enable_reg, 1, buf), error); DO_SAFE(touch_i2c_write_byte(client, swp->enable_reg, buf[0] & ~(up->enable_mask)), error); DO_SAFE(synaptics_ts_set_page(client, DEFAULT_PAGE), error); return 0; error: synaptics_ts_set_page(client, DEFAULT_PAGE); TOUCH_E("%s failed\n", __func__); return -EPERM; } static int swipe_disable(struct synaptics_ts_data *ts) { struct swipe_data *swp = &ts->swipe; if (swp->support_swipe & SUPPORT_SWIPE_DOWN) swipe_down_disable(ts); if (swp->support_swipe & SUPPORT_SWIPE_UP) swipe_up_disable(ts); return 0; } static int get_tci_data(struct synaptics_ts_data *ts, int count) { struct i2c_client *client = ts->client; u8 i = 0; u8 buffer[12][4] = {{0} }; ts->pw_data.data_num = count; if (!count) return 0; DO_SAFE(synaptics_ts_page_data_read(client, LPWG_PAGE, ts->f51_reg.lpwg_data_reg, 4 * count, &buffer[0][0]), error); TOUCH_D(DEBUG_BASE_INFO || DEBUG_LPWG, "%s : knock code coordinates, count = %d\n", 
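/*
 * Each tap occupies 4 bytes of the LPWG data register - presumably
 * x LSB/MSB then y LSB/MSB, given the TS_POSITION(msb, lsb)
 * unpacking below - hence the 4 * count read above.
 */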
__func__, count); for (i = 0; i < count; i++) { ts->pw_data.data[i].x = TS_POSITION(buffer[i][1], buffer[i][0]); ts->pw_data.data[i].y = TS_POSITION(buffer[i][3], buffer[i][2]); if (ts->pdata->role->use_security_mode) { if (ts->lpwg_ctrl.password_enable) { TOUCH_I("LPWG data xxxx, xxxx\n"); } else { TOUCH_I("LPWG data %d, %d\n", ts->pw_data.data[i].x, ts->pw_data.data[i].y); } } else { TOUCH_I("LPWG data %d, %d\n", ts->pw_data.data[i].x, ts->pw_data.data[i].y); } } return 0; error: TOUCH_E("%s, %d : get tci_control failed, count : %d\n", __func__, __LINE__, count); return -EPERM; } static void set_lpwg_mode(struct lpwg_control *ctrl, int mode) { ctrl->double_tap_enable = ((mode == LPWG_DOUBLE_TAP) | (mode == LPWG_PASSWORD)) ? 1 : 0; ctrl->password_enable = (mode == LPWG_PASSWORD) ? 1 : 0; ctrl->signature_enable = (mode == LPWG_SIGNATURE) ? 1 : 0; ctrl->lpwg_is_enabled = ctrl->double_tap_enable || ctrl->password_enable || ctrl->signature_enable; } static int sleep_control(struct synaptics_ts_data *ts, int mode, int recal) { u8 curr = 0; u8 next = 0; int ret = 0; /* * NORMAL == 0 : resume & lpwg state * SLEEP == 1 : uevent reporting time - sleep * NO_CAL == 2 : proxi near - sleep when recal is not needed */ DO_SAFE(touch_i2c_read(ts->client, DEVICE_CONTROL_REG, 1, &curr), error); TOUCH_D(DEBUG_BASE_INFO, "%s : curr:0x%02x\n", __func__, curr); if (mode == 3) { /* in this case, reset Touch IC for sensor reset -> IC Reset */ TOUCH_I("IC Soft reset for sensor reset.\n"); if (synaptics_ts_ic_ctrl(ts->client, IC_CTRL_RESET, DEVICE_COMMAND_RESET, &ret) < 0) TOUCH_E("IC_RESET handling fail\n"); return 0; } if (is_product(ts, "PLG446", 6) || is_product(ts, "PLG468", 6)) { if (((curr & 0xF8) == DEVICE_CONTROL_SLEEP) && !mode) { /* curr - sleep, next - sleep */ TOUCH_D(DEBUG_BASE_INFO, "%s : It's odd case (sleep)(sleep)\n", __func__); return 0; } next = (curr & 0xF8) | (mode ? DEVICE_CONTROL_NOSLEEP : DEVICE_CONTROL_SLEEP); TOUCH_D(DEBUG_BASE_INFO, "%s : next:0x%02x\n", __func__, next); } else { next = (curr & 0xF8) | (mode ? DEVICE_CONTROL_NORMAL_OP : DEVICE_CONTROL_SLEEP); /* (recal ? DEVICE_CONTROL_SLEEP * : DEVICE_CONTROL_SLEEP_NO_RECAL); */ TOUCH_D(DEBUG_BASE_INFO, "%s : next:0x%02x\n", __func__, next); } /*TOUCH_D(DEBUG_BASE_INFO, "%s : curr = [%6s] next[%6s]\n", __func__, (curr == 0 ? "NORMAL" : (curr == 1 ? "SLEEP" : "NOSLEEP")), (next == 0 ? "NORMAL" : (next == 1 ? 
"SLEEP" : "NOSLEEP")));*/ DO_SAFE(touch_i2c_write_byte(ts->client, DEVICE_CONTROL_REG, next), error); return 0; error: TOUCH_E("%s, %d : sleep control failed\n", __func__, __LINE__); return -EPERM; } static int lpwg_control(struct synaptics_ts_data *ts, int mode) { set_lpwg_mode(&ts->lpwg_ctrl, mode); synaptics_toggle_swipe(ts->client); switch (mode) { case LPWG_SIGNATURE: break; case LPWG_DOUBLE_TAP: /* Only TCI-1 */ tci_control(ts, TCI_ENABLE_CTRL, 1); /* Tci-1 enable */ tci_control(ts, TAP_COUNT_CTRL, 2); /* tap count = 2 */ tci_control(ts, MIN_INTERTAP_CTRL, 0); /* min inter_tap = 60ms */ tci_control(ts, MAX_INTERTAP_CTRL, 70); /* max inter_tap = 700ms */ tci_control(ts, TOUCH_SLOP_CTRL, 100); /* touch_slop = 10mm */ tci_control(ts, TAP_DISTANCE_CTRL, 10); /* tap distance = 10mm */ tci_control(ts, INTERRUPT_DELAY_CTRL, 0); /* interrupt delay = 0ms */ tci_control(ts, TCI_ENABLE_CTRL2, 0); /* Tci-2 disable */ if (is_product(ts, "PLG349", 6)) { /* wakeup_gesture_only */ tci_control(ts, REPORT_MODE_CTRL, 1); } if (is_product(ts, "PLG446", 6) || is_product(ts, "PLG468", 6)) { if (lpwg_by_lcd_notifier) TOUCH_I( "Partial LPWG doens't work after LPWG ON command\n"); else tci_control(ts, PARTIAL_LPWG_ON, 1); } break; case LPWG_PASSWORD: /* TCI-1 and TCI-2 */ tci_control(ts, TCI_ENABLE_CTRL, 1); /* Tci-1 enable */ tci_control(ts, TAP_COUNT_CTRL, 2); /* tap count = 2 */ tci_control(ts, MIN_INTERTAP_CTRL, 0); /* min inter_tap = 60ms */ tci_control(ts, MAX_INTERTAP_CTRL, 70); /* max inter_tap = 700ms */ tci_control(ts, TOUCH_SLOP_CTRL, 100); /* touch_slop = 10mm */ tci_control(ts, TAP_DISTANCE_CTRL, 7); /* tap distance = 7mm */ tci_control(ts, INTERRUPT_DELAY_CTRL, (u8)ts->pw_data.double_tap_check); tci_control(ts, TCI_ENABLE_CTRL2, 1); /* Tci-2 ensable */ tci_control(ts, TAP_COUNT_CTRL2, (u8)ts->pw_data.tap_count); /* tap count = user_setting */ tci_control(ts, MIN_INTERTAP_CTRL2, 0); /* min inter_tap = 60ms */ tci_control(ts, MAX_INTERTAP_CTRL2, 70); /* max inter_tap = 700ms */ tci_control(ts, TOUCH_SLOP_CTRL2, 100); /* touch_slop = 10mm */ tci_control(ts, TAP_DISTANCE_CTRL2, 255); /* tap distance = MAX */ tci_control(ts, INTERRUPT_DELAY_CTRL2, 0); /* interrupt delay = 0ms */ /* wakeup_gesture_only */ if (is_product(ts, "PLG349", 6)) tci_control(ts, REPORT_MODE_CTRL, 1); if (is_product(ts, "PLG446", 6) || is_product(ts, "PLG468", 6)) { if (lpwg_by_lcd_notifier) TOUCH_I( "Partial LPWG doens't work after LPWG ON command\n"); else tci_control(ts, PARTIAL_LPWG_ON, 1); } break; default: if (is_product(ts, "PLG446", 6) || is_product(ts, "PLG468", 6)) tci_control(ts, PARTIAL_LPWG_ON, 0); tci_control(ts, TCI_ENABLE_CTRL, 0); /* Tci-1 disable */ tci_control(ts, TCI_ENABLE_CTRL2, 0); /* tci-2 disable */ if (is_product(ts, "PLG349", 6)) tci_control(ts, REPORT_MODE_CTRL, 0); /* normal */ break; } TOUCH_I("%s : lpwg_mode[%d]\n", __func__, mode); return 0; } struct synaptics_ts_exp_fhandler { struct synaptics_ts_exp_fn *exp_fn; bool inserted; bool initialized; }; static struct synaptics_ts_exp_fhandler prox_fhandler; static struct synaptics_ts_exp_fhandler rmidev_fhandler; void synaptics_ts_prox_function(struct synaptics_ts_exp_fn *prox_fn, bool insert) { prox_fhandler.inserted = insert; if (insert) prox_fhandler.exp_fn = prox_fn; else prox_fhandler.exp_fn = NULL; return; } void synaptics_ts_rmidev_function(struct synaptics_ts_exp_fn *rmidev_fn, bool insert) { rmidev_fhandler.inserted = insert; if (insert) rmidev_fhandler.exp_fn = rmidev_fn; else rmidev_fhandler.exp_fn = NULL; return; } void 
matchUp_f51_regMap(struct synaptics_ts_data *ts) { TOUCH_I("Start [%s]\n", __func__); if (is_product(ts, "PLG349", 6) || is_product(ts, "s3320", 6) || is_product(ts, "PLG446", 6) || is_product(ts, "PLG468", 6)) { if (is_product(ts, "PLG349", 6)) TOUCH_I("[%s] This is Z2\n", __func__); else TOUCH_I("[%s] This is P1\n", __func__); ts->f51_reg.lpwg_status_reg = LPWG_STATUS_REG; ts->f51_reg.lpwg_data_reg = LPWG_DATA_REG; ts->f51_reg.lpwg_tapcount_reg = LPWG_TAPCOUNT_REG; ts->f51_reg.lpwg_min_intertap_reg = LPWG_MIN_INTERTAP_REG; ts->f51_reg.lpwg_max_intertap_reg = LPWG_MAX_INTERTAP_REG; ts->f51_reg.lpwg_touch_slop_reg = LPWG_TOUCH_SLOP_REG; ts->f51_reg.lpwg_tap_distance_reg = LPWG_TAP_DISTANCE_REG; ts->f51_reg.lpwg_interrupt_delay_reg = LPWG_INTERRUPT_DELAY_REG; ts->f51_reg.lpwg_tapcount_reg2 = (LPWG_TAPCOUNT_REG + LPWG_BLKSIZ); ts->f51_reg.lpwg_min_intertap_reg2 = (LPWG_MIN_INTERTAP_REG + LPWG_BLKSIZ); ts->f51_reg.lpwg_max_intertap_reg2 = (LPWG_MAX_INTERTAP_REG + LPWG_BLKSIZ); ts->f51_reg.lpwg_touch_slop_reg2 = (LPWG_TOUCH_SLOP_REG + LPWG_BLKSIZ); ts->f51_reg.lpwg_tap_distance_reg2 = (LPWG_TAP_DISTANCE_REG + LPWG_BLKSIZ); ts->f51_reg.lpwg_interrupt_delay_reg2 = (LPWG_INTERRUPT_DELAY_REG + LPWG_BLKSIZ); if (is_product(ts, "PLG468", 6)) { if (ts->lpwg_ctrl.has_lpwg_overtap_module && ts->lpwg_ctrl.has_request_reset_reg) { ts->f51_reg.overtap_cnt_reg = ts->f51.dsc.data_base + 57; ts->f51_reg.request_reset_reg = ts->f51.dsc.data_base + 69; } ts->f51_reg.lpwg_partial_reg = LPWG_PARTIAL_REG + 71; ts->f51_reg.lpwg_fail_count_reg = ts->f51.dsc.data_base + 0x21; ts->f51_reg.lpwg_fail_index_reg = ts->f51.dsc.data_base + 0x22; ts->f51_reg.lpwg_fail_reason_reg = ts->f51.dsc.data_base + 0x23; } else { if (ts->lpwg_ctrl.has_lpwg_overtap_module) { ts->f51_reg.overtap_cnt_reg = ts->f51.dsc.data_base + 73; ts->f51_reg.lpwg_adc_offset_reg = ts->f51_reg.lpwg_interrupt_delay_reg2 + 44; ts->f51_reg.lpwg_adc_fF_reg1 = ts->f51_reg.lpwg_interrupt_delay_reg2 + 45; ts->f51_reg.lpwg_adc_fF_reg2 = ts->f51_reg.lpwg_interrupt_delay_reg2 + 46; ts->f51_reg.lpwg_adc_fF_reg3 = ts->f51_reg.lpwg_interrupt_delay_reg2 + 47; ts->f51_reg.lpwg_adc_fF_reg4 = ts->f51_reg.lpwg_interrupt_delay_reg2 + 48; } else { ts->f51_reg.lpwg_adc_offset_reg = ts->f51_reg.lpwg_interrupt_delay_reg2 + 45; ts->f51_reg.lpwg_adc_fF_reg1 = ts->f51_reg.lpwg_interrupt_delay_reg2 + 46; ts->f51_reg.lpwg_adc_fF_reg2 = ts->f51_reg.lpwg_interrupt_delay_reg2 + 47; ts->f51_reg.lpwg_adc_fF_reg3 = ts->f51_reg.lpwg_interrupt_delay_reg2 + 48; ts->f51_reg.lpwg_adc_fF_reg4 = ts->f51_reg.lpwg_interrupt_delay_reg2 + 49; } ts->f51_reg.lpwg_partial_reg = LPWG_PARTIAL_REG; ts->f51_reg.lpwg_fail_count_reg = ts->f51.dsc.data_base + 0x31; ts->f51_reg.lpwg_fail_index_reg = ts->f51.dsc.data_base + 0x32; ts->f51_reg.lpwg_fail_reason_reg = ts->f51.dsc.data_base + 0x33; } } else { TOUCH_I("[%s] No supported product.\n", __func__); return; } TOUCH_I("[%s] Complete to match-up regmap.\n", __func__); return; } void matchUp_f54_regMap(struct synaptics_ts_data *ts) { if (is_product(ts, "PLG349", 6)) { TOUCH_I("[%s] This is Z2\n", __func__); ts->f54_reg.interference__metric_LSB = 0x04; ts->f54_reg.interference__metric_MSB = 0x05; ts->f54_reg.current_noise_status = 0x08; ts->f54_reg.cid_im = 0x09; ts->f54_reg.freq_scan_im = 0x0A; } else if (is_product(ts, "s3320", 5) || is_product(ts, "PLG446", 6)) { TOUCH_I("[%s] This is P1\n", __func__); ts->f54_reg.interference__metric_LSB = 0x04; ts->f54_reg.interference__metric_MSB = 0x05; ts->f54_reg.current_noise_status = 0x08; ts->f54_reg.cid_im = 
		0x0A;
		ts->f54_reg.freq_scan_im = 0x0B;
		ts->f54_reg.incell_statistic = 0x10;
	} else if (is_product(ts, "PLG468", 6)) {
		TOUCH_I("[%s] This is P1\n", __func__);
		ts->f54_reg.interference__metric_LSB = 0x05;
		ts->f54_reg.interference__metric_MSB = 0x06;
		ts->f54_reg.current_noise_status = 0x09;
		ts->f54_reg.freq_scan_im = 0x0A;
	} else {
		TOUCH_I("[%s] No supported product.\n", __func__);
		return;
	}

	TOUCH_I("[%s] Complete to match-up regmap.\n", __func__);
	return;
}

void get_f12_info(struct synaptics_ts_data *ts)
{
	int retval;
	struct synaptics_ts_f12_query_5 query_5;
	struct synaptics_ts_f12_query_8 query_8;
	struct synaptics_ts_f12_ctrl_23 ctrl_23;
	struct synaptics_ts_f12_ctrl_8 ctrl_8;
	int i;
	u8 offset;
	u32 query_5_present = 0;
	u16 query_8_present = 0;

	if (!ts) {
		TOUCH_E("ts is null\n");
		return;
	}

	/* ctrl_reg_info setting */
	retval = touch_i2c_read(ts->client, (ts->f12.dsc.query_base + 5),
			sizeof(query_5.data), query_5.data);
	if (retval < 0) {
		TOUCH_E(
		"Failed to read from F12_2D_QUERY_05_Control_Presence register\n");
		return;
	}

	query_5_present = (query_5_present << 8) | query_5.data[4];
	query_5_present = (query_5_present << 8) | query_5.data[3];
	query_5_present = (query_5_present << 8) | query_5.data[2];
	query_5_present = (query_5_present << 8) | query_5.data[1];
	TOUCH_I("query_5_present=0x%08X [%02X %02X %02X %02X %02X]\n",
			query_5_present, query_5.data[0], query_5.data[1],
			query_5.data[2], query_5.data[3], query_5.data[4]);

	offset = 0;
	for (i = 0; i < 32; i++) {
		ts->f12_reg.ctrl[i] = ts->f12.dsc.control_base + offset;
		if (query_5_present & (1 << i)) {
			TOUCH_I(
			"ts->f12_reg.ctrl[%d]=0x%02X (0x%02x+%d)\n",
					i, ts->f12_reg.ctrl[i],
					ts->f12.dsc.control_base, offset);
			offset++;
		}
	}

	/* data_reg_info setting */
	retval = touch_i2c_read(ts->client, (ts->f12.dsc.query_base + 8),
			sizeof(query_8.data), query_8.data);
	if (retval < 0) {
		TOUCH_E(
		"Failed to read from F12_2D_QUERY_08_Data_Presence register\n"
		);
		return;
	}

	query_8_present = (query_8_present << 8) | query_8.data[2];
	query_8_present = (query_8_present << 8) | query_8.data[1];
	TOUCH_I("query_8_present=0x%08X [%02X %02X %02X]\n",
			query_8_present, query_8.data[0],
			query_8.data[1], query_8.data[2]);

	offset = 0;
	for (i = 0; i < 16; i++) {
		ts->f12_reg.data[i] = ts->f12.dsc.data_base + offset;
		if (query_8_present & (1 << i)) {
			TOUCH_I(
			"ts->f12_reg.data[%d]=0x%02X (0x%02x+%d)\n",
					i, ts->f12_reg.data[i],
					ts->f12.dsc.data_base, offset);
			offset++;
		}
	}

	retval = touch_i2c_read(ts->client, ts->f12_reg.ctrl[23],
			sizeof(ctrl_23.data), ctrl_23.data);
	ts->object_report = ctrl_23.obj_type_enable;
	ts->num_of_fingers = min_t(u8, ctrl_23.max_reported_objects,
			(u8) MAX_NUM_OF_FINGERS);
	TOUCH_I(
	"ts->object_report[0x%02X], ts->num_of_fingers[%d]\n",
			ts->object_report, ts->num_of_fingers);

	retval = touch_i2c_read(ts->client, ts->f12_reg.ctrl[8],
			sizeof(ctrl_8.data), ctrl_8.data);
	TOUCH_I(
	"ctrl_8 - sensor_max_x[%d], sensor_max_y[%d]\n",
			((unsigned short)ctrl_8.max_x_coord_lsb << 0) |
			((unsigned short)ctrl_8.max_x_coord_msb << 8),
			((unsigned short)ctrl_8.max_y_coord_lsb << 0) |
			((unsigned short)ctrl_8.max_y_coord_msb << 8));
	return;
}

void get_finger_amplitude(struct synaptics_ts_data *ts)
{
	int retval = 0;
	u8 buf[2] = {0};
	u8 min_peak_amplitude = 0;
	u16 saturation_cap = 0;
	u8 temp_min_finger_amplitude = 0;

	retval = touch_i2c_read(ts->client, ts->f12_reg.ctrl[15],
			sizeof(buf), buf);
	if (retval < 0) {
		TOUCH_E("Failed to read finger_amplitude data\n");
		return;
	}
	ts->default_finger_amplitude = buf[0];
	ts->default_small_finger_amplitude = buf[1];
	TOUCH_I(
"default_finger_amplitude = 0x%02X, default_small_finger_amplitude = 0x%02X\n", ts->default_finger_amplitude, ts->default_small_finger_amplitude); retval = touch_i2c_read(ts->client, ts->f12_reg.ctrl[10], sizeof(buf), buf); if (retval < 0) { TOUCH_E("Failed to read min_peak_amplitude data\n"); return; } min_peak_amplitude = buf[1]; TOUCH_I("min_peak_amplitude = 0x%02X\n", min_peak_amplitude); retval = synaptics_ts_page_data_read(ts->client, ANALOG_PAGE, SATURATION_CAP_LSB_REG, 1, &buf[0]); if (retval < 0) { TOUCH_E("Failed to read saturation_cap_lsb data\n"); return; } retval = synaptics_ts_page_data_read(ts->client, ANALOG_PAGE, SATURATION_CAP_MSB_REG, 1, &buf[1]); if (retval < 0) { TOUCH_E("Failed to read saturation_cap_msb data\n"); return; } saturation_cap = (u16)((buf[1] << 8) & 0xff00) | (u16)(buf[0] & 0x00ff); TOUCH_I("saturation_cap = 0x%04X\n", saturation_cap); if (saturation_cap == 0) saturation_cap = 1; temp_min_finger_amplitude = 1 + ((min_peak_amplitude * 0xff) / saturation_cap); ts->min_finger_amplitude = ts->default_finger_amplitude; TOUCH_I("min_finger_amplitude = 0x%02X\n", ts->min_finger_amplitude); return; } static int synaptics_get_cap_diff(struct synaptics_ts_data *ts) { int t_diff = 0; int r_diff = 0; int x = 0; int y = 0; int ret = 0; s8 *rx_cap_diff = NULL; s8 *tx_cap_diff = NULL; unsigned short *raw_cap = NULL; char *f54_cap_wlog_buf = NULL; static int cap_outbuf; unsigned char txcnt = TxChannelCount; unsigned char rxcnt = RxChannelCount; /* allocation of cap_diff */ rx_cap_diff = NULL; tx_cap_diff = NULL; raw_cap = NULL; cap_outbuf = 0; ASSIGN(rx_cap_diff = kzalloc(rxcnt * sizeof(u8), GFP_KERNEL), error_mem); ASSIGN(tx_cap_diff = kzalloc(txcnt * sizeof(u8), GFP_KERNEL), error_mem); ASSIGN(raw_cap = kzalloc(txcnt * rxcnt * sizeof(unsigned char) * 2, GFP_KERNEL), error_mem); ASSIGN(f54_cap_wlog_buf = kzalloc(DS5_BUFFER_SIZE, GFP_KERNEL), error_mem); memset(f54_cap_wlog_buf, 0, DS5_BUFFER_SIZE); if (diffnode(raw_cap) < 0) { TOUCH_I("check_diff_node fail!!\n"); kfree(rx_cap_diff); kfree(tx_cap_diff); kfree(raw_cap); kfree(f54_cap_wlog_buf); return -EAGAIN; } ts->bad_sample = 0; for (y = 0; y < (int)rxcnt - 1; y++) { t_diff = 0; for (x = 0; x < (int)txcnt; x++) t_diff += (raw_cap[x * rxcnt + y + 1] - raw_cap[x * rxcnt + y]); t_diff = t_diff / (int)txcnt; if (jitter_abs(t_diff) > 1000) ts->bad_sample = 1; if (t_diff < -0x7F) /* limit diff max */ rx_cap_diff[y + 1] = -0x7F; else if (t_diff > 0x7F) rx_cap_diff[y + 1] = 0x7F; else rx_cap_diff[y + 1] = (s8)t_diff; /*need to modify*/ cap_outbuf += snprintf(f54_cap_wlog_buf+cap_outbuf, DS5_BUFFER_SIZE - cap_outbuf, "%5d\n", rx_cap_diff[y + 1]); } if (tx_cap_diff != NULL && ts->bad_sample == 0) { for (x = 0; x < (int)txcnt - 1; x++) { r_diff = 0; for (y = 0; y < (int)rxcnt; y++) r_diff += (raw_cap[(x + 1) * rxcnt + y] - raw_cap[x * rxcnt + y]); r_diff = r_diff / (int)rxcnt; /*need to tunning limit_value*/ if (jitter_abs(r_diff) > 1000) ts->bad_sample = 1; /* limit diff max */ if (r_diff < -0x7F) tx_cap_diff[x + 1] = -0x7F; else if (r_diff > 0x7F) tx_cap_diff[x + 1] = 0x7F; else tx_cap_diff[x + 1] = (s8)r_diff; /*need to modify*/ cap_outbuf += snprintf(f54_cap_wlog_buf+cap_outbuf, DS5_BUFFER_SIZE - cap_outbuf, "%5d\n", tx_cap_diff[x + 1]); } } /*need to modify*/ if (write_log(CAP_FILE_PATH, f54_cap_wlog_buf) == 1) raw_cap_file_exist = 1; read_log(CAP_FILE_PATH, ts->pdata); /*average of Rx_line Cap Value*/ kfree(rx_cap_diff); kfree(tx_cap_diff); kfree(raw_cap); kfree(f54_cap_wlog_buf); return ret; error_mem: 
TOUCH_I("error_mem\n"); return -ENOMEM; } static char *productcode_parse(unsigned char *product) { static char str[128] = {0}; int len = 0; char inch[2] = {0}; char paneltype = 0; char version[2] = {0}; const char *str_panel[] = { "ELK", "Suntel", "Tovis", "Innotek", "JDI", "LGD", }; const char *str_ic[] = { "Synaptics", }; int i; i = (product[0] & 0xF0) >> 4; if (i < 6) len += snprintf(str + len, sizeof(str)-len, "%s\n", str_panel[i]); else len += snprintf(str + len, sizeof(str)-len, "Unknown\n"); i = (product[0] & 0x0F); if (i < 5 && i != 1) len += snprintf(str + len, sizeof(str)-len, "%dkey\n", i); else len += snprintf(str + len, sizeof(str)-len, "Unknown\n"); i = (product[1] & 0xF0) >> 4; if (i < 1) len += snprintf(str + len, sizeof(str)-len, "%s\n", str_ic[i]); else len += snprintf(str + len, sizeof(str)-len, "Unknown\n"); inch[0] = (product[1] & 0x0F); inch[1] = ((product[2] & 0xF0) >> 4); len += snprintf(str+len, sizeof(str)-len, "%d.%d\n", inch[0], inch[1]); paneltype = (product[2] & 0x0F); len += snprintf(str+len, sizeof(str)-len, "PanelType %d\n", paneltype); version[0] = ((product[3] & 0x80) >> 7); version[1] = (product[3] & 0x7F); len += snprintf(str+len, sizeof(str)-len, "version : v%d.%02d\n", version[0], version[1]); return str; } static void lpwg_timer_func(struct work_struct *work_timer) { struct synaptics_ts_data *ts = container_of(to_delayed_work(work_timer), struct synaptics_ts_data, work_timer); send_uevent_lpwg(ts->client, LPWG_PASSWORD); wake_unlock(&ts->timer_wake_lock); TOUCH_D(DEBUG_LPWG, "u-event timer occur!\n"); return; } static void all_palm_released_func(struct work_struct *work_palm) { struct synaptics_ts_data *ts = container_of(to_delayed_work(work_palm), struct synaptics_ts_data, work_palm); ts->palm.all_released = false; TOUCH_I("%s: ABS0 event disable time is expired.\n", __func__); return; } static void sleepmode_func(struct work_struct *work_sleep) { struct synaptics_ts_data *ts = container_of(to_delayed_work(work_sleep), struct synaptics_ts_data, work_sleep); mutex_lock(&ts->pdata->thread_lock); synaptics_change_sleepmode(ts->client); mutex_unlock(&ts->pdata->thread_lock); return; } static int get_binFW_version(struct synaptics_ts_data *ts) { const struct firmware *fw_entry = NULL; const u8 *firmware = NULL; int rc = 0; rc = request_firmware(&fw_entry, ts->pdata->inbuilt_fw_name, &ts->client->dev); if (rc != 0) { TOUCH_E("[%s] request_firmware() failed %d\n", __func__, rc); return -EIO; } firmware = fw_entry->data; memcpy(ts->fw_info.img_product_id, &firmware[ts->pdata->fw_pid_addr], 6); memcpy(ts->fw_info.img_version, &firmware[ts->pdata->fw_ver_addr], 4); release_firmware(fw_entry); return rc; } static ssize_t show_firmware(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; int rc = 0; u8 crc_buffer = 0; mutex_lock(&ts->pdata->thread_lock); read_page_description_table(ts->client); rc = get_ic_info(ts); rc += get_binFW_version(ts); mutex_unlock(&ts->pdata->thread_lock); if (rc < 0) { ret += snprintf(buf + ret, PAGE_SIZE - ret, "-1\n"); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Read Fail Touch IC Info or Touch Bin Info.\n"); return ret; } ret = snprintf(buf + ret, PAGE_SIZE - ret, "\n======== Firmware Info ========\n"); if (ts->fw_info.version[0] > 0x50) { ret += snprintf(buf + ret, PAGE_SIZE - ret, "ic_version[%s]\n", ts->fw_info.version); } else { ret += snprintf(buf + ret, PAGE_SIZE - ret, "ic_version RAW = %02X %02X %02X %02X\n", ts->fw_info.version[0], 
ts->fw_info.version[1], ts->fw_info.version[2], ts->fw_info.version[3]); ret += snprintf(buf+ret, PAGE_SIZE-ret, "=== ic_fw_version info ===\n%s", productcode_parse(ts->fw_info.version)); } ret += snprintf(buf + ret, PAGE_SIZE - ret, "IC_product_id[%s]\n", ts->fw_info.product_id); if (is_product(ts, "PLG349", 6)) { ret += snprintf(buf + ret, PAGE_SIZE - ret, "Touch IC : s3528\n\n"); } else if (is_product(ts, "s3320", 5) || is_product(ts, "PLG468", 6)) { rc = synaptics_ts_page_data_read(client, ANALOG_PAGE, CALIBRATION_STATUS_REG, 1, &crc_buffer); if (rc < 0) { TOUCH_I("Can not read Calibration CRC Register\n"); crc_buffer = -1; } crc_buffer &= 0x03; ret += snprintf(buf + ret, PAGE_SIZE - ret, "Touch IC : s3320(cal_crc : %u) \n\n", crc_buffer); } else if (is_product(ts, "PLG446", 6)) { if (ts->pdata->role->fw_index == BL_VER_HIGHER) ret += snprintf(buf + ret, PAGE_SIZE - ret, "Touch IC : s3320 / BL 7.2\n\n"); else ret += snprintf(buf + ret, PAGE_SIZE - ret, "Touch IC : s3320 / BL 6.0\n\n"); } else { ret += snprintf(buf + ret, PAGE_SIZE - ret, "Touch product ID read fail\n\n"); } if (ts->fw_info.img_version[0] > 0x50) { ret += snprintf(buf + ret, PAGE_SIZE - ret, "img_version[%s]\n", ts->fw_info.img_version); } else { ret += snprintf(buf + ret, PAGE_SIZE - ret, "img_version RAW = %02X %02X %02X %02X\n", ts->fw_info.img_version[0], ts->fw_info.img_version[1], ts->fw_info.img_version[2], ts->fw_info.img_version[3]); ret += snprintf(buf + ret, PAGE_SIZE - ret, "=== img_version info ===\n%s", productcode_parse(ts->fw_info.img_version)); } ret += snprintf(buf + ret, PAGE_SIZE - ret, "Img_product_id[%s]\n", ts->fw_info.img_product_id); if (is_img_product(ts, "PLG349", 6)) { ret += snprintf(buf + ret, PAGE_SIZE - ret, "Touch IC : s3528\n"); } else if (is_img_product(ts, "s3320", 5) || is_img_product(ts, "PLG446", 6) || is_img_product(ts, "PLG468", 6)) { ret += snprintf(buf + ret, PAGE_SIZE - ret, "Touch IC : s3320\n"); } else { ret += snprintf(buf + ret, PAGE_SIZE - ret, "Touch product ID read fail\n"); } return ret; } static ssize_t show_synaptics_fw_version(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; int rc = 0; mutex_lock(&ts->pdata->thread_lock); read_page_description_table(ts->client); rc = get_ic_info(ts); mutex_unlock(&ts->pdata->thread_lock); if (rc < 0) { ret += snprintf(buf + ret, PAGE_SIZE - ret, "-1\n"); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Read Fail Touch IC Info.\n"); return ret; } ret = snprintf(buf + ret, PAGE_SIZE - ret, "\n======== Auto Touch Test ========\n"); if (ts->fw_info.version[0] > 0x50) { ret += snprintf(buf + ret, PAGE_SIZE - ret, "ic_version[%s]\n", ts->fw_info.version); } else { ret += snprintf(buf + ret, PAGE_SIZE - ret, "version : (v%d.%02d)\n", ((ts->fw_info.version[3] & 0x80) >> 7), (ts->fw_info.version[3] & 0x7F)); } ret += snprintf(buf + ret, PAGE_SIZE - ret, "IC_product_id[%s]\n", ts->fw_info.product_id); if (is_product(ts, "PLG349", 6)) { ret += snprintf(buf + ret, PAGE_SIZE - ret, "Touch IC : s3528\n\n"); } else if (is_product(ts, "s3320", 5) || is_product(ts, "PLG446", 6) || is_product(ts, "PLG468", 6)) { ret += snprintf(buf + ret, PAGE_SIZE - ret, "Touch IC : s3320\n\n"); } else { ret += snprintf(buf + ret, PAGE_SIZE - ret, "Touch product ID read fail\n\n"); } return ret; } static void check_incalibration_crc(struct i2c_client *client) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int rc = 0; u8 crc_buffer = 0; char 
	crc_file_buf[100] = {0};
	int crc_file_buf_len = 0;

	rc = synaptics_ts_page_data_read(client, ANALOG_PAGE,
			CALIBRATION_STATUS_REG, 1, &crc_buffer);
	if (rc < 0) {
		TOUCH_I("[%s] Can not read Calibration CRC Register\n",
				__func__);
		crc_file_buf_len += snprintf(crc_file_buf + crc_file_buf_len,
				sizeof(crc_file_buf) - crc_file_buf_len,
				"Can not read Calibration CRC Register\n\n");
		write_log(NULL, crc_file_buf);
		return;
	}
	crc_buffer &= 0x03;
	crc_file_buf_len += snprintf(crc_file_buf + crc_file_buf_len,
			sizeof(crc_file_buf) - crc_file_buf_len,
			"LGD_In_Calibration CRC = %d\n\n", crc_buffer);
	write_log(NULL, crc_file_buf);
}

ssize_t _show_sd(struct i2c_client *client, char *buf)
{
	struct synaptics_ts_data *ts =
		(struct synaptics_ts_data *)get_touch_handle(client);
	int ret = 0;
	int full_raw_cap = 0;
	int trx2trx = 0;
	int high_resistance = 0;
	int adc_range = 0;
	int sensor_speed = 0;
	int noise_delta = 0;
	int gnd = 0;
	int Rsp_grp = 0;
	int Rsp_short = 0;
	int Rsp_im = 0;
	int Rsp_coarse_cal = 0;
	int adc_test = 0;
	int lower_img = 0;
	int upper_img = 0;
	int lower_sensor = 0;
	int upper_sensor = 0;
	int lower_adc = 0;
	int upper_adc = 0;
	int noise_limit = 0;
	char *temp_buf = NULL;
	int len = 0;
	int upgrade = 0;

	if (power_state == POWER_ON || power_state == POWER_WAKE) {
		temp_buf = kzalloc(100, GFP_KERNEL);
		if (!temp_buf) {
			TOUCH_I("%s Failed to allocate memory\n", __func__);
			return 0;
		}

		if (!ts->pdata->panel_id && mfts_mode) {
			TOUCH_I("%s JDI MFTS FW UPGRADE\n", __func__);
			get_ic_info(ts);
			upgrade = firmware_upgrade_func_mfts(client);
			if (upgrade == NO_ERROR) {
				TOUCH_I("%s FW Upgrade Done\n", __func__);
				ret += snprintf(buf + ret, PAGE_SIZE - ret,
						"upgraded\n");
				/* free scratch buffer before early return */
				kfree(temp_buf);
				return ret;
			} else if (upgrade == ERROR) {
				TOUCH_I("%s FW Upgrade Error\n", __func__);
				ret += snprintf(buf + ret, PAGE_SIZE - ret,
						"0\n");
				/* free scratch buffer before early return */
				kfree(temp_buf);
				return ret;
			}
			msleep(30);
		}
		write_time_log(NULL, NULL, 0);
		msleep(30);
		write_firmware_version_log(ts);
		mutex_lock(&ts->pdata->thread_lock);
		touch_disable_irq(ts->client->irq);
		SCAN_PDT();

		if (ts->pdata->panel_id) {
			/*RSP Product Test*/
			lower_img = get_limit(TxChannelCount, RxChannelCount,
					*ts->client, ts->pdata,
					"RspLowerImageLimit", LowerImage);
			upper_img = get_limit(TxChannelCount, RxChannelCount,
					*ts->client, ts->pdata,
					"RspUpperImageLimit", UpperImage);
			noise_limit = get_limit(TxChannelCount, RxChannelCount,
					*ts->client, ts->pdata,
					"RspNoiseP2PLimit", RspNoise);
			check_incalibration_crc(client);
			if ((((ts->fw_info.version[3] & 0x80) >> 7) == 1) &&
					((ts->fw_info.version[3] & 0x7F) > 18)) {
				Rsp_im = synaptics_ts_im_test(client);
				msleep(20);
			}
			Rsp_grp = F54Test('q', 0, buf);
			msleep(100);
			Rsp_short = F54Test('s', 0, buf);
			msleep(100);
			Rsp_coarse_cal = F54Test('q', 4, buf);
			msleep(100);
			if (lower_img < 0 || upper_img < 0) {
				TOUCH_I(
				"[%s] lower return = %d upper return = %d\n",
					__func__, lower_img, upper_img);
				TOUCH_I(
				"[%s][FAIL] Can not check the limit of raw cap\n",
					__func__);
				ret = snprintf(buf + ret, PAGE_SIZE - ret,
					"Can not check the limit of raw cap\n");
			} else {
				TOUCH_I(
				"Getting limit of raw cap is success\n");
			}
			if (noise_limit < 0) {
				TOUCH_I(
				"[%s] noise limit return = %d\n",
					__func__, noise_limit);
				TOUCH_I(
				"[%s][FAIL] Can not check the limit of noise\n",
					__func__);
				ret = snprintf(buf + ret, PAGE_SIZE - ret,
					"Can not check the limit of noise\n");
			} else {
				TOUCH_I(
				"Getting limit of noise is success\n");
			}
			synaptics_ts_init(ts->client);
			touch_enable_irq(ts->client->irq);
			mutex_unlock(&ts->pdata->thread_lock);
			msleep(30);
			write_time_log(NULL, NULL, 0);
			msleep(20);

			ret = snprintf(buf, PAGE_SIZE,
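			/* summary header for the RSP self-diagnostic result */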
"========RESULT=======\n"); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Channel Status : %s", (Rsp_short == 1) ? "Pass\n" : "Fail\n"); if ((((ts->fw_info.version[3] & 0x80) >> 7) == 1) && ((ts->fw_info.version[3] & 0x7F) > 18)) { ret += snprintf(buf + ret, PAGE_SIZE - ret, "Raw Data : %s", (Rsp_grp == 1 && Rsp_im == 1 && Rsp_coarse_cal == 1) ? "Pass\n" : "Fail"); if (!(Rsp_grp && Rsp_im && Rsp_coarse_cal)) { ret += snprintf(buf + ret, PAGE_SIZE - ret, " ("); ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s /%s /%s", (Rsp_im == 0 ? " 0" : " 1"), (Rsp_grp == 0 ? " 0" : " 1"), (Rsp_coarse_cal == 0 ? " 0" : " 1")); ret += snprintf(buf + ret, PAGE_SIZE - ret, " )\n"); } } else { ret += snprintf(buf + ret, PAGE_SIZE - ret, "Raw Data : %s", (Rsp_grp == 1) ? "Pass\n" : "Fail\n"); } } else { lower_img = get_limit(TxChannelCount, RxChannelCount, *ts->client, ts->pdata, "LowerImageLimit", LowerImage); upper_img = get_limit(TxChannelCount, RxChannelCount, *ts->client, ts->pdata, "UpperImageLimit", UpperImage); lower_sensor = get_limit(TxChannelCount, RxChannelCount, *ts->client, ts->pdata, "SensorSpeedLowerImageLimit", SensorSpeedLowerImage); upper_sensor = get_limit(TxChannelCount, RxChannelCount, *ts->client, ts->pdata, "SensorSpeedUpperImageLimit", SensorSpeedUpperImage); lower_adc = get_limit(TxChannelCount, RxChannelCount, *ts->client, ts->pdata, "ADCLowerImageLimit", ADCLowerImage); upper_adc = get_limit(TxChannelCount, RxChannelCount, *ts->client, ts->pdata, "ADCUpperImageLimit", ADCUpperImage); adc_test = synaptics_ts_adc_test(client); msleep(20); if (lower_img < 0 || upper_img < 0) { TOUCH_I( "[%s] lower return = %d upper return = %d\n", __func__, lower_img, upper_img); TOUCH_I( "[%s][FAIL] Can not check the limit of raw cap\n", __func__); ret = snprintf(buf + ret, PAGE_SIZE - ret, "Can not check the limit of raw cap\n"); } else { TOUCH_I( "Getting limit of raw cap is success\n"); full_raw_cap = F54Test('a', 0, buf); if (ts->pdata->ref_chk_option[0]) { msleep(30); synaptics_get_cap_diff(ts); } msleep(30); } trx2trx = F54Test('f', 0, buf); msleep(50); high_resistance = F54Test('g', 0, buf); msleep(50); noise_delta = F54Test('x', 0, buf); msleep(100); gnd = F54Test('y', 0, buf); msleep(100); if (lower_sensor < 0 || upper_sensor < 0) { TOUCH_I( "[%s] lower return = %d upper return = %d\n" , __func__, lower_sensor, upper_sensor); TOUCH_I( "[%s][FAIL] Can not check the limit of sensor speed image\n" , __func__); ret = snprintf(buf + ret, PAGE_SIZE - ret, "Can not check the limit of sensor speed image limit\n"); } else { TOUCH_I( "Getting limit of Sensor Speed Test is success\n"); sensor_speed = F54Test('c', 0, buf); } msleep(50); if (lower_adc < 0 || upper_adc < 0) { TOUCH_I( "[%s] lower return = %d upper return = %d\n", __func__, lower_adc, upper_adc); TOUCH_I( "[%s][FAIL] Can not check the limit of ADC image\n", __func__); ret = snprintf(buf + ret, PAGE_SIZE - ret, "Can not check the limit of ADC image limit\n"); } else { TOUCH_I( "Getting limit of ADC Range Test is success\n"); adc_range = F54Test('b', 0, buf); } synaptics_ts_init(ts->client); touch_enable_irq(ts->client->irq); mutex_unlock(&ts->pdata->thread_lock); msleep(30); if (ts->h_err_cnt || ts->v_err_cnt || ts->bad_sample) full_raw_cap = 0; len += snprintf(temp_buf+len, PAGE_SIZE-len, "Cap Diff : %s\n", ts->bad_sample == 0 ? "PASS" : "FAIL"); len += snprintf(temp_buf+len, PAGE_SIZE - len, "Error node Check h_err_cnt: %s(err count:%d)\n", (ts->h_err_cnt == 0 ? 
"PASS" : "FAIL"), ts->h_err_cnt); len += snprintf(temp_buf+len, PAGE_SIZE-len, "Error node Check v_err_cnt: %s(err count:%d)\n\n", (ts->v_err_cnt == 0 ? "PASS" : "FAIL"), ts->v_err_cnt); write_log(NULL, temp_buf); msleep(30); write_time_log(NULL, NULL, 0); msleep(20); ret = snprintf(buf, PAGE_SIZE, "========RESULT=======\n"); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Channel Status : %s", (trx2trx == 1 && high_resistance == 1) ? "Pass\n" : "Fail"); if (!(trx2trx && high_resistance)) { ret += snprintf(buf + ret, PAGE_SIZE - ret, " ("); ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s /%s", (trx2trx == 0 ? " 0" : " 1"), (high_resistance == 0 ? " 0" : " 1")); ret += snprintf(buf + ret, PAGE_SIZE - ret, " )\n"); } ret += snprintf(buf + ret, PAGE_SIZE - ret, "Raw Data : %s", (full_raw_cap > 0 && adc_test == 1) ? "Pass\n" : "Fail"); if (!(full_raw_cap && adc_test)) { ret += snprintf(buf + ret, PAGE_SIZE - ret, " ("); ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s /%s", (full_raw_cap == 0 ? " 0" : " 1"), (adc_test == 0 ? " 0" : " 1")); ret += snprintf(buf + ret, PAGE_SIZE - ret, " )\n"); } } } else { TOUCH_I("Power Suspend Can not Use I2C\n"); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Power Suspend Can not Use I2C\n"); } kfree(temp_buf); return ret; } ssize_t getsd(struct i2c_client *client, char *buf) { ssize_t ret; ret = _show_sd(client, buf); return ret; } EXPORT_SYMBOL(getsd); static ssize_t show_sd(struct i2c_client *client, char *buf) { return _show_sd(client, buf); } static ssize_t show_rawdata(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; int lower_ret = 0; int upper_ret = 0; if (power_state == POWER_ON || power_state == POWER_WAKE || ts->pdata->panel_id == 1) { mutex_lock(&ts->pdata->thread_lock); wake_lock(&ts->touch_rawdata); if (need_scan_pdt) { SCAN_PDT(); need_scan_pdt = false; } touch_disable_irq(ts->client->irq); lower_ret = get_limit(TxChannelCount, RxChannelCount, *ts->client, ts->pdata, "LowerImageLimit", LowerImage); upper_ret = get_limit(TxChannelCount, RxChannelCount, *ts->client, ts->pdata, "UpperImageLimit", UpperImage); if (lower_ret < 0 || upper_ret < 0) { TOUCH_I( "[%s] lower return = %d upper return = %d\n", __func__, lower_ret, upper_ret); TOUCH_I( "[%s][FAIL] Can not check the limit of raw cap\n", __func__); ret = snprintf(buf + ret, PAGE_SIZE - ret, "Can not check the limit of raw cap\n"); } else { TOUCH_I( "[%s] lower return = %d upper return = %d\n", __func__, lower_ret, upper_ret); TOUCH_I( "[%s][SUCCESS] Can check the limit of raw cap\n", __func__); if (is_product(ts, "PLG446", 6) || is_product(ts, "PLG349", 6)) { TOUCH_I( "Display Rawdata Start PLG446\n"); ret = F54Test('a', 1, buf); } else if (is_product(ts, "PLG468", 6)) { TOUCH_I( "Display Rawdata Start PLG468\n"); ret = F54Test('q', 1, buf); } else { TOUCH_I( "Unknown Model : %s\n", ts->fw_info.product_id); } } touch_enable_irq(ts->client->irq); synaptics_ts_init(ts->client); wake_unlock(&ts->touch_rawdata); mutex_unlock(&ts->pdata->thread_lock); } else { ret += snprintf(buf + ret, PAGE_SIZE - ret, "state=[suspend]. we cannot use I2C, now. 
Test Result: Fail\n"); } if (ret == 0) ret += snprintf(buf + ret, PAGE_SIZE - ret, "ERROR: full_raw_cap failed.\n"); return ret; } static ssize_t show_delta(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; if (power_state == POWER_ON || power_state == POWER_WAKE || ts->pdata->panel_id == 1) { mutex_lock(&ts->pdata->thread_lock); if (need_scan_pdt) { SCAN_PDT(); need_scan_pdt = false; } touch_disable_irq(ts->client->irq); wake_lock(&ts->touch_rawdata); if (is_product(ts, "PLG446", 6)) { ret = F54Test('m', 0, buf); } else if(is_product(ts, "PLG468", 6)){ ret = F54Test('q', 2, buf); } else { TOUCH_I("[%s] -- Not support this test\n", __func__); ret = snprintf(buf, PAGE_SIZE, "Not support this test\n"); } touch_enable_irq(ts->client->irq); wake_unlock(&ts->touch_rawdata); mutex_unlock(&ts->pdata->thread_lock); } else { ret += snprintf(buf + ret, PAGE_SIZE - ret, "state=[suspend]. we cannot use I2C, now. Test Result: Fail\n"); } if (ret == 0) ret += snprintf(buf + ret, PAGE_SIZE - ret, "ERROR: full_raw_cap failed.\n"); return ret; } static ssize_t show_chstatus(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; int high_resistance = 0; int trx2trx = 0; if (is_product(ts, "PLG468", 6)) { TOUCH_I("[%s] -- Not support this test\n", __func__); ret = snprintf(buf, PAGE_SIZE, "Not support this test\n"); return ret; } if (power_state == POWER_ON || power_state == POWER_WAKE) { mutex_lock(&ts->pdata->thread_lock); touch_disable_irq(ts->client->irq); if (need_scan_pdt) { SCAN_PDT(); need_scan_pdt = false; } high_resistance = F54Test('g', 0, buf); trx2trx = F54Test('f', 0, buf); ret = snprintf(buf, PAGE_SIZE - ret, "========RESULT=======\n"); ret += snprintf(buf + ret, PAGE_SIZE - ret, "TRex Short Report : RESULT: %s", (trx2trx > 0) ? "Pass\n" : "Fail\n"); /*High Resistance always return fail, you should see raw data.*/ ret += snprintf(buf + ret, PAGE_SIZE - ret, "High Resistance : RESULT: %s", (high_resistance > 0) ? "Pass\n" : "Fail\n"); synaptics_ts_init(ts->client); touch_enable_irq(ts->client->irq); mutex_unlock(&ts->pdata->thread_lock); } else { ret += snprintf(buf + ret, PAGE_SIZE - ret, "state=[suspend]. we cannot use I2C, now. Test Result: Fail\n"); } return ret; } static ssize_t show_abs_test(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; int abs_raw_short = 0; int abs_raw_open = 0; if (is_product(ts, "PLG468", 6)) { TOUCH_I("[%s] -- Not support this test\n", __func__); ret = snprintf(buf, PAGE_SIZE, "Not support this test\n"); return ret; } if (power_state == POWER_ON || power_state == POWER_WAKE) { mutex_lock(&ts->pdata->thread_lock); touch_disable_irq(ts->client->irq); if (need_scan_pdt) { SCAN_PDT(); need_scan_pdt = false; } abs_raw_short = F54Test('n', 1, buf); abs_raw_open = F54Test('o', 2, buf); ret = snprintf(buf, PAGE_SIZE, "========RESULT=======\n"); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Absolute Sensing Short Test : RESULT: %s", (abs_raw_short > 0) ? "Pass\n" : "Fail\n"); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Absolute Sensing Open Test : RESULT: %s", (abs_raw_open > 0) ? "Pass\n" : "Fail\n"); synaptics_ts_init(ts->client); touch_enable_irq(ts->client->irq); mutex_unlock(&ts->pdata->thread_lock); } else { ret += snprintf(buf + ret, PAGE_SIZE - ret, "state=[suspend]. we cannot use I2C, now. 
Test Result: Fail\n"); } return ret; } static ssize_t show_sensor_speed_test(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; int sensor_speed = 0; int lower_ret = 0; int upper_ret = 0; if (is_product(ts, "PLG468", 6)) { TOUCH_I("[%s] -- Not support this test\n", __func__); ret = snprintf(buf, PAGE_SIZE, "Not support this test\n"); return ret; } if (power_state == POWER_ON || power_state == POWER_WAKE) { mutex_lock(&ts->pdata->thread_lock); touch_disable_irq(ts->client->irq); if (need_scan_pdt) { SCAN_PDT(); need_scan_pdt = false; } lower_ret = get_limit(TxChannelCount, RxChannelCount, *ts->client, ts->pdata, "SensorSpeedLowerImageLimit", SensorSpeedLowerImage); upper_ret = get_limit(TxChannelCount, RxChannelCount, *ts->client, ts->pdata, "SensorSpeedUpperImageLimit", SensorSpeedUpperImage); if (lower_ret < 0 || upper_ret < 0) { TOUCH_I( "[%s] lower return = %d upper return = %d\n", __func__, lower_ret, upper_ret); TOUCH_I( "[%s][FAIL] Can not check the limit of sensor speed image\n", __func__); ret = snprintf(buf + ret, PAGE_SIZE - ret, "Can not check the limit of sensor speed image limit\n"); } else { TOUCH_I( "[%s] lower return = %d upper return = %d\n", __func__, lower_ret, upper_ret); TOUCH_I( "[%s][SUCCESS] Can check the limit of sensor speed image limit\n", __func__); sensor_speed = F54Test('c', 0, buf); } ret = snprintf(buf, PAGE_SIZE, "========RESULT=======\n"); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Sensor Speed Test : RESULT: %s", (sensor_speed > 0) ? "Pass\n" : "Fail\n"); synaptics_ts_init(ts->client); touch_enable_irq(ts->client->irq); mutex_unlock(&ts->pdata->thread_lock); } else { ret += snprintf(buf + ret, PAGE_SIZE - ret, "state=[suspend]. we cannot use I2C, now. Test Result: Fail\n"); } return ret; } static ssize_t show_adc_range_test(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; int adc_range = 0; int lower_ret = 0; int upper_ret = 0; if (is_product(ts, "PLG468", 6)) { TOUCH_I("[%s] -- Not support this test\n", __func__); ret = snprintf(buf, PAGE_SIZE, "Not support this test\n"); return ret; } if (power_state == POWER_ON || power_state == POWER_WAKE) { mutex_lock(&ts->pdata->thread_lock); touch_disable_irq(ts->client->irq); if (need_scan_pdt) { SCAN_PDT(); need_scan_pdt = false; } lower_ret = get_limit(TxChannelCount, RxChannelCount, *ts->client, ts->pdata, "ADCLowerImageLimit", ADCLowerImage); upper_ret = get_limit(TxChannelCount, RxChannelCount, *ts->client, ts->pdata, "ADCUpperImageLimit", ADCUpperImage); if (lower_ret < 0 || upper_ret < 0) { TOUCH_I( "[%s] lower return = %d upper return = %d\n", __func__, lower_ret, upper_ret); TOUCH_I( "[%s][FAIL] Can not check the limit of ADC image\n", __func__); ret = snprintf(buf + ret, PAGE_SIZE - ret, "Can not check the limit of ADC image limit\n"); } else { TOUCH_I( "[%s] lower return = %d upper return = %d\n", __func__, lower_ret, upper_ret); TOUCH_I( "[%s][SUCCESS] Can check the limit of ADC image limit\n", __func__); adc_range = F54Test('b', 0, buf); } ret = snprintf(buf, PAGE_SIZE, "========RESULT=======\n"); ret += snprintf(buf + ret, PAGE_SIZE - ret, "ADC Range Test : RESULT: %s", (adc_range > 0) ? "Pass\n" : "Fail\n"); synaptics_ts_init(ts->client); touch_enable_irq(ts->client->irq); mutex_unlock(&ts->pdata->thread_lock); } else { ret += snprintf(buf + ret, PAGE_SIZE - ret, "state=[suspend]. we cannot use I2C, now. 
Test Result: Fail\n"); } return ret; } static ssize_t show_gnd_test(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; int gnd = 0; if (is_product(ts, "PLG468", 6)) { TOUCH_I("[%s] -- Not support this test\n", __func__); ret = snprintf(buf, PAGE_SIZE, "Not support this test\n"); return ret; } if (power_state == POWER_ON || power_state == POWER_WAKE) { mutex_lock(&ts->pdata->thread_lock); if (need_scan_pdt) { SCAN_PDT(); need_scan_pdt = false; } touch_disable_irq(ts->client->irq); gnd = F54Test('y', 0, buf); synaptics_ts_init(ts->client); touch_enable_irq(ts->client->irq); mutex_unlock(&ts->pdata->thread_lock); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Gnd Test : RESULT: %s", (gnd > 0) ? "Pass\n" : "Fail\n"); } else { ret += snprintf(buf + ret, PAGE_SIZE - ret, "state=[suspend]. we cannot use I2C, now. Test Result: Fail\n"); } return ret; } /* * show_atcmd_fw_ver * * show only firmware version. * It will be used for AT-COMMAND */ static ssize_t show_atcmd_fw_ver(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; if (ts->fw_info.version[0] > 0x50) ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", ts->fw_info.version); else ret = snprintf(buf, PAGE_SIZE - ret, "V%d.%02d (0x%X/0x%X/0x%X/0x%X)\n", (ts->fw_info.version[3] & 0x80 ? 1 : 0), ts->fw_info.version[3] & 0x7F, ts->fw_info.version[0], ts->fw_info.version[1], ts->fw_info.version[2], ts->fw_info.version[3]); return ret; } static ssize_t store_tci(struct i2c_client *client, const char *buf, size_t count) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); u32 type = 0, value = 0; if (sscanf(buf, "%d %d", &type, &value) <= 0) return count; mutex_lock(&ts->pdata->thread_lock); tci_control(ts, type, (u8)value); mutex_unlock(&ts->pdata->thread_lock); return count; } static ssize_t show_tci(struct i2c_client *client, char *buf) { int ret = 0; u8 buffer[5] = {0}; struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); mutex_lock(&ts->pdata->thread_lock); touch_i2c_read(client, ts->f12_reg.ctrl[20], 3, buffer); ret += snprintf(buf + ret, PAGE_SIZE - ret, "report_mode [%s]\n", (buffer[2] & 0x3) == 0x2 ? "WAKEUP_ONLY" : "NORMAL"); touch_i2c_read(client, ts->f12_reg.ctrl[27], 1, buffer); ret += snprintf(buf + ret, PAGE_SIZE - ret, "wakeup_gesture [%d]\n", buffer[0]); synaptics_ts_page_data_read(client, LPWG_PAGE, ts->f51_reg.lpwg_tapcount_reg, 5, buffer); ret += snprintf(buf + ret, PAGE_SIZE - ret, "TCI [%s]\n", (buffer[0] & 0x1) == 1 ? 
"enabled" : "disabled"); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Tap Count [%d]\n", (buffer[0] & 0xf8) >> 3); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Min InterTap [%d]\n", buffer[1]); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Max InterTap [%d]\n", buffer[2]); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Touch Slop [%d]\n", buffer[3]); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Tap Distance [%d]\n", buffer[4]); mutex_unlock(&ts->pdata->thread_lock); return ret; } static ssize_t store_reg_ctrl(struct i2c_client *client, const char *buf, size_t count) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); u8 buffer[50] = {0}; char command[6] = {0}; int page = 0; int reg = 0; int offset = 0; int value = 0; if (sscanf(buf, "%s %d %d %d %d ", command, &page, &reg, &offset, &value) <= 0) return count; mutex_lock(&ts->pdata->thread_lock); if (!strcmp(command, "write")) { synaptics_ts_page_data_read(client, page, reg, offset+1, buffer); buffer[offset] = (u8)value; synaptics_ts_page_data_write(client, page, reg, offset+1, buffer); } else if (!strcmp(command, "read")) { synaptics_ts_page_data_read(client, page, reg, offset+1, buffer); TOUCH_D(DEBUG_BASE_INFO, "page[%d] reg[%d] offset[%d] = 0x%x\n", page, reg, offset, buffer[offset]); } else { TOUCH_D(DEBUG_BASE_INFO, "Usage\n"); TOUCH_D(DEBUG_BASE_INFO, "Write page reg offset value\n"); TOUCH_D(DEBUG_BASE_INFO, "Read page reg offset\n"); } mutex_unlock(&ts->pdata->thread_lock); return count; } static ssize_t show_object_report(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; u8 object_report_enable_reg_addr = 0; u8 object_report_enable_reg = 0; object_report_enable_reg_addr = ts->f12_reg.ctrl[23]; mutex_lock(&ts->pdata->thread_lock); ret = touch_i2c_read(client, object_report_enable_reg_addr, sizeof(object_report_enable_reg), &object_report_enable_reg); mutex_unlock(&ts->pdata->thread_lock); if (ret < 0) { ret = snprintf(buf, PAGE_SIZE, "%s: Failed to read object_report_enable register\n", __func__); } else { u8 temp[8]; int i; for (i = 0; i < 8; i++) temp[i] = (object_report_enable_reg >> i) & 0x01; ret = snprintf(buf, PAGE_SIZE, "\n============ read object_report_enable register ============\n"); ret += snprintf(buf + ret, PAGE_SIZE - ret, " Addr Bit7 Bit6 Bit5 Bit4 Bit3 Bit2 Bit1 Bit0 HEX\n"); ret += snprintf(buf + ret, PAGE_SIZE - ret, "------------------------------------------------------------\n"); ret += snprintf(buf + ret, PAGE_SIZE - ret, " 0x%02X %d %d %d %d %d %d %d %d 0x%02X\n", object_report_enable_reg_addr, temp[7], temp[6], temp[5], temp[4], temp[3], temp[2], temp[1], temp[0], object_report_enable_reg); ret += snprintf(buf + ret, PAGE_SIZE - ret, "------------------------------------------------------------\n"); ret += snprintf(buf + ret, PAGE_SIZE - ret, " Bit0 : [F]inger -> %s\n", temp[0] ? "Enable" : "Disable"); ret += snprintf(buf + ret, PAGE_SIZE - ret, " Bit1 : [S]tylus -> %s\n", temp[1] ? "Enable" : "Disable"); ret += snprintf(buf + ret, PAGE_SIZE - ret, " Bit2 : [P]alm -> %s\n", temp[2] ? "Enable" : "Disable"); ret += snprintf(buf + ret, PAGE_SIZE - ret, " Bit3 : [U]nclassified Object -> %s\n", temp[3] ? "Enable" : "Disable"); ret += snprintf(buf + ret, PAGE_SIZE - ret, " Bit4 : [H]overing Finger -> %s\n", temp[4] ? "Enable" : "Disable"); ret += snprintf(buf + ret, PAGE_SIZE - ret, " Bit5 : [G]loved Finger -> %s\n", temp[5] ? 
"Enable" : "Disable"); ret += snprintf(buf + ret, PAGE_SIZE - ret, " Bit6 : [N]arrow Object Swipe -> %s\n", temp[6] ? "Enable" : "Disable"); ret += snprintf(buf + ret, PAGE_SIZE - ret, " Bit7 : Hand[E]dge -> %s\n", temp[7] ? "Enable" : "Disable"); ret += snprintf(buf + ret, PAGE_SIZE - ret, "============================================================\n\n"); } return ret; } static ssize_t store_object_report(struct i2c_client *client, const char *buf, size_t count) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret; char select[16]; u8 value = 2; int select_cnt; int i; u8 bit_select = 0; u8 object_report_enable_reg_addr = 0; u8 object_report_enable_reg_old = 0; u8 object_report_enable_reg_new = 0; u8 old[8]; u8 new[8]; if (sscanf(buf, "%s %hhu", select, &value) <= 0) return count; if ((strlen(select) > 8) || (value > 1)) { TOUCH_I( "<writing object_report guide>\n"); TOUCH_I( "echo [select] [value] > object_report\n"); TOUCH_I( "select: [F]inger, [S]tylus, [P]alm, [U]nclassified Object, [H]overing Finger, [G]loved Finger, [N]arrow Object Swipe, Hand[E]dge\n"); TOUCH_I( "select length: 1~8, value: 0~1\n"); TOUCH_I( "ex) echo F 1 > object_report (enable [F]inger)\n"); TOUCH_I( "ex) echo s 1 > object_report (enable [S]tylus)\n"); TOUCH_I( "ex) echo P 0 > object_report (disable [P]alm)\n"); TOUCH_I( "ex) echo u 0 > object_report (disable [U]nclassified Object)\n"); TOUCH_I( "ex) echo HgNe 1 > object_report (enable [H]overing Finger, [G]loved Finger, [N]arrow Object Swipe, Hand[E]dge)\n"); TOUCH_I( "ex) echo eNGh 1 > object_report (enable Hand[E]dge, [N]arrow Object Swipe, [G]loved Finger, [H]overing Finger)\n"); TOUCH_I( "ex) echo uPsF 0 > object_report (disable [U]nclassified Object, [P]alm, [S]tylus, [F]inger)\n"); TOUCH_I( "ex) echo HguP 0 > object_report (disable [H]overing Finger, [G]loved Finger, [U]nclassified Object, [P]alm)\n"); TOUCH_I( "ex) echo HFnuPSfe 1 > object_report (enable all object)\n"); TOUCH_I( "ex) echo enghupsf 0 > object_report (disbale all object)\n"); } else { select_cnt = strlen(select); for (i = 0; i < select_cnt; i++) { switch ((char)(*(select + i))) { case 'F': case 'f': bit_select |= (0x01 << 0); break; /* Bit0 : (F)inger*/ case 'S': case 's': bit_select |= (0x01 << 1); break; /* Bit1 : (S)tylus*/ case 'P': case 'p': bit_select |= (0x01 << 2); break; /* Bit2 : (P)alm*/ case 'U': case 'u': bit_select |= (0x01 << 3); break; /* Bit3 : (U)nclassified Object*/ case 'H': case 'h': bit_select |= (0x01 << 4); break; /* Bit4 : (H)overing Finger*/ case 'G': case 'g': bit_select |= (0x01 << 5); break; /* Bit5 : (G)loved Finger*/ case 'N': case 'n': bit_select |= (0x01 << 6); break; /* Bit6 : (N)arrow Object Swipe*/ case 'E': case 'e': bit_select |= (0x01 << 7); break; /* Bit7 : Hand(E)dge*/ default: break; } } object_report_enable_reg_addr = ts->f12_reg.ctrl[23]; mutex_lock(&ts->pdata->thread_lock); ret = touch_i2c_read(client, object_report_enable_reg_addr, sizeof(object_report_enable_reg_old), &object_report_enable_reg_old); if (ret < 0) { TOUCH_E( "Failed to read object_report_enable_reg old value\n"); mutex_unlock(&ts->pdata->thread_lock); return count; } object_report_enable_reg_new = object_report_enable_reg_old; if (value > 0) object_report_enable_reg_new |= bit_select; else object_report_enable_reg_new &= ~(bit_select); ret = touch_i2c_write_byte(client, object_report_enable_reg_addr, object_report_enable_reg_new); if (ret < 0) { TOUCH_E( "Failed to write object_report_enable_reg new value\n"); 
mutex_unlock(&ts->pdata->thread_lock); return count; } ret = touch_i2c_read(client, object_report_enable_reg_addr, sizeof(object_report_enable_reg_new), &object_report_enable_reg_new); mutex_unlock(&ts->pdata->thread_lock); if (ret < 0) { TOUCH_E( "Failed to read object_report_enable_reg new value\n"); return count; } for (i = 0; i < 8; i++) { old[i] = (object_report_enable_reg_old >> i) & 0x01; new[i] = (object_report_enable_reg_new >> i) & 0x01; } TOUCH_I( "======= write object_report_enable register (before) =======\n"); TOUCH_I( " Addr Bit7 Bit6 Bit5 Bit4 Bit3 Bit2 Bit1 Bit0 HEX\n"); TOUCH_I( "------------------------------------------------------------\n"); TOUCH_I( " 0x%02X %d %d %d %d %d %d %d %d 0x%02X\n", object_report_enable_reg_addr, old[7], old[6], old[5], old[4], old[3], old[2], old[1], old[0], object_report_enable_reg_old); TOUCH_I( "============================================================\n"); TOUCH_I( "======= write object_report_enable register (after) ========\n"); TOUCH_I( " Addr Bit7 Bit6 Bit5 Bit4 Bit3 Bit2 Bit1 Bit0 HEX\n"); TOUCH_I( "------------------------------------------------------------\n"); TOUCH_I( " 0x%02X %d %d %d %d %d %d %d %d 0x%02X\n", object_report_enable_reg_addr, new[7], new[6], new[5], new[4], new[3], new[2], new[1], new[0], object_report_enable_reg_new); TOUCH_I( "------------------------------------------------------------\n"); TOUCH_I( " Bit0 : [F]inger -> %s\n", new[0] ? "Enable" : "Disable"); TOUCH_I( " Bit1 : [S]tylus -> %s\n", new[1] ? "Enable" : "Disable"); TOUCH_I( " Bit2 : [P]alm -> %s\n", new[2] ? "Enable" : "Disable"); TOUCH_I( " Bit3 : [U]nclassified Object -> %s\n", new[3] ? "Enable" : "Disable"); TOUCH_I( " Bit4 : [H]overing Finger -> %s\n", new[4] ? "Enable" : "Disable"); TOUCH_I( " Bit5 : [G]loved Finger -> %s\n", new[5] ? "Enable" : "Disable"); TOUCH_I( " Bit6 : [N]arrow Object Swipe -> %s\n", new[6] ? "Enable" : "Disable"); TOUCH_I( " Bit7 : Hand[E]dge -> %s\n", new[7] ? "Enable" : "Disable"); TOUCH_I( "============================================================\n"); } return count; } static ssize_t store_boot_mode(struct i2c_client *client, const char *buf, size_t count) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); u8 buffer[1] = {0}; if (sscanf(buf, "%d", &boot_mode) <= 0) return count; mutex_lock(&ts->pdata->thread_lock); switch (boot_mode) { case CHARGERLOGO_MODE: TOUCH_I("%s: Charger mode!!! Disable irq\n", __func__); if (is_product(ts, "PLG446", 6)) { DO_SAFE(synaptics_ts_page_data_read(client, ANALOG_PAGE, 0x54, 1, buffer), error); buffer[0] = 0x00; DO_SAFE(synaptics_ts_page_data_write(client, ANALOG_PAGE, 0x54, 1, buffer), error); DO_SAFE(synaptics_ts_page_data_read(client, ANALOG_PAGE, 0x54, 1, buffer), error); if (!buffer[0]) { TOUCH_I( "%s: DDIC Control bit cleared.\n", __func__); } sleep_control(ts, 0, 1); } if (is_product(ts, "PLG349", 6)) sleep_control(ts, 0, 1); break; case NORMAL_BOOT_MODE: TOUCH_I("%s: Normal boot mode!!! 
Enable irq\n", __func__); if (is_product(ts, "PLG446", 6)) { DO_SAFE(synaptics_ts_page_data_read(client, ANALOG_PAGE, 0x54, 1, buffer), error); buffer[0] = 0x01; DO_SAFE(synaptics_ts_page_data_write(client, ANALOG_PAGE, 0x54, 1, buffer), error); DO_SAFE(synaptics_ts_page_data_read(client, ANALOG_PAGE, 0x54, 1, buffer), error); if (buffer[0]) { TOUCH_I( "%s: DDIC Control bit set 1 again.\n", __func__); } } sleep_control(ts, 1, 1); break; default: break; } mutex_unlock(&ts->pdata->thread_lock); return count; error: mutex_unlock(&ts->pdata->thread_lock); TOUCH_E("%s, failed DDIC Control\n", __func__); return count; } static ssize_t store_sensing_test(struct i2c_client *client, const char *buf, size_t count) { int value = 0; if (sscanf(buf, "%d", &value) <= 0) return count; is_sensing = value; TOUCH_I("is_sensing:%d\n", is_sensing); return count; } static ssize_t show_noise_delta_test(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; int noise_delta = 0; if (is_product(ts, "PLG468", 6)) { TOUCH_I("[%s] -- Not support this test\n", __func__); ret = snprintf(buf, PAGE_SIZE, "Not support this test\n"); return ret; } if (power_state == POWER_ON || power_state == POWER_WAKE) { mutex_lock(&ts->pdata->thread_lock); if (need_scan_pdt) { SCAN_PDT(); need_scan_pdt = false; } touch_disable_irq(ts->client->irq); noise_delta = F54Test('x', 0, buf); touch_enable_irq(ts->client->irq); synaptics_ts_init(ts->client); mutex_unlock(&ts->pdata->thread_lock); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Noise Delta Test : RESULT: %s", (noise_delta > 0) ? "Pass\n" : "Fail\n"); } else { ret += snprintf(buf + ret, PAGE_SIZE - ret, "state=[suspend]. we cannot use I2C, now. Test Result: Fail\n"); } return ret; } static ssize_t show_ts_noise(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; ret += snprintf(buf + ret, PAGE_SIZE - ret, "Test Count : %u\n", cnt); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Current Noise State : %d\n", cns_aver); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Interference Metric : %d\n", im_aver); if (!ts->pdata->panel_id) { ret += snprintf(buf + ret, PAGE_SIZE - ret, "CID IM : %d\n", cid_im_aver); } ret += snprintf(buf + ret, PAGE_SIZE - ret, "Freq Scan IM : %d\n", freq_scan_im_aver); return ret; } static ssize_t store_ts_noise(struct i2c_client *client, const char *buf, size_t count) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int value; if (sscanf(buf, "%d", &value) <= 0) return count; if ((ts->ts_state_flag.check_noise_menu == MENU_OUT) && (value == MENU_ENTER)) { ts->ts_state_flag.check_noise_menu = MENU_ENTER; } else if ((ts->ts_state_flag.check_noise_menu == MENU_ENTER) && (value == MENU_OUT)) { ts->ts_state_flag.check_noise_menu = MENU_OUT; } else { TOUCH_I("Already entered Check Noise menu .\n"); TOUCH_I("check_noise_menu:%d, value:%d\n", ts->ts_state_flag.check_noise_menu, value); return count; } TOUCH_I("Check Noise = %s\n", (ts->ts_state_flag.check_noise_menu == MENU_OUT) ? "MENU_OUT" : "MENU_ENTER"); TOUCH_I("TA state = %s\n", (touch_ta_status) ? 
"TA_CONNECTED" : "TA_DISCONNECTED"); return count; } static ssize_t show_ts_noise_log_enable(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; ret += snprintf(buf + ret, PAGE_SIZE - ret, "%d\n", ts->ts_state_flag.ts_noise_log_flag); TOUCH_I("ts noise log flag = %s\n", (ts->ts_state_flag.ts_noise_log_flag == TS_NOISE_LOG_DISABLE) ? "TS_NOISE_LOG_DISABLE" : "TS_NOISE_LOG_ENABLE"); return ret; } static ssize_t store_ts_noise_log_enable(struct i2c_client *client, const char *buf, size_t count) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int value; if (sscanf(buf, "%d", &value) <= 0) return count; if ((ts->ts_state_flag.ts_noise_log_flag == TS_NOISE_LOG_DISABLE) && (value == TS_NOISE_LOG_ENABLE)) { ts->ts_state_flag.ts_noise_log_flag = TS_NOISE_LOG_ENABLE; } else if ((ts->ts_state_flag.ts_noise_log_flag == TS_NOISE_LOG_ENABLE) && (value == TS_NOISE_LOG_DISABLE)) { ts->ts_state_flag.ts_noise_log_flag = TS_NOISE_LOG_DISABLE; } else { TOUCH_I("Already Enable TS Noise Log.\n"); TOUCH_I("ts_noise_log_flag:%d, value:%d\n", ts->ts_state_flag.ts_noise_log_flag, value); return count; } TOUCH_I("ts noise log flag = %s\n", (ts->ts_state_flag.ts_noise_log_flag == TS_NOISE_LOG_DISABLE) ? "TS_NOISE_LOG_DISABLE" : "TS_NOISE_LOG_ENABLE"); TOUCH_I("TA state = %s\n", (touch_ta_status) ? "TA_CONNECTED" : "TA_DISCONNECTED"); return count; } static ssize_t show_diff_node(struct i2c_client *client, char *buf) { int ret = 0; ret = snprintf(buf, PAGE_SIZE, "show_diff_node: %d\n", ref_chk_enable); return ret; } /* test code for operating ref chk code */ static ssize_t store_diff_node(struct i2c_client *client, const char *buf, size_t count) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; if (sscanf(buf, "%d", &ref_chk_enable) <= 0) return count; mutex_lock(&ts->pdata->thread_lock); if (synaptics_ts_ic_ctrl(ts->client, IC_CTRL_BASELINE_REBASE, FORCE_CAL, &ret) < 0) TOUCH_E("IC_CTRL_REBASE handling fail\n"); mutex_unlock(&ts->pdata->thread_lock); return count; } /* code for operating sp mirroring */ static ssize_t show_sp_link_touch_off(struct i2c_client *client, char *buf) { int ret = 0; ret = snprintf(buf, PAGE_SIZE, "sp link touch status %d\n", sp_link_touch); return ret; } /* code for operating sp mirroing */ static ssize_t store_sp_link_touch_off(struct i2c_client *client, const char *buf, size_t count) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); if (sscanf(buf, "%d", &sp_link_touch) <= 0) { TOUCH_E("Invalid value\n"); return count; } TOUCH_D(DEBUG_BASE_INFO, "sp link touch off : %d\n", sp_link_touch); if (sp_link_touch) { if (is_product(ts, "PLG446", 6) || is_product(ts, "PLG468", 6)) { touch_disable_irq(ts->client->irq); /* tci_control(ts, PARTIAL_LPWG_ON, 1); DO_SAFE(touch_i2c_write_byte(client, DEVICE_CONTROL_REG, DEVICE_CONTROL_SLEEP | DEVICE_CONTROL_CONFIGURED), error); */ } } else { if (is_product(ts, "PLG446", 6) || is_product(ts, "PLG468", 6)) { touch_enable_irq(ts->client->irq); /* tci_control(ts, PARTIAL_LPWG_ON, 0); DO_SAFE(touch_i2c_write_byte(client, DEVICE_CONTROL_REG, DEVICE_CONTROL_NORMAL_OP | DEVICE_CONTROL_CONFIGURED), error); */ } } return count; /* error: TOUCH_E("Fail to change status\n"); return count; */ } static ssize_t show_lpwg_test_info(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret 
= 0; ret = snprintf(buf + ret, PAGE_SIZE - ret, "%d\n", atomic_read(&ts->lpwg_ctrl.is_suspend)); return ret; } static ssize_t show_touch_wake_up_test(struct i2c_client *client, char *buf) { int ret = 0; ret += snprintf(buf + ret, PAGE_SIZE - ret, "%d\n", touch_wake_count); ret += snprintf(buf + ret, PAGE_SIZE - ret, "%d\n", touch_wake_test); return ret; } static ssize_t store_touch_wake_up_test(struct i2c_client *client, const char *buf, size_t count) { int cmd = 0; if (sscanf(buf, "%d", &cmd) <= 0) return -EINVAL; switch (cmd) { case 0: if (touch_wake_test) { TOUCH_I("Stop touch wake test !\n"); write_time_log(TOUCH_WAKE_COUNTER_LOG_PATH, "Stop touch wake test !\n", 1); touch_wake_test = false; touch_wake_count = 0; } break; case 1: if (!touch_wake_test) { TOUCH_I("Start touch wake test !\n"); write_time_log(TOUCH_WAKE_COUNTER_LOG_PATH, "Start touch wake test !\n", 1); touch_wake_test = true; } break; case 2: TOUCH_I("Reset touch wake count !\n"); touch_wake_count = 0; break; default: TOUCH_I("else case.\n"); } return count; } static ssize_t show_pen_support(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; int pen_support = 0; /* 1: Support, 0: Not support */ pen_support = GET_OBJECT_REPORT_INFO(ts->object_report, OBJECT_STYLUS_BIT); ret = snprintf(buf, PAGE_SIZE, "%d\n", pen_support); return ret; } static ssize_t show_palm_ctrl_mode(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; ret = snprintf(buf, PAGE_SIZE, "%u\n", ts->pdata->role->palm_ctrl_mode); return ret; } static ssize_t store_palm_ctrl_mode(struct i2c_client *client, const char *buf, size_t count) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int value; if (sscanf(buf, "%d", &value) <= 0) return count; if (value < PALM_REJECT_FW || value > PALM_REPORT) { TOUCH_I( "Invalid palm_ctrl_mode:%d (palm_ctrl_mode -> PALM_REJECT_FW)\n", value); value = PALM_REJECT_FW; } ts->pdata->role->palm_ctrl_mode = value; TOUCH_I("palm_ctrl_mode:%u\n", ts->pdata->role->palm_ctrl_mode); return count; } static ssize_t show_use_hover_finger(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; ret = snprintf(buf, PAGE_SIZE, "%u\n", ts->pdata->role->use_hover_finger); return ret; } static ssize_t store_use_hover_finger(struct i2c_client *client, const char *buf, size_t count) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int value; if (sscanf(buf, "%d", &value) <= 0) return count; if (value < 0 || value > 1) { TOUCH_I("Invalid use_hover_finger value:%d\n", value); return count; } ts->pdata->role->use_hover_finger = value; TOUCH_I("use_hover_finger:%u\n", ts->pdata->role->use_hover_finger); return count; } static ssize_t show_use_rmi_dev(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; ret = snprintf(buf, PAGE_SIZE, "%u\n", ts->pdata->role->use_rmi_dev); return ret; } static ssize_t store_use_rmi_dev(struct i2c_client *client, const char *buf, size_t count) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int value; if (sscanf(buf, "%d", &value) <= 0) return count; if (value < 0 || value > 1) { TOUCH_I("Invalid use_rmi_dev value:%d\n", value); return count; } ts->pdata->role->use_rmi_dev = 
value; TOUCH_I("use_rmi_dev:%u\n", ts->pdata->role->use_rmi_dev); return count; } static ssize_t show_status_normal_calibration(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; u8 crc_buffer = 0; u8 start_buffer = 0; u8 calibration_status = 0; u8 crc_status = 0; if (ts->pdata->panel_id != 1) { TOUCH_I("Panel id : %d, Not supproted f/w calibration\n", ts->pdata->panel_id); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Not supproted f/w calibration\n"); return ret; } mutex_lock(&ts->pdata->thread_lock); if (power_state == POWER_ON || power_state == POWER_WAKE) { ret = synaptics_ts_page_data_read(client, ANALOG_PAGE, CALIBRATION_STATUS_REG, 1, &crc_buffer); if (ret < 0) { TOUCH_E("Failed to read calibration_status_reg\n"); goto error; } ret = synaptics_ts_page_data_read(client, ANALOG_PAGE, CALIBRATION_FLAGS_REG, 1, &start_buffer); if (ret < 0) { TOUCH_E("Failed to read calibration start reg\n"); goto error; } TOUCH_I( "[%s] start_buffer = 0x%02x, crc_buffer = 0x%02x\n", __func__, start_buffer, crc_buffer); calibration_status = (start_buffer & 0x01); crc_status = (crc_buffer & 0x02) >> 1; if (calibration_status == 0) { ret += snprintf(buf + ret, PAGE_SIZE - ret, "calibration_status = %d\n", calibration_status); if (!crc_status) { TOUCH_I( "Checksum of calibration values is good, Calibration is not going on\n"); ret += snprintf(buf + ret, PAGE_SIZE - ret, "CRC_status = %d\n", crc_status); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Checksum of calibration values is good.\n"); } else { TOUCH_E( "Checksum of calibration values is bad.\n"); ret += snprintf(buf + ret, PAGE_SIZE - ret, "CRC_status = %d\n", crc_status); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Checksum of calibration values is bad.\n"); } } else if (calibration_status == 1) { TOUCH_I("Calibration is in progress\n"); ret += snprintf(buf + ret, PAGE_SIZE - ret, "calibration_status = %d\n", calibration_status); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Calibration is in progress\n"); } else { TOUCH_E("Invalidated to calibration_status\n"); ret += snprintf(buf + ret, PAGE_SIZE - ret, "calibration_status = %d\n", calibration_status); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Invalidated to calibration_status\n"); } } else { TOUCH_E( "state is suspend, Failed to read register because cannot use I2C\n"); ret += snprintf(buf + ret, PAGE_SIZE - ret, "state is suspend, Failed to read register because cannot use I2C\n"); } mutex_unlock(&ts->pdata->thread_lock); return ret; error: mutex_unlock(&ts->pdata->thread_lock); return ret; } static ssize_t show_normal_calibration(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; int cal_exist = 0; u8 buffer = 0; u8 calibration_on = 0x01; if (ts->pdata->panel_id != 1) { TOUCH_I("Panel id : %d, Not supproted f/w calibration\n", ts->pdata->panel_id); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Not supproted f/w calibration\n"); return ret; } mutex_lock(&ts->pdata->thread_lock); cal_exist = check_cal_magic_key(); if (cal_exist < 0) { TOUCH_I("[%s] In Cal MAGIC Key Not Exist\n", __func__); mutex_unlock(&ts->pdata->thread_lock); return ret; } if (power_state == POWER_ON || power_state == POWER_WAKE) { ret = synaptics_ts_page_data_read(client, ANALOG_PAGE, CALIBRATION_FLAGS_REG, 1, &buffer); if (ret < 0) { TOUCH_E("Failed to read calibration_flag_reg\n"); goto error; } TOUCH_I("[%s] buffer = 0x%02x\n", __func__, buffer); if 
((buffer & calibration_on)) { TOUCH_E("Now Running Calibration....\n"); goto error; } buffer = buffer | calibration_on; ret = synaptics_ts_page_data_write_byte(client, ANALOG_PAGE, CALIBRATION_FLAGS_REG, buffer); if (ret < 0) { TOUCH_E("Failed to write calibration_flag_reg value\n"); goto error; } TOUCH_I( "Start Normal Calibration\n"); } else { TOUCH_E( "state is suspend, Failed to Calibration because cannot use I2C\n"); } mutex_unlock(&ts->pdata->thread_lock); return ret; error: mutex_unlock(&ts->pdata->thread_lock); return ret; } static ssize_t show_status_lpwg_calibration(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; u8 crc_buffer = 0; u8 start_buffer = 0; u8 calibration_status = 0; u8 crc_status = 0; if (ts->pdata->panel_id != 1) { TOUCH_I( "Panel id : %d, Not supproted f/w calibration\n", ts->pdata->panel_id); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Not supproted f/w calibration\n"); return ret; } mutex_lock(&ts->pdata->thread_lock); ret = synaptics_ts_page_data_read(client, ANALOG_PAGE, CALIBRATION_STATUS_REG, 1, &crc_buffer); if (ret < 0) { TOUCH_E("Failed to read calibration_status_reg\n"); goto error; } ret = synaptics_ts_page_data_read(client, ANALOG_PAGE, CALIBRATION_FLAGS_REG, 1, &start_buffer); if (ret < 0) { TOUCH_E("Failed to read calibration_start_reg\n"); goto error; } TOUCH_I( "[%s] start_buffer = 0x%02x, crc_buffer = 0x%02x\n", __func__, start_buffer, crc_buffer); calibration_status = (start_buffer & 0x02) >> 1; crc_status = (crc_buffer & 0x01); if (calibration_status == 0) { ret += snprintf(buf + ret, PAGE_SIZE - ret, "calibration_status = %d\n", calibration_status); if (!crc_status) { TOUCH_I( "Checksum of calibration values is good, Calibration is not going on\n"); ret += snprintf(buf + ret, PAGE_SIZE - ret, "CRC_status = %d\n", crc_status); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Checksum of calibration values is good\n"); } else { TOUCH_E( "Checksum of calibration values is bad, Retry calibration\n"); ret += snprintf(buf + ret, PAGE_SIZE - ret, "CRC_status = %d\n", crc_status); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Checksum of calibration values is bad, Retry calibration\n"); } } else if (calibration_status == 1) { TOUCH_I("Calibration is in progress\n"); ret += snprintf(buf + ret, PAGE_SIZE - ret, "calibration_status = %d\n", calibration_status); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Calibration is in progress\n"); } else { TOUCH_E("Invalidated to calibration_status\n"); ret += snprintf(buf + ret, PAGE_SIZE - ret, "calibration_status = %d\n", calibration_status); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Invalidated to calibration_status\n"); } mutex_unlock(&ts->pdata->thread_lock); return ret; error: mutex_unlock(&ts->pdata->thread_lock); return ret; } static ssize_t show_lpwg_calibration(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; int cal_exist = 0; u8 buffer = 0; u8 calibration_on = 0x02; if (ts->pdata->panel_id != 1) { TOUCH_I("Panel id : %d, Not supproted f/w calibration\n", ts->pdata->panel_id); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Not supproted f/w calibration\n"); return ret; } mutex_lock(&ts->pdata->thread_lock); cal_exist = check_cal_magic_key(); if (cal_exist < 0) { TOUCH_I("[%s] In Cal MAGIC Key Not Exist\n", __func__); mutex_unlock(&ts->pdata->thread_lock); return ret; } if (power_state == POWER_SLEEP) { ret = 
static ssize_t show_lpwg_calibration(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; int cal_exist = 0; u8 buffer = 0; u8 calibration_on = 0x02; if (ts->pdata->panel_id != 1) { TOUCH_I("Panel id : %d, Not supported f/w calibration\n", ts->pdata->panel_id); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Not supported f/w calibration\n"); return ret; } mutex_lock(&ts->pdata->thread_lock); cal_exist = check_cal_magic_key(); if (cal_exist < 0) { TOUCH_I("[%s] Cal MAGIC Key does not exist\n", __func__); mutex_unlock(&ts->pdata->thread_lock); return ret; } if (power_state == POWER_SLEEP) { ret = synaptics_ts_page_data_read(client, ANALOG_PAGE, CALIBRATION_FLAGS_REG, 1, &buffer); if (ret < 0) { TOUCH_E("Failed to read calibration_flag_reg\n"); goto error; } TOUCH_I("[%s] buffer = 0x%02x\n", __func__, buffer); if ((buffer & calibration_on)) { TOUCH_E("Now Running Calibration....\n"); goto error; } buffer = buffer | calibration_on; ret = synaptics_ts_page_data_write_byte(client, ANALOG_PAGE, CALIBRATION_FLAGS_REG, buffer); if (ret < 0) { TOUCH_E("Failed to write calibration_flag_reg value\n"); goto error; } TOUCH_I("Start LPWG Calibration\n"); } else { TOUCH_E( "state is not suspend, Failed to calibrate because cannot use I2C\n"); } mutex_unlock(&ts->pdata->thread_lock); return ret; error: mutex_unlock(&ts->pdata->thread_lock); return ret; }
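/*
 * show_get_calibration() dumps the raw calibration data through F54:
 *   Step 0. back up the current F54 report type
 *   Step 1. select report type 0x54 (calibration data)
 *   Step 2. zero the report FIFO index (LSB/MSB)
 *   Step 3. set the "get report" bit per frequency and poll until it clears
 *   Step 4. read the calibration (and ND calibration) frames and split each
 *           16-bit entry into detail/coarse/fine fields
 *   Step 5. restore the saved report type
 * The decoded tables are appended to /sdcard/touch_cal_data.txt.
 */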
static ssize_t show_get_calibration(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; int ret_size = 0; int err = 0; u8 buffer = 0; u8 old_report_type = 0x0; u8 start_get_cal_data = 0x54; u8 fifo_init = 0x00; int retry_cnt = 300; u8 cal_data[MAX_CAL_DATA_SIZE] = {0,}; u8 nd_cal_data[MAX_ND_CAL_DATA_SIZE] = {0,}; u8 *save_buf = NULL; u8 freq = 0; int i = 0; int k = 0; int line = 0; u8 *detail; u8 *coarse; u8 *fine; u8 *nd_detail; u8 *nd_coarse; u8 *nd_fine; char *f_path = "/sdcard/touch_cal_data.txt"; int cal_exist = 0; cal_exist = check_cal_magic_key(); if (cal_exist < 0) { TOUCH_I("[%s] Cal MAGIC Key does not exist\n", __func__); return ret; } if (ts->pdata->panel_id != 1) { TOUCH_I("Panel id : %d, Not supported f/w calibration\n", ts->pdata->panel_id); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Not supported f/w calibration\n"); return ret; } save_buf = kzalloc(sizeof(u8)*MAX_CAL_LOG_SIZE, GFP_KERNEL); detail = kzalloc(sizeof(u8)*MAX_DETAIL_SIZE, GFP_KERNEL); coarse = kzalloc(sizeof(u8)*MAX_COARSE_SIZE, GFP_KERNEL); fine = kzalloc(sizeof(u8)*MAX_FINE_SIZE, GFP_KERNEL); nd_detail = kzalloc(sizeof(u8)*MAX_ND_DETAIL_SIZE, GFP_KERNEL); nd_coarse = kzalloc(sizeof(u8)*MAX_ND_COARSE_SIZE, GFP_KERNEL); nd_fine = kzalloc(sizeof(u8)*MAX_ND_FINE_SIZE, GFP_KERNEL); if (!save_buf || !detail || !coarse || !fine || !nd_detail || !nd_coarse || !nd_fine) { TOUCH_E("fail to allocate memory\n"); kfree(save_buf); kfree(detail); kfree(coarse); kfree(fine); kfree(nd_detail); kfree(nd_coarse); kfree(nd_fine); return ret; } mutex_lock(&ts->pdata->thread_lock); /*Step 0. Backup Old Report Type*/ err = synaptics_ts_page_data_read(client, ANALOG_PAGE, ts->f54.dsc.data_base, 1, &buffer); if (err < 0) { TOUCH_E("Failed to read F54 Data Base\n"); goto error; } old_report_type = buffer; /*Step 1. Set 84 (0x54) to Report Type*/ err = synaptics_ts_page_data_write_byte(client, ANALOG_PAGE, ts->f54.dsc.data_base, start_get_cal_data); if (err < 0) { TOUCH_E("Failed to write F54 Data Base\n"); goto error; } /*Step 2. Set 0 to FIFO Index 1, FIFO Index 2 */ err = synaptics_ts_page_data_write_byte(client, ANALOG_PAGE, F54_FIFO_INDEX_LSB, fifo_init); if (err < 0) { TOUCH_E("Failed to write F54_FIFO_INDEX_LSB\n"); goto error; } err = synaptics_ts_page_data_write_byte(client, ANALOG_PAGE, F54_FIFO_INDEX_MSB, fifo_init); if (err < 0) { TOUCH_E("Failed to write F54_FIFO_INDEX_MSB\n"); goto error; } /*Step 3. Send Get Report Command*/ write_time_log(f_path, NULL, 0); for (freq = 0; freq < 3; freq++) { err = synaptics_ts_page_data_read(client, ANALOG_PAGE, ts->f54.dsc.command_base, 1, &buffer); if (err < 0) { TOUCH_E("Failed to read F54 CMD Base\n"); goto error; } /*For collecting lpwg calibration Data*/ if (power_state == POWER_SLEEP) freq = 3; buffer |= (freq << 2); buffer |= 0x01; err = synaptics_ts_page_data_write_byte(client, ANALOG_PAGE, ts->f54.dsc.command_base, buffer); if (err < 0) { TOUCH_E("Failed to write F54 CMD Base\n"); goto error; } /*waiting clear get report bit*/ retry_cnt = 300; do { err = synaptics_ts_page_data_read(client, ANALOG_PAGE, ts->f54.dsc.command_base, 1, &buffer); buffer = (buffer & 0x01); usleep(10000); retry_cnt--; if (retry_cnt <= 0) { TOUCH_E("Fail to Read Get Report type.\n"); goto error; } } while (buffer); memset(cal_data, 0, sizeof(cal_data)); memset(nd_cal_data, 0, sizeof(nd_cal_data)); /*Step 4. Get Cal Data*/ err = synaptics_ts_page_data_read(client, ANALOG_PAGE, F54_REPORT_DATA, MAX_CAL_DATA_SIZE, &cal_data[0]); if (err < 0) { TOUCH_E("Failed to read F54_REPORT_DATA\n"); goto error; } memset(detail, 0x0, sizeof(u8)*MAX_DETAIL_SIZE); memset(coarse, 0x0, sizeof(u8)*MAX_COARSE_SIZE); memset(fine, 0x0, sizeof(u8)*MAX_FINE_SIZE); memset(nd_detail, 0x0, sizeof(u8)*MAX_ND_DETAIL_SIZE); memset(nd_coarse, 0x0, sizeof(u8)*MAX_ND_COARSE_SIZE); memset(nd_fine, 0x0, sizeof(u8)*MAX_ND_FINE_SIZE); ret_size += snprintf(save_buf + ret_size, MAX_CAL_LOG_SIZE - ret_size, "\n ============ Calibration Data [Freq = %d] ============\n", freq); TOUCH_I("Start Get Cal Data, Freq = %d", freq); k = 0; for (i = 0; i < MAX_CAL_DATA_SIZE; i += 2) { detail[k] = cal_data[i]; coarse[k] = (cal_data[i+1] & 0xf0) >> 4; fine[k] = cal_data[i+1] & 0x0f; k++; } ret_size += snprintf(save_buf + ret_size, MAX_CAL_LOG_SIZE - ret_size, "==== detail ==\n"); line = 0; for (i = 0; i < (MAX_CAL_DATA_SIZE/2); i++) { ret_size += snprintf(save_buf + ret_size, MAX_CAL_LOG_SIZE - ret_size, "%d ", detail[i]); if (((i+1)%18 == 0) && (i != 0)) { TOUCH_I("\n"); ret_size += snprintf(save_buf + ret_size, MAX_CAL_LOG_SIZE - ret_size, " %d\n" , ++line); } } ret_size += snprintf(save_buf + ret_size, MAX_CAL_LOG_SIZE - ret_size, "\n==== coarse ==\n"); line = 0; for (i = 0; i < (MAX_CAL_DATA_SIZE/2); i++) { ret_size += snprintf(save_buf + ret_size, MAX_CAL_LOG_SIZE - ret_size, "%d ", coarse[i]); if (((i+1)%18 == 0) && (i != 0)) { TOUCH_I("\n"); ret_size += snprintf(save_buf + ret_size, MAX_CAL_LOG_SIZE - ret_size, " %d\n" , ++line); } } ret_size += snprintf(save_buf + ret_size, MAX_CAL_LOG_SIZE - ret_size, "\n==== fine ==\n"); line = 0; for (i = 0; i < (MAX_CAL_DATA_SIZE/2); i++) { ret_size += snprintf(save_buf + ret_size, MAX_CAL_LOG_SIZE - ret_size, "%d ", fine[i]); if (((i+1)%18 == 0) && (i != 0)) { TOUCH_I("\n"); ret_size += snprintf(save_buf + ret_size, MAX_CAL_LOG_SIZE - ret_size, " %d\n" , ++line); } } ret_size += snprintf(save_buf + ret_size, MAX_CAL_LOG_SIZE - ret_size, "\n");
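/*
 * Each calibration entry above (and in the ND dump below) is two bytes:
 * byte 0 is the "detail" value, byte 1 packs the "coarse" field in the
 * high nibble and the "fine" field in the low nibble (names taken from
 * the buffers above). A minimal sketch of the unpacking:
 *
 *	detail = data[i];
 *	coarse = (data[i + 1] & 0xf0) >> 4;
 *	fine   =  data[i + 1] & 0x0f;
 */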
cal_data[i+1] & 0x0f; k++; } ret_size += snprintf(save_buf + ret_size, MAX_CAL_LOG_SIZE - ret_size, "==== detail ==\n"); line = 0; for (i = 0; i < (MAX_ND_CAL_DATA_SIZE/2); i++) { ret_size += snprintf(save_buf + ret_size, MAX_CAL_LOG_SIZE - ret_size, "%d ", nd_detail[i]); if (((i+1)%2 == 0) && (i != 0)) { TOUCH_I("\n"); ret_size += snprintf(save_buf + ret_size, MAX_CAL_LOG_SIZE - ret_size, " %d\n" , ++line); } } ret_size += snprintf(save_buf + ret_size, MAX_CAL_LOG_SIZE - ret_size, "\n==== coarse ==\n"); line = 0; for (i = 0; i < (MAX_ND_CAL_DATA_SIZE/2); i++) { ret_size += snprintf(save_buf + ret_size, MAX_CAL_LOG_SIZE - ret_size, "%d ", nd_coarse[i]); if (((i+1)%2 == 0) && (i != 0)) { TOUCH_I("\n"); ret_size += snprintf(save_buf + ret_size, MAX_CAL_LOG_SIZE - ret_size, " %d\n" , ++line); } } ret_size += snprintf(save_buf + ret_size, MAX_CAL_LOG_SIZE - ret_size, "\n==== fine ==\n"); line = 0; for (i = 0; i < (MAX_ND_CAL_DATA_SIZE/2); i++) { ret_size += snprintf(save_buf + ret_size, MAX_CAL_LOG_SIZE - ret_size, "%d ", nd_fine[i]); if (((i+1)%2 == 0) && (i != 0)) { TOUCH_I("\n"); ret_size += snprintf(save_buf + ret_size, MAX_CAL_LOG_SIZE - ret_size, " %d\n" , ++line); } } ret_size += snprintf(save_buf + ret_size, MAX_CAL_LOG_SIZE - ret_size, "\n"); } ret_size += snprintf(save_buf + ret_size, MAX_CAL_LOG_SIZE - ret_size, "\n"); write_log(f_path, save_buf); /*Step 5. Restore Report Mode*/ err = synaptics_ts_page_data_write_byte(client, ANALOG_PAGE, ts->f54.dsc.data_base, old_report_type); if (err < 0) { TOUCH_E("Failed to read calibration_status_reg\n"); goto error; } mutex_unlock(&ts->pdata->thread_lock); TOUCH_I("Cal Data Extract Complete."); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Cal Data Extract Complete.\n"); kfree(save_buf); kfree(detail); kfree(coarse); kfree(fine); kfree(nd_detail); kfree(nd_coarse); kfree(nd_fine); return ret; error: TOUCH_E("Fail to get Calibration Data"); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Fail to get Calibration Data.\n"); mutex_unlock(&ts->pdata->thread_lock); kfree(save_buf); kfree(detail); kfree(coarse); kfree(fine); kfree(nd_detail); kfree(nd_coarse); kfree(nd_fine); return ret; } static ssize_t show_swipe_param(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); struct swipe_data *swp = &ts->swipe; int ret = 0; ret += snprintf(buf + ret, PAGE_SIZE - ret, "swipe_mode = 0x%02X\n", swp->swipe_mode); ret += snprintf(buf + ret, PAGE_SIZE - ret, "support_swipe = 0x%02X\n", swp->support_swipe); ret += snprintf(buf + ret, PAGE_SIZE - ret, "=================================================\n"); ret += snprintf(buf + ret, PAGE_SIZE - ret, "down.min_distance = 0x%02X (%dmm)\n", swp->down.min_distance, swp->down.min_distance); ret += snprintf(buf + ret, PAGE_SIZE - ret, "down.ratio_thres = 0x%02X (%d%%)\n", swp->down.ratio_thres, swp->down.ratio_thres); ret += snprintf(buf + ret, PAGE_SIZE - ret, "down.ratio_chk_period = 0x%02X (%dframes)\n", swp->down.ratio_chk_period, swp->down.ratio_chk_period); ret += snprintf(buf + ret, PAGE_SIZE - ret, "down.ratio_chk_min_distance = 0x%02X (%dmm)\n", swp->down.ratio_chk_min_distance, swp->down.ratio_chk_min_distance); ret += snprintf(buf + ret, PAGE_SIZE - ret, "down.min_time_thres = 0x%02X (%d0ms)\n", swp->down.min_time_thres, swp->down.min_time_thres); ret += snprintf(buf + ret, PAGE_SIZE - ret, "down.max_time_thres = 0x%02X (%d0ms)\n", swp->down.max_time_thres, swp->down.max_time_thres); ret += snprintf(buf + ret, PAGE_SIZE - ret, 
"down.active_area = x0,y0(%d,%d) x1,y1(%d,%d)\n", swp->down.active_area_x0, swp->down.active_area_y0, swp->down.active_area_x1, swp->down.active_area_y1); ret += snprintf(buf + ret, PAGE_SIZE - ret, "=================================================\n"); ret += snprintf(buf + ret, PAGE_SIZE - ret, "up.min_distance = 0x%02X (%dmm)\n", swp->up.min_distance, swp->up.min_distance); ret += snprintf(buf + ret, PAGE_SIZE - ret, "up.ratio_thres = 0x%02X (%d%%)\n", swp->up.ratio_thres, swp->up.ratio_thres); ret += snprintf(buf + ret, PAGE_SIZE - ret, "up.ratio_chk_period = 0x%02X (%dframes)\n", swp->up.ratio_chk_period, swp->up.ratio_chk_period); ret += snprintf(buf + ret, PAGE_SIZE - ret, "up.ratio_chk_min_distance = 0x%02X (%dmm)\n", swp->up.ratio_chk_min_distance, swp->up.ratio_chk_min_distance); ret += snprintf(buf + ret, PAGE_SIZE - ret, "up.min_time_thres = 0x%02X (%d0ms)\n", swp->up.min_time_thres, swp->up.min_time_thres); ret += snprintf(buf + ret, PAGE_SIZE - ret, "up.max_time_thres = 0x%02X (%d0ms)\n", swp->up.max_time_thres, swp->up.max_time_thres); ret += snprintf(buf + ret, PAGE_SIZE - ret, "up.active_area = x0,y0(%d,%d) x1,y1(%d,%d)\n", swp->up.active_area_x0, swp->up.active_area_y0, swp->up.active_area_x1, swp->up.active_area_y1); return ret; } static ssize_t store_swipe_param(struct i2c_client *client, const char *buf, size_t count) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); struct swipe_data *swp = &ts->swipe; struct swipe_ctrl_info *swpd = NULL; char direction; char select; u16 value; if (swp->support_swipe == NO_SUPPORT_SWIPE) { TOUCH_E("support_swipe:0x%02X\n", swp->support_swipe); return count; } if (sscanf(buf, "%c %c %hu", &direction, &select, &value) <= 0) return count; if (((direction != 'd') && (direction != 'u')) || (select < 'a') || (select > 'j')) { TOUCH_I("<writing swipe_param guide>\n"); TOUCH_I("echo [direction] [select] [value] > swipe_param\n"); TOUCH_I("[direction]: d(down), u(up)\n"); TOUCH_I("[select]:\n"); TOUCH_I("a(min_distance),\n"); TOUCH_I("b(ratio_thres),\n"); TOUCH_I("c(ratio_chk_period),\n"); TOUCH_I("d(ratio_chk_min_distance),\n"); TOUCH_I("e(min_time_thres),\n"); TOUCH_I("f(max_time_thres),\n"); TOUCH_I("g(active_area_x0),\n"); TOUCH_I("h(active_area_y0),\n"); TOUCH_I("i(active_area_x1),\n"); TOUCH_I("j(active_area_y1)\n"); TOUCH_I("[value]: (0x00~0xFF) or (0x00~0xFFFF)\n"); return count; } switch (direction) { case 'd': swpd = &swp->down; break; case 'u': swpd = &swp->up; break; default: TOUCH_I("unknown direction(%c)\n", direction); return count; } switch (select) { case 'a': swpd->min_distance = GET_LOW_U8_FROM_U16(value); break; case 'b': swpd->ratio_thres = GET_LOW_U8_FROM_U16(value); break; case 'c': swpd->ratio_chk_period = GET_LOW_U8_FROM_U16(value); break; case 'd': swpd->ratio_chk_min_distance = GET_LOW_U8_FROM_U16(value); break; case 'e': swpd->min_time_thres = value; break; case 'f': swpd->max_time_thres = value; break; case 'g': swpd->active_area_x0 = value; break; case 'h': swpd->active_area_y0 = value; break; case 'i': swpd->active_area_x1 = value; break; case 'j': swpd->active_area_y1 = value; break; default: break; } mutex_lock(&ts->pdata->thread_lock); swipe_enable(ts); mutex_unlock(&ts->pdata->thread_lock); return count; } static ssize_t store_swipe_mode(struct i2c_client *client, const char *buf, size_t count) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); struct swipe_data *swp = &ts->swipe; int down = 0; int up = 0; u8 mode = 0; if 
(swp->support_swipe == NO_SUPPORT_SWIPE) { TOUCH_E("support_swipe:0x%02X\n", swp->support_swipe); return count; } if (sscanf(buf, "%d %d", &down, &up) <= 0) return count; if (down) mode |= SWIPE_DOWN_BIT; else mode &= ~(SWIPE_DOWN_BIT); if (up) mode |= SWIPE_UP_BIT; else mode &= ~(SWIPE_UP_BIT); swp->swipe_mode = mode; return count; } static ssize_t show_hidden_normal_cal_state(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; u8 start_buffer = 0; u8 crc_buffer = 0; u8 calibration_status = 0; u8 crc_status = 0; if (ts->pdata->panel_id != 1) { TOUCH_I("[%s] Panel id : %d, Not supproted f/w calibration\n", __func__, ts->pdata->panel_id); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Not supproted f/w calibration\n"); return ret; } mutex_lock(&ts->pdata->thread_lock); if (power_state == POWER_ON || power_state == POWER_WAKE) { ret = synaptics_ts_page_data_read(client, ANALOG_PAGE, CALIBRATION_STATUS_REG, 1, &crc_buffer); if (ret < 0) { TOUCH_E( "[%s] Failed to read calibration_status_reg\n", __func__); goto error; } ret = synaptics_ts_page_data_read(client, ANALOG_PAGE, CALIBRATION_FLAGS_REG, 1, &start_buffer); if (ret < 0) { TOUCH_E( "[%s] Failed to read calibration_start_reg\n", __func__); goto error; } TOUCH_I( "[%s] Calibration start_buffer = 0x%02x, crc_buffer = 0x%02x\n", __func__, start_buffer, crc_buffer); calibration_status = (start_buffer & 0x01); crc_status = (crc_buffer & 0x02) >> 1; /*Calibration Result*/ /*Bad = 0, Good = 1, In Progress = 2, Error = 99*/ if (calibration_status == 0) { if (!crc_status) { TOUCH_I( "[%s] Normal CRC is Good. = %d\n", __func__, crc_status); ret += snprintf(buf + ret, PAGE_SIZE - ret, "status = 1\n"); } else { TOUCH_E( "[%s] Normal CRC Value is bad, crc = %d\n", __func__, crc_status); ret += snprintf(buf + ret, PAGE_SIZE - ret, "status = 0\n"); } } else if (calibration_status == 1) { TOUCH_I("Calibration is in progress = %d\n", calibration_status); ret += snprintf(buf + ret, PAGE_SIZE - ret, "status = 2\n"); } else { TOUCH_E("Invalidated to calibration_status = %d\n", calibration_status); ret += snprintf(buf + ret, PAGE_SIZE - ret, "status = 99\n"); } } else { TOUCH_E( "[%s] state is suspend, Failed to read register because cannot use I2C\n", __func__); ret += snprintf(buf + ret, PAGE_SIZE - ret, "state is suspend, Failed to read register because cannot use I2C\n"); } mutex_unlock(&ts->pdata->thread_lock); return ret; error: mutex_unlock(&ts->pdata->thread_lock); return ret; } static ssize_t show_hidden_lpwg_cal_state(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; u8 start_buffer = 0; u8 crc_buffer = 0; u8 calibration_status = 0; u8 crc_status = 0; if (ts->pdata->panel_id != 1) { TOUCH_I( "[%s] Panel id : %d, Not supproted f/w calibration\n", __func__, ts->pdata->panel_id); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Not supproted f/w calibration\n"); return ret; } mutex_lock(&ts->pdata->thread_lock); ret = synaptics_ts_page_data_read(client, ANALOG_PAGE, CALIBRATION_STATUS_REG, 1, &crc_buffer); if (ret < 0) { TOUCH_E( "[%s] Failed to read calibration_status_reg\n", __func__); goto error; } ret = synaptics_ts_page_data_read(client, ANALOG_PAGE, CALIBRATION_FLAGS_REG, 1, &start_buffer); if (ret < 0) { TOUCH_E( "[%s] Failed to read calibration_start_reg\n", __func__); goto error; } TOUCH_I( "[%s] start_buffer = 0x%02x, crc_buffer = 0x%02x\n", __func__, start_buffer, 
crc_buffer); calibration_status = (start_buffer & 0x02) >> 1; crc_status = (crc_buffer & 0x01); /*Calibration Result*/ /*Bad = 0, Good = 1, In Progress = 2, Error = 99*/ if (calibration_status == 0) { if (!crc_status) { TOUCH_I( "[%s] LPWG CRC Value is good = %d\n", __func__, crc_status); ret += snprintf(buf + ret, PAGE_SIZE - ret, "status = 1\n"); } else { TOUCH_E( "[%s] LPWG CRC Value is Bad = %d\n", __func__, crc_status); ret += snprintf(buf + ret, PAGE_SIZE - ret, "status = 0\n"); } } else if (calibration_status == 1) { TOUCH_I( "[%s] Calibration is in progress = %d\n", __func__, calibration_status); ret += snprintf(buf + ret, PAGE_SIZE - ret, "status = 2\n"); } else { TOUCH_E( "[%s] Invalidated to calibration_status = %d\n", __func__, calibration_status); ret += snprintf(buf + ret, PAGE_SIZE - ret, "status = 99\n"); } mutex_unlock(&ts->pdata->thread_lock); return ret; error: mutex_unlock(&ts->pdata->thread_lock); return ret; } static ssize_t show_lpwg_disable(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; ret += snprintf(buf + ret, PAGE_SIZE - ret, "%d\n", ts->lpwg_ctrl.hidden_lpwg_disable); TOUCH_I("hidden_lpwg_disable = %s\n", ts->lpwg_ctrl.hidden_lpwg_disable ? "lpwg disable" : "lpwg enable"); return ret; } static ssize_t store_lpwg_disable(struct i2c_client *client, const char *buf, size_t count) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int value; if (sscanf(buf, "%d", &value) <= 0) return count; if ((ts->lpwg_ctrl.hidden_lpwg_disable == 0) && (value == 1)) { ts->lpwg_ctrl.hidden_lpwg_disable = 1; } else if ((ts->lpwg_ctrl.hidden_lpwg_disable == 1) && (value == 0)) { ts->lpwg_ctrl.hidden_lpwg_disable = 0; } else { TOUCH_I("hidden_lpwg_disable: %d, value: %d\n", ts->lpwg_ctrl.hidden_lpwg_disable, value); return count; } TOUCH_I("hidden_lpwg_disable = %s\n", ts->lpwg_ctrl.hidden_lpwg_disable ? "set lpwg disable" : "set lpwg enable"); return count; } static ssize_t show_lpwg_sd(struct i2c_client *client, char *buf) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; int adc_test = 0; int rsp_test = 0; int lower_img = 0; int upper_img = 0; TOUCH_I("[%s] start.\n", __func__); if (power_state == POWER_SLEEP) { if (is_product(ts, "PLG446", 6)) { wake_lock(&ts->touch_rawdata); write_time_log(NULL, NULL, 0); msleep(30); write_firmware_version_log(ts); mutex_lock(&ts->pdata->thread_lock); adc_test = synaptics_ts_lpwg_adc_test(client); msleep(20); ret = snprintf(buf, PAGE_SIZE, "========RESULT=======\n"); ret += snprintf(buf + ret, PAGE_SIZE - ret, "LPWG RawData : %s", (adc_test == 1) ? 
"Pass\n" : "Fail\n"); wake_unlock(&ts->touch_rawdata); mutex_unlock(&ts->pdata->thread_lock); } else { wake_lock(&ts->touch_rawdata); lower_img = get_limit(TxChannelCount, RxChannelCount, *ts->client, ts->pdata, "RspLPWGLowerLimit", LowerImage); upper_img = get_limit(TxChannelCount, RxChannelCount, *ts->client, ts->pdata, "RspLPWGUpperLimit", UpperImage); if (lower_img < 0 || upper_img < 0) { TOUCH_I( "[%s] lower return = %d upper return = %d\n", __func__, lower_img, upper_img); TOUCH_I( "[%s][FAIL] Can not check the limit of raw cap\n", __func__); wake_unlock(&ts->touch_rawdata); return ret; } else { TOUCH_I( "Getting limit of LPWG raw cap is success\n"); } /*Exception Handle of flat and curved state*/ if(mfts_mode == 2 || mfts_mode == 3){ TOUCH_I("Can not execute lpwg_sd, mfts_mode = %d\n",mfts_mode); ret += snprintf(buf + ret, PAGE_SIZE - ret, "LPWG RawData : Not Support\n"); wake_unlock(&ts->touch_rawdata); return ret; } msleep(1000); SCAN_PDT(); touch_disable_irq(ts->client->irq); mutex_lock(&ts->pdata->thread_lock); write_time_log(NULL, NULL, 0); msleep(30); rsp_test = F54Test('q', 5, buf); msleep(20); ret = snprintf(buf, PAGE_SIZE, "========RESULT=======\n"); ret += snprintf(buf + ret, PAGE_SIZE - ret, "LPWG RawData : %s", (rsp_test == 1) ? "Pass\n" : "Fail\n"); mutex_unlock(&ts->pdata->thread_lock); wake_unlock(&ts->touch_rawdata); touch_enable_irq(ts->client->irq); } } else { TOUCH_I("Can not execute lpwg_sd, power state = %d\n", power_state); ret += snprintf(buf + ret, PAGE_SIZE - ret, "Can not execute lpwg_sd, power state = %d\n", power_state); } return ret; } static LGE_TOUCH_ATTR(firmware, S_IRUGO | S_IWUSR, show_firmware, NULL); static LGE_TOUCH_ATTR(sd, S_IRUGO | S_IWUSR, show_sd, NULL); static LGE_TOUCH_ATTR(rawdata, S_IRUGO | S_IWUSR, show_rawdata, NULL); static LGE_TOUCH_ATTR(delta, S_IRUGO | S_IWUSR, show_delta, NULL); static LGE_TOUCH_ATTR(chstatus, S_IRUGO | S_IWUSR, show_chstatus, NULL); static LGE_TOUCH_ATTR(testmode_ver, S_IRUGO | S_IWUSR, show_atcmd_fw_ver, NULL); static LGE_TOUCH_ATTR(tci, S_IRUGO | S_IWUSR, show_tci, store_tci); static LGE_TOUCH_ATTR(reg_ctrl, S_IRUGO | S_IWUSR, NULL, store_reg_ctrl); static LGE_TOUCH_ATTR(object_report, S_IRUGO | S_IWUSR, show_object_report, store_object_report); static LGE_TOUCH_ATTR(version, S_IRUGO | S_IWUSR, show_synaptics_fw_version, NULL); static LGE_TOUCH_ATTR(bootmode, S_IRUGO | S_IWUSR, NULL, store_boot_mode); static LGE_TOUCH_ATTR(ts_noise, S_IRUGO | S_IWUSR, show_ts_noise, store_ts_noise); static LGE_TOUCH_ATTR(ts_noise_log_enable, S_IRUGO | S_IWUSR, show_ts_noise_log_enable, store_ts_noise_log_enable); static LGE_TOUCH_ATTR(diff_node, S_IRUGO | S_IWUSR, show_diff_node, store_diff_node); static LGE_TOUCH_ATTR(lpwg_test_info, S_IRUGO | S_IWUSR, show_lpwg_test_info, NULL); static LGE_TOUCH_ATTR(touch_wake_up_test, S_IRUGO | S_IWUSR, show_touch_wake_up_test, store_touch_wake_up_test); static LGE_TOUCH_ATTR(pen_support, S_IRUGO | S_IWUSR, show_pen_support, NULL); static LGE_TOUCH_ATTR(palm_ctrl_mode, S_IRUGO | S_IWUSR, show_palm_ctrl_mode, store_palm_ctrl_mode); static LGE_TOUCH_ATTR(use_hover_finger, S_IRUGO | S_IWUSR, show_use_hover_finger, store_use_hover_finger); static LGE_TOUCH_ATTR(use_rmi_dev, S_IRUGO | S_IWUSR, show_use_rmi_dev, store_use_rmi_dev); static LGE_TOUCH_ATTR(sensing_test, S_IRUGO | S_IWUSR, NULL, store_sensing_test); static LGE_TOUCH_ATTR(abs_test, S_IRUGO | S_IWUSR, show_abs_test, NULL); static LGE_TOUCH_ATTR(sensor_speed_test, S_IRUGO | S_IWUSR, show_sensor_speed_test, NULL); static 
LGE_TOUCH_ATTR(adc_range_test, S_IRUGO | S_IWUSR, show_adc_range_test, NULL); static LGE_TOUCH_ATTR(noise_delta_test, S_IRUGO | S_IWUSR, show_noise_delta_test, NULL); static LGE_TOUCH_ATTR(gnd_test, S_IRUGO | S_IWUSR, show_gnd_test, NULL); static LGE_TOUCH_ATTR(status_normal_calibration, S_IRUGO | S_IWUSR, show_status_normal_calibration, NULL); static LGE_TOUCH_ATTR(normal_calibration, S_IRUGO | S_IWUSR, show_normal_calibration, NULL); static LGE_TOUCH_ATTR(status_lpwg_calibration, S_IRUGO | S_IWUSR, show_status_lpwg_calibration, NULL); static LGE_TOUCH_ATTR(lpwg_calibration, S_IRUGO | S_IWUSR, show_lpwg_calibration, NULL); static LGE_TOUCH_ATTR(get_calibration, S_IRUGO | S_IWUSR, show_get_calibration, NULL); static LGE_TOUCH_ATTR(swipe_param, S_IRUGO | S_IWUSR, show_swipe_param, store_swipe_param); static LGE_TOUCH_ATTR(swipe_mode, S_IRUGO | S_IWUSR, NULL, store_swipe_mode); static LGE_TOUCH_ATTR(hidden_normal_cal_state, S_IRUGO | S_IWUSR, show_hidden_normal_cal_state, NULL); static LGE_TOUCH_ATTR(hidden_lpwg_cal_state, S_IRUGO | S_IWUSR, show_hidden_lpwg_cal_state, NULL); static LGE_TOUCH_ATTR(sp_link_touch_off, S_IRUGO | S_IWUSR, show_sp_link_touch_off, store_sp_link_touch_off); static LGE_TOUCH_ATTR(lpwg_disable, S_IRUGO | S_IWUSR, show_lpwg_disable, store_lpwg_disable); static LGE_TOUCH_ATTR(lpwg_sd, S_IRUGO | S_IWUSR, show_lpwg_sd, NULL); static struct attribute *synaptics_ts_attribute_list[] = { &lge_touch_attr_firmware.attr, &lge_touch_attr_sd.attr, &lge_touch_attr_rawdata.attr, &lge_touch_attr_delta.attr, &lge_touch_attr_chstatus.attr, &lge_touch_attr_testmode_ver.attr, &lge_touch_attr_tci.attr, &lge_touch_attr_reg_ctrl.attr, &lge_touch_attr_object_report.attr, &lge_touch_attr_version.attr, &lge_touch_attr_bootmode.attr, &lge_touch_attr_ts_noise.attr, &lge_touch_attr_ts_noise_log_enable.attr, &lge_touch_attr_diff_node.attr, &lge_touch_attr_lpwg_test_info.attr, &lge_touch_attr_touch_wake_up_test.attr, &lge_touch_attr_pen_support.attr, &lge_touch_attr_palm_ctrl_mode.attr, &lge_touch_attr_use_hover_finger.attr, &lge_touch_attr_use_rmi_dev.attr, &lge_touch_attr_sensing_test.attr, &lge_touch_attr_abs_test.attr, &lge_touch_attr_sensor_speed_test.attr, &lge_touch_attr_adc_range_test.attr, &lge_touch_attr_noise_delta_test.attr, &lge_touch_attr_gnd_test.attr, &lge_touch_attr_status_normal_calibration.attr, &lge_touch_attr_normal_calibration.attr, &lge_touch_attr_status_lpwg_calibration.attr, &lge_touch_attr_lpwg_calibration.attr, &lge_touch_attr_get_calibration.attr, &lge_touch_attr_swipe_param.attr, &lge_touch_attr_swipe_mode.attr, &lge_touch_attr_hidden_normal_cal_state.attr, &lge_touch_attr_hidden_lpwg_cal_state.attr, &lge_touch_attr_sp_link_touch_off.attr, &lge_touch_attr_lpwg_disable.attr, &lge_touch_attr_lpwg_sd.attr, NULL, }; static const struct attribute_group synaptics_ts_attribute_group = { .attrs = synaptics_ts_attribute_list, }; static int read_page_description_table(struct i2c_client *client) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); struct function_descriptor buffer; unsigned short u_address = 0; unsigned short page_num = 0; TOUCH_TRACE(); memset(&buffer, 0x0, sizeof(buffer)); memset(&ts->f01, 0x0, sizeof(struct ts_ic_function)); memset(&ts->f11, 0x0, sizeof(struct ts_ic_function)); memset(&ts->f12, 0x0, sizeof(struct ts_ic_function)); memset(&ts->f1a, 0x0, sizeof(struct ts_ic_function)); memset(&ts->f34, 0x0, sizeof(struct ts_ic_function)); memset(&ts->f51, 0x0, sizeof(struct ts_ic_function)); memset(&ts->f54, 0x0, sizeof(struct 
ts_ic_function)); for (page_num = 0; page_num < PAGE_MAX_NUM; page_num++) { DO_SAFE(synaptics_ts_set_page(client, page_num), error); for (u_address = DESCRIPTION_TABLE_START; u_address > 10; u_address -= sizeof(buffer)) { DO_SAFE(touch_i2c_read(client, u_address, sizeof(buffer), (unsigned char *)&buffer) < 0, error); if (buffer.id == 0) break; switch (buffer.id) { case 0x01: /* RMI device control */ ts->f01.dsc = buffer; ts->f01.page = page_num; break; case 0x11: /* 2D sensors (finger) */ ts->f11.dsc = buffer; ts->f11.page = page_num; break; case 0x12: /* 2D sensors (finger) */ ts->f12.dsc = buffer; ts->f12.page = page_num; get_f12_info(ts); break; case 0x1a: /* capacitive button sensors */ ts->f1a.dsc = buffer; ts->f1a.page = page_num; break; case 0x34: /* Flash memory management */ ts->f34.dsc = buffer; ts->f34.page = page_num; break; case 0x51: /* lpwg */ ts->f51.dsc = buffer; ts->f51.page = page_num; break; case 0x54: /* test report */ ts->f54.dsc = buffer; ts->f54.page = page_num; break; default: break; } } } TOUCH_D(DEBUG_BASE_INFO, "common[%dP:0x%02x] finger_f12[%dP:0x%02x] flash[%dP:0x%02x] analog[%dP:0x%02x] lpwg[%dP:0x%02x]\n", ts->f01.page, ts->f01.dsc.id, ts->f12.page, ts->f12.dsc.id, ts->f34.page, ts->f34.dsc.id, ts->f54.page, ts->f54.dsc.id, ts->f51.page, ts->f51.dsc.id); /* means fw version before v1.12 */ if (ts->f1a.dsc.id) int_mask_cust = 0x40; else int_mask_cust = 0x20; ERROR_IF(ts->f01.dsc.id == 0 || ts->f12.dsc.id == 0 || ts->f34.dsc.id == 0 || ts->f54.dsc.id == 0 || ts->f51.dsc.id == 0, "page_init_error", init_error); DO_SAFE(synaptics_ts_set_page(client, 0x00), error); get_finger_amplitude(ts); return 0; init_error: TOUCH_E("%s, %d : read page failed\n", __func__, __LINE__); return -EINVAL; error: TOUCH_E("%s, %d : read page failed\n", __func__, __LINE__); return -EIO; } static int get_swipe_info(struct synaptics_ts_data *ts) { struct swipe_data *swp = &ts->swipe; bool is_official_fw = 0; u8 fw_ver = 0; u8 lpwg_properties_reg = ts->f51.dsc.query_base + 4; u8 has_swipe_mask = 0x10; u8 buf = 0; memset(swp, 0, sizeof(struct swipe_data)); if (is_product(ts, "PLG446", 6) || is_product(ts, "PLG468", 6)) { is_official_fw = ((ts->fw_info.version[3] & 0x80) >> 7); fw_ver = (ts->fw_info.version[3] & 0x7F); TOUCH_I("%s: is_official_fw=%d, fw_ver=%d\n", __func__, is_official_fw, fw_ver); swp->support_swipe = NO_SUPPORT_SWIPE; if (is_official_fw) { if (is_product(ts, "PLG446", 6)) { if (fw_ver >= 9) swp->support_swipe |= SUPPORT_SWIPE_DOWN; if (fw_ver >= 14) swp->support_swipe |= SUPPORT_SWIPE_UP; } else if (is_product(ts, "PLG468", 6)) { if (fw_ver >= 11) swp->support_swipe |= SUPPORT_SWIPE_DOWN; if (fw_ver >= 14) swp->support_swipe |= SUPPORT_SWIPE_UP; } } else { if (is_product(ts, "PLG446", 6)) { if (fw_ver >= 41) swp->support_swipe |= SUPPORT_SWIPE_DOWN; if (fw_ver >= 75) swp->support_swipe |= SUPPORT_SWIPE_UP; } else if (is_product(ts, "PLG468", 6)) { if (fw_ver >= 40) swp->support_swipe |= SUPPORT_SWIPE_DOWN; if (fw_ver >= 90) swp->support_swipe |= SUPPORT_SWIPE_UP; } } if (swp->support_swipe) { synaptics_ts_page_data_read(ts->client, LPWG_PAGE, lpwg_properties_reg, 1, &buf); TOUCH_I( "%s: lpwg_properties_reg [addr:0x%02X,value:0x%02X)\n", __func__, lpwg_properties_reg, buf); if (!(buf & has_swipe_mask)) { TOUCH_I("%s: Need to check Has Swipe bit\n", __func__); swp->support_swipe = NO_SUPPORT_SWIPE; } } } else { TOUCH_E("%s, %d : Unknown firmware\n", __func__, __LINE__); return 0; } TOUCH_I("%s: support_swipe:0x%02X\n", __func__, swp->support_swipe); if (swp->support_swipe 
== NO_SUPPORT_SWIPE) return 0; swp->gesture_mask = 0x04; swp->enable_reg = ts->f51.dsc.control_base + 15; if (is_product(ts, "PLG446", 6)) { if (ts->lpwg_ctrl.has_lpwg_overtap_module) { swp->coordinate_start_reg = ts->f51.dsc.data_base + 74; swp->coordinate_end_reg = ts->f51.dsc.data_base + 78; swp->fail_reason_reg = ts->f51.dsc.data_base + 82; swp->time_reg = ts->f51.dsc.data_base + 83; } else { swp->coordinate_start_reg = ts->f51.dsc.data_base + 73; swp->coordinate_end_reg = ts->f51.dsc.data_base + 77; swp->fail_reason_reg = ts->f51.dsc.data_base + 81; swp->time_reg = ts->f51.dsc.data_base + 82; } } else if (is_product(ts, "PLG468", 6)) { if (ts->lpwg_ctrl.has_lpwg_overtap_module && ts->lpwg_ctrl.has_request_reset_reg) { swp->coordinate_start_reg = ts->f51.dsc.data_base + 58; swp->coordinate_end_reg = ts->f51.dsc.data_base + 62; swp->fail_reason_reg = ts->f51.dsc.data_base + 66; swp->time_reg = ts->f51.dsc.data_base + 67; } else { swp->coordinate_start_reg = ts->f51.dsc.data_base + 57; swp->coordinate_end_reg = ts->f51.dsc.data_base + 61; swp->fail_reason_reg = ts->f51.dsc.data_base + 65; swp->time_reg = ts->f51.dsc.data_base + 66; } } else { TOUCH_E("%s, %d : Unknown firmware\n", __func__, __LINE__); memset(swp, 0, sizeof(struct swipe_data)); return 0; } if (swp->support_swipe & SUPPORT_SWIPE_DOWN) { swp->swipe_mode |= SWIPE_DOWN_BIT; swp->down.enable_mask = SWIPE_DOWN_BIT; swp->down.min_distance = ts->pdata->swp_down_caps->min_distance; swp->down.ratio_thres = ts->pdata->swp_down_caps->ratio_thres; swp->down.ratio_chk_period = ts->pdata->swp_down_caps->ratio_chk_period; swp->down.ratio_chk_min_distance = ts->pdata->swp_down_caps->ratio_chk_min_distance; swp->down.min_time_thres = ts->pdata->swp_down_caps->min_time_thres; swp->down.max_time_thres = ts->pdata->swp_down_caps->max_time_thres; swp->down.active_area_x0 = ts->pdata->swp_down_caps->active_area_x0; swp->down.active_area_y0 = ts->pdata->swp_down_caps->active_area_y0; swp->down.active_area_x1 = ts->pdata->swp_down_caps->active_area_x1; swp->down.active_area_y1 = ts->pdata->swp_down_caps->active_area_y1; swp->down.min_distance_reg = ts->f51.dsc.control_base + ts->pdata->swp_down_caps->min_distance_reg_offset; swp->down.ratio_thres_reg = ts->f51.dsc.control_base + ts->pdata->swp_down_caps->ratio_thres_reg_offset; swp->down.ratio_chk_period_reg = ts->f51.dsc.control_base + ts->pdata->swp_down_caps->ratio_chk_period_reg_offset; swp->down.ratio_chk_min_distance_reg = ts->f51.dsc.control_base + ts->pdata->swp_down_caps->ratio_chk_min_distance_reg_offset; swp->down.min_time_thres_reg = ts->f51.dsc.control_base + ts->pdata->swp_down_caps->min_time_thres_reg_offset; swp->down.max_time_thres_reg = ts->f51.dsc.control_base + ts->pdata->swp_down_caps->max_time_thres_reg_offset; swp->down.active_area_x0_reg = ts->f51.dsc.control_base + ts->pdata->swp_down_caps->active_area_x0_reg_offset; swp->down.active_area_y0_reg = ts->f51.dsc.control_base + ts->pdata->swp_down_caps->active_area_y0_reg_offset; swp->down.active_area_x1_reg = ts->f51.dsc.control_base + ts->pdata->swp_down_caps->active_area_x1_reg_offset; swp->down.active_area_y1_reg = ts->f51.dsc.control_base + ts->pdata->swp_down_caps->active_area_y1_reg_offset; } if (swp->support_swipe & SUPPORT_SWIPE_UP) { /* swp->swipe_mode |= SWIPE_UP_BIT; */ swp->up.enable_mask = SWIPE_UP_BIT; swp->up.min_distance = ts->pdata->swp_up_caps->min_distance; swp->up.ratio_thres = ts->pdata->swp_up_caps->ratio_thres; swp->up.ratio_chk_period = ts->pdata->swp_up_caps->ratio_chk_period; 
swp->up.ratio_chk_min_distance = ts->pdata->swp_up_caps->ratio_chk_min_distance; swp->up.min_time_thres = ts->pdata->swp_up_caps->min_time_thres; swp->up.max_time_thres = ts->pdata->swp_up_caps->max_time_thres; swp->up.active_area_x0 = ts->pdata->swp_up_caps->active_area_x0; swp->up.active_area_y0 = ts->pdata->swp_up_caps->active_area_y0; swp->up.active_area_x1 = ts->pdata->swp_up_caps->active_area_x1; swp->up.active_area_y1 = ts->pdata->swp_up_caps->active_area_y1; swp->up.min_distance_reg = ts->f51.dsc.control_base + ts->pdata->swp_up_caps->min_distance_reg_offset; swp->up.ratio_thres_reg = ts->f51.dsc.control_base + ts->pdata->swp_up_caps->ratio_thres_reg_offset; swp->up.ratio_chk_period_reg = ts->f51.dsc.control_base + ts->pdata->swp_up_caps->ratio_chk_period_reg_offset; swp->up.ratio_chk_min_distance_reg = ts->f51.dsc.control_base + ts->pdata->swp_up_caps->ratio_chk_min_distance_reg_offset; swp->up.min_time_thres_reg = ts->f51.dsc.control_base + ts->pdata->swp_up_caps->min_time_thres_reg_offset; swp->up.max_time_thres_reg = ts->f51.dsc.control_base + ts->pdata->swp_up_caps->max_time_thres_reg_offset; swp->up.active_area_x0_reg = ts->f51.dsc.control_base + ts->pdata->swp_up_caps->active_area_x0_reg_offset; swp->up.active_area_y0_reg = ts->f51.dsc.control_base + ts->pdata->swp_up_caps->active_area_y0_reg_offset; swp->up.active_area_x1_reg = ts->f51.dsc.control_base + ts->pdata->swp_up_caps->active_area_x1_reg_offset; swp->up.active_area_y1_reg = ts->f51.dsc.control_base + ts->pdata->swp_up_caps->active_area_y1_reg_offset; } return 0; } static void get_lpwg_module_enable(struct synaptics_ts_data *ts) { bool is_official_fw = 0; u8 fw_ver = 0; is_official_fw = ((ts->fw_info.version[3] & 0x80) >> 7); fw_ver = (ts->fw_info.version[3] & 0x7F); if (is_product(ts, "PLG468", 6)) { if (is_official_fw && fw_ver >= 20) { ts->lpwg_ctrl.has_lpwg_overtap_module = 1; ts->lpwg_ctrl.has_request_reset_reg = 1; } else if (fw_ver >= 123) { ts->lpwg_ctrl.has_lpwg_overtap_module = 1; ts->lpwg_ctrl.has_request_reset_reg = 1; } else { ts->lpwg_ctrl.has_lpwg_overtap_module = 0; ts->lpwg_ctrl.has_request_reset_reg = 0; } } else if (is_product(ts, "PLG446", 6)) { if (is_official_fw && fw_ver >= 25) ts->lpwg_ctrl.has_lpwg_overtap_module = 1; else if (fw_ver >= 125) ts->lpwg_ctrl.has_lpwg_overtap_module = 1; else ts->lpwg_ctrl.has_lpwg_overtap_module = 0; } else { TOUCH_E("%s, %d : can't find matched product id \n", __func__, __LINE__); } } static int get_ic_info(struct synaptics_ts_data *ts) { u8 buf = 0; memset(&ts->fw_info, 0, sizeof(struct synaptics_ts_fw_info)); ts->pdata->role->fw_index = get_type_bootloader(ts->client); DO_SAFE(touch_i2c_read(ts->client, FLASH_CONFIG_ID_REG, sizeof(ts->fw_info.version) - 1, ts->fw_info.version), error); DO_SAFE(touch_i2c_read(ts->client, CUSTOMER_FAMILY_REG, 1, &(ts->fw_info.family)), error); DO_SAFE(touch_i2c_read(ts->client, FW_REVISION_REG, 1, &(ts->fw_info.revision)), error); TOUCH_D(DEBUG_BASE_INFO, "CUSTOMER_FAMILY_REG = %d\n", ts->fw_info.family); TOUCH_D(DEBUG_BASE_INFO, "FW_REVISION_REG = %d\n", ts->fw_info.revision); DO_SAFE(synaptics_ts_page_data_read(ts->client, LPWG_PAGE, LPWG_HAS_DEBUG_MODULE, 1, &buf), error); ts->lpwg_ctrl.has_debug_module = (buf & 0x0C) ? 
1 : 0; TOUCH_D(DEBUG_BASE_INFO, "addr[0x%x] buf[0x%x] has_d_module[%d]", LPWG_HAS_DEBUG_MODULE, buf, ts->lpwg_ctrl.has_debug_module); get_lpwg_module_enable(ts); get_swipe_info(ts); return 0; error: TOUCH_E("%s, %d : get_ic_info failed\n", __func__, __LINE__); return -EIO; } static int check_firmware_status(struct synaptics_ts_data *ts) { u8 device_status = 0; u8 flash_status = 0; DO_SAFE(touch_i2c_read(ts->client, FLASH_STATUS_REG, sizeof(flash_status), &flash_status), error); DO_SAFE(touch_i2c_read(ts->client, DEVICE_STATUS_REG, sizeof(device_status), &device_status), error); ts->fw_info.need_rewrite_firmware = 0; if ((device_status & DEVICE_STATUS_FLASH_PROG) || (device_status & DEVICE_CRC_ERROR_MASK) || (flash_status & FLASH_STATUS_MASK)) { TOUCH_E("FLASH_STATUS[0x%x] DEVICE_STATUS[0x%x]\n", (u32)flash_status, (u32)device_status); ts->fw_info.need_rewrite_firmware = 1; } return 0; error: TOUCH_E("%s, %d : check_firmware_status failed\n", __func__, __LINE__); return -EIO; } enum error_type synaptics_ts_probe(struct i2c_client *client, struct touch_platform_data *lge_ts_data, struct state_info *state) { struct synaptics_ts_data *ts; TOUCH_TRACE(); ASSIGN(ts = devm_kzalloc(&client->dev, sizeof(struct synaptics_ts_data), GFP_KERNEL), error); set_touch_handle(client, ts); ts->client = client; ds4_i2c_client = client; ts->pdata = lge_ts_data; ts->state = state; /* Protocol 9 disable for sleep control */ ts->lpwg_ctrl.protocol9_sleep_flag = false; if (ts->pdata->pwr->vio_control) { TOUCH_I( "%s: ts->pdata->vio_pin[%d]\n", __func__, ts->pdata->vio_pin); if (ts->pdata->vio_pin > 0) { DO_SAFE(gpio_request(ts->pdata->vio_pin, "touch_vio"), error); gpio_direction_output(ts->pdata->vio_pin, 0); } } if (ts->pdata->pwr->use_regulator) { DO_IF(IS_ERR(ts->vreg_vdd = regulator_get(&client->dev, ts->pdata->pwr->vdd)), error); DO_IF(IS_ERR(ts->vreg_vio = regulator_get(&client->dev, ts->pdata->pwr->vio)), error); if (ts->pdata->pwr->vdd_voltage > 0) DO_SAFE(regulator_set_voltage(ts->vreg_vdd, ts->pdata->pwr->vdd_voltage, ts->pdata->pwr->vdd_voltage), error); if (ts->pdata->pwr->vio_voltage > 0) DO_SAFE(regulator_set_voltage(ts->vreg_vio, ts->pdata->pwr->vio_voltage, ts->pdata->pwr->vio_voltage), error); } ts->is_probed = 0; ts->is_init = 0; ts->lpwg_ctrl.screen = 1; ts->lpwg_ctrl.sensor = 1; atomic_set(&ts->lpwg_ctrl.is_suspend, 0); INIT_DELAYED_WORK(&ts->work_timer, lpwg_timer_func); INIT_DELAYED_WORK(&ts->work_palm, all_palm_released_func); INIT_DELAYED_WORK(&ts->work_sleep, sleepmode_func); wake_lock_init(&ts->timer_wake_lock, WAKE_LOCK_SUSPEND, "touch_timer"); wake_lock_init(&ts->touch_rawdata, WAKE_LOCK_SUSPEND, "touch_rawdata"); return NO_ERROR; error: TOUCH_E("%s, %d : synaptics_probe failed\n", __func__, __LINE__); return ERROR; } enum error_type synaptics_ts_remove(struct i2c_client *client) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); TOUCH_TRACE(); if (ts->pdata->role->use_hover_finger && prox_fhandler.inserted && prox_fhandler.initialized) { prox_fhandler.exp_fn->remove(ts); prox_fhandler.initialized = false; } if (ts->pdata->role->use_rmi_dev && rmidev_fhandler.inserted && rmidev_fhandler.initialized) { rmidev_fhandler.exp_fn->remove(ts); rmidev_fhandler.initialized = false; } if (ts->pdata->pwr->use_regulator) { regulator_put(ts->vreg_vio); regulator_put(ts->vreg_vdd); } wake_lock_destroy(&ts->timer_wake_lock); wake_lock_destroy(&ts->touch_rawdata); return NO_ERROR; } static int lpwg_update_all(struct synaptics_ts_data *ts, bool irqctrl) { int 
sleep_status = 0; int lpwg_status = 0; bool req_lpwg_param = false; TOUCH_TRACE(); if (ts->lpwg_ctrl.screen) { if (atomic_read(&ts->lpwg_ctrl.is_suspend) == 1) { if (power_state == POWER_OFF || power_state == POWER_SLEEP) ts->is_init = 0; if (ts->lpwg_ctrl.has_debug_module) { DO_SAFE(print_tci_debug_result(ts, 0), error); DO_SAFE(print_tci_debug_result(ts, 1), error); } } atomic_set(&ts->lpwg_ctrl.is_suspend, 0); if (ts->pdata->panel_id) ghost_do_not_reset = false; } else { if (atomic_read(&ts->lpwg_ctrl.is_suspend) == 0) { atomic_set(&ts->lpwg_ctrl.is_suspend, 1); if (is_product(ts, "PLG349", 6)) set_doze_param(ts, 3); } if (ts->pdata->swipe_pwr_ctr != SKIP_PWR_CON) { ts->pdata->swipe_pwr_ctr = WAIT_SWIPE_WAKEUP; } TOUCH_I("%s : swipe_pwr_ctr = %d\n", __func__, ts->pdata->swipe_pwr_ctr); if (ts->pdata->panel_id) ghost_do_not_reset = true; } if (ts->lpwg_ctrl.screen) { /* ON(1) */ sleep_status = 1; lpwg_status = 0; } else if (!ts->lpwg_ctrl.screen /* OFF(0), CLOSED(0) */ && ts->lpwg_ctrl.qcover) { sleep_status = 1; lpwg_status = 1; } else if (!ts->lpwg_ctrl.screen /* OFF(0), OPEN(1), FAR(1) */ && !ts->lpwg_ctrl.qcover && ts->lpwg_ctrl.sensor) { sleep_status = 1; lpwg_status = ts->lpwg_ctrl.lpwg_mode; } else if (!ts->lpwg_ctrl.screen /* OFF(0), OPEN(1), NEAR(0) */ && !ts->lpwg_ctrl.qcover && !ts->lpwg_ctrl.sensor) { if (ts->pdata->role->crack->use_crack_mode) { if (!after_crack_check) { TOUCH_I( "%s : Crack check not done... use nonsleep mode to check Crack!!\n", __func__); sleep_status = 1; lpwg_status = ts->lpwg_ctrl.lpwg_mode; } else { sleep_status = 0; req_lpwg_param = true; } } else { sleep_status = 0; req_lpwg_param = true; } } if (is_product(ts, "PLG349", 6)) { DO_SAFE(sleep_control(ts, sleep_status, 0), error); } else { TOUCH_D(DEBUG_BASE_INFO, "Sensor Status: %d\n", ts->lpwg_ctrl.sensor); TOUCH_D(DEBUG_BASE_INFO, "lpwg_is_enabled: %d\n", ts->lpwg_ctrl.lpwg_is_enabled); ts->pdata->swipe_stat[0] = ts->lpwg_ctrl.sensor; if (!ts->lpwg_ctrl.lpwg_mode && !ts->pdata->swipe_stat[1] && !mfts_mode) { touch_sleep_status(ts->client, 1); TOUCH_D(DEBUG_BASE_INFO, "[%s] LPWG Disable !\n", __func__); } else if (ts->lpwg_ctrl.qcover) { touch_sleep_status(ts->client, 0); } else if (ts->pdata->swipe_stat[1] == SWIPE_DONE) { touch_sleep_status(ts->client, !ts->lpwg_ctrl.sensor); } else if (ts->lpwg_ctrl.lpwg_is_enabled) { touch_swipe_status(ts->client, ts->pdata->swipe_stat[1]); } } if (req_lpwg_param == false) DO_SAFE(lpwg_control(ts, lpwg_status), error); ts->lpwg_ctrl.prev_screen = ts->lpwg_ctrl.screen; return NO_ERROR; error: return ERROR; } /* temporary code for INCELL JDI (relaxation) */ static int set_rebase_param(struct synaptics_ts_data *ts, int value) { u8 buf_array[9] = {0}; DO_SAFE(synaptics_ts_set_page(ts->client, ANALOG_PAGE), error); touch_i2c_read(ts->client, 0x45, 5, buf_array); if (value) buf_array[3] = 0x19; /* hold fast transition */ else buf_array[3] = 0x32; /* hold fast transition */ touch_i2c_write(ts->client, 0x45, 5, buf_array); touch_i2c_read(ts->client, 0x4C, 9, buf_array); if (value) { buf_array[6] = 0x1E; /* Difference Threshold */ buf_array[8] = 0x46; /* Negative Energy Threshold */ } else { buf_array[6] = 0x32; /* Difference Threshold */ buf_array[8] = 0x96; /* Negative Energy Threshold */ } touch_i2c_write(ts->client, 0x4C, 9, buf_array); if (value) TOUCH_D(DEBUG_BASE_INFO, "%s : Set for Normal\n", __func__); else TOUCH_D(DEBUG_BASE_INFO, "%s : Set for LPWG\n", __func__); DO_SAFE(synaptics_ts_set_page(ts->client, DEFAULT_PAGE), error); return 0; error: TOUCH_E("%s : 
failed to set rebase param\n", __func__); return -EPERM; } enum error_type synaptics_ts_init(struct i2c_client *client) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); u8 buf = 0; u8 buf_array[2] = {0}; int exp_fn_retval; u8 motion_suppression_reg_addr; int rc = 0; u8 lpwg_mode = ts->lpwg_ctrl.lpwg_mode; int is_suspend = atomic_read(&ts->lpwg_ctrl.is_suspend); TOUCH_TRACE(); if (ts->is_probed == 0) { rc = read_page_description_table(ts->client); DO_SAFE(check_firmware_status(ts), error); if (rc == -EIO) return ERROR; get_ic_info(ts); if (rc == -EINVAL) { TOUCH_I("%s : need to rewrite firmware !!", __func__); ts->fw_info.need_rewrite_firmware = 1; } ts->is_probed = 1; } if (ts->pdata->role->use_hover_finger && prox_fhandler.inserted) { if (!prox_fhandler.initialized) { exp_fn_retval = prox_fhandler.exp_fn->init(ts); if (exp_fn_retval < 0) { TOUCH_I( "[Touch Proximity] %s: Failed to init proximity settings\n", __func__); } else prox_fhandler.initialized = true; } else { prox_fhandler.exp_fn->reinit(ts); } } if (ts->pdata->role->use_rmi_dev && rmidev_fhandler.inserted) { if (!rmidev_fhandler.initialized) { exp_fn_retval = rmidev_fhandler.exp_fn->init(ts); if (exp_fn_retval < 0) { TOUCH_I( "[Touch RMI_Dev] %s: Failed to init rmi_dev settings\n", __func__); } else { rmidev_fhandler.initialized = true; } } } if (is_product(ts, "PLG468", 6)) { TOUCH_I("[%s] DEVICE_CONTROL_NORMAL_OP\n", __func__); DO_SAFE(touch_i2c_write_byte(client, DEVICE_CONTROL_REG, DEVICE_CONTROL_NORMAL_OP | DEVICE_CONTROL_CONFIGURED), error); DO_SAFE(synaptics_ts_page_data_read(client, LPWG_PAGE, LPWG_PARTIAL_REG + 71, 1, &buf), error); buf_array[0] = touch_ta_status ? 0x02 : 0x00; buf_array[1] = incoming_call_state ? 0x00 : 0x04; TOUCH_I("%s: prev:0x%02X, next:0x%02X (TA: %d / Call: %d)\n", __func__, buf, (buf & 0xF9) | (buf_array[0] | buf_array[1]), touch_ta_status, incoming_call_state); buf = (buf & 0xF9) | (buf_array[0] | buf_array[1]); if (incoming_call_state) { DO_SAFE(synaptics_ts_page_data_write(client, LPWG_PAGE, LPWG_PARTIAL_REG + 71, 1, &buf), error); set_param_incoming_call(client, incoming_call_state); } else { set_param_incoming_call(client, incoming_call_state); DO_SAFE(synaptics_ts_page_data_write(client, LPWG_PAGE, LPWG_PARTIAL_REG + 71, 1, &buf), error); } if (ts->pdata->swipe_pwr_ctr == WAIT_TOUCH_PRESS) { DO_SAFE(synaptics_ts_page_data_read(client, LPWG_PAGE, ts->f51_reg.lpwg_partial_reg, 1, &buf), error); TOUCH_I("SAC disable for GV prev. buf=0x%x \n", buf); buf = buf&0xFB; DO_SAFE(synaptics_ts_page_data_write(client, LPWG_PAGE, ts->f51_reg.lpwg_partial_reg, 1, &buf), error); TOUCH_I("SAC Enable for GV Done. 
next buf=0x%x \n", buf); } } else if (is_product(ts, "PLG446", 6)) { if (touch_ta_status == 2 || touch_ta_status == 3) { TOUCH_I("[%s] DEVICE_CONTROL_NOSLEEP\n", __func__); DO_SAFE(touch_i2c_read(client, DEVICE_CONTROL_REG, 1, &buf), error); DO_SAFE(touch_i2c_write_byte(client, DEVICE_CONTROL_REG, DEVICE_CONTROL_NOSLEEP | (buf & 0xF8)), error); buf = 0x01; DO_SAFE(synaptics_ts_page_data_write(client, LPWG_PAGE, LPWG_PARTIAL_REG + 4, 1, &buf), error); } else { TOUCH_I("[%s] DEVICE_CONTROL_NORMAL_OP\n", __func__); DO_SAFE(touch_i2c_read(client, DEVICE_CONTROL_REG, 1, &buf), error); DO_SAFE(touch_i2c_write_byte(client, DEVICE_CONTROL_REG, DEVICE_CONTROL_NORMAL_OP | (buf & 0xF8)), error); buf = 0x00; DO_SAFE(synaptics_ts_page_data_write(client, LPWG_PAGE, LPWG_PARTIAL_REG + 4, 1, &buf), error); } set_rebase_param(ts, 1); } DO_SAFE(touch_i2c_read(client, INTERRUPT_ENABLE_REG, 1, &buf), error); if (is_product(ts, "PLG349", 6) || is_product(ts, "s3320", 5) || is_product(ts, "PLG446", 6) || is_product(ts, "PLG468", 6)) { DO_SAFE(touch_i2c_write_byte(client, INTERRUPT_ENABLE_REG, buf | INTERRUPT_MASK_ABS0 | int_mask_cust), error); } else { DO_SAFE(touch_i2c_write_byte(client, INTERRUPT_ENABLE_REG, buf | INTERRUPT_MASK_ABS0), error); } if (ts->pdata->role->report_mode == REDUCED_MODE && !ts->pdata->role->ghost->long_press_chk) { buf_array[0] = buf_array[1] = ts->pdata->role->delta_pos_threshold; } else { buf_array[0] = buf_array[1] = 0; ts->pdata->role->ghost->force_continuous_mode = true; } motion_suppression_reg_addr = ts->f12_reg.ctrl[20]; DO_SAFE(touch_i2c_write(client, motion_suppression_reg_addr, 2, buf_array), error); if (ts->pdata->role->palm_ctrl_mode > PALM_REPORT) { TOUCH_I( "Invalid palm_ctrl_mode:%u (palm_ctrl_mode -> PALM_REJECT_FW)\n", ts->pdata->role->palm_ctrl_mode); ts->pdata->role->palm_ctrl_mode = PALM_REJECT_FW; } TOUCH_I("palm_ctrl_mode:%u\n", ts->pdata->role->palm_ctrl_mode); DO_SAFE(touch_i2c_read(client, ts->f12_reg.ctrl[22], 1, &buf), error); buf_array[0] = buf & 0x03; if ((ts->pdata->role->palm_ctrl_mode == PALM_REJECT_DRIVER) || (ts->pdata->role->palm_ctrl_mode == PALM_REPORT)) { if (buf_array[0] != 0x00) { /* PalmFilterMode bits[1:0] (00:Disable palm filter */ buf &= ~(0x03); DO_SAFE(touch_i2c_write_byte(client, ts->f12_reg.ctrl[22], buf), error); } memset(&ts->palm, 0, sizeof(struct palm_data)); } else { if (buf_array[0] != 0x01) { /* PalmFilterMode bits[1:0] (01:Enable palm filter) */ buf &= ~(0x02); buf |= 0x01; DO_SAFE(touch_i2c_write_byte(client, ts->f12_reg.ctrl[22], buf), error); } } if (ts->pdata->role->use_lpwg_all) DO_SAFE(lpwg_update_all(ts, 0), error); else DO_SAFE(lpwg_control(ts, is_suspend ? lpwg_mode : 0), error); /* To get register addr properly for each Environment*/ matchUp_f51_regMap(ts); matchUp_f54_regMap(ts); /* It always should be done last. 
 */
	DO_SAFE(touch_i2c_read(client, INTERRUPT_STATUS_REG, 1, &buf), error);

	ts->is_init = 1;
	lpwg_by_lcd_notifier = false;

	return NO_ERROR;
error:
	TOUCH_E("%s, %d : synaptics init failed\n", __func__, __LINE__);
	return ERROR;
}

static int synaptics_ts_im_test(struct i2c_client *client)
{
	struct synaptics_ts_data *ts =
		(struct synaptics_ts_data *)get_touch_handle(client);
	unsigned int retry_cnt = 0, im_fail_max = 150;
	u8 buf1 = 0, buf2 = 0, curr[2] = {0};
	u16 im = 0, im_test_max = 0, result = 0;
	int f54len = 0;
	char f54buf[1000] = {0};
	int im_result = 0;

	f54len = snprintf(f54buf + f54len, sizeof(f54buf) - f54len,
			"RSP IM Test Result\n");

	DO_SAFE(synaptics_ts_page_data_read(client, LPWG_PAGE,
				LPWG_PARTIAL_REG + 71, 1, curr), error);
	curr[0] = (curr[0] & 0xff) | 0x02;
	DO_SAFE(synaptics_ts_page_data_write(client, LPWG_PAGE,
				LPWG_PARTIAL_REG + 71, 1, curr), error);
	msleep(20);

	for (retry_cnt = 0; retry_cnt < 10; retry_cnt++) {
		DO_SAFE(synaptics_ts_set_page(client, ANALOG_PAGE), error);
		DO_SAFE(touch_i2c_read(client,
				ts->f54_reg.interference__metric_LSB,
				1, &buf1), error);
		DO_SAFE(touch_i2c_read(client,
				ts->f54_reg.interference__metric_MSB,
				1, &buf2), error);
		im = (buf2 << 8) | buf1;
		f54len += snprintf(f54buf + f54len, sizeof(f54buf) - f54len,
				"%d : Current IM value = %d\n", retry_cnt, im);
		if (im > im_test_max)
			im_test_max = im;
		TOUCH_I("%s : im_test_max : %u retry_cnt : %u\n",
				__func__, im_test_max, retry_cnt);
		mdelay(5);
	}
	result = im_test_max;
	TOUCH_I("%s : result : %u\n", __func__, result);

	curr[0] = (curr[0] & 0xff) & 0xfd;
	DO_SAFE(synaptics_ts_page_data_write(client, LPWG_PAGE,
				LPWG_PARTIAL_REG + 71, 1, curr), error);

	f54len += snprintf(f54buf + f54len, sizeof(f54buf) - f54len,
			"\nMAX IM value=%d\n", result);
	if (result < im_fail_max) {
		f54len += snprintf(f54buf + f54len, sizeof(f54buf) - f54len,
				"RSP IM TEST passed\n\n");
		im_result = 1;
	} else {
		f54len += snprintf(f54buf + f54len, sizeof(f54buf) - f54len,
				"RSP IM TEST failed\n\n");
		im_result = 0;
	}
	write_log(NULL, f54buf);
	msleep(30);

	return im_result;
error:
	TOUCH_E("%s, %d : IM TEST failed\n", __func__, __LINE__);
	f54len += snprintf(f54buf + f54len, sizeof(f54buf) - f54len,
			"%s, %d : IM TEST failed\n", __func__, __LINE__);
	write_log(NULL, f54buf);
	msleep(30);
	return -EPERM;
}

static int synaptics_ts_adc_test(struct i2c_client *client)
{
	struct synaptics_ts_data *ts =
		(struct synaptics_ts_data *)get_touch_handle(client);
	unsigned int i = 0, temp_cnt = 0;
	u8 buf[50] = {0};	/* the incell statistic read below fetches 50 bytes */
	u16 result = 0, adc_result = 0, adc_fail_max = 3800, adc_fail_min = 400;
	u16 adc[21] = {0};	/* indexed 0..20 in the loop below */
	int f54len = 0;
	char f54buf[1000] = {0};

	f54len = snprintf(f54buf + f54len, sizeof(f54buf) - f54len,
			"ADC Test Result\n");
	write_log(NULL, f54buf);
	f54len = 0;
	TOUCH_D(DEBUG_BASE_INFO, "JDI ADC Test start\n");

	DO_SAFE(synaptics_ts_set_page(client, ANALOG_PAGE), error);
	DO_SAFE(touch_i2c_read(client, ts->f54_reg.incell_statistic,
				50, buf), error);
	DO_SAFE(synaptics_ts_set_page(client, DEFAULT_PAGE), error);

	for (i = 0; i < 21; i++) {
		if (i < 4)
			continue;
		temp_cnt = i * 2;
		adc[i] = (buf[temp_cnt + 1] << 8) | buf[temp_cnt];
		f54len += snprintf(f54buf + f54len, sizeof(f54buf) - f54len,
				"%d : Adc measured value = %d\n", i, adc[i]);
		TOUCH_D(DEBUG_BASE_INFO, "Adc value adc[%d] = %d\n",
				i + 1, adc[i]);
		if (adc[i] > adc_fail_max || adc[i] < adc_fail_min)
			adc_result++;
		write_log(NULL, f54buf);
		f54len = 0;
	}

	if (adc_result) {
		f54len += snprintf(f54buf + f54len, sizeof(f54buf) - f54len,
				"ADC TEST Failed\n");
		TOUCH_D(DEBUG_BASE_INFO, "JDI ADC Test has failed!!\n");
		result = 0;
	} else {
		f54len += snprintf(f54buf + f54len, sizeof(f54buf) - f54len,
				"ADC TEST Passed\n\n");
		TOUCH_D(DEBUG_BASE_INFO, "JDI ADC Test has passed\n");
		result = 1;
	}
	write_log(NULL, f54buf);
	msleep(30);
	TOUCH_D(DEBUG_BASE_INFO, "JDI ADC Test end\n");

	return result;
error:
	TOUCH_E("%s, %d : ADC TEST failed\n", __func__, __LINE__);
	f54len += snprintf(f54buf + f54len, sizeof(f54buf) - f54len,
			"%s, %d : ADC TEST failed\n", __func__, __LINE__);
	write_log(NULL, f54buf);
	msleep(30);
	return -EPERM;
}

static int synaptics_ts_lpwg_adc_test(struct i2c_client *client)
{
	struct synaptics_ts_data *ts =
		(struct synaptics_ts_data *)get_touch_handle(client);
	unsigned int i = 0;
	u8 buf[5] = {0};
	u16 result = 0, adc_result = 0;
	unsigned int adc_fail_max = 54299, adc_fail_min = 20998;
	u32 adc[20] = {0};
	int f54len = 0;
	char f54buf[1000] = {0};

	f54len = snprintf(f54buf + f54len, sizeof(f54buf) - f54len,
			"LPWG ADC Test Result\n");
	write_log(NULL, f54buf);
	f54len = 0;
	TOUCH_D(DEBUG_BASE_INFO, "LPWG ADC Test start\n");

	DO_SAFE(synaptics_ts_page_data_write(client, LPWG_PAGE,
				ts->f51_reg.lpwg_adc_offset_reg, 1, buf), error);
	msleep(20);

	for (i = 0; i < 17; i++) {
		DO_SAFE(synaptics_ts_page_data_read(client, LPWG_PAGE,
					ts->f51_reg.lpwg_adc_offset_reg,
					1, &buf[0]), error);
		TOUCH_D(DEBUG_BASE_INFO,
				"LPWG ADC Test offset_read : %d\n", buf[0]);
		if (buf[0] != i) {
			TOUCH_D(DEBUG_BASE_INFO,
					"LPWG ADC Test offset update error\n");
			goto error;
		}
		DO_SAFE(synaptics_ts_page_data_read(client, LPWG_PAGE,
					ts->f51_reg.lpwg_adc_fF_reg1,
					1, &buf[1]), error);
		DO_SAFE(synaptics_ts_page_data_read(client, LPWG_PAGE,
					ts->f51_reg.lpwg_adc_fF_reg2,
					1, &buf[2]), error);
		DO_SAFE(synaptics_ts_page_data_read(client, LPWG_PAGE,
					ts->f51_reg.lpwg_adc_fF_reg3,
					1, &buf[3]), error);
		DO_SAFE(synaptics_ts_page_data_read(client, LPWG_PAGE,
					ts->f51_reg.lpwg_adc_fF_reg4,
					1, &buf[4]), error);
		adc[i] = (buf[4] << 24) | (buf[3] << 16) |
			(buf[2] << 8) | buf[1];
		TOUCH_D(DEBUG_BASE_INFO,
				"LPWG ADC Test value : %d\t"
				"(buf[4] 0x%02x, buf[3] 0x%02x\t"
				"buf[2] 0x%02x, buf[1] 0x%02x)\n",
				adc[i], buf[4], buf[3], buf[2], buf[1]);
		f54len += snprintf(f54buf + f54len, sizeof(f54buf) - f54len,
				"%d : LPWG Adc measured value = %d\n",
				i, adc[i]);
		TOUCH_D(DEBUG_BASE_INFO, "LPWG Adc value adc[%d] = %d\n",
				i + 1, adc[i]);
		if (adc[i] > adc_fail_max || adc[i] < adc_fail_min)
			adc_result++;
		write_log(NULL, f54buf);
		f54len = 0;
	}

	if (adc_result) {
		f54len += snprintf(f54buf + f54len, sizeof(f54buf) - f54len,
				"LPWG ADC TEST Failed\n");
		TOUCH_D(DEBUG_BASE_INFO, "LPWG ADC Test has failed!!\n");
		result = 0;
	} else {
		f54len += snprintf(f54buf + f54len, sizeof(f54buf) - f54len,
				"LPWG ADC TEST Passed\n\n");
		TOUCH_D(DEBUG_BASE_INFO, "LPWG ADC Test has passed\n");
		result = 1;
	}
	write_log(NULL, f54buf);
	msleep(30);
	TOUCH_D(DEBUG_BASE_INFO, "LPWG ADC Test end\n");

	return result;
error:
	TOUCH_E("%s, %d : LPWG ADC TEST failed\n", __func__, __LINE__);
	f54len += snprintf(f54buf + f54len, sizeof(f54buf) - f54len,
			"%s, %d : LPWG ADC TEST failed\n", __func__, __LINE__);
	write_log(NULL, f54buf);
	msleep(30);
	return -EPERM;
}

static int synaptics_ts_noise_log(struct i2c_client *client,
		struct touch_data *curr_data,
		const struct touch_data *prev_data)
{
	struct synaptics_ts_data *ts =
		(struct synaptics_ts_data *)get_touch_handle(client);
	u8 buffer[3] = {0};
	u8 buf1 = 0, buf2 = 0, cns = 0;
	u16 im = 0, cid_im = 0, freq_scan_im = 0;
	int i = 0;

	DO_SAFE(synaptics_ts_set_page(client, ANALOG_PAGE), error);
	DO_SAFE(touch_i2c_read(client,
			ts->f54_reg.interference__metric_LSB,
			1, &buf1), error);
	DO_SAFE(touch_i2c_read(client,
ts->f54_reg.interference__metric_MSB, 1, &buf2), error); im = (buf2 << 8) | buf1; im_sum += im; DO_SAFE(touch_i2c_read(client, ts->f54_reg.current_noise_status, 1, &cns), error); cns_sum += cns; if (!ts->pdata->panel_id) { DO_SAFE(touch_i2c_read(client, ts->f54_reg.cid_im, 2, buffer), error); cid_im = (buffer[1]<<8)|buffer[0]; cid_im_sum += cid_im; } DO_SAFE(touch_i2c_read(client, ts->f54_reg.freq_scan_im, 2, buffer), error); freq_scan_im = (buffer[1] << 8) | buffer[0]; freq_scan_im_sum += freq_scan_im; DO_SAFE(synaptics_ts_set_page(client, DEFAULT_PAGE), error); cnt++; if ((ts->ts_state_flag.ts_noise_log_flag == TS_NOISE_LOG_ENABLE) || (touch_debug_mask & DEBUG_NOISE)) { if (prev_data->total_num != curr_data->total_num) { if (!ts->pdata->panel_id) { TOUCH_I( "Curr: CNS[%5d] IM[%5d] CID_IM[%5d] FREQ_SCAN_IM[%5d]\n", cns, im, cid_im, freq_scan_im); } else { TOUCH_I( "Curr: CNS[%5d] IM[%5d] FREQ_SCAN_IM[%5d]\n", cns, im, freq_scan_im); } } } for (i = 0; i < MAX_FINGER; i++) { if ((prev_data->report_id_mask & (1 << i)) && !(curr_data->id_mask & (1 << i))) { break; } } if (((i < MAX_FINGER) && curr_data->total_num == 0) || (im_sum >= ULONG_MAX || cns_sum >= ULONG_MAX || cid_im_sum >= ULONG_MAX || freq_scan_im_sum >= ULONG_MAX || cnt >= UINT_MAX)) { if ((ts->ts_state_flag.ts_noise_log_flag == TS_NOISE_LOG_ENABLE) || (touch_debug_mask & DEBUG_NOISE)) { if (!ts->pdata->panel_id) { TOUCH_I( "Aver: CNS[%5lu] IM[%5lu] CID_IM[%5lu] FREQ_SCAN_IM[%5lu] (cnt:%u)\n", cns_sum/cnt, im_sum/cnt, cid_im_sum/cnt, freq_scan_im_sum/cnt, cnt); } else { TOUCH_I( "Aver: CNS[%5lu] IM[%5lu] FREQ_SCAN_IM[%5lu] (cnt:%u)\n", cns_sum/cnt, im_sum/cnt, freq_scan_im_sum/cnt, cnt); } } im_aver = im_sum/cnt; cns_aver = cns_sum/cnt; cid_im_aver = cid_im_sum/cnt; freq_scan_im_aver = freq_scan_im_sum/cnt; } if (prev_data->total_num == 0 && curr_data->total_num != 0) { cnt = im_sum = cns_sum = cid_im_sum = freq_scan_im_sum = 0; im_aver = cns_aver = cid_im_aver = freq_scan_im_aver = 0; } return 0; error: TOUCH_E("%s, %d : get ts noise failed\n", __func__, __LINE__); return -EPERM; } static int synaptics_ts_debug_noise(struct i2c_client *client, struct touch_data *curr_data, const struct touch_data *prev_data) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); u8 buffer[2] = {0}; u8 report_rate_lsb = 0, sense_frq_select = 0; u8 data10_status = 0, cur_noise = 0; u16 freq_scan_im = 0; if (prev_data->total_num != curr_data->total_num) { DO_SAFE(synaptics_ts_set_page(client, ANALOG_PAGE), error); DO_SAFE(touch_i2c_read(client, 0x06, 1, &report_rate_lsb), error); DO_SAFE(touch_i2c_read(client, 0x0C, 1, &sense_frq_select), error); DO_SAFE(touch_i2c_read(client, 0x09, 1, &data10_status), error); DO_SAFE(touch_i2c_read(client, 0x08, 1, &cur_noise), error); DO_SAFE(touch_i2c_read(client, ts->f54_reg.freq_scan_im, 2, buffer), error); freq_scan_im = (buffer[1] << 8) | buffer[0]; DO_SAFE(synaptics_ts_set_page(client, DEFAULT_PAGE), error); TOUCH_I("Report rate LSB : [%5d],\n", report_rate_lsb); TOUCH_I("Sense Freq Select : [%5d],\n", sense_frq_select); TOUCH_I("Analog data10 stats: [%5d],\n", data10_status); TOUCH_I("Current Noise stats: [%5d],\n", cur_noise); TOUCH_I("Freq Scan IM : [%5d]\n", freq_scan_im); } return 0; error: TOUCH_E("%s : failed to get noise values\n", __func__); return -EPERM; } int synaptics_ts_get_object_count(struct i2c_client *client) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); /* determine finger count to process */ u8 object_to_read = 
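/*
 * scan the F12 object-attention bitmask downward: the result is the
 * index of the highest set bit plus one (e.g. a mask of 0b00101 -> 3)
 */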
ts->num_of_fingers; u8 buf[2] = {0,}; u16 object_attention_data = 0; DO_SAFE(touch_i2c_read(ts->client, ts->f12_reg.data[15], sizeof(buf), (u8 *) buf), error); object_attention_data = (((u16)((buf[1] << 8) & 0xFF00) | (u16)((buf[0]) & 0xFF))); for (; object_to_read > 0 ;) { if (object_attention_data & (0x1 << (object_to_read - 1))) break; else object_to_read--; } return object_to_read; error: TOUCH_E( "%s, %d : get object_attention data failed\n", __func__, __LINE__); return -ERROR; } static int get_swipe_data(struct i2c_client *client) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); struct swipe_data *swp = &ts->swipe; u8 swipe_buf[11] = {0}; u16 swipe_start_x = 0; u16 swipe_start_y = 0; u16 swipe_end_x = 0; u16 swipe_end_y = 0; u8 swipe_direction = 0; u8 swipe_fail_reason = 0; u16 swipe_time = 0; DO_SAFE(synaptics_ts_page_data_read(client, LPWG_PAGE, ts->swipe.coordinate_start_reg, 11, swipe_buf), error); swipe_start_x = GET_U16_FROM_U8(swipe_buf[1], swipe_buf[0]); swipe_start_y = GET_U16_FROM_U8(swipe_buf[3], swipe_buf[2]); swipe_end_x = GET_U16_FROM_U8(swipe_buf[5], swipe_buf[4]); swipe_end_y = GET_U16_FROM_U8(swipe_buf[7], swipe_buf[6]); if (swp->support_swipe & SUPPORT_SWIPE_DOWN) { swipe_direction = SWIPE_DIRECTION_DOWN; swipe_fail_reason = swipe_buf[8]; } if (swp->support_swipe & SUPPORT_SWIPE_UP) { swipe_direction = swipe_buf[8] & 0x03; swipe_fail_reason = (swipe_buf[8] & 0xfc) >> 2; } swipe_time = GET_U16_FROM_U8(swipe_buf[10], swipe_buf[9]); TOUCH_D(DEBUG_BASE_INFO || DEBUG_LPWG, "LPWG Swipe Gesture: start(%4d,%4d) end(%4d,%4d) " "swipe_direction(%d) swipe_fail_reason(%d) " "swipe_time(%dms)\n", swipe_start_x, swipe_start_y, swipe_end_x, swipe_end_y, swipe_direction, swipe_fail_reason, swipe_time); if (swipe_fail_reason == 0) { ts->pw_data.data_num = 1; ts->pw_data.data[0].x = swipe_end_x; ts->pw_data.data[0].y = swipe_end_y; return swipe_direction; } else { TOUCH_I("swipe fail.\n"); return -ERROR; } error: TOUCH_E("failed to read swipe data.\n"); return -ERROR; } enum error_type synaptics_ts_palm_control(struct synaptics_ts_data *ts) { u8 i = 0; switch (ts->pdata->role->palm_ctrl_mode) { case PALM_REJECT_DRIVER: case PALM_REPORT: for (i = 0; i < ts->num_of_fingers; i++) { if (ts->palm.curr_mask[i] == ts->palm.prev_mask[i]) continue; if (ts->palm.curr_mask[i]) { ts->palm.curr_num++; TOUCH_I( "Palm is detected : id[%d] pos[%4d,%4d] total palm:%u\n", i, ts->palm.coordinate[i].x, ts->palm.coordinate[i].y, ts->palm.curr_num); } else { ts->palm.curr_num--; TOUCH_I( "Palm is released : id[%d] pos[%4d,%4d] total palm:%u\n", i, ts->palm.coordinate[i].x, ts->palm.coordinate[i].y, ts->palm.curr_num); } } memcpy(ts->palm.prev_mask, ts->palm.curr_mask, sizeof(ts->palm.prev_mask)); if (ts->pdata->role->palm_ctrl_mode == PALM_REJECT_DRIVER) { if (ts->palm.curr_num) { ts->palm.prev_num = ts->palm.curr_num; return NO_FILTER; } if (ts->palm.prev_num) { ts->palm.all_released = true; queue_delayed_work(touch_wq, &ts->work_palm, msecs_to_jiffies(50)); TOUCH_I("All palm is released.\n"); ts->palm.prev_num = ts->palm.curr_num; return NO_FILTER; } if (ts->palm.all_released) { ts->palm.all_released = true; cancel_delayed_work(&ts->work_palm); queue_delayed_work(touch_wq, &ts->work_palm, msecs_to_jiffies(50)); return NO_FILTER; } } ts->palm.prev_num = ts->palm.curr_num; break; case PALM_REJECT_FW: default: break; } return NO_ERROR; } enum error_type synaptics_ts_get_data(struct i2c_client *client, struct touch_data *curr_data, const struct touch_data *prev_data) { 
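/*
 * Dispatch on interrupt source: the custom (LPWG) interrupt handles
 * double-tap, multi-tap and swipe gestures and returns IGNORE_EVENT;
 * ABS0 reads the finger registers into curr_data; FLASH/STATUS
 * interrupts escalate to ERROR or ERROR_IN_LPWG so the core can reset.
 */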
struct synaptics_ts_data *ts =
		(struct synaptics_ts_data *)get_touch_handle(client);
	struct finger_data *finger = NULL;
	struct t_data *abs = NULL;
	enum error_type ret = NO_ERROR;
	u8 i = 0;
	u8 finger_index = 0, lpwg_fail = 0;
	int object_to_read;	/* int, not u8: the check below tests for a negative error return */
	static u8 prev_object_to_read;
	bool odd_zvalue = false;
	int swipe_direction = 0;
	int swipe_uevent = 0;
	u8 buffer = 0;

	TOUCH_TRACE();

	if (!ts->is_init) {
		if (lpwg_by_lcd_notifier) {
			TOUCH_D(DEBUG_BASE_INFO || DEBUG_LPWG,
					"ts->is_init = 0, "
					"lpwg_by_lcd_notifier = true, "
					"handling lpwg event\n");
		} else {
			TOUCH_E("%s, %d : ts->is_init == 0, IGNORE_EVENT!!\n",
					__func__, __LINE__);
			return IGNORE_EVENT;
		}
	}

	curr_data->total_num = 0;
	curr_data->id_mask = 0;

	if (ts->pdata->role->palm_ctrl_mode == PALM_REJECT_DRIVER ||
			ts->pdata->role->palm_ctrl_mode == PALM_REPORT) {
		memset(ts->palm.curr_mask, 0, sizeof(ts->palm.curr_mask));
	}

	DO_SAFE(touch_i2c_read(client, DEVICE_STATUS_REG,
				sizeof(ts->ts_data.device_status_reg),
				&ts->ts_data.device_status_reg), error);
	DO_IF((ts->ts_data.device_status_reg & DEVICE_FAILURE_MASK)
				== DEVICE_FAILURE_MASK, error);

	DO_SAFE(touch_i2c_read(client, INTERRUPT_STATUS_REG,
				sizeof(ts->ts_data.interrupt_status_reg),
				&ts->ts_data.interrupt_status_reg), error);

	if (ts->pdata->role->use_hover_finger && prox_fhandler.inserted
			&& prox_fhandler.initialized)
		prox_fhandler.exp_fn->attn(ts->ts_data.interrupt_status_reg);

	if (ts->ts_data.interrupt_status_reg & int_mask_cust) {
		u8 status = 0;

		DO_SAFE(synaptics_ts_page_data_read(client, LPWG_PAGE,
					ts->f51_reg.lpwg_status_reg,
					1, &status), error);

		if (ts->lpwg_ctrl.has_request_reset_reg) {
			DO_SAFE(synaptics_ts_page_data_read(client, LPWG_PAGE,
						ts->f51_reg.request_reset_reg,
						1, &lpwg_fail), error);
			if ((lpwg_fail & 0x01) && (!ts->lpwg_ctrl.screen)) {
				TOUCH_I("%s: LPWG Malfunction (lpwg_state 0x%02x) - goto reset\n",
						__func__, status);
				return ERROR_IN_LPWG;
			}
		}

		if ((status & 0x1)) {	/* TCI-1 Double-Tap */
			TOUCH_D(DEBUG_BASE_INFO || DEBUG_LPWG,
					"LPWG Double-Tap mode\n");
			if (ts->lpwg_ctrl.double_tap_enable) {
				get_tci_data(ts, 2);
				send_uevent_lpwg(ts->client, LPWG_DOUBLE_TAP);
			}
		} else if ((status & 0x2)) {	/* TCI-2 Multi-Tap */
			TOUCH_D(DEBUG_BASE_INFO || DEBUG_LPWG,
					"LPWG Multi-Tap mode\n");
			if (ts->lpwg_ctrl.password_enable) {
				get_tci_data(ts, ts->pw_data.tap_count);
				wake_lock(&ts->timer_wake_lock);
				queue_delayed_work(touch_wq, &ts->work_timer,
						msecs_to_jiffies(UEVENT_DELAY - I2C_DELAY));
			}
		} else if ((ts->swipe.support_swipe) &&
				(status & ts->swipe.gesture_mask)) {
			swipe_direction = get_swipe_data(client);
			if (swipe_direction == SWIPE_DIRECTION_DOWN) {
				swipe_uevent = LPWG_SWIPE_DOWN;
			} else if (swipe_direction == SWIPE_DIRECTION_UP) {
				swipe_uevent = LPWG_SWIPE_UP;
			} else {
				return IGNORE_EVENT;
			}
			ts->pdata->swipe_stat[1] = DO_SWIPE;
			ts->pdata->swipe_pwr_ctr = SKIP_PWR_CON;
			send_uevent_lpwg(client, swipe_uevent);
			swipe_disable(ts);
		} else {
			if (ts->lpwg_ctrl.has_lpwg_overtap_module) {
				DO_SAFE(synaptics_ts_page_data_read(ts->client,
							LPWG_PAGE,
							ts->f51_reg.overtap_cnt_reg,
							1, &buffer), error);
				if (buffer > ts->pw_data.tap_count) {
					wake_lock(&ts->timer_wake_lock);
					ts->pw_data.data_num = 1;
					get_tci_data(ts, ts->pw_data.data_num);
					TOUCH_D(DEBUG_BASE_INFO || DEBUG_LPWG,
							"knock code fail to over tap count(%u)\n",
							buffer);
					queue_delayed_work(touch_wq,
							&ts->work_timer,
							msecs_to_jiffies(0));
				}
			}
			TOUCH_D(DEBUG_BASE_INFO || DEBUG_LPWG,
					"LPWG status has problem\n");
		}
		return IGNORE_EVENT;
	} else if (ts->ts_data.interrupt_status_reg & INTERRUPT_MASK_ABS0) {
		object_to_read = synaptics_ts_get_object_count(client);
ERROR_IF(unlikely(object_to_read < 0), "get_object_count error", error); object_to_read = object_to_read > prev_object_to_read ? object_to_read : prev_object_to_read; if (likely(object_to_read > 0)) { DO_SAFE(touch_i2c_read(ts->client, FINGER_DATA_REG_START, sizeof(ts->ts_data.finger[0]) * object_to_read, (u8 *) ts->ts_data.finger), error); } for (i = 0; i < object_to_read; i++) { finger = ts->ts_data.finger + i; abs = curr_data->touch + finger_index; if (finger->type == F12_NO_OBJECT_STATUS) continue; /* work around for wrong finger type*/ /* by msm-i2c-v2 BAM mode*/ if (finger->type > 6) { u8 *bb = (u8 *) finger; TOUCH_I( "wrong finger id:%d, type:%x\n", i, finger->type); TOUCH_I("x=%d y=%d\n", TS_POSITION(finger->x_msb, finger->x_lsb), TS_POSITION(finger->y_msb, finger->y_lsb)); TOUCH_I( "%02x %02x %02x %02x %02x %02x %02x %02x\n", bb[0], bb[1], bb[2], bb[3], bb[4], bb[5], bb[6], bb[7] ); bb[0] = F12_FINGER_STATUS; } prev_object_to_read = i+1; abs->id = i; abs->type = finger->type; abs->raw_x = TS_POSITION(finger->x_msb, finger->x_lsb); abs->raw_y = TS_POSITION(finger->y_msb, finger->y_lsb); if (finger->wx > finger->wy) { abs->width_major = finger->wx; abs->width_minor = finger->wy; abs->orientation = 0; } else { abs->width_major = finger->wy; abs->width_minor = finger->wx; abs->orientation = 1; } abs->pressure = finger->z; abs->x = abs->raw_x; abs->y = abs->raw_y; if (abs->type == F12_PALM_STATUS) { switch (ts->pdata->role->palm_ctrl_mode) { case PALM_REJECT_DRIVER: case PALM_REPORT: abs->pressure = MAX_PRESSURE; ts->palm.curr_mask[i] = 1; ts->palm.coordinate[i].x = abs->x; ts->palm.coordinate[i].y = abs->y; break; case PALM_REJECT_FW: default: break; } } if (ts->pdata->role->ghost->pressure_zero_chk && abs->pressure == 0) ts->pdata->role->ghost->pressure_zero = true; if (ts->pdata->role->ghost->pressure_high_chk && ts->pdata->panel_id == 0 && abs->pressure >= 250) ts->pdata->role->ghost->pressure_high = true; curr_data->id_mask |= (0x1 << i); curr_data->total_num++; TOUCH_D(DEBUG_GET_DATA, "<%d> type[%d] pos(%4d,%4d) w_m[%2d] w_n[%2d] o[%2d] p[%2d]\n", i, abs->type, abs->x, abs->y, abs->width_major, abs->width_minor, abs->orientation, abs->pressure); finger_index++; if (curr_data->touch->pressure >= 250) odd_zvalue = true; } ret = synaptics_ts_palm_control(ts); if (ret == NO_FILTER) { memset(curr_data, 0, sizeof(struct touch_data)); return NO_FILTER; } TOUCH_D(DEBUG_GET_DATA, "ID[0x%x] Total_num[%d]\n", curr_data->id_mask, curr_data->total_num); if (ts->lpwg_ctrl.password_enable && wake_lock_active(&ts->timer_wake_lock)) { if (curr_data->id_mask & ~(prev_data->id_mask)) { /* password-matching will be failed */ if (cancel_delayed_work(&ts->work_timer)) { ts->pw_data.data_num = 1; queue_delayed_work(touch_wq, &ts->work_timer, msecs_to_jiffies(UEVENT_DELAY)); } } return IGNORE_EVENT_BUT_SAVE_IT; } if (ts->lpwg_ctrl.password_enable && atomic_read(&ts->lpwg_ctrl.is_suspend) == 1) { TOUCH_I("%s:ignore abs interrupt in suspend\n", __func__); return IGNORE_EVENT; } } else if (ts->ts_data.interrupt_status_reg & INTERRUPT_MASK_FLASH) { TOUCH_E("%s: INTERRUPT_MASK_FLASH!\n", __func__); if (ts->lpwg_ctrl.screen) return ERROR; else return ERROR_IN_LPWG; } else if (ts->ts_data.interrupt_status_reg & INTERRUPT_MASK_STATUS) { TOUCH_I("%s: INTERRUPT_MASK_STATUS!\n", __func__); TOUCH_I("(lpwg_mode:%d, screen:%d, power_state:%d)\n", ts->lpwg_ctrl.lpwg_mode, ts->lpwg_ctrl.screen, power_state); if (ts->lpwg_ctrl.screen) return IGNORE_EVENT; else return ERROR_IN_LPWG; } else { return IGNORE_EVENT; } if 
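/* a pressure reading of 250 or more was seen; PLG446 panels log extra noise registers */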
(odd_zvalue && is_product(ts, "PLG446", 6)) { odd_zvalue = false; DO_SAFE(synaptics_ts_debug_noise(client, curr_data, prev_data), error); } if ((ts->ts_state_flag.ts_noise_log_flag == TS_NOISE_LOG_ENABLE) || (ts->ts_state_flag.check_noise_menu == MENU_ENTER)) DO_SAFE(synaptics_ts_noise_log(client, curr_data, prev_data), error); return NO_ERROR; error: TOUCH_E("%s, %d : get data failed\n", __func__, __LINE__); return ERROR; } enum error_type synaptics_ts_filter(struct i2c_client *client, struct touch_data *curr_data, const struct touch_data *prev_data) { /* struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client);*/ int i = 0; for (i = 0; i < curr_data->total_num; i++) { if (curr_data->touch[i].type == HOVER_TYPE) curr_data->touch[i].pressure = 0; else if (curr_data->touch[i].type == PALM_TYPE) curr_data->touch[i].pressure = MAX_PRESSURE; else if (curr_data->touch[i].pressure == MAX_PRESSURE) curr_data->touch[i].pressure = MAX_PRESSURE - 1; } return NO_ERROR; } enum error_type synaptics_ts_power(struct i2c_client *client, int power_ctrl) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); int ret = 0; TOUCH_TRACE(); if (ts->pdata->swipe_pwr_ctr == SKIP_PWR_CON) { TOUCH_I("%s : Skip power_control (swipe_pwr_ctr = %d)\n", __func__, ts->pdata->swipe_pwr_ctr); power_state = power_ctrl; TOUCH_I("%s : power_state[%d]\n", __func__, power_state); return NO_ERROR; } switch (power_ctrl) { case POWER_OFF: if (ts->swipe.support_swipe) print_swipe_fail_reason(ts); ts->is_init = 0; if (ts->pdata->reset_pin > 0) gpio_direction_output(ts->pdata->reset_pin, 0); if (ts->pdata->pwr->vio_control) if (ts->pdata->vio_pin > 0) gpio_direction_output(ts->pdata->vio_pin, 0); if (ts->pdata->pwr->use_regulator) { if (ts->pdata->pwr->vio_control) { if (regulator_is_enabled(ts->vreg_vio)) regulator_disable(ts->vreg_vio); } if (regulator_is_enabled(ts->vreg_vdd)) regulator_disable(ts->vreg_vdd); } break; case POWER_ON: ts->is_init = 0; if (ts->pdata->pwr->use_regulator) { if (!regulator_is_enabled(ts->vreg_vdd)) ret = regulator_enable(ts->vreg_vdd); if (ts->pdata->pwr->vio_control) { if (!regulator_is_enabled(ts->vreg_vio)) ret = regulator_enable(ts->vreg_vio); } } if (ts->pdata->pwr->vio_control) { if (ts->pdata->vio_pin > 0) gpio_direction_output(ts->pdata->vio_pin, 1); } if (ts->pdata->reset_pin > 0) gpio_direction_output(ts->pdata->reset_pin, 1); break; case POWER_SLEEP: if (!ts->lpwg_ctrl.lpwg_is_enabled && is_product(ts, "PLG349", 6)) sleep_control(ts, 0, 1); break; case POWER_WAKE: break; case POWER_SLEEP_STATUS: sleep_control(ts, ts->lpwg_ctrl.sensor, 0); power_ctrl = POWER_SLEEP; break; default: break; } power_state = power_ctrl; TOUCH_I("%s : power_state[%d]\n", __func__, power_state); return NO_ERROR; } enum error_type synaptics_ts_ic_ctrl(struct i2c_client *client, u8 code, u32 value, u32 *ret) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); u8 buf = 0; u8 buf_array[2] = {0}; switch (code) { case IC_CTRL_READ: DO_SAFE(touch_i2c_read(client, value, 1, &buf), error); *ret = (u32)buf; break; case IC_CTRL_WRITE: DO_SAFE(touch_i2c_write_byte(client, ((value & 0xFFF0) >> 8), (value & 0xFF)), error); break; case IC_CTRL_BASELINE_REBASE: DO_SAFE(synaptics_ts_page_data_write_byte(client, ANALOG_PAGE, ANALOG_COMMAND_REG, value), error); break; case IC_CTRL_REPORT_MODE: if (value == REDUCED_MODE) buf_array[0] = buf_array[1] = ts->pdata->role->delta_pos_threshold; DO_SAFE(touch_i2c_write(client, ts->f12_reg.ctrl[20], 2, 
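/* f12_reg.ctrl[20] holds the X/Y delta-position (motion suppression) thresholds */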
buf_array), error);
		break;
	case IC_CTRL_THERMAL:
		TOUCH_I("Driver Thermal Control Skip... !!\n");
		break;
	case IC_CTRL_RESET:
		ts->is_init = 0;
		lpwg_by_lcd_notifier = false;
		DO_SAFE(touch_i2c_write_byte(client, DEVICE_COMMAND_REG,
					(value & 0xFF)), error);
		break;
	default:
		break;
	}

	return NO_ERROR;
error:
	TOUCH_E("%s, %d : IC control failed\n", __func__, __LINE__);
	return ERROR;
}

int compare_fw_version(struct i2c_client *client,
		struct touch_fw_info *fw_info)
{
	struct synaptics_ts_data *ts =
		(struct synaptics_ts_data *)get_touch_handle(client);
	int i = 0;

	if (ts->fw_info.version[0] > 0x50) {
		if (ts->fw_info.img_version[0] > 0x50) {
			TOUCH_D(DEBUG_BASE_INFO,
					"product_id[%s(ic):%s(fw)] version[%s(ic):%s(fw)]\n",
					ts->fw_info.product_id,
					ts->fw_info.img_product_id,
					ts->fw_info.version,
					ts->fw_info.img_version);
			if (strncmp(ts->fw_info.version,
						ts->fw_info.img_version, 4)) {
				TOUCH_D(DEBUG_BASE_INFO, "version mismatch.\n");
				return 1;
			} else {
				goto no_upgrade;
			}
		} else {
			TOUCH_D(DEBUG_BASE_INFO,
					"product_id[%s(ic):%s(fw)] version[%s(ic):V%d.%02d(fw)]\n",
					ts->fw_info.product_id,
					ts->fw_info.img_product_id,
					ts->fw_info.version,
					(ts->fw_info.img_version[3] & 0x80 ? 1 : 0),
					ts->fw_info.img_version[3] & 0x7F);
			if (strncmp(ts->fw_info.version,
						ts->fw_info.img_version, 4)) {
				TOUCH_D(DEBUG_BASE_INFO, "version mismatch.\n");
				return 1;
			} else {
				goto no_upgrade;
			}
		}
	} else {
		if (!(ts->fw_info.version[3] & 0x80)) {
			if ((ts->fw_info.version[3] & 0x7F) == 0) {
				TOUCH_D(DEBUG_BASE_INFO,
						"FW version is wrong.[V%d.%02d], need to upgrade!\n",
						ts->fw_info.version[3] & 0x80 ? 1 : 0,
						ts->fw_info.version[3] & 0x7F);
				return 1;
			} else if (((ts->fw_info.version[3] & 0x7F) == 40) &&
					is_product(ts, "PLG446", 6)) {
				TOUCH_D(DEBUG_BASE_INFO,
						"FW version is Test Version.[V%d.%02d]\n",
						(ts->fw_info.version[3] & 0x80 ? 1 : 0),
						ts->fw_info.version[3] & 0x7F);
				TOUCH_D(DEBUG_BASE_INFO,
						"Need upgrade for DV Sample\n");
				return 1;
			} else {
				TOUCH_D(DEBUG_BASE_INFO,
						"FW version is Test Version.[V%d.%02d]\n",
						(ts->fw_info.version[3] & 0x80 ? 1 : 0),
						ts->fw_info.version[3] & 0x7F);
				goto no_upgrade;
			}
		}
		if (ts->fw_info.img_version[0] > 0x50) {
			TOUCH_D(DEBUG_BASE_INFO,
					"product_id[%s(ic):%s(fw)] fw_version[V%d.%02d(ic):%s(fw)]\n",
					ts->fw_info.product_id,
					ts->fw_info.img_product_id,
					(ts->fw_info.version[3] & 0x80 ? 1 : 0),
					ts->fw_info.version[3] & 0x7F,
					ts->fw_info.img_version);
			if (strncmp(ts->fw_info.version,
						ts->fw_info.img_version, 4)) {
				TOUCH_D(DEBUG_BASE_INFO, "version mismatch.\n");
				return 1;
			} else {
				goto no_upgrade;
			}
		} else {
			TOUCH_D(DEBUG_BASE_INFO, "product_id[%s(ic):%s(fw)]\n",
					ts->fw_info.product_id,
					ts->fw_info.img_product_id);
			TOUCH_D(DEBUG_BASE_INFO,
					"ic_version[V%d.%02d(0x%02X 0x%02X 0x%02X 0x%02X)]\n",
					(ts->fw_info.version[3] & 0x80 ? 1 : 0),
					ts->fw_info.version[3] & 0x7F,
					ts->fw_info.version[0],
					ts->fw_info.version[1],
					ts->fw_info.version[2],
					ts->fw_info.version[3]);
			TOUCH_D(DEBUG_BASE_INFO,
					"version[V%d.%02d(0x%02X 0x%02X 0x%02X 0x%02X)]\n",
					(ts->fw_info.img_version[3] & 0x80 ? 1 : 0),
					ts->fw_info.img_version[3] & 0x7F,
					ts->fw_info.img_version[0],
					ts->fw_info.img_version[1],
					ts->fw_info.img_version[2],
					ts->fw_info.img_version[3]);
			for (i = 0; i < FW_VER_INFO_NUM; i++) {
				if (ts->fw_info.version[i] !=
						ts->fw_info.img_version[i]) {
					TOUCH_D(DEBUG_BASE_INFO,
							"version mismatch. ic_version[%d]:0x%02X != version[%d]:0x%02X\n",
							i, ts->fw_info.version[i],
							i, ts->fw_info.img_version[i]);
					return 1;
				}
			}
			goto no_upgrade;
		}
	}

no_upgrade:
	TOUCH_D(DEBUG_BASE_INFO | DEBUG_FW_UPGRADE,
			"need not fw version upgrade.\n");
	return 0;
}

enum error_type synaptics_ts_fw_upgrade(struct i2c_client *client,
		struct touch_fw_info *info, struct touch_firmware_module *fw)
{
	struct synaptics_ts_data *ts =
		(struct synaptics_ts_data *)get_touch_handle(client);
	int need_upgrade = 0;
	int rc = 0;
	const struct firmware *fw_entry = NULL;
	const u8 *firmware = NULL;

	if (info->fw_path) {
		TOUCH_D(DEBUG_BASE_INFO, "IC_product_id: %s\n",
				ts->fw_info.product_id);
		rc = request_firmware(&fw_entry, info->fw_path,
				&ts->client->dev);
		if (rc != 0) {
			TOUCH_E("request_firmware() failed %d\n", rc);
			goto error;
		}
	} else {
		TOUCH_E("error get fw_path\n");
		goto error;
	}

	firmware = fw_entry->data;

	memcpy(ts->fw_info.img_product_id,
			&firmware[ts->pdata->fw_pid_addr], 6);
	memcpy(ts->fw_info.img_version,
			&firmware[ts->pdata->fw_ver_addr], 4);

	if (info->force_upgrade) {
		TOUCH_D(DEBUG_BASE_INFO | DEBUG_FW_UPGRADE,
				"FW: need_upgrade[%d] force[%d] file[%s]\n",
				fw->need_upgrade, info->force_upgrade,
				info->fw_path);
		goto firmware_up;
	}

	if (info->force_upgrade_cat) {
		TOUCH_D(DEBUG_BASE_INFO | DEBUG_FW_UPGRADE,
				"FW: need_upgrade[%d] force[%d] file[%s]\n",
				fw->need_upgrade, info->force_upgrade,
				info->fw_path);
		goto firmware_up;
	}

	need_upgrade = !strncmp(ts->fw_info.product_id,
			ts->fw_info.img_product_id,
			sizeof(ts->fw_info.product_id));

	/* Force Upgrade for P1 on 1st cut temporarily */
	if (is_product(ts, "s3320", 5) ||
			is_img_product(ts, "PLG468", 6))
		need_upgrade = 1;

	TOUCH_I("[%s] img_product_id : %s\n",
			__func__, ts->fw_info.img_product_id);

	rc = compare_fw_version(client, info);
	need_upgrade = need_upgrade && rc;

	TOUCH_I("ts_need_upgrade = %d, need_upgrade = %d\n",
			ts->pdata->fw->need_upgrade, need_upgrade);
	need_upgrade = ts->pdata->fw->need_upgrade & need_upgrade;

	if (need_upgrade || ts->fw_info.need_rewrite_firmware) {
		TOUCH_D(DEBUG_BASE_INFO | DEBUG_FW_UPGRADE,
				"FW: start-upgrade - need[%d] rewrite[%d]\n",
				need_upgrade,
				ts->fw_info.need_rewrite_firmware);
		if (info->fw_path != NULL) {
			TOUCH_I("FW: need_upgrade[%d] force[%d] file[%s]\n",
					fw->need_upgrade, info->force_upgrade,
					info->fw_path);
			goto firmware_up;
		} else {
			goto firmware_up_error;
		}
		/* it will be reset and initialized
		 * automatically by lge_touch_core. */
	}

	release_firmware(fw_entry);
	return NO_UPGRADE;

firmware_up:
	ts->is_probed = 0;
	ts->is_init = 0;
	/* During upgrading, interrupt will be ignored.
*/ info->force_upgrade = 0; info->force_upgrade_cat = 0; need_scan_pdt = true; DO_SAFE(FirmwareUpgrade(ts, fw_entry), error); release_firmware(fw_entry); return NO_ERROR; firmware_up_error: release_firmware(fw_entry); return ERROR; error: return ERROR; } enum error_type synaptics_ts_notify(struct i2c_client *client, u8 code, u32 value) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); switch (code) { case NOTIFY_TA_CONNECTION: queue_delayed_work(touch_wq, &ts->work_sleep, msecs_to_jiffies(0)); break; case NOTIFY_TEMPERATURE_CHANGE: break; case NOTIFY_PROXIMITY: break; default: break; } return NO_ERROR; } enum error_type synaptics_ts_suspend(struct i2c_client *client) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); if (ts->pdata->role->use_hover_finger && prox_fhandler.inserted && prox_fhandler.initialized) prox_fhandler.exp_fn->suspend(ts); if (!atomic_read(&ts->lpwg_ctrl.is_suspend)) { DO_SAFE(lpwg_control(ts, ts->lpwg_ctrl.lpwg_mode), error); atomic_set(&ts->lpwg_ctrl.is_suspend, 1); } ts->lpwg_ctrl.screen = 0; return NO_ERROR; error: return ERROR; } enum error_type synaptics_ts_resume(struct i2c_client *client) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); if (ts->pdata->role->use_hover_finger && prox_fhandler.inserted && prox_fhandler.initialized) prox_fhandler.exp_fn->resume(ts); cancel_delayed_work_sync(&ts->work_timer); if (wake_lock_active(&ts->timer_wake_lock)) wake_unlock(&ts->timer_wake_lock); atomic_set(&ts->lpwg_ctrl.is_suspend, 0); ts->lpwg_ctrl.screen = 1; return NO_ERROR; } enum error_type synaptics_ts_lpwg(struct i2c_client *client, u32 code, int64_t value, struct point *data) { int i; u8 buffer[50] = {0}; struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); u8 mode = ts->lpwg_ctrl.lpwg_mode; u8 doubleTap_area_reg_addr = ts->f12_reg.ctrl[18]; switch (code) { case LPWG_READ: memcpy(data, ts->pw_data.data, sizeof(struct point) * ts->pw_data.data_num); data[ts->pw_data.data_num].x = -1; data[ts->pw_data.data_num].y = -1; /* '-1' should be assigned to the last data. 
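The (-1, -1) pair is a sentinel marking the end of the recorded tap coordinates.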
Each data should be converted to LCD-resolution.*/ memset(ts->pw_data.data, -1, sizeof(struct point) * ts->pw_data.data_num); break; case LPWG_ENABLE: if (!atomic_read(&ts->lpwg_ctrl.is_suspend)) ts->lpwg_ctrl.lpwg_mode = value; break; case LPWG_LCD_X: case LPWG_LCD_Y: /* If touch-resolution is not same with LCD-resolution, position-data should be converted to LCD-resolution.*/ break; case LPWG_ACTIVE_AREA_X1: for (i = 0; i < 2; i++) { synaptics_ts_page_data_read(client, COMMON_PAGE, doubleTap_area_reg_addr, i + 1, buffer); if (i == 0) buffer[i] = value; else buffer[i] = value >> 8; synaptics_ts_page_data_write(client, COMMON_PAGE, doubleTap_area_reg_addr, i + 1, buffer); } break; case LPWG_ACTIVE_AREA_X2: for (i = 4; i < 6; i++) { synaptics_ts_page_data_read(client, COMMON_PAGE, doubleTap_area_reg_addr, i + 1, buffer); if (i == 4) buffer[i] = value; else buffer[i] = value >> 8; synaptics_ts_page_data_write(client, COMMON_PAGE, doubleTap_area_reg_addr, i + 1, buffer); } break; case LPWG_ACTIVE_AREA_Y1: for (i = 2; i < 4; i++) { synaptics_ts_page_data_read(client, COMMON_PAGE, doubleTap_area_reg_addr, i + 1, buffer); if (i == 2) buffer[i] = value; else buffer[i] = value >> 8; synaptics_ts_page_data_write(client, COMMON_PAGE, doubleTap_area_reg_addr, i + 1, buffer); } break; case LPWG_ACTIVE_AREA_Y2: /* Quick Cover Area*/ for (i = 6; i < 8; i++) { synaptics_ts_page_data_read(client, COMMON_PAGE, doubleTap_area_reg_addr, i + 1, buffer); if (i == 6) buffer[i] = value; else buffer[i] = value >> 8; synaptics_ts_page_data_write(client, COMMON_PAGE, doubleTap_area_reg_addr, i + 1, buffer); } break; case LPWG_TAP_COUNT: ts->pw_data.tap_count = value; if (ts->lpwg_ctrl.password_enable) tci_control(ts, TAP_COUNT_CTRL, (u8)ts->pw_data.tap_count); break; case LPWG_LENGTH_BETWEEN_TAP: if (ts->lpwg_ctrl.double_tap_enable || ts->lpwg_ctrl.password_enable) tci_control(ts, TAP_DISTANCE_CTRL, value); break; case LPWG_EARLY_SUSPEND: if (!mode) break; /* wakeup gesture enable */ if (value) { if (atomic_read(&ts->lpwg_ctrl.is_suspend) == 1 && (power_state == POWER_OFF || power_state == POWER_SLEEP)) ts->is_init = 0; DO_SAFE(lpwg_control(ts, 0), error); atomic_set(&ts->lpwg_ctrl.is_suspend, 0); } else { if (is_product(ts, "PLG349", 6)) set_doze_param(ts, 3); DO_SAFE(lpwg_control(ts, ts->lpwg_ctrl.lpwg_mode), error); atomic_set(&ts->lpwg_ctrl.is_suspend, 1); } break; case LPWG_SENSOR_STATUS: if (!mode) break; if (value) { /* Far */ DO_SAFE(lpwg_control(ts, mode), error); } else { /* Near */ if (ts->lpwg_ctrl.password_enable && wake_lock_active( &ts->timer_wake_lock)) { cancel_delayed_work_sync(&ts->work_timer); tci_control(ts, REPORT_MODE_CTRL, 1); wake_unlock(&ts->timer_wake_lock); } } break; case LPWG_DOUBLE_TAP_CHECK: ts->pw_data.double_tap_check = value; if (ts->lpwg_ctrl.password_enable) tci_control(ts, INTERRUPT_DELAY_CTRL, value); break; case LPWG_REPLY: if (ts->pdata->role->use_lpwg_all) { if (atomic_read(&ts->lpwg_ctrl.is_suspend) == 0) { TOUCH_I("%s : screen on\n", __func__); break; } DO_SAFE(lpwg_update_all(ts, 1), error); } else { if (ts->lpwg_ctrl.password_enable && !value) DO_SAFE(lpwg_control(ts, mode), error); } break; case LPWG_UPDATE_ALL: { int *v = (int *) value; int mode = *(v + 0); int screen = *(v + 1); int sensor = *(v + 2); int qcover = *(v + 3); ts->lpwg_ctrl.lpwg_mode = mode; ts->lpwg_ctrl.screen = screen; ts->lpwg_ctrl.sensor = sensor; ts->lpwg_ctrl.qcover = qcover; TOUCH_I( "LPWG_UPDATE_ALL: mode[%s], screen[%s], sensor[%s], qcover[%s]\n", ts->lpwg_ctrl.lpwg_mode ? 
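/* mode/screen/sensor/qcover were unpacked from the value array above */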
"ENABLE" : "DISABLE", ts->lpwg_ctrl.screen ? "ON" : "OFF", ts->lpwg_ctrl.sensor ? "FAR" : "NEAR", ts->lpwg_ctrl.qcover ? "CLOSE" : "OPEN"); DO_SAFE(lpwg_update_all(ts, 1), error); break; } /* LPWG On Sequence has to be */ /* after Display off callback timing. */ case LPWG_INCELL_LPWG_ON: if (is_product(ts, "PLG446", 6)) { lpwg_by_lcd_notifier = true; set_rebase_param(ts, 0); tci_control(ts, REPORT_MODE_CTRL, 1); } else if (is_product(ts, "PLG468", 6)) { TOUCH_I("[%s] CONTROL_REG : DEVICE_CONTROL_NOSLEEP\n", __func__); DO_SAFE(touch_i2c_write_byte(client, DEVICE_CONTROL_REG, DEVICE_CONTROL_NOSLEEP | DEVICE_CONTROL_CONFIGURED), error); tci_control(ts, REPORT_MODE_CTRL, 1); lpwg_by_lcd_notifier = true; } /* Protocol 9 enable for sleep control */ ts->lpwg_ctrl.protocol9_sleep_flag = true; TOUCH_D(DEBUG_BASE_INFO, "Protocol 9 enable!\n"); break; case LPWG_INCELL_LPWG_OFF: if (is_product(ts, "PLG446", 6)) { TOUCH_I("[%s] DEVICE_CONTROL_NORMAL_OP\n", __func__); DO_SAFE(touch_i2c_write_byte(client, DEVICE_CONTROL_REG, DEVICE_CONTROL_NORMAL_OP | DEVICE_CONTROL_CONFIGURED), error); } /* normal */ tci_control(ts, REPORT_MODE_CTRL, 0); lpwg_by_lcd_notifier = false; /* Protocol 9 disable for sleep control */ ts->lpwg_ctrl.protocol9_sleep_flag = false; TOUCH_D(DEBUG_BASE_INFO, "Protocol 9 disable!\n"); break; case LPWG_INCELL_NO_SLEEP: msleep(20); TOUCH_I("[%s] CONTROL_REG : DEVICE_CONTROL_NOSLEEP\n", __func__); DO_SAFE(touch_i2c_write_byte(client, DEVICE_CONTROL_REG, DEVICE_CONTROL_NOSLEEP | DEVICE_CONTROL_CONFIGURED), error); if (is_product(ts, "PLG446", 6)) mdelay(30); break; default: break; } return NO_ERROR; error: return ERROR; } static void synapitcs_change_ime_status(struct i2c_client *client, int ime_status) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); u8 udata[5] = {0, }; u8 drumming_address = ts->f12_reg.ctrl[10]; TOUCH_I("%s : IME STATUS is [ %d ]!!!\n", __func__, ime_status); touch_i2c_read(ts->client, drumming_address, 5, udata); if (ime_status) { TOUCH_I("%s : IME on !!\n", __func__); udata[3] = 0x08;/*Drumming Acceleration Threshold*/ udata[4] = 0x05;/*Minimum Drumming Separation*/ if (touch_i2c_write(ts->client, drumming_address, 5, udata) < 0) { TOUCH_E("%s : Touch i2c write fail !!\n", __func__); } } else { udata[3] = 0x0f; /*Drumming Acceleration Threshold*/ udata[4] = 0x0a; /*Minimum Drumming Separation*/ if (touch_i2c_write(ts->client, drumming_address, 5, udata) < 0) { TOUCH_E("%s : Touch i2c write fail !!\n", __func__); } TOUCH_I("%s : IME Off\n", __func__); } TOUCH_I("%s : Done !!\n", __func__); return; } static void synaptics_toggle_swipe(struct i2c_client *client) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); if (ts->swipe.support_swipe == NO_SUPPORT_SWIPE) { TOUCH_I("%s: support_swipe:0x%02X\n", __func__, ts->swipe.support_swipe); return; } if (power_state == POWER_OFF) { TOUCH_I("%s: power_state:%d\n", __func__, power_state); return; } TOUCH_I("%s: [S/Q/P/L] = [%d/%d/%d/%d]\n", __func__, ts->lpwg_ctrl.screen, ts->lpwg_ctrl.qcover, power_state, ts->pdata->lockscreen_stat); if (!ts->lpwg_ctrl.screen && !ts->lpwg_ctrl.qcover && (power_state == POWER_SLEEP) && ts->pdata->lockscreen_stat) swipe_enable(ts); else swipe_disable(ts); return; } static int get_type_bootloader(struct i2c_client *client) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); u8 temp_pid[11] = {0,}; DO_SAFE(touch_i2c_read(ts->client, PRODUCT_ID_REG, sizeof(ts->fw_info.product_id) - 1, 
ts->fw_info.product_id), error); TOUCH_I("[%s]IC_product_id: %s\n" , __func__, ts->fw_info.product_id); if (is_product(ts, "S332U", 5) || is_product(ts, "S3320T", 6)) { DO_SAFE(touch_i2c_read(ts->client, FLASH_CONFIG_ID_REG, sizeof(temp_pid) - 1, temp_pid), error); memset(ts->fw_info.product_id, 0, sizeof(ts->fw_info.product_id)); memcpy(ts->fw_info.product_id, &temp_pid[4], 6); TOUCH_I("[%s] Product_ID_Reset ! , addr = 0x%x, P_ID = %s\n", __func__, FLASH_CONFIG_ID_REG, ts->fw_info.product_id); return BL_VER_HIGHER; } return BL_VER_LOWER; error: return -EPERM; } static int set_doze_param(struct synaptics_ts_data *ts, int value) { u8 buf_array[6] = {0}; if (ts->pdata->panel_id) { TOUCH_D(DEBUG_BASE_INFO, "panel_id = %d, ignore %s\n", ts->pdata->panel_id, __func__); return 0; } touch_i2c_read(ts->client, ts->f12_reg.ctrl[27], 6, buf_array); /* max active duration */ if (ts->pw_data.tap_count < 3) buf_array[3] = 3; else buf_array[3] = 3 + ts->pw_data.tap_count; buf_array[2] = 0x0C; /* False Activation Threshold */ buf_array[4] = 0x01; /* Timer 1 */ buf_array[5] = 0x01; /* Max Active Duration Timeout */ touch_i2c_write(ts->client, ts->f12_reg.ctrl[27], 6, buf_array); DO_SAFE(touch_i2c_write_byte(ts->client, DOZE_INTERVAL_REG, 3), error); DO_SAFE(touch_i2c_write_byte(ts->client, DOZE_WAKEUP_THRESHOLD_REG, 30), error); return 0; error: TOUCH_E("%s : failed to set doze interval\n", __func__); return -EPERM; } enum window_status synapitcs_check_crack(struct i2c_client *client) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); char result[2] = {0x00, }; if (need_scan_pdt) { SCAN_PDT(); need_scan_pdt = false; } touch_disable_irq(ts->client->irq); F54Test('l', (int)ts->pdata->role->crack->min_cap_value, result); touch_enable_irq(ts->client->irq); TOUCH_I("%s : check window crack = %s\n", __func__, result); after_crack_check = 1; /* set crack check flag */ if (strncmp(result, "1", 1) == 0) return CRACK; else return NO_CRACK; } static void synaptics_change_sleepmode(struct i2c_client *client) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); u8 curr[2] = {0}; if (is_product(ts, "PLG468", 6)) { DO_SAFE(synaptics_ts_page_data_read(client, LPWG_PAGE, LPWG_PARTIAL_REG + 71, 1, curr), error); TOUCH_I("%s: prev:0x%02X, next:0x%02X (TA :%d)\n", __func__, curr[0], touch_ta_status ? 
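/* bit 1 of LPWG_PARTIAL_REG + 71 mirrors charger (TA) presence */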
(curr[0] & 0xff) | 0x02 : (curr[0] & 0xff) & 0xfd, touch_ta_status); if (touch_ta_status) curr[0] = (curr[0] & 0xff) | 0x02; else curr[0] = (curr[0] & 0xff) & 0xfd; DO_SAFE(synaptics_ts_page_data_write(client, LPWG_PAGE, LPWG_PARTIAL_REG + 71, 1, curr), error); } else if (is_product(ts, "PLG446", 6)) { if (touch_ta_status == 2 || touch_ta_status == 3) { curr[0] = 0x01; DO_SAFE(synaptics_ts_page_data_write(client, LPWG_PAGE, LPWG_PARTIAL_REG + 4, 1, curr), error); DO_SAFE(touch_i2c_read(client, DEVICE_CONTROL_REG, 1, curr), error); DO_SAFE(touch_i2c_write_byte(client, DEVICE_CONTROL_REG, DEVICE_CONTROL_NOSLEEP | (curr[0] & 0xF8)), error); } else { curr[0] = 0x00; DO_SAFE(synaptics_ts_page_data_write(client, LPWG_PAGE, LPWG_PARTIAL_REG + 4, 1, curr), error); DO_SAFE(touch_i2c_read(client, DEVICE_CONTROL_REG, 1, curr), error); DO_SAFE(touch_i2c_write_byte(client, DEVICE_CONTROL_REG, DEVICE_CONTROL_NORMAL_OP | (curr[0] & 0xF8)), error); } } return; error: TOUCH_E("%s : failed to set sleep_mode\n", __func__); return; } static void set_param_incoming_call(struct i2c_client *client, int call_state) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); bool is_official_fw = 0; u8 fw_ver = 0; u8 buf[2] = {0}; u8 buffer[2] = {0}; u8 noise_floor = 35; u8 minpeak = 40; u8 finger_amplitude = 51; u8 call_noise_floor = 65; u8 call_minpeak = 70; u8 call_finger_amplitude = 67; is_official_fw = ((ts->fw_info.version[3] & 0x80) >> 7); fw_ver = (ts->fw_info.version[3] & 0x7F); if ((is_official_fw && fw_ver >= 21) || fw_ver >= 124) { if (!call_state) { buf[0] = noise_floor; buf[1] = minpeak; buffer[0] = buffer[1] = finger_amplitude; } else { buf[0] = call_noise_floor; buf[1] = call_minpeak; buffer[0] = buffer[1] = call_finger_amplitude; } DO_SAFE(touch_i2c_write(client, ts->f12_reg.ctrl[10], 2, buf), error); DO_SAFE(touch_i2c_read(client, ts->f12_reg.ctrl[10], 2, buf), error); DO_SAFE(touch_i2c_write(client, ts->f12_reg.ctrl[15], 2, buffer), error); DO_SAFE(touch_i2c_read(client, ts->f12_reg.ctrl[15], 2, buffer), error); TOUCH_I("%s : noise_floor(0x%02x), minpeak(0x%02x), finger_amplitude(0x%02x)\n", __func__, buf[0], buf[1], buffer[0]); } else { TOUCH_I("%s : Do not need param setting\n", __func__); } return; error: TOUCH_E("%s : failed to set param incoming_call_mode\n", __func__); return; } static void synaptics_ts_incoming_call(struct i2c_client *client, int value) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); u8 curr[2] = {0}; incoming_call_state = value; if (is_product(ts, "PLG468", 6)) { if (incoming_call_state) { DO_SAFE(synaptics_ts_page_data_read(client, LPWG_PAGE, LPWG_PARTIAL_REG + 71, 1, curr), error); curr[0] = (curr[0] & 0xff) & 0xfb; DO_SAFE(synaptics_ts_page_data_write(client, LPWG_PAGE, LPWG_PARTIAL_REG + 71, 1, curr), error); DO_SAFE(synaptics_ts_page_data_read(client, LPWG_PAGE, LPWG_PARTIAL_REG + 71, 1, curr), error); TOUCH_I("%s : incoming_call(%d) = 0x%02x\n", __func__, incoming_call_state, curr[0]); set_param_incoming_call(client, incoming_call_state); } else { set_param_incoming_call(client, incoming_call_state); DO_SAFE(synaptics_ts_page_data_read(client, LPWG_PAGE, LPWG_PARTIAL_REG + 71, 1, curr), error); curr[0] = (curr[0] & 0xff) | 0x04; DO_SAFE(synaptics_ts_page_data_write(client, LPWG_PAGE, LPWG_PARTIAL_REG + 71, 1, curr), error); DO_SAFE(synaptics_ts_page_data_read(client, LPWG_PAGE, LPWG_PARTIAL_REG + 71, 1, curr), error); TOUCH_I("%s : incoming_call(%d) = 0x%02x\n", __func__, incoming_call_state, 
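/* ordering: on call start the register is written before the thresholds change; on call end the thresholds are restored first */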
curr[0]); } } else { TOUCH_I("%s : Not incoming_call setting\n", __func__); } return; error: TOUCH_E("%s : failed to set incoming_call_mode\n", __func__); return; } enum error_type synaptics_ts_shutdown(struct i2c_client *client) { struct synaptics_ts_data *ts = (struct synaptics_ts_data *)get_touch_handle(client); TOUCH_TRACE(); if (is_product(ts, "PLG468", 6)) { if (ts->pdata->reset_pin > 0) gpio_direction_output(ts->pdata->reset_pin, 0); } return NO_ERROR; } static int synapitcs_ts_register_sysfs(struct kobject *k) { return sysfs_create_group(k, &synaptics_ts_attribute_group); } struct touch_device_driver synaptics_ts_driver = { .probe = synaptics_ts_probe, .remove = synaptics_ts_remove, .shutdown = synaptics_ts_shutdown, .suspend = synaptics_ts_suspend, .resume = synaptics_ts_resume, .init = synaptics_ts_init, .data = synaptics_ts_get_data, .filter = synaptics_ts_filter, .power = synaptics_ts_power, .ic_ctrl = synaptics_ts_ic_ctrl, .fw_upgrade = synaptics_ts_fw_upgrade, .notify = synaptics_ts_notify, .lpwg = synaptics_ts_lpwg, .ime_drumming = synapitcs_change_ime_status, .toggle_swipe = synaptics_toggle_swipe, .inspection_crack = synapitcs_check_crack, .register_sysfs = synapitcs_ts_register_sysfs, .incoming_call = synaptics_ts_incoming_call, }; static struct of_device_id match_table[] = { { .compatible = "synaptics,s3528",}, { }, }; static void async_touch_init(void *data, async_cookie_t cookie) { int panel_type = lge_get_panel(); TOUCH_D(DEBUG_BASE_INFO, "panel type is %d\n", panel_type); if (panel_type == 3) return; touch_driver_register(&synaptics_ts_driver, match_table); return; } static int __init touch_init(void) { TOUCH_TRACE(); /* async_schedule(async_touch_init, NULL); */ async_schedule(async_touch_init, NULL); return 0; } static void __exit touch_exit(void) { TOUCH_TRACE(); touch_driver_unregister(); } module_init(touch_init); module_exit(touch_exit); MODULE_AUTHOR("yehan.ahn@lge.com, hyesung.shin@lge.com"); MODULE_DESCRIPTION("LGE Touch Driver"); MODULE_LICENSE("GPL");
gpl-2.0
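A minimal standalone sketch of the firmware-version gate used by set_param_incoming_call() in the driver above, assuming the same bit layout (official-build flag in bit 7 of version[3], numeric version in bits 6..0) and the same 21/124 thresholds; the helper name and the sample bytes are illustrative only, not part of the driver.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Decode the packed version byte and apply the driver's gate:
 * official builds need v21+, unofficial (test) builds need v124+. */
static bool needs_call_param(uint8_t version_byte)
{
	bool is_official_fw = (version_byte & 0x80) >> 7;
	uint8_t fw_ver = version_byte & 0x7F;

	return (is_official_fw && fw_ver >= 21) || fw_ver >= 124;
}

int main(void)
{
	const uint8_t samples[] = { 0x95, 0x14, 0x7C, 0x10 };
	size_t i;

	for (i = 0; i < sizeof(samples); i++)
		printf("version[3]=0x%02X -> %s\n", samples[i],
		       needs_call_param(samples[i]) ? "apply call params" : "skip");
	return 0;
}

With these samples, 0x95 (official v21) and 0x7C (test v124) take the call-parameter path; the others fall through to the driver's "Do not need param setting" branch.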
greearb/iproute2-ct
tc/f_bpf.c
3
6927
/* * f_bpf.c BPF-based Classifier * * This program is free software; you can distribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Daniel Borkmann <daniel@iogearbox.net> */ #include <stdio.h> #include <stdlib.h> #include <linux/bpf.h> #include "utils.h" #include "tc_util.h" #include "bpf_util.h" static const enum bpf_prog_type bpf_type = BPF_PROG_TYPE_SCHED_CLS; static void explain(void) { fprintf(stderr, "Usage: ... bpf ...\n" "\n" "BPF use case:\n" " bytecode BPF_BYTECODE\n" " bytecode-file FILE\n" "\n" "eBPF use case:\n" " object-file FILE [ section CLS_NAME ] [ export UDS_FILE ]" " [ verbose ] [ direct-action ] [ skip_hw | skip_sw ]\n" " object-pinned FILE [ direct-action ] [ skip_hw | skip_sw ]\n" "\n" "Common remaining options:\n" " [ action ACTION_SPEC ]\n" " [ classid CLASSID ]\n" "\n" "Where BPF_BYTECODE := \'s,c t f k,c t f k,c t f k,...\'\n" "c,t,f,k and s are decimals; s denotes number of 4-tuples\n" "\n" "Where FILE points to a file containing the BPF_BYTECODE string,\n" "an ELF file containing eBPF map definitions and bytecode, or a\n" "pinned eBPF program.\n" "\n" "Where CLS_NAME refers to the section name containing the\n" "classifier (default \'%s\').\n" "\n" "Where UDS_FILE points to a unix domain socket file in order\n" "to hand off control of all created eBPF maps to an agent.\n" "\n" "ACTION_SPEC := ... look at individual actions\n" "NOTE: CLASSID is parsed as hexadecimal input.\n", bpf_prog_to_default_section(bpf_type)); } static void bpf_cbpf_cb(void *nl, const struct sock_filter *ops, int ops_len) { addattr16(nl, MAX_MSG, TCA_BPF_OPS_LEN, ops_len); addattr_l(nl, MAX_MSG, TCA_BPF_OPS, ops, ops_len * sizeof(struct sock_filter)); } static void bpf_ebpf_cb(void *nl, int fd, const char *annotation) { addattr32(nl, MAX_MSG, TCA_BPF_FD, fd); addattrstrz(nl, MAX_MSG, TCA_BPF_NAME, annotation); } static const struct bpf_cfg_ops bpf_cb_ops = { .cbpf_cb = bpf_cbpf_cb, .ebpf_cb = bpf_ebpf_cb, }; static int bpf_parse_opt(struct filter_util *qu, char *handle, int argc, char **argv, struct nlmsghdr *n) { const char *bpf_obj = NULL, *bpf_uds_name = NULL; struct tcmsg *t = NLMSG_DATA(n); unsigned int bpf_gen_flags = 0; unsigned int bpf_flags = 0; struct bpf_cfg_in cfg = {}; bool seen_run = false; bool skip_sw = false; struct rtattr *tail; int ret = 0; if (handle) { if (get_u32(&t->tcm_handle, handle, 0)) { fprintf(stderr, "Illegal \"handle\"\n"); return -1; } } if (argc == 0) return 0; tail = (struct rtattr *)(((void *)n) + NLMSG_ALIGN(n->nlmsg_len)); addattr_l(n, MAX_MSG, TCA_OPTIONS, NULL, 0); while (argc > 0) { if (matches(*argv, "run") == 0) { NEXT_ARG(); if (seen_run) duparg("run", *argv); opt_bpf: seen_run = true; cfg.type = bpf_type; cfg.argc = argc; cfg.argv = argv; if (bpf_parse_common(&cfg, &bpf_cb_ops) < 0) { fprintf(stderr, "Unable to parse bpf command line\n"); return -1; } argc = cfg.argc; argv = cfg.argv; bpf_obj = cfg.object; bpf_uds_name = cfg.uds; } else if (matches(*argv, "classid") == 0 || matches(*argv, "flowid") == 0) { unsigned int handle; NEXT_ARG(); if (get_tc_classid(&handle, *argv)) { fprintf(stderr, "Illegal \"classid\"\n"); return -1; } addattr32(n, MAX_MSG, TCA_BPF_CLASSID, handle); } else if (matches(*argv, "direct-action") == 0 || matches(*argv, "da") == 0) { bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT; } else if (matches(*argv, "skip_hw") == 0) { bpf_gen_flags |= TCA_CLS_FLAGS_SKIP_HW; } else if 
(matches(*argv, "skip_sw") == 0) { bpf_gen_flags |= TCA_CLS_FLAGS_SKIP_SW; skip_sw = true; } else if (matches(*argv, "action") == 0) { NEXT_ARG(); if (parse_action(&argc, &argv, TCA_BPF_ACT, n)) { fprintf(stderr, "Illegal \"action\"\n"); return -1; } continue; } else if (matches(*argv, "police") == 0) { NEXT_ARG(); if (parse_police(&argc, &argv, TCA_BPF_POLICE, n)) { fprintf(stderr, "Illegal \"police\"\n"); return -1; } continue; } else if (matches(*argv, "help") == 0) { explain(); return -1; } else { if (!seen_run) goto opt_bpf; fprintf(stderr, "What is \"%s\"?\n", *argv); explain(); return -1; } NEXT_ARG_FWD(); } if (skip_sw) cfg.ifindex = t->tcm_ifindex; if (bpf_load_common(&cfg, &bpf_cb_ops, n) < 0) { fprintf(stderr, "Unable to load program\n"); return -1; } if (bpf_gen_flags) addattr32(n, MAX_MSG, TCA_BPF_FLAGS_GEN, bpf_gen_flags); if (bpf_flags) addattr32(n, MAX_MSG, TCA_BPF_FLAGS, bpf_flags); tail->rta_len = (((void *)n) + n->nlmsg_len) - (void *)tail; if (bpf_uds_name) ret = bpf_send_map_fds(bpf_uds_name, bpf_obj); return ret; } static int bpf_print_opt(struct filter_util *qu, FILE *f, struct rtattr *opt, __u32 handle) { struct rtattr *tb[TCA_BPF_MAX + 1]; int dump_ok = 0; if (opt == NULL) return 0; parse_rtattr_nested(tb, TCA_BPF_MAX, opt); if (handle) print_0xhex(PRINT_ANY, "handle", "handle %#llx ", handle); if (tb[TCA_BPF_CLASSID]) { SPRINT_BUF(b1); print_string(PRINT_ANY, "flowid", "flowid %s ", sprint_tc_classid(rta_getattr_u32(tb[TCA_BPF_CLASSID]), b1)); } if (tb[TCA_BPF_NAME]) print_string(PRINT_ANY, "bpf_name", "%s ", rta_getattr_str(tb[TCA_BPF_NAME])); if (tb[TCA_BPF_FLAGS]) { unsigned int flags = rta_getattr_u32(tb[TCA_BPF_FLAGS]); if (flags & TCA_BPF_FLAG_ACT_DIRECT) print_bool(PRINT_ANY, "direct-action", "direct-action ", true); } if (tb[TCA_BPF_FLAGS_GEN]) { unsigned int flags = rta_getattr_u32(tb[TCA_BPF_FLAGS_GEN]); if (flags & TCA_CLS_FLAGS_SKIP_HW) print_bool(PRINT_ANY, "skip_hw", "skip_hw ", true); if (flags & TCA_CLS_FLAGS_SKIP_SW) print_bool(PRINT_ANY, "skip_sw", "skip_sw ", true); if (flags & TCA_CLS_FLAGS_IN_HW) print_bool(PRINT_ANY, "in_hw", "in_hw ", true); else if (flags & TCA_CLS_FLAGS_NOT_IN_HW) print_bool(PRINT_ANY, "not_in_hw", "not_in_hw ", true); } if (tb[TCA_BPF_OPS] && tb[TCA_BPF_OPS_LEN]) bpf_print_ops(tb[TCA_BPF_OPS], rta_getattr_u16(tb[TCA_BPF_OPS_LEN])); if (tb[TCA_BPF_ID]) dump_ok = bpf_dump_prog_info(f, rta_getattr_u32(tb[TCA_BPF_ID])); if (!dump_ok && tb[TCA_BPF_TAG]) { SPRINT_BUF(b); print_string(PRINT_ANY, "tag", "tag %s ", hexstring_n2a(RTA_DATA(tb[TCA_BPF_TAG]), RTA_PAYLOAD(tb[TCA_BPF_TAG]), b, sizeof(b))); } if (tb[TCA_BPF_POLICE]) { print_nl(); tc_print_police(f, tb[TCA_BPF_POLICE]); } if (tb[TCA_BPF_ACT]) tc_print_action(f, tb[TCA_BPF_ACT], 0); return 0; } struct filter_util bpf_filter_util = { .id = "bpf", .parse_fopt = bpf_parse_opt, .print_fopt = bpf_print_opt, };
gpl-2.0
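The BPF_BYTECODE string documented in explain() above ('s,c t f k,...', with s the tuple count) maps each 4-tuple onto the code/jt/jf/k fields of struct sock_filter, which bpf_cbpf_cb() then packs into TCA_BPF_OPS. Below is a hedged sketch of a parser for that format, assuming comma-separated tuples of whitespace-separated decimals; it is illustrative only and is not the parser iproute2 itself uses.

#include <stdio.h>
#include <linux/filter.h>

/* Parse "s,c t f k,c t f k,..." into ops[]; returns the instruction
 * count on success, -1 on malformed input or when s exceeds max. */
static int parse_bytecode(const char *str, struct sock_filter *ops, int max)
{
	int n, off, i;

	if (sscanf(str, "%d,%n", &n, &off) != 1 || n < 1 || n > max)
		return -1;
	str += off;
	for (i = 0; i < n; i++) {
		unsigned int c, t, f, k;

		if (sscanf(str, "%u %u %u %u%n", &c, &t, &f, &k, &off) != 4)
			return -1;
		ops[i] = (struct sock_filter){ .code = c, .jt = t, .jf = f, .k = k };
		str += off;
		if (*str == ',')
			str++;
	}
	return n;
}

int main(void)
{
	struct sock_filter ops[64];
	/* Match-all program: a single BPF_RET|BPF_K returning ~0,
	 * so every packet is accepted. */
	int n = parse_bytecode("1,6 0 0 4294967295", ops, 64);

	if (n < 0)
		return 1;
	printf("%d insn(s), first: code=0x%02x k=0x%08x\n", n, ops[0].code, ops[0].k);
	return 0;
}

The per-tuple "c t f k" decimals are the same shape tcpdump -ddd prints (count line first, one tuple per line), and the resulting string could be passed on a command line as, e.g., tc filter add dev eth0 parent ffff: bpf bytecode "1,6 0 0 4294967295" classid 1:1 (CLASSID being parsed as hexadecimal, per the usage text); device and parent handle here are hypothetical.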
rex-xxx/mt6572_x201
mediatek/kernel/drivers/conn_soc/drv_wlan/mt6582/wlan/mgmt/p2p_func.c
3
134695
#include "precomp.h" #ifdef __GNUC__ #pragma GCC diagnostic ignored "-Wformat" #endif APPEND_VAR_ATTRI_ENTRY_T txAssocRspAttributesTable[] = { { (P2P_ATTRI_HDR_LEN + P2P_ATTRI_MAX_LEN_STATUS) , NULL, p2pFuncAppendAttriStatusForAssocRsp } /* 0 */ // Status ,{ (P2P_ATTRI_HDR_LEN + P2P_ATTRI_MAX_LEN_EXT_LISTEN_TIMING), NULL, p2pFuncAppendAttriExtListenTiming } /* 8 */ }; APPEND_VAR_IE_ENTRY_T txProbeRspIETable[] = { { (ELEM_HDR_LEN + (RATE_NUM - ELEM_MAX_LEN_SUP_RATES)), NULL, bssGenerateExtSuppRate_IE } /* 50 */ ,{ (ELEM_HDR_LEN + ELEM_MAX_LEN_ERP), NULL, rlmRspGenerateErpIE } /* 42 */ ,{ (ELEM_HDR_LEN + ELEM_MAX_LEN_HT_CAP), NULL, rlmRspGenerateHtCapIE } /* 45 */ ,{ (ELEM_HDR_LEN + ELEM_MAX_LEN_HT_OP), NULL, rlmRspGenerateHtOpIE } /* 61 */ ,{ (ELEM_HDR_LEN + ELEM_MAX_LEN_RSN), NULL, rsnGenerateRSNIE } /* 48 */ ,{ (ELEM_HDR_LEN + ELEM_MAX_LEN_OBSS_SCAN), NULL, rlmRspGenerateObssScanIE } /* 74 */ ,{ (ELEM_HDR_LEN + ELEM_MAX_LEN_EXT_CAP), NULL, rlmRspGenerateExtCapIE } /* 127 */ ,{ (ELEM_HDR_LEN + ELEM_MAX_LEN_WPA), NULL, rsnGenerateWpaNoneIE } /* 221 */ ,{ (ELEM_HDR_LEN + ELEM_MAX_LEN_WMM_PARAM), NULL, mqmGenerateWmmParamIE } /* 221 */ }; /*----------------------------------------------------------------------------*/ /*! * @brief Function for requesting scan. There is an option to do ACTIVE or PASSIVE scan. * * @param eScanType - Specify the scan type of the scan request. It can be an ACTIVE/PASSIVE * Scan. * eChannelSet - Specify the preferred channel set. * A FULL scan would request a legacy full-channel normal scan. (usually ACTIVE) * A P2P_SOCIAL scan would scan 1+6+11 channels. (usually ACTIVE) * A SPECIFIC scan would scan only the specified channels, e.g. 1/6/11. (Passive Listen/Specific Search) * ucChannelNum - A specific channel number. (Only when channel is specified) * eBand - A specific band. (Only when channel is specified) * * * @return (none) */ /*----------------------------------------------------------------------------*/ VOID p2pFuncRequestScan ( IN P_ADAPTER_T prAdapter, IN P_P2P_SCAN_REQ_INFO_T prScanReqInfo ) { P_MSG_SCN_SCAN_REQ prScanReq = (P_MSG_SCN_SCAN_REQ)NULL; DEBUGFUNC("p2pFuncRequestScan()"); do { ASSERT_BREAK((prAdapter != NULL) && (prScanReqInfo != NULL)); if (prScanReqInfo->eChannelSet == SCAN_CHANNEL_SPECIFIED) { ASSERT_BREAK(prScanReqInfo->ucNumChannelList > 0); DBGLOG(P2P, LOUD, ("P2P Scan Request Channel:%d\n", prScanReqInfo->arScanChannelList[0].ucChannelNum)); } prScanReq = (P_MSG_SCN_SCAN_REQ)cnmMemAlloc(prAdapter, RAM_TYPE_MSG, sizeof(MSG_SCN_SCAN_REQ)); if (!prScanReq) { ASSERT(0); // Can't trigger SCAN FSM break; } prScanReq->rMsgHdr.eMsgId = MID_P2P_SCN_SCAN_REQ; prScanReq->ucSeqNum = ++prScanReqInfo->ucSeqNumOfScnMsg; prScanReq->ucNetTypeIndex = (UINT_8)NETWORK_TYPE_P2P_INDEX; prScanReq->eScanType = prScanReqInfo->eScanType; prScanReq->eScanChannel = prScanReqInfo->eChannelSet; prScanReq->u2IELen = 0; /* Copy IE for Probe Request. 
*/ kalMemCopy(prScanReq->aucIE, prScanReqInfo->aucIEBuf, prScanReqInfo->u4BufLength); prScanReq->u2IELen = (UINT_16)prScanReqInfo->u4BufLength; prScanReq->u2ChannelDwellTime = prScanReqInfo->u2PassiveDewellTime; switch (prScanReqInfo->eChannelSet) { case SCAN_CHANNEL_SPECIFIED: { UINT_32 u4Idx = 0; P_RF_CHANNEL_INFO_T prDomainInfo = (P_RF_CHANNEL_INFO_T)prScanReqInfo->arScanChannelList; if (prScanReqInfo->ucNumChannelList > MAXIMUM_OPERATION_CHANNEL_LIST) { prScanReqInfo->ucNumChannelList = MAXIMUM_OPERATION_CHANNEL_LIST; } for (u4Idx = 0; u4Idx < prScanReqInfo->ucNumChannelList; u4Idx++) { prScanReq->arChnlInfoList[u4Idx].ucChannelNum = prDomainInfo->ucChannelNum; prScanReq->arChnlInfoList[u4Idx].eBand = prDomainInfo->eBand; prDomainInfo++; } prScanReq->ucChannelListNum = prScanReqInfo->ucNumChannelList; } case SCAN_CHANNEL_FULL: case SCAN_CHANNEL_2G4: case SCAN_CHANNEL_P2P_SOCIAL: { UINT_8 aucP2pSsid[] = P2P_WILDCARD_SSID; COPY_SSID(prScanReq->aucSSID, prScanReq->ucSSIDLength, prScanReqInfo->rSsidStruct.aucSsid, prScanReqInfo->rSsidStruct.ucSsidLen); /* For compatible. */ if (EQUAL_SSID(aucP2pSsid, P2P_WILDCARD_SSID_LEN, prScanReq->aucSSID, prScanReq->ucSSIDLength)) { prScanReq->ucSSIDType = SCAN_REQ_SSID_P2P_WILDCARD; } else if (prScanReq->ucSSIDLength != 0) { prScanReq->ucSSIDType = SCAN_REQ_SSID_SPECIFIED; } } break; default: /* Currently there is no other scan channel set. */ ASSERT(FALSE); break; } mboxSendMsg(prAdapter, MBOX_ID_0, (P_MSG_HDR_T)prScanReq, MSG_SEND_METHOD_BUF); } while (FALSE); return; } /* p2pFuncRequestScan */ VOID p2pFuncCancelScan ( IN P_ADAPTER_T prAdapter, IN P_P2P_SCAN_REQ_INFO_T prScanInfo ) { P_MSG_SCN_SCAN_CANCEL prScanCancelMsg = (P_MSG_SCN_SCAN_CANCEL)NULL; do { ASSERT_BREAK((prAdapter != NULL) && (prScanInfo != NULL)); if (!prScanInfo->fgIsScanRequest) { break; } if (prScanInfo->ucSeqNumOfScnMsg) { /* There is a channel privilege on hand. */ DBGLOG(P2P, TRACE, ("P2P Cancel Scan\n")); prScanCancelMsg = (P_MSG_SCN_SCAN_CANCEL)cnmMemAlloc(prAdapter, RAM_TYPE_MSG, sizeof(MSG_SCN_SCAN_CANCEL)); if (!prScanCancelMsg) { /* Buffer not enough, can not cancel scan request. 
*/ DBGLOG(P2P, TRACE, ("Buffer not enough, can not cancel scan.\n")); ASSERT(FALSE); break; } prScanCancelMsg->rMsgHdr.eMsgId = MID_P2P_SCN_SCAN_CANCEL; prScanCancelMsg->ucNetTypeIndex = NETWORK_TYPE_P2P_INDEX; prScanCancelMsg->ucSeqNum = prScanInfo->ucSeqNumOfScnMsg++; prScanCancelMsg->fgIsChannelExt = FALSE; prScanInfo->fgIsScanRequest = FALSE; mboxSendMsg(prAdapter, MBOX_ID_0, (P_MSG_HDR_T)prScanCancelMsg, MSG_SEND_METHOD_BUF); } } while (FALSE); return; } /* p2pFuncCancelScan */ VOID p2pFuncSwitchOPMode ( IN P_ADAPTER_T prAdapter, IN P_BSS_INFO_T prP2pBssInfo, IN ENUM_OP_MODE_T eOpMode, IN BOOLEAN fgSyncToFW ) { do { ASSERT_BREAK((prAdapter != NULL) && (prP2pBssInfo != NULL) && (eOpMode < OP_MODE_NUM)); if (prP2pBssInfo->eCurrentOPMode != eOpMode) { DBGLOG(P2P, TRACE, ("p2pFuncSwitchOPMode: Switch to from %d, to %d.\n", prP2pBssInfo->eCurrentOPMode, eOpMode)); switch (prP2pBssInfo->eCurrentOPMode) { case OP_MODE_ACCESS_POINT: p2pFuncDissolve(prAdapter, prP2pBssInfo, TRUE, REASON_CODE_DEAUTH_LEAVING_BSS); p2pFsmRunEventStopAP(prAdapter, NULL); break; default: break; } prP2pBssInfo->eIntendOPMode = eOpMode; prP2pBssInfo->eCurrentOPMode = eOpMode; switch (eOpMode) { case OP_MODE_INFRASTRUCTURE: DBGLOG(P2P, TRACE, ("p2pFuncSwitchOPMode: Switch to Client.\n")); case OP_MODE_ACCESS_POINT: // if (!IS_BSS_ACTIVE(prP2pBssInfo)) { // SET_NET_ACTIVE(prAdapter, NETWORK_TYPE_P2P_INDEX); // nicActivateNetwork(prAdapter, NETWORK_TYPE_P2P_INDEX); // } /* Change interface address. */ if (eOpMode == OP_MODE_ACCESS_POINT) { DBGLOG(P2P, TRACE, ("p2pFuncSwitchOPMode: Switch to AP.\n")); prP2pBssInfo->ucSSIDLen = 0; } COPY_MAC_ADDR(prP2pBssInfo->aucOwnMacAddr, prAdapter->rWifiVar.aucInterfaceAddress); COPY_MAC_ADDR(prP2pBssInfo->aucBSSID, prAdapter->rWifiVar.aucInterfaceAddress); break; case OP_MODE_P2P_DEVICE: { /* Change device address. */ DBGLOG(P2P, TRACE, ("p2pFuncSwitchOPMode: Switch back to P2P Device.\n")); // if (!IS_BSS_ACTIVE(prP2pBssInfo)) { // SET_NET_ACTIVE(prAdapter, NETWORK_TYPE_P2P_INDEX); // nicActivateNetwork(prAdapter, NETWORK_TYPE_P2P_INDEX); // } p2pChangeMediaState(prAdapter, PARAM_MEDIA_STATE_DISCONNECTED); COPY_MAC_ADDR(prP2pBssInfo->aucOwnMacAddr, prAdapter->rWifiVar.aucDeviceAddress); COPY_MAC_ADDR(prP2pBssInfo->aucBSSID, prAdapter->rWifiVar.aucDeviceAddress); } break; default: // if (IS_BSS_ACTIVE(prP2pBssInfo)) { // UNSET_NET_ACTIVE(prAdapter, NETWORK_TYPE_P2P_INDEX); // nicDeactivateNetwork(prAdapter, NETWORK_TYPE_P2P_INDEX); // } ASSERT(FALSE); break; } if (1) { P2P_DISCONNECT_INFO rP2PDisInfo; rP2PDisInfo.ucRole = 2; wlanSendSetQueryCmd(prAdapter, CMD_ID_P2P_ABORT, TRUE, FALSE, FALSE, NULL, NULL, sizeof(P2P_DISCONNECT_INFO), (PUINT_8)&rP2PDisInfo, NULL, 0); } DBGLOG(P2P, TRACE, ("The device address is changed to " MACSTR " \n", MAC2STR(prP2pBssInfo->aucOwnMacAddr))); DBGLOG(P2P, TRACE, ("The BSSID is changed to " MACSTR " \n", MAC2STR(prP2pBssInfo->aucBSSID))); /* Update BSS INFO to FW. */ if ((fgSyncToFW) && (eOpMode != OP_MODE_ACCESS_POINT)) { nicUpdateBss(prAdapter, NETWORK_TYPE_P2P_INDEX); } } } while (FALSE); return; } /* p2pFuncSwitchOPMode */ /*----------------------------------------------------------------------------*/ /*! * @brief This function will start a P2P Group Owner and send Beacon Frames. 
* * @param (none) * * @return (none) */ /*----------------------------------------------------------------------------*/ VOID p2pFuncStartGO ( IN P_ADAPTER_T prAdapter, IN P_BSS_INFO_T prBssInfo, IN PUINT_8 pucSsidBuf, IN UINT_8 ucSsidLen, IN UINT_8 ucChannelNum, IN ENUM_BAND_T eBand, IN ENUM_CHNL_EXT_T eSco, IN BOOLEAN fgIsPureAP ) { do { ASSERT_BREAK((prAdapter != NULL) && (prBssInfo != NULL)); ASSERT(prBssInfo->eCurrentOPMode == OP_MODE_ACCESS_POINT); DBGLOG(P2P, TRACE, ("p2pFuncStartGO:\n")); /* AP mode started. */ p2pFuncSwitchOPMode(prAdapter, prBssInfo, prBssInfo->eIntendOPMode, FALSE); prBssInfo->eIntendOPMode = OP_MODE_NUM; //4 <1.1> Assign SSID COPY_SSID(prBssInfo->aucSSID, prBssInfo->ucSSIDLen, pucSsidBuf, ucSsidLen); DBGLOG(P2P, TRACE, ("GO SSID:%s \n", prBssInfo->aucSSID)); //4 <1.2> Clear current AP's STA_RECORD_T and current AID prBssInfo->prStaRecOfAP = (P_STA_RECORD_T)NULL; prBssInfo->u2AssocId = 0; //4 <1.3> Setup Channel, Band and Phy Attributes prBssInfo->ucPrimaryChannel = ucChannelNum; prBssInfo->eBand = eBand; prBssInfo->eBssSCO = eSco; DBGLOG(P2P, TRACE, ("GO Channel:%d \n", ucChannelNum)); if (prBssInfo->eBand == BAND_5G) { prBssInfo->ucPhyTypeSet = (prAdapter->rWifiVar.ucAvailablePhyTypeSet & PHY_TYPE_SET_802_11AN); /* Depend on eBand */ prBssInfo->ucConfigAdHocAPMode = AP_MODE_11A; /* Depend on eCurrentOPMode and ucPhyTypeSet */ } else if (fgIsPureAP) { prBssInfo->ucPhyTypeSet = (prAdapter->rWifiVar.ucAvailablePhyTypeSet & PHY_TYPE_SET_802_11BGN); /* Depend on eBand */ prBssInfo->ucConfigAdHocAPMode = AP_MODE_MIXED_11BG; /* Depend on eCurrentOPMode and ucPhyTypeSet */ } else { prBssInfo->ucPhyTypeSet = (prAdapter->rWifiVar.ucAvailablePhyTypeSet & PHY_TYPE_SET_802_11GN); /* Depend on eBand */ prBssInfo->ucConfigAdHocAPMode = AP_MODE_11G_P2P; /* Depend on eCurrentOPMode and ucPhyTypeSet */ } prBssInfo->ucNonHTBasicPhyType = (UINT_8) rNonHTApModeAttributes[prBssInfo->ucConfigAdHocAPMode].ePhyTypeIndex; prBssInfo->u2BSSBasicRateSet = rNonHTApModeAttributes[prBssInfo->ucConfigAdHocAPMode].u2BSSBasicRateSet; prBssInfo->u2OperationalRateSet = rNonHTPhyAttributes[prBssInfo->ucNonHTBasicPhyType].u2SupportedRateSet; if (prBssInfo->ucAllSupportedRatesLen == 0) { rateGetDataRatesFromRateSet(prBssInfo->u2OperationalRateSet, prBssInfo->u2BSSBasicRateSet, prBssInfo->aucAllSupportedRates, &prBssInfo->ucAllSupportedRatesLen); } //4 <1.5> Setup MIB for current BSS prBssInfo->u2ATIMWindow = 0; prBssInfo->ucBeaconTimeoutCount = 0; //3 <2> Update BSS_INFO_T common part #if CFG_SUPPORT_AAA if (!fgIsPureAP) { prBssInfo->fgIsProtection = TRUE; /* Always enable protection at P2P GO */ kalP2PSetCipher(prAdapter->prGlueInfo, IW_AUTH_CIPHER_CCMP); } else { if (kalP2PGetCipher(prAdapter->prGlueInfo)) prBssInfo->fgIsProtection = TRUE; } // 20120106 frog: I want separate OP_Mode & Beacon TX Function. //p2pFuncSwitchOPMode(prAdapter, prBssInfo, OP_MODE_ACCESS_POINT, FALSE); bssInitForAP(prAdapter, prBssInfo, FALSE); nicQmUpdateWmmParms(prAdapter, NETWORK_TYPE_P2P_INDEX); #endif /* CFG_SUPPORT_AAA */ //3 <3> Set MAC HW //4 <3.1> Setup channel and bandwidth rlmBssInitForAPandIbss(prAdapter, prBssInfo); //4 <3.2> Reset HW TSF Update Mode and Beacon Mode nicUpdateBss(prAdapter, NETWORK_TYPE_P2P_INDEX); //4 <3.3> Update Beacon again for network phy type confirmed. 
bssUpdateBeaconContent(prAdapter, NETWORK_TYPE_P2P_INDEX); //4 <3.4> Setup BSSID nicPmIndicateBssCreated(prAdapter, NETWORK_TYPE_P2P_INDEX); } while (FALSE); return; } /* p2pFuncStartGO() */ /*----------------------------------------------------------------------------*/ /*! * \brief This function is to inform CNM that channel privilege * has been released * * \param[in] prAdapter Pointer of ADAPTER_T * * \return none */ /*----------------------------------------------------------------------------*/ VOID p2pFuncReleaseCh ( IN P_ADAPTER_T prAdapter, IN P_P2P_CHNL_REQ_INFO_T prChnlReqInfo ) { P_MSG_CH_ABORT_T prMsgChRelease = (P_MSG_CH_ABORT_T)NULL; DEBUGFUNC("p2pFuncReleaseCh()"); do { ASSERT_BREAK((prAdapter != NULL) && (prChnlReqInfo != NULL)); if (!prChnlReqInfo->fgIsChannelRequested) { break; } else { DBGLOG(P2P, TRACE, ("P2P Release Channel\n")); prChnlReqInfo->fgIsChannelRequested = FALSE; } /* 1. return channel privilege to CNM immediately */ prMsgChRelease = (P_MSG_CH_ABORT_T)cnmMemAlloc(prAdapter, RAM_TYPE_MSG, sizeof(MSG_CH_ABORT_T)); if (!prMsgChRelease) { ASSERT(0); // Can't release Channel to CNM break; } prMsgChRelease->rMsgHdr.eMsgId = MID_MNY_CNM_CH_ABORT; prMsgChRelease->ucNetTypeIndex = NETWORK_TYPE_P2P_INDEX; prMsgChRelease->ucTokenID = prChnlReqInfo->ucSeqNumOfChReq++; mboxSendMsg(prAdapter, MBOX_ID_0, (P_MSG_HDR_T) prMsgChRelease, MSG_SEND_METHOD_BUF); } while (FALSE); return; } /* p2pFuncReleaseCh */ /*----------------------------------------------------------------------------*/ /*! * @brief Process of CHANNEL_REQ_JOIN Initial. Enter CHANNEL_REQ_JOIN State. * * @param (none) * * @return (none) */ /*----------------------------------------------------------------------------*/ VOID p2pFuncAcquireCh ( IN P_ADAPTER_T prAdapter, IN P_P2P_CHNL_REQ_INFO_T prChnlReqInfo ) { P_MSG_CH_REQ_T prMsgChReq = (P_MSG_CH_REQ_T)NULL; do { ASSERT_BREAK((prAdapter != NULL) && (prChnlReqInfo != NULL)); p2pFuncReleaseCh(prAdapter, prChnlReqInfo); /* send message to CNM for acquiring channel */ prMsgChReq = (P_MSG_CH_REQ_T)cnmMemAlloc(prAdapter, RAM_TYPE_MSG, sizeof(MSG_CH_REQ_T)); if (!prMsgChReq) { ASSERT(0); // Can't indicate CNM for channel acquiring break; } prMsgChReq->rMsgHdr.eMsgId = MID_MNY_CNM_CH_REQ; prMsgChReq->ucNetTypeIndex = NETWORK_TYPE_P2P_INDEX; prMsgChReq->ucTokenID = ++prChnlReqInfo->ucSeqNumOfChReq; prMsgChReq->eReqType = CH_REQ_TYPE_JOIN; prMsgChReq->u4MaxInterval = prChnlReqInfo->u4MaxInterval; prMsgChReq->ucPrimaryChannel = prChnlReqInfo->ucReqChnlNum; prMsgChReq->eRfSco = prChnlReqInfo->eChnlSco; prMsgChReq->eRfBand = prChnlReqInfo->eBand; kalMemZero(prMsgChReq->aucBSSID, MAC_ADDR_LEN); /* Channel request join BSSID. 
*/ mboxSendMsg(prAdapter, MBOX_ID_0, (P_MSG_HDR_T) prMsgChReq, MSG_SEND_METHOD_BUF); prChnlReqInfo->fgIsChannelRequested = TRUE; } while (FALSE); return; } /* p2pFuncAcquireCh */ #if 0 WLAN_STATUS p2pFuncBeaconUpdate( IN P_ADAPTER_T prAdapter, IN PUINT_8 pucBcnHdr, IN UINT_32 u4HdrLen, IN PUINT_8 pucBcnBody, IN UINT_32 u4BodyLen, IN UINT_32 u4DtimPeriod, IN UINT_32 u4BcnInterval) { WLAN_STATUS rResultStatus = WLAN_STATUS_INVALID_DATA; P_WLAN_BEACON_FRAME_T prBcnFrame = (P_WLAN_BEACON_FRAME_T)NULL; P_BSS_INFO_T prP2pBssInfo = (P_BSS_INFO_T)NULL; P_MSDU_INFO_T prBcnMsduInfo = (P_MSDU_INFO_T)NULL; PUINT_8 pucTIMBody = (PUINT_8)NULL; UINT_16 u2FrameLength = 0, u2OldBodyLen = 0; UINT_8 aucIEBuf[MAX_IE_LENGTH]; do { ASSERT_BREAK(prAdapter != NULL); prP2pBssInfo = &(prAdapter->rWifiVar.arBssInfo[NETWORK_TYPE_P2P_INDEX]); prBcnMsduInfo = prP2pBssInfo->prBeacon; ASSERT_BREAK(prBcnMsduInfo != NULL); /* TODO: Find TIM IE pointer. */ prBcnFrame = prBcnMsduInfo->prPacket; ASSERT_BREAK(prBcnFrame != NULL); do { /* Ori header. */ UINT_16 u2IELength = 0, u2Offset = 0; PUINT_8 pucIEBuf = prBcnFrame->aucInfoElem; u2IELength = prBcnMsduInfo->u2FrameLength - prBcnMsduInfo->ucMacHeaderLength; IE_FOR_EACH(pucIEBuf, u2IELength, u2Offset) { if ((IE_ID(pucIEBuf) == ELEM_ID_TIM) || (IE_ID(pucIEBuf) > ELEM_ID_IBSS_PARAM_SET)) { pucTIMBody = pucIEBuf; break; } u2FrameLength += IE_SIZE(pucIEBuf); } if (pucTIMBody == NULL) { pucTIMBody = pucIEBuf; } /* Body not change. */ u2OldBodyLen = (UINT_16)((UINT_32)pucTIMBody - (UINT_32)prBcnFrame->aucInfoElem); /* Move body aside so it can be restored after the header is rewritten. */ kalMemCopy(aucIEBuf, pucTIMBody, u2OldBodyLen); } while (FALSE); if (pucBcnHdr) { kalMemCopy(prBcnMsduInfo->prPacket, pucBcnHdr, u4HdrLen); pucTIMBody = (PUINT_8)((UINT_32)prBcnMsduInfo->prPacket + u4HdrLen); prBcnMsduInfo->ucMacHeaderLength = (WLAN_MAC_MGMT_HEADER_LEN + (TIMESTAMP_FIELD_LEN + BEACON_INTERVAL_FIELD_LEN + CAP_INFO_FIELD_LEN)); u2FrameLength = u4HdrLen; /* Header + Partial Body. */ } else { /* Header not change. */ u2FrameLength += prBcnMsduInfo->ucMacHeaderLength; } if (pucBcnBody) { kalMemCopy(pucTIMBody, pucBcnBody, u4BodyLen); u2FrameLength += (UINT_16)u4BodyLen; } else { kalMemCopy(pucTIMBody, aucIEBuf, u2OldBodyLen); u2FrameLength += u2OldBodyLen; } /* Frame Length */ prBcnMsduInfo->u2FrameLength = u2FrameLength; prBcnMsduInfo->fgIs802_11 = TRUE; prBcnMsduInfo->ucNetworkType = NETWORK_TYPE_P2P_INDEX; prP2pBssInfo->u2BeaconInterval = (UINT_16)u4BcnInterval; prP2pBssInfo->ucDTIMPeriod = (UINT_8)u4DtimPeriod; prP2pBssInfo->u2CapInfo = prBcnFrame->u2CapInfo; prBcnMsduInfo->ucPacketType = 3; rResultStatus = nicUpdateBeaconIETemplate(prAdapter, IE_UPD_METHOD_UPDATE_ALL, NETWORK_TYPE_P2P_INDEX, prP2pBssInfo->u2CapInfo, (PUINT_8)prBcnFrame->aucInfoElem, prBcnMsduInfo->u2FrameLength - OFFSET_OF(WLAN_BEACON_FRAME_T, aucInfoElem)); if (prP2pBssInfo->eCurrentOPMode == OP_MODE_ACCESS_POINT) { /* AP is created, Beacon Update. 
*/ nicPmIndicateBssAbort(prAdapter, NETWORK_TYPE_P2P_INDEX); nicPmIndicateBssCreated(prAdapter, NETWORK_TYPE_P2P_INDEX); } } while (FALSE); return rResultStatus; } /* p2pFuncBeaconUpdate */ #else WLAN_STATUS p2pFuncBeaconUpdate ( IN P_ADAPTER_T prAdapter, IN P_BSS_INFO_T prP2pBssInfo, IN P_P2P_BEACON_UPDATE_INFO_T prBcnUpdateInfo, IN PUINT_8 pucNewBcnHdr, IN UINT_32 u4NewHdrLen, IN PUINT_8 pucNewBcnBody, IN UINT_32 u4NewBodyLen ) { WLAN_STATUS rWlanStatus = WLAN_STATUS_SUCCESS; P_WLAN_BEACON_FRAME_T prBcnFrame = (P_WLAN_BEACON_FRAME_T)NULL; P_MSDU_INFO_T prBcnMsduInfo = (P_MSDU_INFO_T)NULL; PUINT_8 pucIEBuf = (PUINT_8)NULL; UINT_8 aucIEBuf[MAX_IE_LENGTH]; do { ASSERT_BREAK((prAdapter != NULL) && (prP2pBssInfo != NULL) && (prBcnUpdateInfo != NULL)); prBcnMsduInfo = prP2pBssInfo->prBeacon; #if DBG if (prBcnUpdateInfo->pucBcnHdr != NULL) { ASSERT((UINT_32)prBcnUpdateInfo->pucBcnHdr == ((UINT_32)prBcnMsduInfo->prPacket + MAC_TX_RESERVED_FIELD)); } if (prBcnUpdateInfo->pucBcnBody != NULL) { ASSERT((UINT_32)prBcnUpdateInfo->pucBcnBody == ((UINT_32)prBcnUpdateInfo->pucBcnHdr + (UINT_32)prBcnUpdateInfo->u4BcnHdrLen)); } #endif prBcnFrame = (P_WLAN_BEACON_FRAME_T)((UINT_32)prBcnMsduInfo->prPacket + MAC_TX_RESERVED_FIELD); if (!pucNewBcnBody) { /* Old body. */ pucNewBcnBody = prBcnUpdateInfo->pucBcnBody; ASSERT(u4NewBodyLen == 0); u4NewBodyLen = prBcnUpdateInfo->u4BcnBodyLen; } else { prBcnUpdateInfo->u4BcnBodyLen = u4NewBodyLen; } /* Temp buffer body part. */ kalMemCopy(aucIEBuf, pucNewBcnBody, u4NewBodyLen); if (pucNewBcnHdr) { kalMemCopy(prBcnFrame, pucNewBcnHdr, u4NewHdrLen); prBcnUpdateInfo->pucBcnHdr = (PUINT_8)prBcnFrame; prBcnUpdateInfo->u4BcnHdrLen = u4NewHdrLen; } pucIEBuf = (PUINT_8)((UINT_32)prBcnUpdateInfo->pucBcnHdr + (UINT_32)prBcnUpdateInfo->u4BcnHdrLen); kalMemCopy(pucIEBuf, aucIEBuf, u4NewBodyLen); prBcnUpdateInfo->pucBcnBody = pucIEBuf; /* Frame Length */ prBcnMsduInfo->u2FrameLength = (UINT_16)(prBcnUpdateInfo->u4BcnHdrLen + prBcnUpdateInfo->u4BcnBodyLen); prBcnMsduInfo->ucPacketType = 3; prBcnMsduInfo->fgIs802_11 = TRUE; prBcnMsduInfo->ucNetworkType = NETWORK_TYPE_P2P_INDEX; /* Update BSS INFO related information. */ COPY_MAC_ADDR(prP2pBssInfo->aucOwnMacAddr, prBcnFrame->aucSrcAddr); COPY_MAC_ADDR(prP2pBssInfo->aucBSSID, prBcnFrame->aucBSSID); prP2pBssInfo->u2CapInfo = prBcnFrame->u2CapInfo; p2pFuncParseBeaconContent(prAdapter, prP2pBssInfo, (PUINT_8)prBcnFrame->aucInfoElem, (prBcnMsduInfo->u2FrameLength - OFFSET_OF(WLAN_BEACON_FRAME_T, aucInfoElem))); #if 1 //bssUpdateBeaconContent(prAdapter, NETWORK_TYPE_P2P_INDEX); #else nicUpdateBeaconIETemplate(prAdapter, IE_UPD_METHOD_UPDATE_ALL, NETWORK_TYPE_P2P_INDEX, prBcnFrame->u2CapInfo, (PUINT_8)prBcnFrame->aucInfoElem, (prBcnMsduInfo->u2FrameLength - OFFSET_OF(WLAN_BEACON_FRAME_T, aucInfoElem))); #endif } while (FALSE); return rWlanStatus; } /* p2pFuncBeaconUpdate */ #endif // TODO: We do not apply IE in deauth frame set from upper layer now. 
WLAN_STATUS p2pFuncDeauth ( IN P_ADAPTER_T prAdapter, IN PUINT_8 pucPeerMacAddr, IN UINT_16 u2ReasonCode, IN PUINT_8 pucIEBuf, IN UINT_16 u2IELen, IN BOOLEAN fgSendDeauth ) { WLAN_STATUS rWlanStatus = WLAN_STATUS_FAILURE; P_STA_RECORD_T prCliStaRec = (P_STA_RECORD_T)NULL; P_BSS_INFO_T prP2pBssInfo = (P_BSS_INFO_T)NULL; BOOLEAN fgIsStaFound = FALSE; do { ASSERT_BREAK((prAdapter != NULL) && (pucPeerMacAddr != NULL)); prP2pBssInfo = &(prAdapter->rWifiVar.arBssInfo[NETWORK_TYPE_P2P_INDEX]); prCliStaRec = cnmGetStaRecByAddress(prAdapter, NETWORK_TYPE_P2P_INDEX, pucPeerMacAddr); switch (prP2pBssInfo->eCurrentOPMode) { case OP_MODE_ACCESS_POINT: { P_LINK_T prStaRecOfClientList = (P_LINK_T)NULL; P_LINK_ENTRY_T prLinkEntry = (P_LINK_ENTRY_T)NULL; prStaRecOfClientList = &(prP2pBssInfo->rStaRecOfClientList); LINK_FOR_EACH(prLinkEntry, prStaRecOfClientList) { if ((UINT_32)prCliStaRec == (UINT_32)prLinkEntry) { LINK_REMOVE_KNOWN_ENTRY(prStaRecOfClientList, &prCliStaRec->rLinkEntry); fgIsStaFound = TRUE; break; } } } break; case OP_MODE_INFRASTRUCTURE: ASSERT(prCliStaRec == prP2pBssInfo->prStaRecOfAP); if (prCliStaRec != prP2pBssInfo->prStaRecOfAP) { break; } prP2pBssInfo->prStaRecOfAP = NULL; fgIsStaFound = TRUE; break; default: break; } if (fgIsStaFound) { p2pFuncDisconnect(prAdapter, prCliStaRec, fgSendDeauth, u2ReasonCode); } rWlanStatus = WLAN_STATUS_SUCCESS; } while (FALSE); return rWlanStatus; } /* p2pFuncDeauth */ // TODO: We do not apply IE in disassoc frame set from upper layer now. WLAN_STATUS p2pFuncDisassoc ( IN P_ADAPTER_T prAdapter, IN PUINT_8 pucPeerMacAddr, IN UINT_16 u2ReasonCode, IN PUINT_8 pucIEBuf, IN UINT_16 u2IELen, IN BOOLEAN fgSendDisassoc ) { WLAN_STATUS rWlanStatus = WLAN_STATUS_FAILURE; P_STA_RECORD_T prCliStaRec = (P_STA_RECORD_T)NULL; P_BSS_INFO_T prP2pBssInfo = (P_BSS_INFO_T)NULL; BOOLEAN fgIsStaFound = FALSE; do { ASSERT_BREAK((prAdapter != NULL) && (pucPeerMacAddr != NULL)); prP2pBssInfo = &(prAdapter->rWifiVar.arBssInfo[NETWORK_TYPE_P2P_INDEX]); prCliStaRec = cnmGetStaRecByAddress(prAdapter, NETWORK_TYPE_P2P_INDEX, pucPeerMacAddr); switch (prP2pBssInfo->eCurrentOPMode) { case OP_MODE_ACCESS_POINT: { P_LINK_T prStaRecOfClientList = (P_LINK_T)NULL; P_LINK_ENTRY_T prLinkEntry = (P_LINK_ENTRY_T)NULL; prStaRecOfClientList = &(prP2pBssInfo->rStaRecOfClientList); LINK_FOR_EACH(prLinkEntry, prStaRecOfClientList) { if ((UINT_32)prCliStaRec == (UINT_32)prLinkEntry) { LINK_REMOVE_KNOWN_ENTRY(prStaRecOfClientList, &prCliStaRec->rLinkEntry); fgIsStaFound = TRUE; //p2pFuncDisconnect(prAdapter, prCliStaRec, fgSendDisassoc, u2ReasonCode); break; } } } break; case OP_MODE_INFRASTRUCTURE: ASSERT(prCliStaRec == prP2pBssInfo->prStaRecOfAP); if (prCliStaRec != prP2pBssInfo->prStaRecOfAP) { break; } //p2pFuncDisconnect(prAdapter, prCliStaRec, fgSendDisassoc, u2ReasonCode); prP2pBssInfo->prStaRecOfAP = NULL; fgIsStaFound = TRUE; break; default: break; } if (fgIsStaFound) { p2pFuncDisconnect(prAdapter, prCliStaRec, fgSendDisassoc, u2ReasonCode); //20120830 moved into p2pFuncDisconnect(). //cnmStaRecFree(prAdapter, prCliStaRec, TRUE); } rWlanStatus = WLAN_STATUS_SUCCESS; } while (FALSE); return rWlanStatus; } /* p2pFuncDisassoc */ /*----------------------------------------------------------------------------*/ /*! * @brief This function is called to dissolve from group or one group. (Would not change P2P FSM.) * 1. GC: Disconnect from AP. (Send Deauth) * 2. GO: Disconnect all STA * * @param[in] prAdapter Pointer to the adapter structure. 
* * @return (none) */ /*----------------------------------------------------------------------------*/ VOID p2pFuncDissolve ( IN P_ADAPTER_T prAdapter, IN P_BSS_INFO_T prP2pBssInfo, IN BOOLEAN fgSendDeauth, IN UINT_16 u2ReasonCode ) { DEBUGFUNC("p2pFuncDissolve()"); do { ASSERT_BREAK((prAdapter != NULL) && (prP2pBssInfo != NULL)); switch (prP2pBssInfo->eCurrentOPMode) { case OP_MODE_INFRASTRUCTURE: /* Reset station record status. */ if (prP2pBssInfo->prStaRecOfAP) { kalP2PGCIndicateConnectionStatus(prAdapter->prGlueInfo, NULL, NULL, 0, REASON_CODE_DEAUTH_LEAVING_BSS); // 2012/02/14 frog: After formation before join group, prStaRecOfAP is NULL. p2pFuncDisconnect(prAdapter, prP2pBssInfo->prStaRecOfAP, fgSendDeauth, u2ReasonCode); } /* Fix possible KE when RX Beacon & call nicPmIndicateBssConnected() hits prStaRecOfAP == NULL. */ p2pChangeMediaState(prAdapter, PARAM_MEDIA_STATE_DISCONNECTED); prP2pBssInfo->prStaRecOfAP = NULL; break; case OP_MODE_ACCESS_POINT: /* Under AP mode, we would not send a deauthentication frame to each STA. * We only stop the Beacon & let all stations time out. */ { P_LINK_T prStaRecOfClientList = (P_LINK_T)NULL; /* Send deauth. */ authSendDeauthFrame(prAdapter, NULL, (P_SW_RFB_T)NULL, u2ReasonCode, (PFN_TX_DONE_HANDLER)NULL); prStaRecOfClientList = &prP2pBssInfo->rStaRecOfClientList; while (!LINK_IS_EMPTY(prStaRecOfClientList)) { P_STA_RECORD_T prCurrStaRec; LINK_REMOVE_HEAD(prStaRecOfClientList, prCurrStaRec, P_STA_RECORD_T); /* Indicate to Host. */ //kalP2PGOStationUpdate(prAdapter->prGlueInfo, prCurrStaRec, FALSE); p2pFuncDisconnect(prAdapter, prCurrStaRec, TRUE, u2ReasonCode); } } break; default: return; // 20110420 -- already in Device Mode. } /* Make sure the deauth frame is sent to FW ASAP. */ wlanAcquirePowerControl(prAdapter); wlanProcessCommandQueue(prAdapter, &prAdapter->prGlueInfo->rCmdQueue); wlanReleasePowerControl(prAdapter); kalMdelay(100); /* Change Connection Status. */ p2pChangeMediaState(prAdapter, PARAM_MEDIA_STATE_DISCONNECTED); } while (FALSE); return; } /* p2pFuncDissolve */ /*----------------------------------------------------------------------------*/ /*! * @brief This function is called to disconnect one station record from the group. (Would not change P2P FSM.) * 1. GC: Disconnect from the AP. (Send Deauth) * 2. GO: Disconnect one associated client. * * @param[in] prAdapter Pointer to the adapter structure. * @param[in] prStaRec Pointer to the station record to disconnect. * * @return (none) */ /*----------------------------------------------------------------------------*/ VOID p2pFuncDisconnect ( IN P_ADAPTER_T prAdapter, IN P_STA_RECORD_T prStaRec, IN BOOLEAN fgSendDeauth, IN UINT_16 u2ReasonCode ) { P_BSS_INFO_T prP2pBssInfo = (P_BSS_INFO_T)NULL; ENUM_PARAM_MEDIA_STATE_T eOriMediaStatus; DBGLOG(P2P, TRACE, ("p2pFuncDisconnect()")); do { ASSERT_BREAK((prAdapter != NULL) && (prStaRec != NULL)); prP2pBssInfo = &(prAdapter->rWifiVar.arBssInfo[NETWORK_TYPE_P2P_INDEX]); eOriMediaStatus = prP2pBssInfo->eConnectionState; /* Indicate disconnect. */ // TODO: // kalP2PGOStationUpdate // kalP2PGCIndicateConnectionStatus //p2pIndicationOfMediaStateToHost(prAdapter, PARAM_MEDIA_STATE_DISCONNECTED, prStaRec->aucMacAddr); if (prP2pBssInfo->eCurrentOPMode == OP_MODE_ACCESS_POINT) { kalP2PGOStationUpdate(prAdapter->prGlueInfo, prStaRec, FALSE); } if (fgSendDeauth) { /* Send deauth. */ authSendDeauthFrame(prAdapter, prStaRec, (P_SW_RFB_T)NULL, u2ReasonCode, (PFN_TX_DONE_HANDLER)p2pFsmRunEventDeauthTxDone); /* Change station state. */ cnmStaRecChangeState(prAdapter, prStaRec, STA_STATE_1); /* Reset Station Record Status. 
*/ p2pFuncResetStaRecStatus(prAdapter, prStaRec); } else { /* Change station state. */ cnmStaRecChangeState(prAdapter, prStaRec, STA_STATE_1); /* Reset Station Record Status. */ p2pFuncResetStaRecStatus(prAdapter, prStaRec); cnmStaRecFree(prAdapter, prStaRec, TRUE); if ((prP2pBssInfo->eCurrentOPMode != OP_MODE_ACCESS_POINT) || (prP2pBssInfo->rStaRecOfClientList.u4NumElem == 0)) { DBGLOG(P2P, TRACE, ("No More Client, Media Status DISCONNECTED\n")); p2pChangeMediaState(prAdapter, PARAM_MEDIA_STATE_DISCONNECTED); } if (eOriMediaStatus != prP2pBssInfo->eConnectionState) { /* Update Disconnected state to FW. */ nicUpdateBss(prAdapter, NETWORK_TYPE_P2P_INDEX); } } if (prP2pBssInfo->eCurrentOPMode != OP_MODE_ACCESS_POINT) { /* GO: It would stop Beacon TX. GC: Stop all BSS related PS function. */ nicPmIndicateBssAbort(prAdapter, NETWORK_TYPE_P2P_INDEX); /* Reset RLM related field of BSSINFO. */ rlmBssAborted(prAdapter, prP2pBssInfo); } } while (FALSE); return; } /* p2pFuncDisconnect */ WLAN_STATUS p2pFuncTxMgmtFrame ( IN P_ADAPTER_T prAdapter, IN P_P2P_MGMT_TX_REQ_INFO_T prMgmtTxReqInfo, IN P_MSDU_INFO_T prMgmtTxMsdu, IN UINT_64 u8Cookie ) { WLAN_STATUS rWlanStatus = WLAN_STATUS_SUCCESS; P_MSDU_INFO_T prTxMsduInfo = (P_MSDU_INFO_T)NULL; P_WLAN_MAC_HEADER_T prWlanHdr = (P_WLAN_MAC_HEADER_T)NULL; P_STA_RECORD_T prStaRec = (P_STA_RECORD_T)NULL; do { ASSERT_BREAK((prAdapter != NULL) && (prMgmtTxReqInfo != NULL)); if (prMgmtTxReqInfo->fgIsMgmtTxRequested) { // 1. prMgmtTxReqInfo->prMgmtTxMsdu != NULL /* Packet on driver, not done yet, drop it. */ if ((prTxMsduInfo = prMgmtTxReqInfo->prMgmtTxMsdu) != NULL) { kalP2PIndicateMgmtTxStatus(prAdapter->prGlueInfo, prMgmtTxReqInfo->u8Cookie, FALSE, prTxMsduInfo->prPacket, (UINT_32)prTxMsduInfo->u2FrameLength); // Leave it to TX Done handler. //cnmMgtPktFree(prAdapter, prTxMsduInfo); prMgmtTxReqInfo->prMgmtTxMsdu = NULL; } // 2. prMgmtTxReqInfo->prMgmtTxMsdu == NULL /* Packet transmitted, wait tx done. (cookie issue) */ // 20120105 frog - use another u8cookie to store this value. } ASSERT(prMgmtTxReqInfo->prMgmtTxMsdu == NULL); prWlanHdr = (P_WLAN_MAC_HEADER_T)((UINT_32)prMgmtTxMsdu->prPacket + MAC_TX_RESERVED_FIELD); prStaRec = cnmGetStaRecByAddress(prAdapter, NETWORK_TYPE_P2P_INDEX, prWlanHdr->aucAddr1); prMgmtTxMsdu->ucNetworkType = (UINT_8)NETWORK_TYPE_P2P_INDEX; switch (prWlanHdr->u2FrameCtrl & MASK_FRAME_TYPE) { case MAC_FRAME_PROBE_RSP: DBGLOG(P2P, TRACE, ("p2pFuncTxMgmtFrame: TX MAC_FRAME_PROBE_RSP\n")); prMgmtTxMsdu = p2pFuncProcessP2pProbeRsp(prAdapter, prMgmtTxMsdu); break; default: break; } prMgmtTxReqInfo->u8Cookie = u8Cookie; prMgmtTxReqInfo->prMgmtTxMsdu = prMgmtTxMsdu; prMgmtTxReqInfo->fgIsMgmtTxRequested = TRUE; prMgmtTxMsdu->eSrc = TX_PACKET_MGMT; prMgmtTxMsdu->ucPacketType = HIF_TX_PACKET_TYPE_MGMT; prMgmtTxMsdu->ucStaRecIndex = (prStaRec != NULL)?(prStaRec->ucIndex):(0xFF); if (prStaRec != NULL) { DBGLOG(P2P, TRACE, ("Mgmt with station record: "MACSTR" .\n", MAC2STR(prStaRec->aucMacAddr))); } prMgmtTxMsdu->ucMacHeaderLength = WLAN_MAC_MGMT_HEADER_LEN; // TODO: undcertain. prMgmtTxMsdu->fgIs802_1x = FALSE; prMgmtTxMsdu->fgIs802_11 = TRUE; prMgmtTxMsdu->ucTxSeqNum = nicIncreaseTxSeqNum(prAdapter); prMgmtTxMsdu->pfTxDoneHandler = p2pFsmRunEventMgmtFrameTxDone; prMgmtTxMsdu->fgIsBasicRate = TRUE; DBGLOG(P2P, TRACE, ("Mgmt seq NO. 
%d .\n", prMgmtTxMsdu->ucTxSeqNum)); nicTxEnqueueMsdu(prAdapter, prMgmtTxMsdu); } while (FALSE); return rWlanStatus; } /* p2pFuncTxMgmtFrame */ VOID p2pFuncSetChannel ( IN P_ADAPTER_T prAdapter, IN P_RF_CHANNEL_INFO_T prRfChannelInfo ) { P_P2P_CONNECTION_SETTINGS_T prP2pConnSettings = (P_P2P_CONNECTION_SETTINGS_T)NULL; do { ASSERT_BREAK((prAdapter != NULL) && (prRfChannelInfo != NULL)); prP2pConnSettings = prAdapter->rWifiVar.prP2PConnSettings; prP2pConnSettings->ucOperatingChnl = prRfChannelInfo->ucChannelNum; prP2pConnSettings->eBand = prRfChannelInfo->eBand; } while (FALSE); return; } /* p2pFuncSetChannel */ /*----------------------------------------------------------------------------*/ /*! * @brief Retry JOIN for AUTH_MODE_AUTO_SWITCH * * @param[in] prStaRec Pointer to the STA_RECORD_T * * @retval TRUE We will retry JOIN * @retval FALSE We will not retry JOIN */ /*----------------------------------------------------------------------------*/ BOOLEAN p2pFuncRetryJOIN ( IN P_ADAPTER_T prAdapter, IN P_STA_RECORD_T prStaRec, IN P_P2P_JOIN_INFO_T prJoinInfo ) { P_MSG_JOIN_REQ_T prJoinReqMsg = (P_MSG_JOIN_REQ_T)NULL; BOOLEAN fgRetValue = FALSE; do { ASSERT_BREAK((prAdapter != NULL) && (prStaRec != NULL) && (prJoinInfo != NULL)); /* Retry other AuthType if possible */ if (!prJoinInfo->ucAvailableAuthTypes) { break; } if (prJoinInfo->ucAvailableAuthTypes & (UINT_8)AUTH_TYPE_SHARED_KEY) { DBGLOG(P2P, INFO, ("RETRY JOIN INIT: Retry Authentication with AuthType == SHARED_KEY.\n")); prJoinInfo->ucAvailableAuthTypes &= ~(UINT_8)AUTH_TYPE_SHARED_KEY; prStaRec->ucAuthAlgNum = (UINT_8)AUTH_ALGORITHM_NUM_SHARED_KEY; } else { DBGLOG(P2P, ERROR, ("RETRY JOIN INIT: Retry Authentication with Unexpected AuthType.\n")); ASSERT(0); break; } prJoinInfo->ucAvailableAuthTypes = 0; /* No more available Auth Types */ /* Trigger SAA to start JOIN process. */ prJoinReqMsg = (P_MSG_JOIN_REQ_T)cnmMemAlloc(prAdapter, RAM_TYPE_MSG, sizeof(MSG_JOIN_REQ_T)); if (!prJoinReqMsg) { ASSERT(0); // Can't trigger SAA FSM break; } prJoinReqMsg->rMsgHdr.eMsgId = MID_P2P_SAA_FSM_START; prJoinReqMsg->ucSeqNum = ++prJoinInfo->ucSeqNumOfReqMsg; prJoinReqMsg->prStaRec = prStaRec; mboxSendMsg(prAdapter, MBOX_ID_0, (P_MSG_HDR_T) prJoinReqMsg, MSG_SEND_METHOD_BUF); fgRetValue = TRUE; } while (FALSE); return fgRetValue; }/* end of p2pFuncRetryJOIN() */ /*----------------------------------------------------------------------------*/ /*! * @brief This function will update the contain of BSS_INFO_T for AIS network once * the association was completed. * * @param[in] prStaRec Pointer to the STA_RECORD_T * @param[in] prAssocRspSwRfb Pointer to SW RFB of ASSOC RESP FRAME. 
* * @return (none) */ /*----------------------------------------------------------------------------*/ VOID p2pFuncUpdateBssInfoForJOIN ( IN P_ADAPTER_T prAdapter, IN P_BSS_DESC_T prBssDesc, IN P_STA_RECORD_T prStaRec, IN P_SW_RFB_T prAssocRspSwRfb ) { P_BSS_INFO_T prP2pBssInfo = (P_BSS_INFO_T)NULL; P_P2P_CONNECTION_SETTINGS_T prP2pConnSettings = (P_P2P_CONNECTION_SETTINGS_T)NULL; P_WLAN_ASSOC_RSP_FRAME_T prAssocRspFrame = (P_WLAN_ASSOC_RSP_FRAME_T)NULL; UINT_16 u2IELength; PUINT_8 pucIE; DEBUGFUNC("p2pUpdateBssInfoForJOIN()"); ASSERT(prAdapter); ASSERT(prStaRec); ASSERT(prAssocRspSwRfb); prP2pBssInfo = &(prAdapter->rWifiVar.arBssInfo[NETWORK_TYPE_P2P_INDEX]); prP2pConnSettings = prAdapter->rWifiVar.prP2PConnSettings; prAssocRspFrame = (P_WLAN_ASSOC_RSP_FRAME_T) prAssocRspSwRfb->pvHeader; DBGLOG(P2P, INFO, ("Update P2P_BSS_INFO_T and apply settings to MAC\n")); //3 <1> Update BSS_INFO_T from AIS_FSM_INFO_T or User Settings //4 <1.1> Setup Operation Mode prP2pBssInfo->eCurrentOPMode = OP_MODE_INFRASTRUCTURE; //4 <1.2> Setup SSID COPY_SSID(prP2pBssInfo->aucSSID, prP2pBssInfo->ucSSIDLen, prP2pConnSettings->aucSSID, prP2pConnSettings->ucSSIDLen); if (prBssDesc == NULL) { /* Target BSS NULL. */ DBGLOG(P2P, TRACE,("Target BSS NULL\n")); return; } if (UNEQUAL_MAC_ADDR(prBssDesc->aucBSSID, prAssocRspFrame->aucBSSID)) { ASSERT(FALSE); } //4 <1.3> Setup Channel, Band prP2pBssInfo->ucPrimaryChannel = prBssDesc->ucChannelNum; prP2pBssInfo->eBand = prBssDesc->eBand; //3 <2> Update BSS_INFO_T from STA_RECORD_T //4 <2.1> Save current AP's STA_RECORD_T and current AID prP2pBssInfo->prStaRecOfAP = prStaRec; prP2pBssInfo->u2AssocId = prStaRec->u2AssocId; //4 <2.2> Setup Capability prP2pBssInfo->u2CapInfo = prStaRec->u2CapInfo; /* Use AP's Cap Info as BSS Cap Info */ if (prP2pBssInfo->u2CapInfo & CAP_INFO_SHORT_PREAMBLE) { prP2pBssInfo->fgIsShortPreambleAllowed = TRUE; } else { prP2pBssInfo->fgIsShortPreambleAllowed = FALSE; } //4 <2.3> Setup PHY Attributes and Basic Rate Set/Operational Rate Set prP2pBssInfo->ucPhyTypeSet = prStaRec->ucDesiredPhyTypeSet; prP2pBssInfo->ucNonHTBasicPhyType = prStaRec->ucNonHTBasicPhyType; prP2pBssInfo->u2OperationalRateSet = prStaRec->u2OperationalRateSet; prP2pBssInfo->u2BSSBasicRateSet = prStaRec->u2BSSBasicRateSet; //3 <3> Update BSS_INFO_T from SW_RFB_T (Association Resp Frame) //4 <3.1> Setup BSSID COPY_MAC_ADDR(prP2pBssInfo->aucBSSID, prAssocRspFrame->aucBSSID); u2IELength = (UINT_16) ((prAssocRspSwRfb->u2PacketLen - prAssocRspSwRfb->u2HeaderLen) - (OFFSET_OF(WLAN_ASSOC_RSP_FRAME_T, aucInfoElem[0]) - WLAN_MAC_MGMT_HEADER_LEN)); pucIE = prAssocRspFrame->aucInfoElem; //4 <3.2> Parse WMM and setup QBSS flag /* Parse WMM related IEs and configure HW CRs accordingly */ mqmProcessAssocRsp(prAdapter, prAssocRspSwRfb, pucIE, u2IELength); prP2pBssInfo->fgIsQBSS = prStaRec->fgIsQoS; //3 <4> Update BSS_INFO_T from BSS_DESC_T ASSERT(prBssDesc); prBssDesc->fgIsConnecting = FALSE; prBssDesc->fgIsConnected = TRUE; //4 <4.1> Setup MIB for current BSS prP2pBssInfo->u2BeaconInterval = prBssDesc->u2BeaconInterval; /* NOTE: Defer ucDTIMPeriod updating to when beacon is received after connection */ prP2pBssInfo->ucDTIMPeriod = 0; prP2pBssInfo->u2ATIMWindow = 0; prP2pBssInfo->ucBeaconTimeoutCount = AIS_BEACON_TIMEOUT_COUNT_INFRA; //4 <4.2> Update HT information and set channel /* Record HT related parameters in rStaRec and rBssInfo * Note: it shall be called before nicUpdateBss() */ rlmProcessAssocRsp(prAdapter, prAssocRspSwRfb, pucIE, u2IELength); //4 <4.3> Sync with firmware for BSS-INFO 
nicUpdateBss(prAdapter, NETWORK_TYPE_P2P_INDEX); //4 <4.4> *DEFER OPERATION* nicPmIndicateBssConnected() will be invoked //inside scanProcessBeaconAndProbeResp() after 1st beacon is received return; } /* end of p2pUpdateBssInfoForJOIN() */ /*----------------------------------------------------------------------------*/ /*! * @brief This function will validate the Rx Auth Frame and then return * the status code to AAA to indicate if need to perform following actions * when the specified conditions were matched. * * @param[in] prAdapter Pointer to the Adapter structure. * @param[in] prSwRfb Pointer to SW RFB data structure. * @param[in] pprStaRec Pointer to pointer of STA_RECORD_T structure. * @param[out] pu2StatusCode The Status Code of Validation Result * * @retval TRUE Reply the Auth * @retval FALSE Don't reply the Auth */ /*----------------------------------------------------------------------------*/ BOOLEAN p2pFuncValidateAuth ( IN P_ADAPTER_T prAdapter, IN P_SW_RFB_T prSwRfb, IN PP_STA_RECORD_T pprStaRec, OUT PUINT_16 pu2StatusCode ) { BOOLEAN fgReplyAuth = TRUE; P_BSS_INFO_T prP2pBssInfo = (P_BSS_INFO_T)NULL; P_STA_RECORD_T prStaRec = (P_STA_RECORD_T)NULL; P_WLAN_AUTH_FRAME_T prAuthFrame = (P_WLAN_AUTH_FRAME_T)NULL; DBGLOG(P2P, TRACE, ("p2pValidate Authentication Frame\n")); do { ASSERT_BREAK((prAdapter != NULL) && (prSwRfb != NULL) && (pprStaRec != NULL) && (pu2StatusCode != NULL)); /* P2P 3.2.8 */ *pu2StatusCode = STATUS_CODE_REQ_DECLINED; prP2pBssInfo = &(prAdapter->rWifiVar.arBssInfo[NETWORK_TYPE_P2P_INDEX]); prAuthFrame = (P_WLAN_AUTH_FRAME_T)prSwRfb->pvHeader; if (prP2pBssInfo->eCurrentOPMode != OP_MODE_ACCESS_POINT) { /* We are not under AP Mode yet. */ fgReplyAuth = FALSE; DBGLOG(P2P, WARN, ("Current OP mode is not under AP mode. (%d)\n", prP2pBssInfo->eCurrentOPMode)); break; } prStaRec = cnmGetStaRecByAddress(prAdapter, (UINT_8) NETWORK_TYPE_P2P_INDEX, prAuthFrame->aucSrcAddr); if (!prStaRec) { prStaRec = cnmStaRecAlloc(prAdapter, (UINT_8) NETWORK_TYPE_P2P_INDEX); /* TODO(Kevin): Error handling of allocation of STA_RECORD_T for * exhausted case and do removal of unused STA_RECORD_T. */ /* Send a message event to clean un-used STA_RECORD_T. */ ASSERT(prStaRec); COPY_MAC_ADDR(prStaRec->aucMacAddr, prAuthFrame->aucSrcAddr); prSwRfb->ucStaRecIdx = prStaRec->ucIndex; prStaRec->u2BSSBasicRateSet = prP2pBssInfo->u2BSSBasicRateSet; prStaRec->u2DesiredNonHTRateSet = RATE_SET_ERP_P2P; prStaRec->u2OperationalRateSet = RATE_SET_ERP_P2P; prStaRec->ucPhyTypeSet = PHY_TYPE_SET_802_11GN; prStaRec->eStaType = STA_TYPE_P2P_GC; /* NOTE(Kevin): Better to change state here, not at TX Done */ cnmStaRecChangeState(prAdapter, prStaRec, STA_STATE_1); } else { prSwRfb->ucStaRecIdx = prStaRec->ucIndex; if ((prStaRec->ucStaState > STA_STATE_1) && (IS_STA_IN_P2P(prStaRec))) { cnmStaRecChangeState(prAdapter, prStaRec, STA_STATE_1); p2pFuncResetStaRecStatus(prAdapter, prStaRec); bssRemoveStaRecFromClientList(prAdapter, prP2pBssInfo, prStaRec); } } if (prP2pBssInfo->rStaRecOfClientList.u4NumElem > P2P_MAXIMUM_CLIENT_COUNT || kalP2PMaxClients(prAdapter->prGlueInfo, prP2pBssInfo->rStaRecOfClientList.u4NumElem)) { /* GROUP limit full. */ /* P2P 3.2.8 */ DBGLOG(P2P, WARN, ("Group Limit Full. 
(%d)\n", (INT_16)prP2pBssInfo->rStaRecOfClientList.u4NumElem)); bssRemoveStaRecFromClientList(prAdapter, prP2pBssInfo, prStaRec); cnmStaRecFree(prAdapter, prStaRec, FALSE); break; } else { /* Hotspot Blacklist */ if(prAuthFrame->aucSrcAddr) { if(kalP2PCmpBlackList(prAdapter->prGlueInfo, prAuthFrame->aucSrcAddr)) { fgReplyAuth = FALSE; return fgReplyAuth; } } } //prStaRec->eStaType = STA_TYPE_INFRA_CLIENT; prStaRec->eStaType = STA_TYPE_P2P_GC; prStaRec->ucNetTypeIndex = NETWORK_TYPE_P2P_INDEX; /* Update Station Record - Status/Reason Code */ prStaRec->u2StatusCode = STATUS_CODE_SUCCESSFUL; prStaRec->ucJoinFailureCount = 0; *pprStaRec = prStaRec; *pu2StatusCode = STATUS_CODE_SUCCESSFUL; } while (FALSE); return fgReplyAuth; } /* p2pFuncValidateAuth */ VOID p2pFuncResetStaRecStatus ( IN P_ADAPTER_T prAdapter, IN P_STA_RECORD_T prStaRec ) { do { if ((prAdapter == NULL) || (prStaRec == NULL)) { ASSERT(FALSE); break; } prStaRec->u2StatusCode = STATUS_CODE_SUCCESSFUL; prStaRec->u2ReasonCode = REASON_CODE_RESERVED; prStaRec->ucJoinFailureCount = 0; prStaRec->fgTransmitKeyExist = FALSE; prStaRec->fgSetPwrMgtBit = FALSE; } while (FALSE); return; } /* p2pFuncResetStaRecStatus */ /*----------------------------------------------------------------------------*/ /*! * @brief The function is used to initialize the value of the connection settings for * P2P network * * @param (none) * * @return (none) */ /*----------------------------------------------------------------------------*/ VOID p2pFuncInitConnectionSettings ( IN P_ADAPTER_T prAdapter, IN P_P2P_CONNECTION_SETTINGS_T prP2PConnSettings ) { P_DEVICE_TYPE_T prDevType; UINT_8 aucDefaultDevName[] = P2P_DEFAULT_DEV_NAME; UINT_8 aucWfaOui[] = VENDOR_OUI_WFA; ASSERT(prP2PConnSettings); /* Setup Default Device Name */ prP2PConnSettings->ucDevNameLen = P2P_DEFAULT_DEV_NAME_LEN; kalMemCopy(prP2PConnSettings->aucDevName, aucDefaultDevName, sizeof(aucDefaultDevName)); /* Setup Primary Device Type (Big-Endian) */ prDevType = &prP2PConnSettings->rPrimaryDevTypeBE; prDevType->u2CategoryId = HTONS(P2P_DEFAULT_PRIMARY_CATEGORY_ID); prDevType->u2SubCategoryId = HTONS(P2P_DEFAULT_PRIMARY_SUB_CATEGORY_ID); prDevType->aucOui[0] = aucWfaOui[0]; prDevType->aucOui[1] = aucWfaOui[1]; prDevType->aucOui[2] = aucWfaOui[2]; prDevType->aucOui[3] = VENDOR_OUI_TYPE_WPS; /* Setup Secondary Device Type */ prP2PConnSettings->ucSecondaryDevTypeCount = 0; /* Setup Default Config Method */ prP2PConnSettings->eConfigMethodSelType = ENUM_CONFIG_METHOD_SEL_AUTO; prP2PConnSettings->u2ConfigMethodsSupport = P2P_DEFAULT_CONFIG_METHOD; prP2PConnSettings->u2TargetConfigMethod = 0; prP2PConnSettings->u2LocalConfigMethod = 0; prP2PConnSettings->fgIsPasswordIDRdy = FALSE; /* For Device Capability */ prP2PConnSettings->fgSupportServiceDiscovery = FALSE; prP2PConnSettings->fgSupportClientDiscoverability = TRUE; prP2PConnSettings->fgSupportConcurrentOperation = TRUE; prP2PConnSettings->fgSupportInfraManaged = FALSE; prP2PConnSettings->fgSupportInvitationProcedure = FALSE; /* For Group Capability */ #if CFG_SUPPORT_PERSISTENT_GROUP prP2PConnSettings->fgSupportPersistentP2PGroup = TRUE; #else prP2PConnSettings->fgSupportPersistentP2PGroup = FALSE; #endif prP2PConnSettings->fgSupportIntraBSSDistribution = TRUE; prP2PConnSettings->fgSupportCrossConnection = TRUE; prP2PConnSettings->fgSupportPersistentReconnect = FALSE; prP2PConnSettings->fgSupportOppPS = FALSE; prP2PConnSettings->u2CTWindow = P2P_CTWINDOW_DEFAULT; /* For Connection Settings. 
*/ prP2PConnSettings->eAuthMode = AUTH_MODE_OPEN; prP2PConnSettings->prTargetP2pDesc = NULL; prP2PConnSettings->ucSSIDLen = 0; /* Misc */ prP2PConnSettings->fgIsScanReqIssued = FALSE; prP2PConnSettings->fgIsServiceDiscoverIssued = FALSE; prP2PConnSettings->fgP2pGroupLimit = FALSE; prP2PConnSettings->ucOperatingChnl = 0; prP2PConnSettings->ucListenChnl = 0; prP2PConnSettings->ucTieBreaker = (UINT_8)(kalRandomNumber() & 0x1); prP2PConnSettings->eFormationPolicy = ENUM_P2P_FORMATION_POLICY_AUTO; return; } /* p2pFuncInitConnectionSettings */ /*----------------------------------------------------------------------------*/ /*! * @brief This function will validate the Rx Assoc Req Frame and then return * the status code to AAA to indicate if need to perform following actions * when the specified conditions were matched. * * @param[in] prAdapter Pointer to the Adapter structure. * @param[in] prSwRfb Pointer to SW RFB data structure. * @param[out] pu2StatusCode The Status Code of Validation Result * * @retval TRUE Reply the Assoc Resp * @retval FALSE Don't reply the Assoc Resp */ /*----------------------------------------------------------------------------*/ BOOLEAN p2pFuncValidateAssocReq ( IN P_ADAPTER_T prAdapter, IN P_SW_RFB_T prSwRfb, OUT PUINT_16 pu2StatusCode ) { BOOLEAN fgReplyAssocResp = TRUE; P_WLAN_ASSOC_REQ_FRAME_T prAssocReqFrame = (P_WLAN_ASSOC_REQ_FRAME_T)NULL; P_STA_RECORD_T prStaRec = (P_STA_RECORD_T)NULL; P_BSS_INFO_T prP2pBssInfo = (P_BSS_INFO_T)NULL; #if CFG_SUPPORT_WFD P_WFD_CFG_SETTINGS_T prWfdCfgSettings = (P_WFD_CFG_SETTINGS_T)NULL; P_WFD_ATTRIBUTE_T prWfdAttribute = (P_WFD_ATTRIBUTE_T)NULL; BOOLEAN fgNeedFree = FALSE; #endif /* TODO(Kevin): Call P2P functions to check .. 2. Check we can accept connection from thsi peer a. If we are in PROVISION state, only accept the peer we do the GO formation previously. b. If we are in OPERATION state, only accept the other peer when P2P_GROUP_LIMIT is 0. 3. Check Black List here. */ do { ASSERT_BREAK((prAdapter != NULL) && (prSwRfb != NULL) && (pu2StatusCode != NULL)); *pu2StatusCode = STATUS_CODE_REQ_DECLINED; prP2pBssInfo = &(prAdapter->rWifiVar.arBssInfo[NETWORK_TYPE_P2P_INDEX]); prAssocReqFrame = (P_WLAN_ASSOC_REQ_FRAME_T)prSwRfb->pvHeader; prStaRec = cnmGetStaRecByIndex(prAdapter, prSwRfb->ucStaRecIdx); if (prStaRec == NULL) { /* Station record should be ready while RX AUTH frame. */ fgReplyAssocResp = FALSE; ASSERT(FALSE); break; } else { prStaRec->ucRCPI = prSwRfb->prHifRxHdr->ucRcpi; } prStaRec->u2DesiredNonHTRateSet &= prP2pBssInfo->u2OperationalRateSet; prStaRec->ucDesiredPhyTypeSet = prStaRec->ucPhyTypeSet & prP2pBssInfo->ucPhyTypeSet; if (prStaRec->ucDesiredPhyTypeSet == 0) { /* The station only support 11B rate. */ *pu2StatusCode = STATUS_CODE_ASSOC_DENIED_RATE_NOT_SUPPORTED; break; } #if CFG_SUPPORT_WFD && 1 //LOG_FUNC("Skip check WFD IE becasue some API is not ready\n"); /* Eddie */ if (!prAdapter->rWifiVar.prP2pFsmInfo) { fgReplyAssocResp = FALSE; ASSERT(FALSE); break; } prWfdCfgSettings = &prAdapter->rWifiVar.prP2pFsmInfo->rWfdConfigureSettings; DBGLOG(P2P, INFO,("Current WfdCfgSettings wfd_en %u wfd_info 0x%x wfd_policy 0x%x wfd_flag 0x%x\n", prWfdCfgSettings->ucWfdEnable, prWfdCfgSettings->u2WfdDevInfo, prWfdCfgSettings->u4WfdPolicy, prWfdCfgSettings->u4WfdFlag)); /* Eddie */ if (prWfdCfgSettings->ucWfdEnable) { if (prWfdCfgSettings->u4WfdPolicy & BIT(6)) { /* Rejected all. 
*/ break; } else { //UINT_16 u2AttriListLen = 0; UINT_16 u2WfdDevInfo = 0; P_WFD_DEVICE_INFORMATION_IE_T prAttriWfdDevInfo = (P_WFD_DEVICE_INFORMATION_IE_T)NULL; //fgNeedFree = p2pFuncGetAttriList(prAdapter, // VENDOR_OUI_TYPE_WFD, // (PUINT_8)prAssocReqFrame->aucInfoElem, // (prSwRfb->u2PacketLen - OFFSET_OF(WLAN_ASSOC_REQ_FRAME_T, aucInfoElem)), // (PPUINT_8)&prWfdAttribute, // &u2AttriListLen); prAttriWfdDevInfo = (P_WFD_DEVICE_INFORMATION_IE_T) p2pFuncGetSpecAttri(prAdapter, VENDOR_OUI_TYPE_WFD, (PUINT_8)prAssocReqFrame->aucInfoElem, (prSwRfb->u2PacketLen - OFFSET_OF(WLAN_ASSOC_REQ_FRAME_T, aucInfoElem)), WFD_ATTRI_ID_DEV_INFO); if ((prWfdCfgSettings->u4WfdPolicy & BIT(5)) && (prAttriWfdDevInfo != NULL)) { /* Rejected with WFD IE. */ break; } if ((prWfdCfgSettings->u4WfdPolicy & BIT(0)) && (prAttriWfdDevInfo == NULL)) { /* Rejected without WFD IE. */ break; } if (prAttriWfdDevInfo != NULL) { //prAttriWfdDevInfo = (P_WFD_DEVICE_INFORMATION_IE_T)p2pFuncGetSpecAttri(prAdapter, // VENDOR_OUI_TYPE_WFD, // (PUINT_8)prWfdAttribute, // u2AttriListLen, // WFD_ATTRI_ID_DEV_INFO); //if (prAttriWfdDevInfo == NULL) { // /* No such attribute. */ // break; //} WLAN_GET_FIELD_BE16(&prAttriWfdDevInfo->u2WfdDevInfo, &u2WfdDevInfo); DBGLOG(P2P, INFO,("RX Assoc Req WFD Info:0x%x.\n", u2WfdDevInfo)); if ((prWfdCfgSettings->u4WfdPolicy & BIT(1)) && ((u2WfdDevInfo & 0x3) == 0x0)) { /* Rejected because of SOURCE. */ break; } if ((prWfdCfgSettings->u4WfdPolicy & BIT(2)) && ((u2WfdDevInfo & 0x3) == 0x1)) { /* Rejected because of Primary Sink. */ break; } if ((prWfdCfgSettings->u4WfdPolicy & BIT(3)) && ((u2WfdDevInfo & 0x3) == 0x2)) { /* Rejected because of Secondary Sink. */ break; } if ((prWfdCfgSettings->u4WfdPolicy & BIT(4)) && ((u2WfdDevInfo & 0x3) == 0x3)) { /* Rejected because of Source & Primary Sink. */ break; } /* Check role */ if(prWfdCfgSettings->u4WfdFlag & WFD_FLAGS_DEV_INFO_VALID) { if((prWfdCfgSettings->u2WfdDevInfo & BITS(0,1)) == 0x3) { //P_MSG_WFD_CONFIG_SETTINGS_CHANGED_T prMsgWfdCfgUpdate = (P_MSG_WFD_CONFIG_SETTINGS_CHANGED_T)NULL; UINT_16 u2DevInfo = prWfdCfgSettings->u2WfdDevInfo; /* We may change role here if we are dual role */ if((u2WfdDevInfo & BITS(0,1)) == 0x00 /* Peer is Source*/) { DBGLOG(P2P, INFO,("WFD: Switch role to primary sink\n")); prWfdCfgSettings->u2WfdDevInfo &= ~BITS(0,1); prWfdCfgSettings->u2WfdDevInfo |= 0x1; /* event to annonce the role is chanaged to P-Sink */ } else if((u2WfdDevInfo & BITS(0,1)) == 0x01 /* Peer is P-Sink */) { DBGLOG(P2P, INFO,("WFD: Switch role to source\n")); prWfdCfgSettings->u2WfdDevInfo &= ~BITS(0,1); /* event to annonce the role is chanaged to Source */ } else { DBGLOG(P2P, INFO,("WFD: Peer role is wrong type(dev 0x%x)\n", (u2DevInfo))); DBGLOG(P2P, INFO,("WFD: Switch role to source\n")); prWfdCfgSettings->u2WfdDevInfo &= ~BITS(0,1); /* event to annonce the role is chanaged to Source */ } p2pFsmRunEventWfdSettingUpdate (prAdapter,NULL); } /* Dual role p2p->wfd_params->WfdDevInfo */ } /* WFD_FLAG_DEV_INFO_VALID */ } else { /* Without WFD IE. * Do nothing. Accept the connection request. */ } } } /* ucWfdEnable */ #endif *pu2StatusCode = STATUS_CODE_SUCCESSFUL; } while (FALSE); #if CFG_SUPPORT_WFD if ((prWfdAttribute) && (fgNeedFree)) { kalMemFree(prWfdAttribute, VIR_MEM_TYPE, WPS_MAXIMUM_ATTRIBUTES_CACHE_SIZE); } #endif return fgReplyAssocResp; } /* p2pFuncValidateAssocReq */ /*----------------------------------------------------------------------------*/ /*! 
* @brief This function is used to check whether an IE is a WFA vendor specific IE * and, if so, return its OUI subtype. * * @retval TRUE The IE is a WFA vendor specific IE; *pucOuiType is updated. * @retval FALSE The IE is not a WFA vendor specific IE. */ /*----------------------------------------------------------------------------*/ BOOLEAN p2pFuncParseCheckForP2PInfoElem ( IN P_ADAPTER_T prAdapter, IN PUINT_8 pucBuf, OUT PUINT_8 pucOuiType ) { UINT_8 aucWfaOui[] = VENDOR_OUI_WFA_SPECIFIC; P_IE_WFA_T prWfaIE = (P_IE_WFA_T)NULL; do { ASSERT_BREAK((prAdapter != NULL) && (pucBuf != NULL) && (pucOuiType != NULL)); prWfaIE = (P_IE_WFA_T)pucBuf; if (IE_LEN(pucBuf) <= ELEM_MIN_LEN_WFA_OUI_TYPE_SUBTYPE) { break; } else if (prWfaIE->aucOui[0] != aucWfaOui[0] || prWfaIE->aucOui[1] != aucWfaOui[1] || prWfaIE->aucOui[2] != aucWfaOui[2]) { break; } *pucOuiType = prWfaIE->ucOuiType; return TRUE; } while (FALSE); return FALSE; } /* p2pFuncParseCheckForP2PInfoElem */ /*----------------------------------------------------------------------------*/ /*! * @brief This function will validate the Rx Probe Request Frame and then return * the result to BSS to indicate whether the corresponding Probe Response Frame * needs to be sent when the specified conditions are matched. * * @param[in] prAdapter Pointer to the Adapter structure. * @param[in] prSwRfb Pointer to SW RFB data structure. * @param[out] pu4ControlFlags Control flags for replying the Probe Response * * @retval TRUE Reply the Probe Response * @retval FALSE Don't reply the Probe Response */ /*----------------------------------------------------------------------------*/ BOOLEAN p2pFuncValidateProbeReq ( IN P_ADAPTER_T prAdapter, IN P_SW_RFB_T prSwRfb, OUT PUINT_32 pu4ControlFlags ) { BOOLEAN fgIsReplyProbeRsp = FALSE; P_P2P_FSM_INFO_T prP2pFsmInfo = (P_P2P_FSM_INFO_T)NULL; DEBUGFUNC("p2pFuncValidateProbeReq"); DBGLOG(P2P, TRACE, ("p2pFuncValidateProbeReq\n")); do { ASSERT_BREAK((prAdapter != NULL) && (prSwRfb != NULL)); prP2pFsmInfo = prAdapter->rWifiVar.prP2pFsmInfo; if (prP2pFsmInfo->u4P2pPacketFilter & PARAM_PACKET_FILTER_PROBE_REQ) { printk("p2pFuncValidateProbeReq\n"); /* Leave the probe response to p2p_supplicant. */ kalP2PIndicateRxMgmtFrame(prAdapter->prGlueInfo, prSwRfb); } } while (FALSE); return fgIsReplyProbeRsp; } /* end of p2pFuncValidateProbeReq() */ /*----------------------------------------------------------------------------*/ /*! * @brief This function will indicate the Rx Action Frame to the upper layer * (p2p_supplicant) if the action frame packet filter is enabled. * * @param[in] prAdapter Pointer to the Adapter structure. * @param[in] prSwRfb Pointer to SW RFB data structure. * * @return none */ /*----------------------------------------------------------------------------*/ VOID p2pFuncValidateRxActionFrame ( IN P_ADAPTER_T prAdapter, IN P_SW_RFB_T prSwRfb ) { P_P2P_FSM_INFO_T prP2pFsmInfo = (P_P2P_FSM_INFO_T)NULL; DEBUGFUNC("p2pFuncValidateRxActionFrame"); do { ASSERT_BREAK((prAdapter != NULL) && (prSwRfb != NULL)); prP2pFsmInfo = prAdapter->rWifiVar.prP2pFsmInfo; if (prP2pFsmInfo->u4P2pPacketFilter & PARAM_PACKET_FILTER_ACTION_FRAME) { /* Leave the action frame to p2p_supplicant. 
*/ kalP2PIndicateRxMgmtFrame(prAdapter->prGlueInfo, prSwRfb); } } while (FALSE); return; } /* p2pFuncValidateRxActionFrame */ BOOLEAN p2pFuncIsAPMode ( IN P_P2P_FSM_INFO_T prP2pFsmInfo ) { if (prP2pFsmInfo) { if (prP2pFsmInfo->fgIsWPSMode == 1) { return FALSE; } return prP2pFsmInfo->fgIsApMode; } else { return FALSE; } } /* p2pFuncIsAPMode */ VOID p2pFuncParseBeaconContent ( IN P_ADAPTER_T prAdapter, IN P_BSS_INFO_T prP2pBssInfo, IN PUINT_8 pucIEInfo, IN UINT_32 u4IELen ) { PUINT_8 pucIE = (PUINT_8)NULL; UINT_16 u2Offset = 0; P_P2P_SPECIFIC_BSS_INFO_T prP2pSpecificBssInfo = (P_P2P_SPECIFIC_BSS_INFO_T)NULL; BOOL ucNewSecMode = FALSE; BOOL ucOldSecMode = FALSE; do { ASSERT_BREAK((prAdapter != NULL) && (prP2pBssInfo != NULL)); if (u4IELen == 0) { break; } prP2pSpecificBssInfo = prAdapter->rWifiVar.prP2pSpecificBssInfo; prP2pSpecificBssInfo->u2AttributeLen = 0; ASSERT_BREAK(pucIEInfo != NULL); pucIE = pucIEInfo; ucOldSecMode = kalP2PGetCipher(prAdapter->prGlueInfo); IE_FOR_EACH(pucIE, u4IELen, u2Offset) { switch (IE_ID(pucIE)) { case ELEM_ID_SSID: /* 0 */ /* V */ /* Done */ { DBGLOG(P2P, TRACE, ("SSID update\n")); /* Update when starting GO. */ COPY_SSID(prP2pBssInfo->aucSSID, prP2pBssInfo->ucSSIDLen, SSID_IE(pucIE)->aucSSID, SSID_IE(pucIE)->ucLength); COPY_SSID(prP2pSpecificBssInfo->aucGroupSsid, prP2pSpecificBssInfo->u2GroupSsidLen, SSID_IE(pucIE)->aucSSID, SSID_IE(pucIE)->ucLength); } break; case ELEM_ID_SUP_RATES: /* 1 */ /* V */ /* Done */ { DBGLOG(P2P, TRACE, ("Support Rate IE\n")); kalMemCopy(prP2pBssInfo->aucAllSupportedRates, SUP_RATES_IE(pucIE)->aucSupportedRates, SUP_RATES_IE(pucIE)->ucLength); prP2pBssInfo->ucAllSupportedRatesLen = SUP_RATES_IE(pucIE)->ucLength; DBGLOG_MEM8(P2P, TRACE, SUP_RATES_IE(pucIE)->aucSupportedRates, SUP_RATES_IE(pucIE)->ucLength); } break; case ELEM_ID_DS_PARAM_SET: /* 3 */ /* V */ /* Done */ { P_P2P_CONNECTION_SETTINGS_T prP2pConnSettings = prAdapter->rWifiVar.prP2PConnSettings; DBGLOG(P2P, TRACE, ("DS PARAM IE\n")); ASSERT(prP2pConnSettings->ucOperatingChnl == DS_PARAM_IE(pucIE)->ucCurrChnl); if (prP2pConnSettings->eBand != BAND_2G4) { ASSERT(FALSE); break; } //prP2pBssInfo->ucPrimaryChannel = DS_PARAM_IE(pucIE)->ucCurrChnl; //prP2pBssInfo->eBand = BAND_2G4; } break; case ELEM_ID_TIM: /* 5 */ /* V */ DBGLOG(P2P, TRACE, ("TIM IE\n")); TIM_IE(pucIE)->ucDTIMPeriod = prP2pBssInfo->ucDTIMPeriod; break; case ELEM_ID_ERP_INFO: /* 42 */ /* V */ { #if 1 /* This IE may change dynamically because FW detection changes are required. */ DBGLOG(P2P, TRACE, ("ERP IE will be overwritten by driver\n")); DBGLOG(P2P, TRACE, (" ucERP: %x. \n", ERP_INFO_IE(pucIE)->ucERP)); #else /* This IE may change dynamically because FW detection changes are required. */ DBGLOG(P2P, TRACE, ("ERP IE.\n")); prP2pBssInfo->ucPhyTypeSet |= PHY_TYPE_SET_802_11GN; ASSERT(prP2pBssInfo->eBand == BAND_2G4); prP2pBssInfo->fgObssErpProtectMode = ((ERP_INFO_IE(pucIE)->ucERP & ERP_INFO_USE_PROTECTION)? TRUE : FALSE); prP2pBssInfo->fgErpProtectMode = ((ERP_INFO_IE(pucIE)->ucERP & (ERP_INFO_USE_PROTECTION | ERP_INFO_NON_ERP_PRESENT))? 
TRUE : FALSE); #endif } break; case ELEM_ID_HT_CAP: /* 45 */ /* V */ { #if 1 DBGLOG(P2P, TRACE, ("HT CAP IE would be overwritten by driver\n")); DBGLOG(P2P, TRACE, ("HT Cap Info:%x, AMPDU Param:%x\n", HT_CAP_IE(pucIE)->u2HtCapInfo, HT_CAP_IE(pucIE)->ucAmpduParam)); DBGLOG(P2P, TRACE, ("HT Extended Cap Info:%x, TX Beamforming Cap Info:%lx, Ant Selection Cap Info:%x\n", HT_CAP_IE(pucIE)->u2HtExtendedCap, HT_CAP_IE(pucIE)->u4TxBeamformingCap, HT_CAP_IE(pucIE)->ucAselCap)); #else prP2pBssInfo->ucPhyTypeSet |= PHY_TYPE_SET_802_11N; /* u2HtCapInfo */ if ((HT_CAP_IE(pucIE)->u2HtCapInfo & (HT_CAP_INFO_SUP_CHNL_WIDTH | HT_CAP_INFO_SHORT_GI_40M | HT_CAP_INFO_DSSS_CCK_IN_40M)) == 0) { prP2pBssInfo->fgAssoc40mBwAllowed = FALSE; } else { prP2pBssInfo->fgAssoc40mBwAllowed = TRUE; } if ((HT_CAP_IE(pucIE)->u2HtCapInfo & (HT_CAP_INFO_SHORT_GI_20M | HT_CAP_INFO_SHORT_GI_40M)) == 0) { prAdapter->rWifiVar.rConnSettings.fgRxShortGIDisabled = TRUE; } else { prAdapter->rWifiVar.rConnSettings.fgRxShortGIDisabled = FALSE; } /* ucAmpduParam */ DBGLOG(P2P, TRACE, ("AMPDU setting from supplicant:0x%x, & default value:0x%x\n", (UINT_8)HT_CAP_IE(pucIE)->ucAmpduParam, (UINT_8)AMPDU_PARAM_DEFAULT_VAL)); /* rSupMcsSet */ /* Nothing to do; the field takes its default value from other configuration. */ //HT_CAP_IE(pucIE)->rSupMcsSet; /* u2HtExtendedCap */ ASSERT(HT_CAP_IE(pucIE)->u2HtExtendedCap == (HT_EXT_CAP_DEFAULT_VAL & ~(HT_EXT_CAP_PCO | HT_EXT_CAP_PCO_TRANS_TIME_NONE))); /* u4TxBeamformingCap */ ASSERT(HT_CAP_IE(pucIE)->u4TxBeamformingCap == TX_BEAMFORMING_CAP_DEFAULT_VAL); /* ucAselCap */ ASSERT(HT_CAP_IE(pucIE)->ucAselCap == ASEL_CAP_DEFAULT_VAL); #endif } break; case ELEM_ID_RSN: /* 48 */ /* V */ { RSN_INFO_T rRsnIe; DBGLOG(P2P, TRACE, ("RSN IE\n")); kalP2PSetCipher(prAdapter->prGlueInfo, IW_AUTH_CIPHER_CCMP); ucNewSecMode = TRUE; if (rsnParseRsnIE(prAdapter, RSN_IE(pucIE), &rRsnIe)) { prP2pBssInfo = &prAdapter->rWifiVar.arBssInfo[NETWORK_TYPE_P2P_INDEX]; prP2pBssInfo->u4RsnSelectedGroupCipher = RSN_CIPHER_SUITE_CCMP; prP2pBssInfo->u4RsnSelectedPairwiseCipher = RSN_CIPHER_SUITE_CCMP; prP2pBssInfo->u4RsnSelectedAKMSuite = RSN_AKM_SUITE_PSK; prP2pBssInfo->u2RsnSelectedCapInfo = rRsnIe.u2RsnCap; } } break; case ELEM_ID_EXTENDED_SUP_RATES: /* 50 */ /* V */ /* Note: ELEM_ID_SUP_RATES must appear before ELEM_ID_EXTENDED_SUP_RATES. */ DBGLOG(P2P, TRACE, ("Ex Support Rate IE\n")); kalMemCopy(&(prP2pBssInfo->aucAllSupportedRates[prP2pBssInfo->ucAllSupportedRatesLen]), EXT_SUP_RATES_IE(pucIE)->aucExtSupportedRates, EXT_SUP_RATES_IE(pucIE)->ucLength); DBGLOG_MEM8(P2P, TRACE, EXT_SUP_RATES_IE(pucIE)->aucExtSupportedRates, EXT_SUP_RATES_IE(pucIE)->ucLength); prP2pBssInfo->ucAllSupportedRatesLen += EXT_SUP_RATES_IE(pucIE)->ucLength; break; case ELEM_ID_HT_OP: /* 61 */ /* V */ // TODO: { #if 1 DBGLOG(P2P, TRACE, ("HT OP IE would be overwritten by driver\n")); DBGLOG(P2P, TRACE, (" Primary Channel: %x, Info1: %x, Info2: %x, Info3: %x\n", HT_OP_IE(pucIE)->ucPrimaryChannel, HT_OP_IE(pucIE)->ucInfo1, HT_OP_IE(pucIE)->u2Info2, HT_OP_IE(pucIE)->u2Info3)); #else UINT_16 u2Info2 = 0; prP2pBssInfo->ucPhyTypeSet |= PHY_TYPE_SET_802_11N; DBGLOG(P2P, TRACE, ("HT OP IE\n")); /* ucPrimaryChannel. 
*/ ASSERT(HT_OP_IE(pucIE)->ucPrimaryChannel == prP2pBssInfo->ucPrimaryChannel); /* ucInfo1 */ prP2pBssInfo->ucHtOpInfo1 = HT_OP_IE(pucIE)->ucInfo1; /* u2Info2 */ u2Info2 = HT_OP_IE(pucIE)->u2Info2; if (u2Info2 & HT_OP_INFO2_NON_GF_HT_STA_PRESENT) { ASSERT(prP2pBssInfo->eGfOperationMode != GF_MODE_NORMAL); u2Info2 &= ~HT_OP_INFO2_NON_GF_HT_STA_PRESENT; } if (u2Info2 & HT_OP_INFO2_OBSS_NON_HT_STA_PRESENT) { prP2pBssInfo->eObssHtProtectMode = HT_PROTECT_MODE_NON_MEMBER; u2Info2 &= ~HT_OP_INFO2_OBSS_NON_HT_STA_PRESENT; } switch (u2Info2 & HT_OP_INFO2_HT_PROTECTION) { case HT_PROTECT_MODE_NON_HT: prP2pBssInfo->eHtProtectMode = HT_PROTECT_MODE_NON_HT; break; case HT_PROTECT_MODE_NON_MEMBER: prP2pBssInfo->eHtProtectMode = HT_PROTECT_MODE_NONE; prP2pBssInfo->eObssHtProtectMode = HT_PROTECT_MODE_NON_MEMBER; break; default: prP2pBssInfo->eHtProtectMode = HT_OP_IE(pucIE)->u2Info2; break; } /* u2Info3 */ prP2pBssInfo->u2HtOpInfo3 = HT_OP_IE(pucIE)->u2Info3; /* aucBasicMcsSet */ DBGLOG_MEM8(P2P, TRACE, HT_OP_IE(pucIE)->aucBasicMcsSet, 16); #endif } break; case ELEM_ID_OBSS_SCAN_PARAMS: /* 74 */ /* V */ { DBGLOG(P2P, TRACE, ("ELEM_ID_OBSS_SCAN_PARAMS IE would be replaced by driver\n")); } break; case ELEM_ID_EXTENDED_CAP: /* 127 */ /* V */ { DBGLOG(P2P, TRACE, ("ELEM_ID_EXTENDED_CAP IE would be replaced by driver\n")); } break; case ELEM_ID_VENDOR: /* 221 */ /* V */ DBGLOG(P2P, TRACE, ("Vendor Specific IE\n")); { UINT_8 ucOuiType; UINT_16 u2SubTypeVersion; if (rsnParseCheckForWFAInfoElem(prAdapter, pucIE, &ucOuiType, &u2SubTypeVersion)) { if ((ucOuiType == VENDOR_OUI_TYPE_WPA) && (u2SubTypeVersion == VERSION_WPA)) { kalP2PSetCipher(prAdapter->prGlueInfo, IW_AUTH_CIPHER_TKIP); ucNewSecMode = TRUE; } else if ((ucOuiType == VENDOR_OUI_TYPE_WPS)) { kalP2PUpdateWSC_IE(prAdapter->prGlueInfo, 0, pucIE, IE_SIZE(pucIE)); } // WMM here. } else if (p2pFuncParseCheckForP2PInfoElem(prAdapter, pucIE, &ucOuiType)) { // TODO Store the whole P2P IE & generate later. // Be aware that there may be one or more P2P IEs. if (ucOuiType == VENDOR_OUI_TYPE_P2P) { kalMemCopy(&prP2pSpecificBssInfo->aucAttributesCache[prP2pSpecificBssInfo->u2AttributeLen], pucIE, IE_SIZE(pucIE)); prP2pSpecificBssInfo->u2AttributeLen += IE_SIZE(pucIE); } else if(ucOuiType == VENDOR_OUI_TYPE_WFD) { kalMemCopy(&prP2pSpecificBssInfo->aucAttributesCache[prP2pSpecificBssInfo->u2AttributeLen], pucIE, IE_SIZE(pucIE)); prP2pSpecificBssInfo->u2AttributeLen += IE_SIZE(pucIE); } } else { kalMemCopy(&prP2pSpecificBssInfo->aucAttributesCache[prP2pSpecificBssInfo->u2AttributeLen], pucIE, IE_SIZE(pucIE)); prP2pSpecificBssInfo->u2AttributeLen += IE_SIZE(pucIE); DBGLOG(P2P, TRACE, ("Driver unprocessed Vendor Specific IE\n")); ASSERT(FALSE); } // TODO: Store other Vendor IEs except for the WMM Param. 
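/* Note on the IE caching scheme above (inferred from the calls in this file): kalP2PUpdateWSC_IE() keeps per-index IE buffers in the glue layer -- index 0 is used here for the WSC IE taken from the supplicant's beacon, while p2pFuncProcessP2pProbeRsp() below uses index 1 for the P2P IE and index 2 for the WSC IE of the probe response. P2P and WFD IEs parsed from the beacon are appended to aucAttributesCache and later regenerated into the beacon by p2pFuncGenerateP2p_IEForBeacon(). */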
} break; default: DBGLOG(P2P, TRACE, ("Unprocessed element ID:%d \n", IE_ID(pucIE))); break; } } if (!ucNewSecMode && ucOldSecMode) kalP2PSetCipher(prAdapter->prGlueInfo, IW_AUTH_CIPHER_NONE); } while (FALSE); return; } /* p2pFuncParseBeaconContent */ P_BSS_DESC_T p2pFuncKeepOnConnection ( IN P_ADAPTER_T prAdapter, IN P_P2P_CONNECTION_REQ_INFO_T prConnReqInfo, IN P_P2P_CHNL_REQ_INFO_T prChnlReqInfo, IN P_P2P_SCAN_REQ_INFO_T prScanReqInfo ) { P_BSS_DESC_T prTargetBss = (P_BSS_DESC_T)NULL; P_BSS_INFO_T prP2pBssInfo = (P_BSS_INFO_T)NULL; do { ASSERT_BREAK((prAdapter != NULL) && (prConnReqInfo != NULL) && (prChnlReqInfo != NULL) && (prScanReqInfo != NULL)); prP2pBssInfo = &(prAdapter->rWifiVar.arBssInfo[NETWORK_TYPE_P2P_INDEX]); if (prP2pBssInfo->eCurrentOPMode != OP_MODE_INFRASTRUCTURE) { break; } // Update connection request information. ASSERT(prConnReqInfo->fgIsConnRequest == TRUE); /* Find BSS Descriptor first. */ prTargetBss = scanP2pSearchDesc(prAdapter, prP2pBssInfo, prConnReqInfo); if (prTargetBss == NULL) { /* Update scan parameter... to scan target device. */ prScanReqInfo->ucNumChannelList = 1; prScanReqInfo->eScanType = SCAN_TYPE_ACTIVE_SCAN; prScanReqInfo->eChannelSet = SCAN_CHANNEL_FULL; prScanReqInfo->u4BufLength = 0; /* Prevent other P2P ID in IE. */ prScanReqInfo->fgIsAbort = TRUE; } else { prChnlReqInfo->u8Cookie = 0; prChnlReqInfo->ucReqChnlNum = prTargetBss->ucChannelNum; prChnlReqInfo->eBand = prTargetBss->eBand; prChnlReqInfo->eChnlSco = prTargetBss->eSco; prChnlReqInfo->u4MaxInterval = AIS_JOIN_CH_REQUEST_INTERVAL; prChnlReqInfo->eChannelReqType = CHANNEL_REQ_TYPE_GC_JOIN_REQ; } } while (FALSE); return prTargetBss; } /* p2pFuncKeepOnConnection */ /* Currently Only for ASSOC Response Frame. */ VOID p2pFuncStoreAssocRspIEBuffer ( IN P_ADAPTER_T prAdapter, IN P_SW_RFB_T prSwRfb ) { P_P2P_FSM_INFO_T prP2pFsmInfo = (P_P2P_FSM_INFO_T)NULL; P_P2P_JOIN_INFO_T prJoinInfo = (P_P2P_JOIN_INFO_T)NULL; P_WLAN_ASSOC_RSP_FRAME_T prAssocRspFrame = (P_WLAN_ASSOC_RSP_FRAME_T)NULL; INT_16 i2IELen = 0; do { ASSERT_BREAK((prAdapter != NULL) && (prSwRfb != NULL)); prAssocRspFrame = (P_WLAN_ASSOC_RSP_FRAME_T)prSwRfb->pvHeader; if (prAssocRspFrame->u2FrameCtrl != MAC_FRAME_ASSOC_RSP) { break; } i2IELen = prSwRfb->u2PacketLen - (WLAN_MAC_HEADER_LEN + CAP_INFO_FIELD_LEN + STATUS_CODE_FIELD_LEN + AID_FIELD_LEN); if (i2IELen <= 0) { break; } prP2pFsmInfo = prAdapter->rWifiVar.prP2pFsmInfo; prJoinInfo = &(prP2pFsmInfo->rJoinInfo); prJoinInfo->u4BufLength = (UINT_32)i2IELen; kalMemCopy(prJoinInfo->aucIEBuf, prAssocRspFrame->aucInfoElem, prJoinInfo->u4BufLength); } while (FALSE); return; } /* p2pFuncStoreAssocRspIEBuffer */ /*----------------------------------------------------------------------------*/ /*! * \brief This routine is called to register or deregister a management frame * type for RX indication and to update the P2P packet filter accordingly. * * \param[in] prAdapter Pointer to the Adapter structure. * \param[in] u2FrameType The management frame type to register. * \param[in] fgIsRegistered TRUE to register, FALSE to deregister. * \param[out] pu4P2pPacketFilter Returns the updated P2P packet filter. * * \return none */ /*----------------------------------------------------------------------------*/ VOID p2pFuncMgmtFrameRegister ( IN P_ADAPTER_T prAdapter, IN UINT_16 u2FrameType, IN BOOLEAN fgIsRegistered, OUT PUINT_32 pu4P2pPacketFilter ) { UINT_32 u4NewPacketFilter = 0; DEBUGFUNC("p2pFuncMgmtFrameRegister"); do { ASSERT_BREAK(prAdapter != NULL); if (pu4P2pPacketFilter) { u4NewPacketFilter = *pu4P2pPacketFilter; } switch (u2FrameType) { case MAC_FRAME_PROBE_REQ: if (fgIsRegistered) { u4NewPacketFilter |= PARAM_PACKET_FILTER_PROBE_REQ; DBGLOG(P2P, TRACE, ("Open packet filter probe request\n")); } else { u4NewPacketFilter &= ~PARAM_PACKET_FILTER_PROBE_REQ; DBGLOG(P2P, TRACE, ("Close packet filter probe request\n")); } break; case MAC_FRAME_ACTION: if (fgIsRegistered) { u4NewPacketFilter |= PARAM_PACKET_FILTER_ACTION_FRAME; DBGLOG(P2P, TRACE, ("Open packet filter action frame.\n")); } else { u4NewPacketFilter &= ~PARAM_PACKET_FILTER_ACTION_FRAME; DBGLOG(P2P, TRACE, ("Close packet filter action frame.\n")); } break; default: DBGLOG(P2P, TRACE, ("Ask frog to add code for mgmt:%x\n", u2FrameType)); break; } if (pu4P2pPacketFilter) { *pu4P2pPacketFilter = u4NewPacketFilter; } // u4NewPacketFilter |= prAdapter->u4OsPacketFilter; prAdapter->u4OsPacketFilter &= ~PARAM_PACKET_FILTER_P2P_MASK; prAdapter->u4OsPacketFilter |= u4NewPacketFilter; DBGLOG(P2P, TRACE, ("P2P Set PACKET filter:0x%lx\n", prAdapter->u4OsPacketFilter)); wlanSendSetQueryCmd(prAdapter, CMD_ID_SET_RX_FILTER, TRUE, FALSE, FALSE, nicCmdEventSetCommon, nicOidCmdTimeoutCommon, sizeof(UINT_32), (PUINT_8)&prAdapter->u4OsPacketFilter, &u4NewPacketFilter, sizeof(u4NewPacketFilter) ); } while (FALSE); return; } /* p2pFuncMgmtFrameRegister */ VOID p2pFuncUpdateMgmtFrameRegister ( IN P_ADAPTER_T prAdapter, IN UINT_32 u4OsFilter ) { do { prAdapter->rWifiVar.prP2pFsmInfo->u4P2pPacketFilter = u4OsFilter; if ((prAdapter->u4OsPacketFilter & PARAM_PACKET_FILTER_P2P_MASK) ^ u4OsFilter) { prAdapter->u4OsPacketFilter &= ~PARAM_PACKET_FILTER_P2P_MASK; prAdapter->u4OsPacketFilter |= (u4OsFilter & PARAM_PACKET_FILTER_P2P_MASK); wlanSendSetQueryCmd(prAdapter, CMD_ID_SET_RX_FILTER, TRUE, FALSE, FALSE, nicCmdEventSetCommon, nicOidCmdTimeoutCommon, sizeof(UINT_32), (PUINT_8)&prAdapter->u4OsPacketFilter, &u4OsFilter, sizeof(u4OsFilter) ); DBGLOG(P2P, TRACE, ("P2P Set PACKET filter:0x%lx\n", prAdapter->u4OsPacketFilter)); } } while (FALSE); return; } /* p2pFuncUpdateMgmtFrameRegister */ VOID p2pFuncGetStationInfo ( IN P_ADAPTER_T prAdapter, IN PUINT_8 pucMacAddr, OUT P_P2P_STATION_INFO_T prStaInfo ) { do { ASSERT_BREAK((prAdapter != NULL) && (pucMacAddr != NULL) && (prStaInfo != NULL)); prStaInfo->u4InactiveTime = 0; prStaInfo->u4RxBytes = 0; prStaInfo->u4TxBytes = 0; prStaInfo->u4RxPackets = 0; prStaInfo->u4TxPackets = 0; // TODO: } while (FALSE); return; } /* p2pFuncGetStationInfo */ BOOLEAN p2pFuncGetAttriList ( IN P_ADAPTER_T prAdapter, IN UINT_8 ucOuiType, IN PUINT_8 pucIE, IN UINT_16 u2IELength, OUT PPUINT_8 ppucAttriList, OUT PUINT_16 pu2AttriListLen ) { BOOLEAN fgIsAllocMem = FALSE; UINT_8 aucWfaOui[] = VENDOR_OUI_WFA_SPECIFIC; UINT_16 u2Offset = 0; P_IE_P2P_T prIe = (P_IE_P2P_T)NULL; PUINT_8 pucAttriListStart = (PUINT_8)NULL; UINT_16 u2AttriListLen = 0, u2BufferSize = 0; BOOLEAN fgBackupAttributes = FALSE; do { ASSERT_BREAK((prAdapter != NULL) && (pucIE != NULL) && (u2IELength != 0) && (ppucAttriList != NULL) && 
(pu2AttriListLen != NULL)); if(ppucAttriList) { *ppucAttriList = NULL; } if(pu2AttriListLen) { *pu2AttriListLen = 0; } if (ucOuiType == VENDOR_OUI_TYPE_WPS){ aucWfaOui[0] = 0x00; aucWfaOui[1] = 0x50; aucWfaOui[2] = 0xF2; } else if ((ucOuiType != VENDOR_OUI_TYPE_P2P) #if CFG_SUPPORT_WFD && (ucOuiType != VENDOR_OUI_TYPE_WFD) #endif ) { DBGLOG(P2P, INFO, ("Unsupported OUI Type for parsing: 0x%x\n", ucOuiType)); break; } IE_FOR_EACH(pucIE, u2IELength, u2Offset) { if (ELEM_ID_VENDOR == IE_ID(pucIE)) { prIe = (P_IE_P2P_T)pucIE; if (prIe->ucLength <= P2P_OUI_TYPE_LEN) { continue; } if ((prIe->aucOui[0] == aucWfaOui[0]) && (prIe->aucOui[1] == aucWfaOui[1]) && (prIe->aucOui[2] == aucWfaOui[2]) && (ucOuiType == prIe->ucOuiType)) { if (!pucAttriListStart) { pucAttriListStart = &prIe->aucP2PAttributes[0]; if (prIe->ucLength > P2P_OUI_TYPE_LEN) { u2AttriListLen = (UINT_16)(prIe->ucLength - P2P_OUI_TYPE_LEN); } else { ASSERT(FALSE); } } else { /* A second IE of the same OUI type: concatenate its attribute list. */ UINT_16 u2CopyLen; if (FALSE == fgBackupAttributes) { P_P2P_SPECIFIC_BSS_INFO_T prP2pSpecificBssInfo = prAdapter->rWifiVar.prP2pSpecificBssInfo; fgBackupAttributes = TRUE; if (ucOuiType == VENDOR_OUI_TYPE_P2P) { kalMemCopy(&prP2pSpecificBssInfo->aucAttributesCache[0], pucAttriListStart, u2AttriListLen); pucAttriListStart = &prP2pSpecificBssInfo->aucAttributesCache[0]; u2BufferSize = P2P_MAXIMUM_ATTRIBUTE_LEN; } else if (ucOuiType == VENDOR_OUI_TYPE_WPS) { kalMemCopy(&prP2pSpecificBssInfo->aucWscAttributesCache[0], pucAttriListStart, u2AttriListLen); pucAttriListStart = &prP2pSpecificBssInfo->aucWscAttributesCache[0]; u2BufferSize = WPS_MAXIMUM_ATTRIBUTES_CACHE_SIZE; } #if CFG_SUPPORT_WFD else if (ucOuiType == VENDOR_OUI_TYPE_WFD) { PUINT_8 pucTmpBuf = (PUINT_8)NULL; pucTmpBuf = (PUINT_8)kalMemAlloc(WPS_MAXIMUM_ATTRIBUTES_CACHE_SIZE, VIR_MEM_TYPE); if (pucTmpBuf != NULL) { fgIsAllocMem = TRUE; } else { /* Can't allocate memory for WFD IE relocation. */ ASSERT(FALSE); break; } kalMemCopy(pucTmpBuf, pucAttriListStart, u2AttriListLen); pucAttriListStart = pucTmpBuf; u2BufferSize = WPS_MAXIMUM_ATTRIBUTES_CACHE_SIZE; } #endif else { fgBackupAttributes = FALSE; } } u2CopyLen = (UINT_16)(prIe->ucLength - P2P_OUI_TYPE_LEN); if ((u2AttriListLen + u2CopyLen) > u2BufferSize) { u2CopyLen = u2BufferSize - u2AttriListLen; DBGLOG(P2P, WARN, ("Length of received P2P attributes > maximum cache size.\n")); } if (u2CopyLen) { kalMemCopy((PUINT_8)((UINT_32)pucAttriListStart + (UINT_32)u2AttriListLen), &prIe->aucP2PAttributes[0], u2CopyLen); u2AttriListLen += u2CopyLen; } } } /* prIe->aucOui */ } /* ELEM_ID_VENDOR */ } /* IE_FOR_EACH */ } while (FALSE); if (pucAttriListStart) { PUINT_8 pucAttribute = pucAttriListStart; DBGLOG(P2P, LOUD, ("Checking Attribute Length.\n")); if (ucOuiType == VENDOR_OUI_TYPE_P2P) { P2P_ATTRI_FOR_EACH(pucAttribute, u2AttriListLen, u2Offset); } else if (ucOuiType == VENDOR_OUI_TYPE_WFD) { } else if (ucOuiType == VENDOR_OUI_TYPE_WPS) { /* Big Endian: WSC, WFD. 
*/ WSC_ATTRI_FOR_EACH(pucAttribute, u2AttriListLen, u2Offset) { DBGLOG(P2P, LOUD, ("Attribute ID:%d, Length:%d.\n", WSC_ATTRI_ID(pucAttribute), WSC_ATTRI_LEN(pucAttribute))); } } else { } ASSERT(u2Offset == u2AttriListLen); *ppucAttriList = pucAttriListStart; *pu2AttriListLen = u2AttriListLen; } else { *ppucAttriList = (PUINT_8)NULL; *pu2AttriListLen = 0; } return fgIsAllocMem; } /* p2pFuncGetAttriList */ P_MSDU_INFO_T p2pFuncProcessP2pProbeRsp ( IN P_ADAPTER_T prAdapter, IN P_MSDU_INFO_T prMgmtTxMsdu ) { P_MSDU_INFO_T prRetMsduInfo = prMgmtTxMsdu; P_WLAN_PROBE_RSP_FRAME_T prProbeRspFrame = (P_WLAN_PROBE_RSP_FRAME_T)NULL; PUINT_8 pucIEBuf = (PUINT_8)NULL; UINT_16 u2Offset = 0, u2IELength = 0, u2ProbeRspHdrLen = 0; BOOLEAN fgIsP2PIE = FALSE, fgIsWSCIE = FALSE; P_BSS_INFO_T prP2pBssInfo = (P_BSS_INFO_T)NULL; UINT_16 u2EstimateSize = 0, u2EstimatedExtraIELen = 0; UINT_32 u4IeArraySize = 0, u4Idx = 0; do { ASSERT_BREAK((prAdapter != NULL) && (prMgmtTxMsdu != NULL)); prP2pBssInfo = &(prAdapter->rWifiVar.arBssInfo[NETWORK_TYPE_P2P_INDEX]); //3 Make sure this is a probe response frame. prProbeRspFrame = (P_WLAN_PROBE_RSP_FRAME_T)((UINT_32)prMgmtTxMsdu->prPacket + MAC_TX_RESERVED_FIELD); ASSERT_BREAK((prProbeRspFrame->u2FrameCtrl & MASK_FRAME_TYPE) == MAC_FRAME_PROBE_RSP); //3 Get the important P2P IEs. u2ProbeRspHdrLen = (WLAN_MAC_MGMT_HEADER_LEN + TIMESTAMP_FIELD_LEN + BEACON_INTERVAL_FIELD_LEN + CAP_INFO_FIELD_LEN); pucIEBuf = prProbeRspFrame->aucInfoElem; u2IELength = prMgmtTxMsdu->u2FrameLength - u2ProbeRspHdrLen; #if CFG_SUPPORT_WFD prAdapter->prGlueInfo->prP2PInfo->u2VenderIELen = 0; #endif IE_FOR_EACH(pucIEBuf, u2IELength, u2Offset) { switch (IE_ID(pucIEBuf)) { case ELEM_ID_SSID: { COPY_SSID(prP2pBssInfo->aucSSID, prP2pBssInfo->ucSSIDLen, SSID_IE(pucIEBuf)->aucSSID, SSID_IE(pucIEBuf)->ucLength); } break; case ELEM_ID_VENDOR: { UINT_8 ucOuiType = 0; UINT_16 u2SubTypeVersion = 0; #if !CFG_SUPPORT_WFD if (rsnParseCheckForWFAInfoElem(prAdapter, pucIEBuf, &ucOuiType, &u2SubTypeVersion)) { if (ucOuiType == VENDOR_OUI_TYPE_WPS) { kalP2PUpdateWSC_IE(prAdapter->prGlueInfo, 2, pucIEBuf, IE_SIZE(pucIEBuf)); fgIsWSCIE = TRUE; } } else if (p2pFuncParseCheckForP2PInfoElem(prAdapter, pucIEBuf, &ucOuiType)) { if (ucOuiType == VENDOR_OUI_TYPE_P2P) { //2 Note(frog): I use the WSC IE buffer for Probe Request to store the P2P IE for the Probe Response. kalP2PUpdateWSC_IE(prAdapter->prGlueInfo, 1, pucIEBuf, IE_SIZE(pucIEBuf)); fgIsP2PIE = TRUE; } } else { if((prAdapter->prGlueInfo->prP2PInfo->u2VenderIELen+IE_SIZE(pucIEBuf))<512) { kalMemCopy(prAdapter->prGlueInfo->prP2PInfo->aucVenderIE, pucIEBuf, IE_SIZE(pucIEBuf)); prAdapter->prGlueInfo->prP2PInfo->u2VenderIELen += IE_SIZE(pucIEBuf); } } #else /* Eddie May be WFD */ if (rsnParseCheckForWFAInfoElem(prAdapter, pucIEBuf, &ucOuiType, &u2SubTypeVersion)) { if(ucOuiType == VENDOR_OUI_TYPE_WMM) { break; } } if((prAdapter->prGlueInfo->prP2PInfo->u2VenderIELen+IE_SIZE(pucIEBuf))<1024) { kalMemCopy(prAdapter->prGlueInfo->prP2PInfo->aucVenderIE + prAdapter->prGlueInfo->prP2PInfo->u2VenderIELen, pucIEBuf, IE_SIZE(pucIEBuf)); prAdapter->prGlueInfo->prP2PInfo->u2VenderIELen += IE_SIZE(pucIEBuf); } #endif } break; default: break; } } //3 Check the total size & current frame. 
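/* Size estimation (matches the computation below): the re-composed probe response must hold the fixed management header and fixed fields, worst-case SSID / supported-rates / DS-parameter IEs, every entry of txProbeRspIETable (its fixed length, or the per-entry variable-length callback), plus the cached WSC/P2P IEs and any pass-through vendor IEs collected above. A larger management packet is allocated only when this estimate exceeds the original MSDU length; otherwise the frame is rebuilt in place. */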
u2EstimateSize = WLAN_MAC_MGMT_HEADER_LEN + \ TIMESTAMP_FIELD_LEN + \ BEACON_INTERVAL_FIELD_LEN + \ CAP_INFO_FIELD_LEN + \ (ELEM_HDR_LEN + ELEM_MAX_LEN_SSID) + \ (ELEM_HDR_LEN + ELEM_MAX_LEN_SUP_RATES) + \ (ELEM_HDR_LEN + ELEM_MAX_LEN_DS_PARAMETER_SET); u2EstimatedExtraIELen = 0; u4IeArraySize = sizeof(txProbeRspIETable)/sizeof(APPEND_VAR_IE_ENTRY_T); for (u4Idx = 0; u4Idx < u4IeArraySize; u4Idx++) { if (txProbeRspIETable[u4Idx].u2EstimatedFixedIELen) { u2EstimatedExtraIELen += txProbeRspIETable[u4Idx].u2EstimatedFixedIELen; } else { ASSERT(txProbeRspIETable[u4Idx].pfnCalculateVariableIELen); u2EstimatedExtraIELen += (UINT_16)(txProbeRspIETable[u4Idx].pfnCalculateVariableIELen(prAdapter, NETWORK_TYPE_P2P_INDEX, NULL)); } } if (fgIsWSCIE) { u2EstimatedExtraIELen += kalP2PCalWSC_IELen(prAdapter->prGlueInfo, 2); } if (fgIsP2PIE) { u2EstimatedExtraIELen += kalP2PCalWSC_IELen(prAdapter->prGlueInfo, 1); } #if CFG_SUPPORT_WFD u2EstimatedExtraIELen += prAdapter->prGlueInfo->prP2PInfo->u2VenderIELen; #endif if ((u2EstimateSize += u2EstimatedExtraIELen) > (prRetMsduInfo->u2FrameLength)) { prRetMsduInfo = cnmMgtPktAlloc(prAdapter, u2EstimateSize); if (prRetMsduInfo == NULL) { DBGLOG(P2P, WARN, ("No packet for sending new probe response, use original one\n")); prRetMsduInfo = prMgmtTxMsdu; break; } prRetMsduInfo->ucNetworkType = NETWORK_TYPE_P2P_INDEX; } //3 Compose / Re-compose probe response frame. bssComposeBeaconProbeRespFrameHeaderAndFF( (PUINT_8)((UINT_32)(prRetMsduInfo->prPacket) + MAC_TX_RESERVED_FIELD), prProbeRspFrame->aucDestAddr, prProbeRspFrame->aucSrcAddr, prProbeRspFrame->aucBSSID, prProbeRspFrame->u2BeaconInterval, prProbeRspFrame->u2CapInfo); prRetMsduInfo->u2FrameLength = (WLAN_MAC_MGMT_HEADER_LEN + TIMESTAMP_FIELD_LEN + BEACON_INTERVAL_FIELD_LEN + CAP_INFO_FIELD_LEN); bssBuildBeaconProbeRespFrameCommonIEs(prRetMsduInfo, prP2pBssInfo, prProbeRspFrame->aucDestAddr); for (u4Idx = 0; u4Idx < u4IeArraySize; u4Idx++) { if (txProbeRspIETable[u4Idx].pfnAppendIE) { txProbeRspIETable[u4Idx].pfnAppendIE(prAdapter, prRetMsduInfo); } } if (fgIsWSCIE) { kalP2PGenWSC_IE(prAdapter->prGlueInfo, 2, (PUINT_8)((UINT_32)prRetMsduInfo->prPacket + (UINT_32)prRetMsduInfo->u2FrameLength)); prRetMsduInfo->u2FrameLength += (UINT_16)kalP2PCalWSC_IELen(prAdapter->prGlueInfo, 2); } if (fgIsP2PIE) { kalP2PGenWSC_IE(prAdapter->prGlueInfo, 1, (PUINT_8)((UINT_32)prRetMsduInfo->prPacket + (UINT_32)prRetMsduInfo->u2FrameLength)); prRetMsduInfo->u2FrameLength += (UINT_16)kalP2PCalWSC_IELen(prAdapter->prGlueInfo, 1); } #if CFG_SUPPORT_WFD if(prAdapter->prGlueInfo->prP2PInfo->u2VenderIELen>0) { kalMemCopy((PUINT_8)((UINT_32)prRetMsduInfo->prPacket + (UINT_32)prRetMsduInfo->u2FrameLength), prAdapter->prGlueInfo->prP2PInfo->aucVenderIE, prAdapter->prGlueInfo->prP2PInfo->u2VenderIELen); prRetMsduInfo->u2FrameLength += (UINT_16) prAdapter->prGlueInfo->prP2PInfo->u2VenderIELen; } #endif } while (FALSE); if (prRetMsduInfo != prMgmtTxMsdu) { cnmMgtPktFree(prAdapter, prMgmtTxMsdu); } return prRetMsduInfo; } /* p2pFuncProcessP2pProbeRsp */ #if 0 //LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0) UINT_32 p2pFuncCalculateExtra_IELenForBeacon ( IN P_ADAPTER_T prAdapter, IN ENUM_NETWORK_TYPE_INDEX_T eNetTypeIndex, IN P_STA_RECORD_T prStaRec ) { P_P2P_SPECIFIC_BSS_INFO_T prP2pSpeBssInfo = (P_P2P_SPECIFIC_BSS_INFO_T)NULL; UINT_32 u4IELen = 0; do { ASSERT_BREAK((prAdapter != NULL) && (eNetTypeIndex == NETWORK_TYPE_P2P_INDEX)); if (p2pFuncIsAPMode(prAdapter->rWifiVar.prP2pFsmInfo)) { break; } prP2pSpeBssInfo = 
prAdapter->rWifiVar.prP2pSpecificBssInfo; u4IELen = prP2pSpeBssInfo->u2IELenForBCN; } while (FALSE); return u4IELen; } /* p2pFuncCalculateP2p_IELenForBeacon */ VOID p2pFuncGenerateExtra_IEForBeacon ( IN P_ADAPTER_T prAdapter, IN P_MSDU_INFO_T prMsduInfo ) { P_P2P_SPECIFIC_BSS_INFO_T prP2pSpeBssInfo = (P_P2P_SPECIFIC_BSS_INFO_T)NULL; PUINT_8 pucIEBuf = (PUINT_8)NULL; do { ASSERT_BREAK((prAdapter != NULL) && (prMsduInfo != NULL)); prP2pSpeBssInfo = prAdapter->rWifiVar.prP2pSpecificBssInfo; if (p2pFuncIsAPMode(prAdapter->rWifiVar.prP2pFsmInfo)) { break; } pucIEBuf = (PUINT_8)((UINT_32)prMsduInfo->prPacket + (UINT_32)prMsduInfo->u2FrameLength); kalMemCopy(pucIEBuf, prP2pSpeBssInfo->aucBeaconIECache, prP2pSpeBssInfo->u2IELenForBCN); prMsduInfo->u2FrameLength += prP2pSpeBssInfo->u2IELenForBCN; } while (FALSE); return; } /* p2pFuncGenerateExtra_IEForBeacon */ #else UINT_32 p2pFuncCalculateP2p_IELenForBeacon ( IN P_ADAPTER_T prAdapter, IN ENUM_NETWORK_TYPE_INDEX_T eNetTypeIndex, IN P_STA_RECORD_T prStaRec ) { P_P2P_SPECIFIC_BSS_INFO_T prP2pSpeBssInfo = (P_P2P_SPECIFIC_BSS_INFO_T)NULL; UINT_32 u4IELen = 0; do { ASSERT_BREAK((prAdapter != NULL) && (eNetTypeIndex == NETWORK_TYPE_P2P_INDEX)); if (!prAdapter->fgIsP2PRegistered) { break; } if (p2pFuncIsAPMode(prAdapter->rWifiVar.prP2pFsmInfo)) { break; } prP2pSpeBssInfo = prAdapter->rWifiVar.prP2pSpecificBssInfo; u4IELen = prP2pSpeBssInfo->u2AttributeLen; } while (FALSE); return u4IELen; } /* p2pFuncCalculateP2p_IELenForBeacon */ VOID p2pFuncGenerateP2p_IEForBeacon ( IN P_ADAPTER_T prAdapter, IN P_MSDU_INFO_T prMsduInfo ) { P_P2P_SPECIFIC_BSS_INFO_T prP2pSpeBssInfo = (P_P2P_SPECIFIC_BSS_INFO_T)NULL; PUINT_8 pucIEBuf = (PUINT_8)NULL; do { ASSERT_BREAK((prAdapter != NULL) && (prMsduInfo != NULL)); if (!prAdapter->fgIsP2PRegistered) { break; } prP2pSpeBssInfo = prAdapter->rWifiVar.prP2pSpecificBssInfo; if (p2pFuncIsAPMode(prAdapter->rWifiVar.prP2pFsmInfo)) { break; } pucIEBuf = (PUINT_8)((UINT_32)prMsduInfo->prPacket + (UINT_32)prMsduInfo->u2FrameLength); kalMemCopy(pucIEBuf, prP2pSpeBssInfo->aucAttributesCache, prP2pSpeBssInfo->u2AttributeLen); prMsduInfo->u2FrameLength += prP2pSpeBssInfo->u2AttributeLen; } while (FALSE); return; } /* p2pFuncGenerateP2p_IEForBeacon */ UINT_32 p2pFuncCalculateWSC_IELenForBeacon ( IN P_ADAPTER_T prAdapter, IN ENUM_NETWORK_TYPE_INDEX_T eNetTypeIndex, IN P_STA_RECORD_T prStaRec ) { if (eNetTypeIndex != NETWORK_TYPE_P2P_INDEX) { return 0; } return kalP2PCalWSC_IELen(prAdapter->prGlueInfo, 0); } /* p2pFuncCalculateP2p_IELenForBeacon */ VOID p2pFuncGenerateWSC_IEForBeacon ( IN P_ADAPTER_T prAdapter, IN P_MSDU_INFO_T prMsduInfo ) { PUINT_8 pucBuffer; UINT_16 u2IELen = 0; ASSERT(prAdapter); ASSERT(prMsduInfo); if (prMsduInfo->ucNetworkType != NETWORK_TYPE_P2P_INDEX) { return; } u2IELen = (UINT_16)kalP2PCalWSC_IELen(prAdapter->prGlueInfo, 0); pucBuffer = (PUINT_8)((UINT_32)prMsduInfo->prPacket + (UINT_32)prMsduInfo->u2FrameLength); ASSERT(pucBuffer); // TODO: Check P2P FSM State. kalP2PGenWSC_IE(prAdapter->prGlueInfo, 0, pucBuffer); prMsduInfo->u2FrameLength += u2IELen; return; } /* p2pFuncGenerateP2p_IEForBeacon */ #endif /*----------------------------------------------------------------------------*/ /*! * @brief This function is used to calculate P2P IE length for Beacon frame. 
* * @param[in] eNetTypeIndex Specify which network * @param[in] prStaRec Pointer to the STA_RECORD_T * * @return The length of P2P IE added */ /*----------------------------------------------------------------------------*/ UINT_32 p2pFuncCalculateP2p_IELenForAssocRsp ( IN P_ADAPTER_T prAdapter, IN ENUM_NETWORK_TYPE_INDEX_T eNetTypeIndex, IN P_STA_RECORD_T prStaRec ) { if (eNetTypeIndex != NETWORK_TYPE_P2P_INDEX) { return 0; } return p2pFuncCalculateP2P_IELen(prAdapter, eNetTypeIndex, prStaRec, txAssocRspAttributesTable, sizeof(txAssocRspAttributesTable)/sizeof(APPEND_VAR_ATTRI_ENTRY_T)); } /* p2pFuncCalculateP2p_IELenForAssocRsp */ /*----------------------------------------------------------------------------*/ /*! * @brief This function is used to generate P2P IE for Beacon frame. * * @param[in] prMsduInfo Pointer to the composed MSDU_INFO_T. * * @return none */ /*----------------------------------------------------------------------------*/ VOID p2pFuncGenerateP2p_IEForAssocRsp ( IN P_ADAPTER_T prAdapter, IN P_MSDU_INFO_T prMsduInfo ) { P_P2P_FSM_INFO_T prP2pFsmInfo = (P_P2P_FSM_INFO_T)NULL; P_STA_RECORD_T prStaRec = (P_STA_RECORD_T)NULL; do { ASSERT_BREAK((prAdapter != NULL) && (prMsduInfo != NULL)); prP2pFsmInfo = prAdapter->rWifiVar.prP2pFsmInfo; prStaRec = cnmGetStaRecByIndex(prAdapter, prMsduInfo->ucStaRecIndex); if (IS_STA_P2P_TYPE(prStaRec)) { DBGLOG(P2P, TRACE, ("Generate NULL P2P IE for Assoc Rsp.\n")); p2pFuncGenerateP2P_IE(prAdapter, TRUE, &prMsduInfo->u2FrameLength, prMsduInfo->prPacket, 1500, txAssocRspAttributesTable, sizeof(txAssocRspAttributesTable)/sizeof(APPEND_VAR_ATTRI_ENTRY_T)); } else { DBGLOG(P2P, TRACE, ("Legacy device, no P2P IE.\n")); } } while (FALSE); return; } /* p2pFuncGenerateP2p_IEForAssocRsp */ UINT_32 p2pFuncCalculateWSC_IELenForAssocRsp ( IN P_ADAPTER_T prAdapter, IN ENUM_NETWORK_TYPE_INDEX_T eNetTypeIndex, IN P_STA_RECORD_T prStaRec ) { DBGLOG(P2P, TRACE, ("p2pFuncCalculateWSC_IELenForAssocRsp\n")); if (eNetTypeIndex != NETWORK_TYPE_P2P_INDEX) { return 0; } return kalP2PCalWSC_IELen(prAdapter->prGlueInfo, 0); } /* p2pFuncCalculateP2p_IELenForAssocRsp */ VOID p2pFuncGenerateWSC_IEForAssocRsp ( IN P_ADAPTER_T prAdapter, IN P_MSDU_INFO_T prMsduInfo ) { PUINT_8 pucBuffer; UINT_16 u2IELen = 0; ASSERT(prAdapter); ASSERT(prMsduInfo); if (prMsduInfo->ucNetworkType != NETWORK_TYPE_P2P_INDEX) { return; } DBGLOG(P2P, TRACE, ("p2pFuncGenerateWSC_IEForAssocRsp\n")); u2IELen = (UINT_16)kalP2PCalWSC_IELen(prAdapter->prGlueInfo, 0); pucBuffer = (PUINT_8)((UINT_32)prMsduInfo->prPacket + (UINT_32)prMsduInfo->u2FrameLength); ASSERT(pucBuffer); // TODO: Check P2P FSM State. 
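/* Assumption (based on the kalP2PUpdateWSC_IE(..., 0, ...) call in p2pFuncParseBeaconContent): buffer index 0 holds the WSC IE pushed down by the supplicant, and it is appended verbatim after the fixed fields of the association response here. */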
kalP2PGenWSC_IE(prAdapter->prGlueInfo, 0, pucBuffer); prMsduInfo->u2FrameLength += u2IELen; return; } /* p2pFuncGenerateP2p_IEForAssocRsp */ UINT_32 p2pFuncCalculateP2P_IELen ( IN P_ADAPTER_T prAdapter, IN ENUM_NETWORK_TYPE_INDEX_T eNetTypeIndex, IN P_STA_RECORD_T prStaRec, IN APPEND_VAR_ATTRI_ENTRY_T arAppendAttriTable[], IN UINT_32 u4AttriTableSize ) { UINT_32 u4OverallAttriLen, u4Dummy; UINT_16 u2EstimatedFixedAttriLen; UINT_32 i; /* Overall length of all Attributes */ u4OverallAttriLen = 0; for (i = 0; i < u4AttriTableSize; i++) { u2EstimatedFixedAttriLen = arAppendAttriTable[i].u2EstimatedFixedAttriLen; if (u2EstimatedFixedAttriLen) { u4OverallAttriLen += u2EstimatedFixedAttriLen; } else { ASSERT(arAppendAttriTable[i].pfnCalculateVariableAttriLen); u4OverallAttriLen += arAppendAttriTable[i].pfnCalculateVariableAttriLen(prAdapter, prStaRec); } } u4Dummy = u4OverallAttriLen; u4OverallAttriLen += P2P_IE_OUI_HDR; for (;(u4Dummy > P2P_MAXIMUM_ATTRIBUTE_LEN);) { u4OverallAttriLen += P2P_IE_OUI_HDR; u4Dummy -= P2P_MAXIMUM_ATTRIBUTE_LEN; } return u4OverallAttriLen; } /* p2pFuncCalculateP2P_IELen */ VOID p2pFuncGenerateP2P_IE ( IN P_ADAPTER_T prAdapter, IN BOOLEAN fgIsAssocFrame, IN PUINT_16 pu2Offset, IN PUINT_8 pucBuf, IN UINT_16 u2BufSize, IN APPEND_VAR_ATTRI_ENTRY_T arAppendAttriTable[], IN UINT_32 u4AttriTableSize ) { PUINT_8 pucBuffer = (PUINT_8)NULL; P_IE_P2P_T prIeP2P = (P_IE_P2P_T)NULL; UINT_32 u4OverallAttriLen; UINT_32 u4AttriLen; UINT_8 aucWfaOui[] = VENDOR_OUI_WFA_SPECIFIC; UINT_8 aucTempBuffer[P2P_MAXIMUM_ATTRIBUTE_LEN]; UINT_32 i; do { ASSERT_BREAK((prAdapter != NULL) && (pucBuf != NULL)); pucBuffer = (PUINT_8)((UINT_32)pucBuf + (*pu2Offset)); ASSERT_BREAK(pucBuffer != NULL); /* Check buffer length is still enough. */ ASSERT_BREAK((u2BufSize - (*pu2Offset)) >= P2P_IE_OUI_HDR); prIeP2P = (P_IE_P2P_T)pucBuffer; prIeP2P->ucId = ELEM_ID_P2P; prIeP2P->aucOui[0] = aucWfaOui[0]; prIeP2P->aucOui[1] = aucWfaOui[1]; prIeP2P->aucOui[2] = aucWfaOui[2]; prIeP2P->ucOuiType = VENDOR_OUI_TYPE_P2P; (*pu2Offset) += P2P_IE_OUI_HDR; /* Overall length of all Attributes */ u4OverallAttriLen = 0; for (i = 0; i < u4AttriTableSize; i++) { if (arAppendAttriTable[i].pfnAppendAttri) { u4AttriLen = arAppendAttriTable[i].pfnAppendAttri(prAdapter, fgIsAssocFrame, pu2Offset, pucBuf, u2BufSize); u4OverallAttriLen += u4AttriLen; if (u4OverallAttriLen > P2P_MAXIMUM_ATTRIBUTE_LEN) { u4OverallAttriLen -= P2P_MAXIMUM_ATTRIBUTE_LEN; prIeP2P->ucLength = (VENDOR_OUI_TYPE_LEN + P2P_MAXIMUM_ATTRIBUTE_LEN); pucBuffer = (PUINT_8)((UINT_32)prIeP2P + (VENDOR_OUI_TYPE_LEN + P2P_MAXIMUM_ATTRIBUTE_LEN)); prIeP2P = (P_IE_P2P_T)((UINT_32)prIeP2P + (ELEM_HDR_LEN + (VENDOR_OUI_TYPE_LEN + P2P_MAXIMUM_ATTRIBUTE_LEN))); kalMemCopy(aucTempBuffer, pucBuffer, u4OverallAttriLen); prIeP2P->ucId = ELEM_ID_P2P; prIeP2P->aucOui[0] = aucWfaOui[0]; prIeP2P->aucOui[1] = aucWfaOui[1]; prIeP2P->aucOui[2] = aucWfaOui[2]; prIeP2P->ucOuiType = VENDOR_OUI_TYPE_P2P; kalMemCopy(prIeP2P->aucP2PAttributes, aucTempBuffer, u4OverallAttriLen); (*pu2Offset) += P2P_IE_OUI_HDR; } } } prIeP2P->ucLength = (UINT_8)(VENDOR_OUI_TYPE_LEN + u4OverallAttriLen); } while (FALSE); return; } /* p2pFuncGenerateP2P_IE */ UINT_32 p2pFuncAppendAttriStatusForAssocRsp ( IN P_ADAPTER_T prAdapter, IN BOOLEAN fgIsAssocFrame, IN PUINT_16 pu2Offset, IN PUINT_8 pucBuf, IN UINT_16 u2BufSize ) { PUINT_8 pucBuffer; P_P2P_ATTRI_STATUS_T prAttriStatus; P_P2P_CONNECTION_SETTINGS_T prP2pConnSettings = (P_P2P_CONNECTION_SETTINGS_T)NULL; UINT_32 u4AttriLen = 0; ASSERT(prAdapter); 
ASSERT(pucBuf); prP2pConnSettings = prAdapter->rWifiVar.prP2PConnSettings; if (fgIsAssocFrame) { return u4AttriLen; } // TODO: For assoc request P2P IE check in driver & return status in P2P IE. pucBuffer = (PUINT_8)((UINT_32)pucBuf + (UINT_32)(*pu2Offset)); ASSERT(pucBuffer); prAttriStatus = (P_P2P_ATTRI_STATUS_T)pucBuffer; ASSERT(u2BufSize >= ((*pu2Offset) + (UINT_16)u4AttriLen)); prAttriStatus->ucId = P2P_ATTRI_ID_STATUS; WLAN_SET_FIELD_16(&prAttriStatus->u2Length, P2P_ATTRI_MAX_LEN_STATUS); prAttriStatus->ucStatusCode = P2P_STATUS_FAIL_PREVIOUS_PROTOCOL_ERR; u4AttriLen = (P2P_ATTRI_HDR_LEN + P2P_ATTRI_MAX_LEN_STATUS); (*pu2Offset) += (UINT_16)u4AttriLen; return u4AttriLen; } /* p2pFuncAppendAttriStatusForAssocRsp */ UINT_32 p2pFuncAppendAttriExtListenTiming ( IN P_ADAPTER_T prAdapter, IN BOOLEAN fgIsAssocFrame, IN PUINT_16 pu2Offset, IN PUINT_8 pucBuf, IN UINT_16 u2BufSize ) { UINT_32 u4AttriLen = 0; P_P2P_ATTRI_EXT_LISTEN_TIMING_T prP2pExtListenTiming = (P_P2P_ATTRI_EXT_LISTEN_TIMING_T)NULL; P_P2P_SPECIFIC_BSS_INFO_T prP2pSpecificBssInfo = (P_P2P_SPECIFIC_BSS_INFO_T)NULL; PUINT_8 pucBuffer = NULL; ASSERT(prAdapter); ASSERT(pucBuf); if (fgIsAssocFrame) { return u4AttriLen; } // TODO: For extend listen timing. prP2pSpecificBssInfo = prAdapter->rWifiVar.prP2pSpecificBssInfo; u4AttriLen = (P2P_ATTRI_HDR_LEN + P2P_ATTRI_MAX_LEN_EXT_LISTEN_TIMING); ASSERT(u2BufSize >= ((*pu2Offset) + (UINT_16)u4AttriLen)); pucBuffer = (PUINT_8)((UINT_32)pucBuf + (UINT_32)(*pu2Offset)); ASSERT(pucBuffer); prP2pExtListenTiming = (P_P2P_ATTRI_EXT_LISTEN_TIMING_T)pucBuffer; prP2pExtListenTiming->ucId = P2P_ATTRI_ID_EXT_LISTEN_TIMING; WLAN_SET_FIELD_16(&prP2pExtListenTiming->u2Length, P2P_ATTRI_MAX_LEN_EXT_LISTEN_TIMING); WLAN_SET_FIELD_16(&prP2pExtListenTiming->u2AvailInterval, prP2pSpecificBssInfo->u2AvailabilityInterval); WLAN_SET_FIELD_16(&prP2pExtListenTiming->u2AvailPeriod, prP2pSpecificBssInfo->u2AvailabilityPeriod); (*pu2Offset) += (UINT_16)u4AttriLen; return u4AttriLen; } /* p2pFuncAppendAttriExtListenTiming */ P_IE_HDR_T p2pFuncGetSpecIE ( IN P_ADAPTER_T prAdapter, IN PUINT_8 pucIEBuf, IN UINT_16 u2BufferLen, IN UINT_8 ucElemID, IN PBOOLEAN pfgIsMore ) { P_IE_HDR_T prTargetIE = (P_IE_HDR_T)NULL; PUINT_8 pucIE = (PUINT_8)NULL; UINT_16 u2Offset = 0; if (pfgIsMore) { *pfgIsMore = FALSE; } do { ASSERT_BREAK((prAdapter != NULL) && (pucIEBuf != NULL)); pucIE = pucIEBuf; IE_FOR_EACH(pucIE, u2BufferLen, u2Offset) { if (IE_ID(pucIE) == ucElemID) { if ((prTargetIE) && (pfgIsMore)) { *pfgIsMore = TRUE; break; } else { prTargetIE = (P_IE_HDR_T)pucIE; if (pfgIsMore == NULL) { break; } } } } } while (FALSE); return prTargetIE; } /* p2pFuncGetSpecIE */ P_ATTRIBUTE_HDR_T p2pFuncGetSpecAttri ( IN P_ADAPTER_T prAdapter, IN UINT_8 ucOuiType, IN PUINT_8 pucIEBuf, IN UINT_16 u2BufferLen, IN UINT_16 u2AttriID ) { P_IE_P2P_T prP2pIE = (P_IE_P2P_T)NULL; P_ATTRIBUTE_HDR_T prTargetAttri = (P_ATTRIBUTE_HDR_T)NULL; BOOLEAN fgIsMore = FALSE; PUINT_8 pucIE = (PUINT_8)NULL, pucAttri = (PUINT_8)NULL; UINT_16 u2OffsetAttri = 0; UINT_16 u2BufferLenLeft = 0; UINT_8 aucWfaOui[] = VENDOR_OUI_WFA_SPECIFIC; DBGLOG(P2P, INFO, ("Check AssocReq Oui type %u attri %u for len %u\n",ucOuiType, u2AttriID, u2BufferLen)); do { ASSERT_BREAK((prAdapter != NULL) && (pucIEBuf != NULL)); u2BufferLenLeft = u2BufferLen; pucIE = pucIEBuf; do { fgIsMore = FALSE; prP2pIE = (P_IE_P2P_T)p2pFuncGetSpecIE(prAdapter, pucIE, u2BufferLenLeft, ELEM_ID_VENDOR, &fgIsMore); if (prP2pIE) { ASSERT(prP2pIE>pucIE); u2BufferLenLeft = u2BufferLen - (UINT_16)( 
((UINT_32)prP2pIE) - ((UINT_32)pucIEBuf)); DBGLOG(P2P, INFO, ("Find vendor id %u len %u oui %u more %u LeftLen %u\n", IE_ID(prP2pIE), IE_LEN(prP2pIE), prP2pIE->ucOuiType, fgIsMore, u2BufferLenLeft)); if(IE_LEN(prP2pIE) > P2P_OUI_TYPE_LEN) { if (prP2pIE->ucOuiType == ucOuiType) { switch (ucOuiType) { case VENDOR_OUI_TYPE_WPS: aucWfaOui[0] = 0x00; aucWfaOui[1] = 0x50; aucWfaOui[2] = 0xF2; break; case VENDOR_OUI_TYPE_P2P: break; case VENDOR_OUI_TYPE_WPA: case VENDOR_OUI_TYPE_WMM: case VENDOR_OUI_TYPE_WFD: default: break; } if ((prP2pIE->aucOui[0] == aucWfaOui[0]) && (prP2pIE->aucOui[1] == aucWfaOui[1]) && (prP2pIE->aucOui[2] == aucWfaOui[2]) ) { u2OffsetAttri = 0; pucAttri = prP2pIE->aucP2PAttributes; if (ucOuiType == VENDOR_OUI_TYPE_WPS) { WSC_ATTRI_FOR_EACH(pucAttri, (IE_LEN(prP2pIE) - P2P_OUI_TYPE_LEN), u2OffsetAttri) { //LOG_FUNC("WSC: attri id=%u len=%u\n",WSC_ATTRI_ID(pucAttri), WSC_ATTRI_LEN(pucAttri)); if (WSC_ATTRI_ID(pucAttri) == u2AttriID) { prTargetAttri = (P_ATTRIBUTE_HDR_T)pucAttri; break; } } } else if (ucOuiType == VENDOR_OUI_TYPE_P2P) { P2P_ATTRI_FOR_EACH(pucAttri, (IE_LEN(prP2pIE) - P2P_OUI_TYPE_LEN), u2OffsetAttri) { //LOG_FUNC("P2P: attri id=%u len=%u\n",ATTRI_ID(pucAttri), ATTRI_LEN(pucAttri)); if (ATTRI_ID(pucAttri) == (UINT_8)u2AttriID) { prTargetAttri = (P_ATTRIBUTE_HDR_T)pucAttri; break; } } } #if CFG_SUPPORT_WFD else if (ucOuiType == VENDOR_OUI_TYPE_WFD) { WFD_ATTRI_FOR_EACH(pucAttri, (IE_LEN(prP2pIE) - P2P_OUI_TYPE_LEN), u2OffsetAttri) { //DBGLOG(P2P, INFO, ("WFD: attri id=%u len=%u\n",WFD_ATTRI_ID(pucAttri), WFD_ATTRI_LEN(pucAttri))); if (ATTRI_ID(pucAttri) == (UINT_8)u2AttriID) { prTargetAttri = (P_ATTRIBUTE_HDR_T)pucAttri; break; } } } #endif else { // Possible or else. } } } /* ucOuiType */ } /* P2P_OUI_TYPE_LEN */ pucIE = (PUINT_8)(((UINT_32)prP2pIE) + IE_SIZE(prP2pIE)); } /* prP2pIE */ } while (prP2pIE && fgIsMore && u2BufferLenLeft); } while (FALSE); return prTargetAttri; } /* p2pFuncGetSpecAttri */ WLAN_STATUS p2pFuncGenerateBeaconProbeRsp ( IN P_ADAPTER_T prAdapter, IN P_BSS_INFO_T prBssInfo, IN P_MSDU_INFO_T prMsduInfo, IN BOOLEAN fgIsProbeRsp ) { WLAN_STATUS rWlanStatus = WLAN_STATUS_SUCCESS; P_WLAN_BEACON_FRAME_T prBcnFrame = (P_WLAN_BEACON_FRAME_T)NULL; // P_APPEND_VAR_IE_ENTRY_T prAppendIeTable = (P_APPEND_VAR_IE_ENTRY_T)NULL; do { ASSERT_BREAK((prAdapter != NULL) && (prBssInfo != NULL) && (prMsduInfo != NULL)); // txBcnIETable // txProbeRspIETable prBcnFrame = (P_WLAN_BEACON_FRAME_T)prMsduInfo->prPacket; return nicUpdateBeaconIETemplate(prAdapter, IE_UPD_METHOD_UPDATE_ALL, NETWORK_TYPE_P2P_INDEX, prBssInfo->u2CapInfo, (PUINT_8)prBcnFrame->aucInfoElem, prMsduInfo->u2FrameLength - OFFSET_OF(WLAN_BEACON_FRAME_T, aucInfoElem)); } while (FALSE); return rWlanStatus; } /* p2pFuncGenerateBeaconProbeRsp */ WLAN_STATUS p2pFuncComposeBeaconProbeRspTemplate ( IN P_ADAPTER_T prAdapter, IN PUINT_8 pucBcnBuffer, IN UINT_32 u4BcnBufLen, IN BOOLEAN fgIsProbeRsp, IN P_P2P_PROBE_RSP_UPDATE_INFO_T prP2pProbeRspInfo, IN BOOLEAN fgSynToFW ) { WLAN_STATUS rWlanStatus = WLAN_STATUS_SUCCESS; P_MSDU_INFO_T prMsduInfo = (P_MSDU_INFO_T)NULL; P_WLAN_MAC_HEADER_T prWlanBcnFrame = (P_WLAN_MAC_HEADER_T)NULL; P_BSS_INFO_T prP2pBssInfo = (P_BSS_INFO_T)NULL; PUINT_8 pucBuffer = (PUINT_8)NULL; do { ASSERT_BREAK((prAdapter != NULL) && (pucBcnBuffer != NULL)); prWlanBcnFrame = (P_WLAN_MAC_HEADER_T)pucBcnBuffer; if ((prWlanBcnFrame->u2FrameCtrl != MAC_FRAME_BEACON) && (!fgIsProbeRsp)) { rWlanStatus = WLAN_STATUS_INVALID_DATA; break; } else if (prWlanBcnFrame->u2FrameCtrl != 
MAC_FRAME_PROBE_RSP && fgIsProbeRsp) { /* only the probe response path requires the probe response subtype */ rWlanStatus = WLAN_STATUS_INVALID_DATA; break; } if (fgIsProbeRsp) { ASSERT_BREAK(prP2pProbeRspInfo != NULL); if (prP2pProbeRspInfo->prProbeRspMsduTemplate) { /* Free the previous template before allocating a new one. */ cnmMgtPktFree(prAdapter, prP2pProbeRspInfo->prProbeRspMsduTemplate); } prP2pProbeRspInfo->prProbeRspMsduTemplate = cnmMgtPktAlloc(prAdapter, u4BcnBufLen); prMsduInfo = prP2pProbeRspInfo->prProbeRspMsduTemplate; if (prMsduInfo == NULL) { rWlanStatus = WLAN_STATUS_FAILURE; break; } prMsduInfo->eSrc = TX_PACKET_MGMT; prMsduInfo->ucStaRecIndex = 0xFF; prMsduInfo->ucNetworkType = NETWORK_TYPE_P2P_INDEX; } else { prP2pBssInfo = &(prAdapter->rWifiVar.arBssInfo[NETWORK_TYPE_P2P_INDEX]); prMsduInfo = prP2pBssInfo->prBeacon; if (prMsduInfo == NULL) { rWlanStatus = WLAN_STATUS_FAILURE; break; } if (u4BcnBufLen > (OFFSET_OF(WLAN_BEACON_FRAME_T, aucInfoElem[0]) + MAX_IE_LENGTH)) { /* Unexpected error, buffer overflow. */ ASSERT(FALSE); break; } } pucBuffer = (PUINT_8)((UINT_32)(prMsduInfo->prPacket) + MAC_TX_RESERVED_FIELD); kalMemCopy(pucBuffer, pucBcnBuffer, u4BcnBufLen); prMsduInfo->fgIs802_11 = TRUE; prMsduInfo->u2FrameLength = (UINT_16)u4BcnBufLen; if (fgSynToFW) { rWlanStatus = p2pFuncGenerateBeaconProbeRsp(prAdapter, prP2pBssInfo, prMsduInfo, fgIsProbeRsp); } } while (FALSE); return rWlanStatus; } /* p2pFuncComposeBeaconProbeRspTemplate */ #if CFG_SUPPORT_WFD WLAN_STATUS wfdAdjustResource( IN P_ADAPTER_T prAdapter, IN BOOLEAN fgEnable ) { #if 1 /* The API shall be called in tx_thread */ P_QUE_MGT_T prQM = &prAdapter->rQM; DBGLOG(P2P, INFO, ("wfdAdjustResource %d\n", fgEnable)); if(fgEnable) { prQM->au4MinReservedTcResource[TC2_INDEX] = QM_GUARANTEED_TC2_RESOURCE; if(QM_GUARANTEED_TC0_RESOURCE>2) { prQM->au4GuaranteedTcResource[TC0_INDEX] = QM_GUARANTEED_TC0_RESOURCE - 2; prQM->au4GuaranteedTcResource[TC2_INDEX] += 2; } if(QM_GUARANTEED_TC1_RESOURCE>2) { prQM->au4GuaranteedTcResource[TC1_INDEX] = QM_GUARANTEED_TC1_RESOURCE - 2; prQM->au4GuaranteedTcResource[TC2_INDEX] += 2; } } else { prQM->au4MinReservedTcResource[TC2_INDEX] = QM_MIN_RESERVED_TC2_RESOURCE; prQM->au4GuaranteedTcResource[TC0_INDEX] = QM_GUARANTEED_TC0_RESOURCE; prQM->au4GuaranteedTcResource[TC1_INDEX] = QM_GUARANTEED_TC1_RESOURCE; prQM->au4GuaranteedTcResource[TC2_INDEX] = QM_GUARANTEED_TC2_RESOURCE; } #endif return WLAN_STATUS_SUCCESS; } WLAN_STATUS wfdAdjustThread( IN P_ADAPTER_T prAdapter, IN BOOLEAN fgEnable ) { #define WFD_TX_THREAD_PRIORITY 70 DBGLOG(P2P, INFO, ("wfdAdjustThread %d\n", fgEnable)); if(fgEnable) { #ifdef LINUX /* TODO the change schedule API shall be provided by OS glue layer */ /* Or the API shall be put in os glue layer */ struct sched_param param = { .sched_priority = WFD_TX_THREAD_PRIORITY }; sched_setscheduler(prAdapter->prGlueInfo->main_thread, SCHED_RR, &param); #endif } else { #ifdef LINUX /* TODO the change schedule API shall be provided by OS glue layer */ struct sched_param param = { .sched_priority = 0 }; sched_setscheduler(prAdapter->prGlueInfo->main_thread, SCHED_NORMAL, &param); #endif } return WLAN_STATUS_SUCCESS; } #endif /* CFG_SUPPORT_WFD */ WLAN_STATUS wfdChangeMediaState( IN P_ADAPTER_T prAdapter, IN ENUM_NETWORK_TYPE_INDEX_T eNetworkTypeIdx, ENUM_PARAM_MEDIA_STATE_T eConnectionState) { #if CFG_SUPPORT_WFD P_WFD_CFG_SETTINGS_T prWfdCfgSettings = (P_WFD_CFG_SETTINGS_T)NULL; prWfdCfgSettings = &prAdapter->rWifiVar.prP2pFsmInfo->rWfdConfigureSettings; if ((prWfdCfgSettings->ucWfdEnable) && ((prWfdCfgSettings->u4WfdFlag & WFD_FLAGS_DEV_INFO_VALID))) { if(prAdapter->rWifiVar.arBssInfo[NETWORK_TYPE_P2P_INDEX].eConnectionState == PARAM_MEDIA_STATE_CONNECTED ) { wfdAdjustResource(prAdapter, 
TRUE); wfdAdjustThread(prAdapter, TRUE); } else { wfdAdjustResource(prAdapter, FALSE); wfdAdjustThread(prAdapter, FALSE); } } #endif return WLAN_STATUS_SUCCESS; }
gpl-2.0
RisingFog/dolphin-avsync
Source/Core/Core/ActionReplay.cpp
3
23755
// Copyright 2013 Dolphin Emulator Project // Licensed under GPLv2 // Refer to the license.txt file included. // ----------------------------------------------------------------------------------------- // Partial Action Replay code system implementation. // Will never be able to support some AR codes - specifically those that patch the running // Action Replay engine itself - yes they do exist!!! // Action Replay actually is a small virtual machine with a limited number of commands. // It probably is Turing complete - but what does that matter when AR codes can write // actual PowerPC code... // ----------------------------------------------------------------------------------------- // ------------------------------------------------------------------------------------------------------------- // Code Types: // (Unconditional) Normal Codes (0): this one has subtypes inside // (Conditional) Normal Codes (1 - 7): these just compare values and set the line skip info // Zero Codes: any code with no address. These codes are used to do special operations like memory copy, etc // ------------------------------------------------------------------------------------------------------------- #include <string> #include <vector> #include "Common/CommonTypes.h" #include "Common/StringUtil.h" #include "Common/Logging/LogManager.h" #include "Core/ActionReplay.h" #include "Core/ARDecrypt.h" #include "Core/ConfigManager.h" #include "Core/Core.h" #include "Core/HW/Memmap.h" namespace ActionReplay { enum { // Zero Code Types ZCODE_END = 0x00, ZCODE_NORM = 0x02, ZCODE_ROW = 0x03, ZCODE_04 = 0x04, // Conditional Codes CONDTIONAL_EQUAL = 0x01, CONDTIONAL_NOT_EQUAL = 0x02, CONDTIONAL_LESS_THAN_SIGNED = 0x03, CONDTIONAL_GREATER_THAN_SIGNED = 0x04, CONDTIONAL_LESS_THAN_UNSIGNED = 0x05, CONDTIONAL_GREATER_THAN_UNSIGNED = 0x06, CONDTIONAL_AND = 0x07, // bitwise AND // Conditional Line Counts CONDTIONAL_ONE_LINE = 0x00, CONDTIONAL_TWO_LINES = 0x01, CONDTIONAL_ALL_LINES_UNTIL = 0x02, CONDTIONAL_ALL_LINES = 0x03, // Data Types DATATYPE_8BIT = 0x00, DATATYPE_16BIT = 0x01, DATATYPE_32BIT = 0x02, DATATYPE_32BIT_FLOAT = 0x03, // Normal Code 0 Subtypes SUB_RAM_WRITE = 0x00, SUB_WRITE_POINTER = 0x01, SUB_ADD_CODE = 0x02, SUB_MASTER_CODE = 0x03, }; // pointer to the code currently being run, (used by log messages that include the code name) static ARCode const* current_code = nullptr; static bool b_RanOnce = false; static std::vector<ARCode> arCodes; static std::vector<ARCode> activeCodes; static bool logSelf = false; static std::vector<std::string> arLog; struct ARAddr { union { u32 address; struct { u32 gcaddr : 25; u32 size : 2; u32 type : 3; u32 subtype : 2; }; }; ARAddr(const u32 addr) : address(addr) {} u32 GCAddress() const { return gcaddr | 0x80000000; } operator u32() const { return address; } }; // ---------------------- // AR Remote Functions void LoadCodes(const IniFile& globalIni, const IniFile& localIni, bool forceLoad) { // Parses the Action Replay section of a game ini file. 
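// Accepted line formats in the [ActionReplay] section (as handled below):
//   $Code Name        -- starts a new code; it is marked active when the same
//                        name is listed under [ActionReplay_Enabled] in the local ini.
//   XXXXXXXX XXXXXXXX -- a decrypted entry: 8 hex digits of address, 8 of value.
//   XXXX-XXXX-XXXXX   -- an encrypted entry; consecutive encrypted lines form a
//                        block that is passed to DecryptARCode().
// The global ini is parsed first, then the local ini; codes from the local ini
// are flagged user_defined.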
if (!SConfig::GetInstance().m_LocalCoreStartupParameter.bEnableCheats && !forceLoad) return; arCodes.clear(); std::vector<std::string> enabledLines; std::set<std::string> enabledNames; localIni.GetLines("ActionReplay_Enabled", &enabledLines); for (const std::string& line : enabledLines) { if (line.size() != 0 && line[0] == '$') { std::string name = line.substr(1, line.size() - 1); enabledNames.insert(name); } } const IniFile* inis[2] = {&globalIni, &localIni}; for (const IniFile* ini : inis) { std::vector<std::string> lines; std::vector<std::string> encryptedLines; ARCode currentCode; ini->GetLines("ActionReplay", &lines); for (const std::string& line : lines) { if (line.empty()) { continue; } std::vector<std::string> pieces; // Check if the line is a name of the code if (line[0] == '$') { if (currentCode.ops.size()) { arCodes.push_back(currentCode); currentCode.ops.clear(); } if (encryptedLines.size()) { DecryptARCode(encryptedLines, currentCode.ops); arCodes.push_back(currentCode); currentCode.ops.clear(); encryptedLines.clear(); } currentCode.name = line.substr(1, line.size() - 1); currentCode.active = enabledNames.find(currentCode.name) != enabledNames.end(); currentCode.user_defined = (ini == &localIni); } else { SplitString(line, ' ', pieces); // Check if the AR code is decrypted if (pieces.size() == 2 && pieces[0].size() == 8 && pieces[1].size() == 8) { AREntry op; bool success_addr = TryParse(std::string("0x") + pieces[0], &op.cmd_addr); bool success_val = TryParse(std::string("0x") + pieces[1], &op.value); if (success_addr && success_val) { currentCode.ops.push_back(op); } else { PanicAlertT("Action Replay Error: invalid AR code line: %s", line.c_str()); if (!success_addr) PanicAlertT("The address is invalid"); if (!success_val) PanicAlertT("The value is invalid"); } } else { SplitString(line, '-', pieces); if (pieces.size() == 3 && pieces[0].size() == 4 && pieces[1].size() == 4 && pieces[2].size() == 5) { // Encrypted AR code // Decryption is done in "blocks", so we must push blocks into a vector, // then send to decrypt when a new block is encountered, or if it's the last block. encryptedLines.push_back(pieces[0]+pieces[1]+pieces[2]); } } } } // Handle the last code correctly. if (currentCode.ops.size()) { arCodes.push_back(currentCode); } if (encryptedLines.size()) { DecryptARCode(encryptedLines, currentCode.ops); arCodes.push_back(currentCode); } } UpdateActiveList(); } void LoadCodes(std::vector<ARCode> &_arCodes, IniFile &globalIni, IniFile& localIni) { LoadCodes(globalIni, localIni, true); _arCodes = arCodes; } static void LogInfo(const char *format, ...) 
{ if (!b_RanOnce) { if (LogManager::GetMaxLevel() >= LogTypes::LINFO || logSelf) { const size_t temp_size = strlen(format) + 512; char* temp = (char*)alloca(temp_size); va_list args; va_start(args, format); CharArrayFromFormatV(temp, (int)temp_size, format, args); va_end(args); INFO_LOG(ACTIONREPLAY, "%s", temp); if (logSelf) { std::string text = temp; text += '\n'; arLog.push_back(text); } } } } size_t GetCodeListSize() { return arCodes.size(); } ARCode GetARCode(size_t index) { if (index >= arCodes.size()) { PanicAlertT("GetARCode: Index is out of range of the " "ar code list size %lu", (unsigned long)index); return ARCode(); } return arCodes[index]; } void SetARCode_IsActive(bool active, size_t index) { if (index >= arCodes.size()) { PanicAlertT("SetARCode_IsActive: Index is out of range of the " "ar code list size %lu", (unsigned long)index); return; } arCodes[index].active = active; UpdateActiveList(); } void UpdateActiveList() { bool old_value = SConfig::GetInstance().m_LocalCoreStartupParameter.bEnableCheats; SConfig::GetInstance().m_LocalCoreStartupParameter.bEnableCheats = false; b_RanOnce = false; activeCodes.clear(); for (auto& arCode : arCodes) { if (arCode.active) activeCodes.push_back(arCode); } SConfig::GetInstance().m_LocalCoreStartupParameter.bEnableCheats = old_value; } void EnableSelfLogging(bool enable) { logSelf = enable; } const std::vector<std::string> &GetSelfLog() { return arLog; } bool IsSelfLogging() { return logSelf; } // ---------------------- // Code Functions static bool Subtype_RamWriteAndFill(const ARAddr& addr, const u32 data) { const u32 new_addr = addr.GCAddress(); LogInfo("Hardware Address: %08x", new_addr); LogInfo("Size: %08x", addr.size); switch (addr.size) { case DATATYPE_8BIT: { LogInfo("8-bit Write"); LogInfo("--------"); u32 repeat = data >> 8; for (u32 i = 0; i <= repeat; ++i) { Memory::Write_U8(data & 0xFF, new_addr + i); LogInfo("Wrote %08x to address %08x", data & 0xFF, new_addr + i); } LogInfo("--------"); break; } case DATATYPE_16BIT: { LogInfo("16-bit Write"); LogInfo("--------"); u32 repeat = data >> 16; for (u32 i = 0; i <= repeat; ++i) { Memory::Write_U16(data & 0xFFFF, new_addr + i * 2); LogInfo("Wrote %08x to address %08x", data & 0xFFFF, new_addr + i * 2); } LogInfo("--------"); break; } case DATATYPE_32BIT_FLOAT: case DATATYPE_32BIT: // Dword write LogInfo("32-bit Write"); LogInfo("--------"); Memory::Write_U32(data, new_addr); LogInfo("Wrote %08x to address %08x", data, new_addr); LogInfo("--------"); break; default: LogInfo("Bad Size"); PanicAlertT("Action Replay Error: Invalid size " "(%08x : address = %08x) in Ram Write And Fill (%s)", addr.size, addr.gcaddr, current_code->name.c_str()); return false; } return true; } static bool Subtype_WriteToPointer(const ARAddr& addr, const u32 data) { const u32 new_addr = addr.GCAddress(); const u32 ptr = Memory::Read_U32(new_addr); LogInfo("Hardware Address: %08x", new_addr); LogInfo("Size: %08x", addr.size); switch (addr.size) { case DATATYPE_8BIT: { LogInfo("Write 8-bit to pointer"); LogInfo("--------"); const u8 thebyte = data & 0xFF; const u32 offset = data >> 8; LogInfo("Pointer: %08x", ptr); LogInfo("Byte: %08x", thebyte); LogInfo("Offset: %08x", offset); Memory::Write_U8(thebyte, ptr + offset); LogInfo("Wrote %08x to address %08x", thebyte, ptr + offset); LogInfo("--------"); break; } case DATATYPE_16BIT: { LogInfo("Write 16-bit to pointer"); LogInfo("--------"); const u16 theshort = data & 0xFFFF; const u32 offset = (data >> 16) << 1; LogInfo("Pointer: %08x", ptr); LogInfo("Short: %08x", theshort); LogInfo("Offset: %08x", offset); 
Memory::Write_U16(theshort, ptr + offset); LogInfo("Wrote %08x to address %08x", theshort, ptr + offset); LogInfo("--------"); break; } case DATATYPE_32BIT_FLOAT: case DATATYPE_32BIT: LogInfo("Write 32-bit to pointer"); LogInfo("--------"); Memory::Write_U32(data, ptr); LogInfo("Wrote %08x to address %08x", data, ptr); LogInfo("--------"); break; default: LogInfo("Bad Size"); PanicAlertT("Action Replay Error: Invalid size " "(%08x : address = %08x) in Write To Pointer (%s)", addr.size, addr.gcaddr, current_code->name.c_str()); return false; } return true; } static bool Subtype_AddCode(const ARAddr& addr, const u32 data) { // Used to increment/decrement a value in memory const u32 new_addr = addr.GCAddress(); LogInfo("Hardware Address: %08x", new_addr); LogInfo("Size: %08x", addr.size); // Read the old value once and log the value that is actually written; re-reading memory after the write (as this code used to) logged a bogus doubly-incremented value. switch (addr.size) { case DATATYPE_8BIT: { LogInfo("8-bit Add"); LogInfo("--------"); const u8 newval8 = Memory::Read_U8(new_addr) + data; Memory::Write_U8(newval8, new_addr); LogInfo("Wrote %08x to address %08x", newval8, new_addr); LogInfo("--------"); break; } case DATATYPE_16BIT: { LogInfo("16-bit Add"); LogInfo("--------"); const u16 newval16 = Memory::Read_U16(new_addr) + data; Memory::Write_U16(newval16, new_addr); LogInfo("Wrote %08x to address %08x", newval16, new_addr); LogInfo("--------"); break; } case DATATYPE_32BIT: { LogInfo("32-bit Add"); LogInfo("--------"); const u32 newval32 = Memory::Read_U32(new_addr) + data; Memory::Write_U32(newval32, new_addr); LogInfo("Wrote %08x to address %08x", newval32, new_addr); LogInfo("--------"); break; } case DATATYPE_32BIT_FLOAT: { LogInfo("32-bit floating Add"); LogInfo("--------"); const u32 read = Memory::Read_U32(new_addr); const float fread = *((float*)&read) + (float)data; // data contains an integer value const u32 newval = *((u32*)&fread); Memory::Write_U32(newval, new_addr); LogInfo("Old Value %08x", read); LogInfo("Increment %08x", data); LogInfo("New value %08x", newval); LogInfo("--------"); break; } default: LogInfo("Bad Size"); PanicAlertT("Action Replay Error: Invalid size " "(%08x : address = %08x) in Add Code (%s)", addr.size, addr.gcaddr, current_code->name.c_str()); return false; } return true; } static bool Subtype_MasterCodeAndWriteToCCXXXXXX(const ARAddr& addr, const u32 data) { // code not yet implemented - TODO // u32 new_addr = (addr & 0x01FFFFFF) | 0x80000000; // u8 mcode_type = (data & 0xFF0000) >> 16; // u8 mcode_count = (data & 0xFF00) >> 8; // u8 mcode_number = data & 0xFF; PanicAlertT("Action Replay Error: Master Code and Write To CCXXXXXX not implemented (%s)\n" "Master codes are not needed.
Do not use master codes.", current_code->name.c_str()); return false; } static bool ZeroCode_FillAndSlide(const u32 val_last, const ARAddr& addr, const u32 data) // This needs more testing { const u32 new_addr = ((ARAddr*)&val_last)->GCAddress(); const u8 size = ((ARAddr*)&val_last)->size; const s16 addr_incr = (s16)(data & 0xFFFF); const s8 val_incr = (s8)(data >> 24); const u8 write_num = (data & 0xFF0000) >> 16; u32 val = addr; u32 curr_addr = new_addr; LogInfo("Current Hardware Address: %08x", new_addr); LogInfo("Size: %08x", addr.size); LogInfo("Write Num: %08x", write_num); LogInfo("Address Increment: %i", addr_incr); LogInfo("Value Increment: %i", val_incr); switch (size) { case DATATYPE_8BIT: LogInfo("8-bit Write"); LogInfo("--------"); for (int i = 0; i < write_num; ++i) { Memory::Write_U8(val & 0xFF, curr_addr); LogInfo("Write %08x to address %08x", val & 0xFF, curr_addr); curr_addr += addr_incr; val += val_incr; LogInfo("Value Update: %08x", val); LogInfo("Current Hardware Address Update: %08x", curr_addr); } LogInfo("--------"); break; case DATATYPE_16BIT: LogInfo("16-bit Write"); LogInfo("--------"); for (int i = 0; i < write_num; ++i) { Memory::Write_U16(val & 0xFFFF, curr_addr); LogInfo("Write %08x to address %08x", val & 0xFFFF, curr_addr); curr_addr += addr_incr * 2; val += val_incr; LogInfo("Value Update: %08x", val); LogInfo("Current Hardware Address Update: %08x", curr_addr); } LogInfo("--------"); break; case DATATYPE_32BIT: LogInfo("32-bit Write"); LogInfo("--------"); for (int i = 0; i < write_num; ++i) { Memory::Write_U32(val, curr_addr); LogInfo("Write %08x to address %08x", val, curr_addr); curr_addr += addr_incr * 4; val += val_incr; LogInfo("Value Update: %08x", val); LogInfo("Current Hardware Address Update: %08x", curr_addr); } LogInfo("--------"); break; default: LogInfo("Bad Size"); PanicAlertT("Action Replay Error: Invalid size (%08x : address = %08x) in Fill and Slide (%s)", size, new_addr, current_code->name.c_str()); return false; } return true; } // Looks like this is new??
- untested static bool ZeroCode_MemoryCopy(const u32 val_last, const ARAddr& addr, const u32 data) { const u32 addr_dest = val_last | 0x06000000; const u32 addr_src = addr.GCAddress(); const u8 num_bytes = data & 0x7FFF; LogInfo("Dest Address: %08x", addr_dest); LogInfo("Src Address: %08x", addr_src); LogInfo("Size: %08x", num_bytes); if ((data & ~0x7FFF) == 0x0000) { // NOTE: given the mask check above, (data >> 24) can never be non-zero here, so the pointer branch below is currently unreachable; its fixed 138-byte count is a magic constant inherited from the original implementation. if ((data >> 24) != 0x0) { // Memory Copy With Pointers Support LogInfo("Memory Copy With Pointers Support"); LogInfo("--------"); for (int i = 0; i < 138; ++i) { Memory::Write_U8(Memory::Read_U8(addr_src + i), addr_dest + i); LogInfo("Wrote %08x to address %08x", Memory::Read_U8(addr_src + i), addr_dest + i); } LogInfo("--------"); } else { // Memory Copy Without Pointer Support LogInfo("Memory Copy Without Pointers Support"); LogInfo("--------"); // Copy byte by byte; the previous dword copy at byte strides wrote 3 bytes past the requested range. for (int i = 0; i < num_bytes; ++i) { Memory::Write_U8(Memory::Read_U8(addr_src + i), addr_dest + i); LogInfo("Wrote %08x to address %08x", Memory::Read_U8(addr_src + i), addr_dest + i); } LogInfo("--------"); return true; } } else { LogInfo("Bad Value"); PanicAlertT("Action Replay Error: Invalid value (%08x) in Memory Copy (%s)", (data & ~0x7FFF), current_code->name.c_str()); return false; } return true; } static bool NormalCode(const ARAddr& addr, const u32 data) { switch (addr.subtype) { case SUB_RAM_WRITE: // Ram write (and fill) LogInfo("Doing Ram Write And Fill"); if (!Subtype_RamWriteAndFill(addr, data)) return false; break; case SUB_WRITE_POINTER: // Write to pointer LogInfo("Doing Write To Pointer"); if (!Subtype_WriteToPointer(addr, data)) return false; break; case SUB_ADD_CODE: // Increment Value LogInfo("Doing Add Code"); if (!Subtype_AddCode(addr, data)) return false; break; case SUB_MASTER_CODE: // Master Code & Write to CCXXXXXX LogInfo("Doing Master Code And Write to CCXXXXXX (ncode not supported)"); if (!Subtype_MasterCodeAndWriteToCCXXXXXX(addr, data)) return false; break; default: LogInfo("Bad Subtype"); PanicAlertT("Action Replay: Normal Code 0: Invalid Subtype %08x (%s)", addr.subtype, current_code->name.c_str()); return false; break; } return true; } static bool CompareValues(const u32 val1, const u32 val2, const int type) { switch (type) { case CONDTIONAL_EQUAL: LogInfo("Type 1: If Equal"); return (val1 == val2); break; case CONDTIONAL_NOT_EQUAL: LogInfo("Type 2: If Not Equal"); return (val1 != val2); break; case CONDTIONAL_LESS_THAN_SIGNED: LogInfo("Type 3: If Less Than (Signed)"); return ((int)val1 < (int)val2); break; case CONDTIONAL_GREATER_THAN_SIGNED: LogInfo("Type 4: If Greater Than (Signed)"); return ((int)val1 > (int)val2); break; case CONDTIONAL_LESS_THAN_UNSIGNED: LogInfo("Type 5: If Less Than (Unsigned)"); return (val1 < val2); break; case CONDTIONAL_GREATER_THAN_UNSIGNED: LogInfo("Type 6: If Greater Than (Unsigned)"); return (val1 > val2); break; case CONDTIONAL_AND: LogInfo("Type 7: If And"); return !!(val1 & val2); // bitwise AND break; default: LogInfo("Unknown Compare type"); PanicAlertT("Action Replay: Invalid Normal Code Type %08x (%s)", type, current_code->name.c_str()); return false; break; } } static bool ConditionalCode(const ARAddr& addr, const u32 data, int* const pSkipCount) { const u32 new_addr = addr.GCAddress(); LogInfo("Size: %08x", addr.size); LogInfo("Hardware Address: %08x", new_addr); bool result = true; switch (addr.size) { case DATATYPE_8BIT: result = CompareValues((u32)Memory::Read_U8(new_addr), (data & 0xFF), addr.type); break; case DATATYPE_16BIT: result = CompareValues((u32)Memory::Read_U16(new_addr), (data & 0xFFFF),
addr.type); break; case DATATYPE_32BIT_FLOAT: case DATATYPE_32BIT: result = CompareValues(Memory::Read_U32(new_addr), data, addr.type); break; default: LogInfo("Bad Size"); PanicAlertT("Action Replay: Conditional Code: Invalid Size %08x (%s)", addr.size, current_code->name.c_str()); return false; break; } // if the comparison failed we need to skip some lines if (false == result) { switch (addr.subtype) { case CONDTIONAL_ONE_LINE: case CONDTIONAL_TWO_LINES: *pSkipCount = addr.subtype + 1; // Skip 1 or 2 lines break; // Skip all lines, // Skip lines until a "00000000 40000000" line is reached case CONDTIONAL_ALL_LINES: case CONDTIONAL_ALL_LINES_UNTIL: *pSkipCount = -(int) addr.subtype; break; default: LogInfo("Bad Subtype"); PanicAlertT("Action Replay: Normal Code %i: Invalid subtype %08x (%s)", 1, addr.subtype, current_code->name.c_str()); return false; break; } } return true; } void RunAllActive() { if (SConfig::GetInstance().m_LocalCoreStartupParameter.bEnableCheats) { for (auto& activeCode : activeCodes) { if (activeCode.active) { activeCode.active = RunCode(activeCode); LogInfo("\n"); } } b_RanOnce = true; } } bool RunCode(const ARCode &arcode) { // The mechanism is different than what the real AR uses, so there may be compatibility problems. bool doFillNSlide = false; bool doMemoryCopy = false; // used for conditional codes int skip_count = 0; u32 val_last = 0; current_code = &arcode; LogInfo("Code Name: %s", arcode.name.c_str()); LogInfo("Number of codes: %i", arcode.ops.size()); for (const AREntry& entry : arcode.ops) { const ARAddr& addr = *(ARAddr*)&entry.cmd_addr; const u32 data = entry.value; // after a conditional code, skip lines if needed if (skip_count) { if (skip_count > 0) // skip x lines { LogInfo("Line skipped"); --skip_count; } else if (-CONDTIONAL_ALL_LINES == skip_count) { // skip all lines LogInfo("All Lines skipped"); return true; // don't need to iterate through the rest of the ops } else if (-CONDTIONAL_ALL_LINES_UNTIL == skip_count) { // skip until a "00000000 40000000" line is reached LogInfo("Line skipped"); if (addr == 0 && 0x40000000 == data) // check for an endif line skip_count = 0; } continue; } LogInfo("--- Running Code: %08x %08x ---", addr.address, data); //LogInfo("Command: %08x", cmd); // Do Fill & Slide if (doFillNSlide) { doFillNSlide = false; LogInfo("Doing Fill And Slide"); if (false == ZeroCode_FillAndSlide(val_last, addr, data)) return false; continue; } // Memory Copy if (doMemoryCopy) { doMemoryCopy = false; LogInfo("Doing Memory Copy"); if (false == ZeroCode_MemoryCopy(val_last, addr, data)) return false; continue; } // ActionReplay program self modification codes if (addr >= 0x00002000 && addr < 0x00003000) { LogInfo("This action replay simulator does not support codes that modify Action Replay itself."); PanicAlertT("This action replay simulator does not support codes that modify Action Replay itself."); return false; } // skip these weird init lines // TODO: Where are the "weird init lines"? 
//if (iter == code.ops.begin() && cmd == 1) //continue; // Zero codes if (0x0 == addr) // Check if the code is a zero code { const u8 zcode = (data >> 29); LogInfo("Doing Zero Code %08x", zcode); switch (zcode) { case ZCODE_END: // END OF CODES LogInfo("ZCode: End Of Codes"); return true; break; // TODO: the "00000000 40000000"(end if) codes fall into this case, I don't think that is correct case ZCODE_NORM: // Normal execution of codes // Todo: Set register 1BB4 to 0 LogInfo("ZCode: Normal execution of codes, set register 1BB4 to 0 (zcode not supported)"); break; case ZCODE_ROW: // Executes all codes in the same row // Todo: Set register 1BB4 to 1 LogInfo("ZCode: Executes all codes in the same row, Set register 1BB4 to 1 (zcode not supported)"); PanicAlertT("Zero 3 code not supported"); return false; break; case ZCODE_04: // Fill & Slide or Memory Copy if (0x3 == ((data >> 25) & 0x03)) { LogInfo("ZCode: Memory Copy"); doMemoryCopy = true; val_last = data; } else { LogInfo("ZCode: Fill And Slide"); doFillNSlide = true; val_last = data; } break; default: LogInfo("ZCode: Unknown"); PanicAlertT("Zero code unknown to dolphin: %08x", zcode); return false; break; } // done handling zero codes continue; } // Normal codes LogInfo("Doing Normal Code %08x", addr.type); LogInfo("Subtype: %08x", addr.subtype); switch (addr.type) { case 0x00: if (false == NormalCode(addr, data)) return false; break; default: LogInfo("This Normal Code is a Conditional Code"); if (false == ConditionalCode(addr, data, &skip_count)) return false; break; } } b_RanOnce = true; return true; } } // namespace ActionReplay
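// Editor's sketch (hypothetical names and values, not part of the original file): the INI layout consumed by LoadCodes() above looks like // [ActionReplay_Enabled] // $Example Code // [ActionReplay] // $Example Code // 003C4510 00000063 <- decrypted entry: two 8-digit hex words separated by a space // GZWA-23BK-VQ7RD <- encrypted entry: 4-4-5 character blocks separated by '-'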
gpl-2.0
heidsoft/VirtualBox
src/VBox/Runtime/r0drv/solaris/initterm-r0drv-solaris.c
3
9933
/* $Id: initterm-r0drv-solaris.c $ */ /** @file * IPRT - Initialization & Termination, Ring-0 Driver, Solaris. */ /* * Copyright (C) 2006-2012 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; * you can redistribute it and/or modify it under the terms of the GNU * General Public License (GPL) as published by the Free Software * Foundation, in version 2 as it comes in the "COPYING" file of the * VirtualBox OSE distribution. VirtualBox OSE is distributed in the * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind. * * The contents of this file may alternatively be used under the terms * of the Common Development and Distribution License Version 1.0 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the * VirtualBox OSE distribution, in which case the provisions of the * CDDL are applicable instead of those of the GPL. * * You may elect to license modified versions of this file under the * terms and conditions of either the GPL or the CDDL or both. */ /******************************************************************************* * Header Files * *******************************************************************************/ #include "the-solaris-kernel.h" #include "internal/iprt.h" #include <iprt/assert.h> #include <iprt/err.h> #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) # include <iprt/asm-amd64-x86.h> #endif #include "internal/initterm.h" /******************************************************************************* * Global Variables * *******************************************************************************/ /** Kernel debug info handle. */ RTDBGKRNLINFO g_hKrnlDbgInfo; /** Indicates that the spl routines (and therefore a bunch of other ones too) * will set EFLAGS::IF and break code that disables interrupts. */ bool g_frtSolSplSetsEIF = false; /** timeout_generic address. */ PFNSOL_timeout_generic g_pfnrtR0Sol_timeout_generic = NULL; /** untimeout_generic address. */ PFNSOL_untimeout_generic g_pfnrtR0Sol_untimeout_generic = NULL; /** cyclic_reprogram address. */ PFNSOL_cyclic_reprogram g_pfnrtR0Sol_cyclic_reprogram = NULL; /** page_noreloc_supported address. */ PFNSOL_page_noreloc_supported g_pfnrtR0Sol_page_noreloc_supported = NULL; /** Whether to use the kernel page freelist. */ bool g_frtSolUseKflt = false; /** Whether we've completed R0 initialization. */ bool g_frtSolInitDone = false; /** Whether to use old-style xc_call interface. */ bool g_frtSolOldIPI = false; /** Whether to use old-style xc_call interface using one ulong_t as the CPU set * representation. */ bool g_frtSolOldIPIUlong = false; /** The xc_call callout table structure. */ RTR0FNSOLXCCALL g_rtSolXcCall; /** Whether to use the old-style installctx()/removectx() routines. */ bool g_frtSolOldThreadCtx = false; /** The thread-context hooks callout table structure. */ RTR0FNSOLTHREADCTX g_rtSolThreadCtx; /** Thread preemption offset. */ size_t g_offrtSolThreadPreempt; /** Host scheduler preemption offset. */ size_t g_offrtSolCpuPreempt; /** Host scheduler force preemption offset. */ size_t g_offrtSolCpuForceKernelPreempt; /* Resolve using dl_lookup (remove if no longer relevant for supported S10 versions) */ extern void contig_free(void *addr, size_t size); #pragma weak contig_free /** contig_free address. 
*/ PFNSOL_contig_free g_pfnrtR0Sol_contig_free = contig_free; DECLHIDDEN(int) rtR0InitNative(void) { /* * IPRT has not yet been initialized at this point, so use Solaris' native cmn_err() for logging. */ int rc = RTR0DbgKrnlInfoOpen(&g_hKrnlDbgInfo, 0 /* fFlags */); if (RT_SUCCESS(rc)) { #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) /* * Detect whether spl*() is preserving the interrupt flag or not. * This is a problem on S10. */ RTCCUINTREG uOldFlags = ASMIntDisableFlags(); int iOld = splr(DISP_LEVEL); if (ASMIntAreEnabled()) g_frtSolSplSetsEIF = true; splx(iOld); if (ASMIntAreEnabled()) g_frtSolSplSetsEIF = true; ASMSetFlags(uOldFlags); #else /* PORTME: See if the amd64/x86 problem applies to this architecture. */ #endif /* * Mandatory: Preemption offsets. */ rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, "cpu_t", "cpu_runrun", &g_offrtSolCpuPreempt); if (RT_FAILURE(rc)) { cmn_err(CE_NOTE, "Failed to find cpu_t::cpu_runrun!\n"); goto errorbail; } rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, "cpu_t", "cpu_kprunrun", &g_offrtSolCpuForceKernelPreempt); if (RT_FAILURE(rc)) { cmn_err(CE_NOTE, "Failed to find cpu_t::cpu_kprunrun!\n"); goto errorbail; } rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, "kthread_t", "t_preempt", &g_offrtSolThreadPreempt); if (RT_FAILURE(rc)) { cmn_err(CE_NOTE, "Failed to find kthread_t::t_preempt!\n"); goto errorbail; } cmn_err(CE_CONT, "!cpu_t::cpu_runrun @ 0x%lx (%ld)\n", g_offrtSolCpuPreempt, g_offrtSolCpuPreempt); cmn_err(CE_CONT, "!cpu_t::cpu_kprunrun @ 0x%lx (%ld)\n", g_offrtSolCpuForceKernelPreempt, g_offrtSolCpuForceKernelPreempt); cmn_err(CE_CONT, "!kthread_t::t_preempt @ 0x%lx (%ld)\n", g_offrtSolThreadPreempt, g_offrtSolThreadPreempt); /* * Mandatory: CPU cross call infrastructure. Refer the-solaris-kernel.h for details. */ rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "xc_init_cpu", NULL /* ppvSymbol */); if (RT_SUCCESS(rc)) { if (ncpus > IPRT_SOL_NCPUS) { cmn_err(CE_NOTE, "rtR0InitNative: CPU count mismatch! ncpus=%d IPRT_SOL_NCPUS=%d\n", ncpus, IPRT_SOL_NCPUS); rc = VERR_NOT_SUPPORTED; goto errorbail; } g_rtSolXcCall.u.pfnSol_xc_call = (void *)xc_call; } else { g_frtSolOldIPI = true; g_rtSolXcCall.u.pfnSol_xc_call_old = (void *)xc_call; if (max_cpuid + 1 == sizeof(ulong_t) * 8) { g_frtSolOldIPIUlong = true; g_rtSolXcCall.u.pfnSol_xc_call_old_ulong = (void *)xc_call; } else if (max_cpuid + 1 != IPRT_SOL_NCPUS) { cmn_err(CE_NOTE, "rtR0InitNative: cpuset_t size mismatch! max_cpuid=%d IPRT_SOL_NCPUS=%d\n", max_cpuid, IPRT_SOL_NCPUS); rc = VERR_NOT_SUPPORTED; goto errorbail; } } /* * Mandatory: Thread-context hooks. */ rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "exitctx", NULL /* ppvSymbol */); if (RT_SUCCESS(rc)) { g_rtSolThreadCtx.Install.pfnSol_installctx = (void *)installctx; g_rtSolThreadCtx.Remove.pfnSol_removectx = (void *)removectx; } else { g_frtSolOldThreadCtx = true; g_rtSolThreadCtx.Install.pfnSol_installctx_old = (void *)installctx; g_rtSolThreadCtx.Remove.pfnSol_removectx_old = (void *)removectx; } /* * Optional: Timeout hooks. 
*/ RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "timeout_generic", (void **)&g_pfnrtR0Sol_timeout_generic); RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "untimeout_generic", (void **)&g_pfnrtR0Sol_untimeout_generic); if ((g_pfnrtR0Sol_timeout_generic == NULL) != (g_pfnrtR0Sol_untimeout_generic == NULL)) { static const char *s_apszFn[2] = { "timeout_generic", "untimeout_generic" }; bool iMissingFn = g_pfnrtR0Sol_timeout_generic == NULL; cmn_err(CE_NOTE, "rtR0InitNative: Weird! Found %s but not %s!\n", s_apszFn[!iMissingFn], s_apszFn[iMissingFn]); g_pfnrtR0Sol_timeout_generic = NULL; g_pfnrtR0Sol_untimeout_generic = NULL; } RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "cyclic_reprogram", (void **)&g_pfnrtR0Sol_cyclic_reprogram); /* * Optional: Querying page no-relocation support. */ RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /*pszModule */, "page_noreloc_supported", (void **)&g_pfnrtR0Sol_page_noreloc_supported); /* * Weak binding failures: contig_free */ if (g_pfnrtR0Sol_contig_free == NULL) { rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "contig_free", (void **)&g_pfnrtR0Sol_contig_free); if (RT_FAILURE(rc)) { cmn_err(CE_NOTE, "rtR0InitNative: failed to find contig_free!\n"); goto errorbail; } } g_frtSolInitDone = true; return VINF_SUCCESS; } else { cmn_err(CE_NOTE, "RTR0DbgKrnlInfoOpen failed. rc=%d\n", rc); return rc; } errorbail: RTR0DbgKrnlInfoRelease(g_hKrnlDbgInfo); return rc; } DECLHIDDEN(void) rtR0TermNative(void) { RTR0DbgKrnlInfoRelease(g_hKrnlDbgInfo); g_frtSolInitDone = false; }
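/* Editor's sketch (assumption, not from the original source): the structure-member offsets resolved in rtR0InitNative() are intended for raw offset arithmetic into otherwise opaque kernel objects; a consumer elsewhere in IPRT might check the thread preemption counter roughly as bool fPreemptDisabled = *((char *)curthread + g_offrtSolThreadPreempt) != 0; where curthread is the standard Solaris current-thread accessor. */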
gpl-2.0
CFDEMproject/LIGGGHTS-PUBLIC
lib/hotint/HotInt_V1/tools/HOTINT_Log/VC++ solution/HOTINT_Log/AssemblyInfo.cpp
3
1360
#include "stdafx.h" using namespace System; using namespace System::Reflection; using namespace System::Runtime::CompilerServices; using namespace System::Runtime::InteropServices; using namespace System::Security::Permissions; // // Allgemeine Informationen über eine Assembly werden über die folgenden // Attribute gesteuert. Ändern Sie diese Attributwerte, um die Informationen zu ändern, // die mit einer Assembly verknüpft sind. // [assembly:AssemblyTitleAttribute("HOTINT_Log")]; [assembly:AssemblyDescriptionAttribute("")]; [assembly:AssemblyConfigurationAttribute("")]; [assembly:AssemblyCompanyAttribute("")]; [assembly:AssemblyProductAttribute("HOTINT_Log")]; [assembly:AssemblyCopyrightAttribute("Copyright (c) 2013")]; [assembly:AssemblyTrademarkAttribute("")]; [assembly:AssemblyCultureAttribute("")]; // // Versionsinformationen für eine Assembly bestehen aus den folgenden vier Werten: // // Hauptversion // Nebenversion // Buildnummer // Revision // // Sie können alle Werte angeben oder für die Revisions- und Buildnummer den Standard // übernehmen, indem Sie "*" eingeben: [assembly:AssemblyVersionAttribute("1.0.*")]; [assembly:ComVisible(false)]; [assembly:CLSCompliantAttribute(true)]; [assembly:SecurityPermission(SecurityAction::RequestMinimum, UnmanagedCode = true)];
gpl-2.0
google-code-export/bricked
mm/filemap.c
259
67742
/* * linux/mm/filemap.c * * Copyright (C) 1994-1999 Linus Torvalds */ /* * This file handles the generic file mmap semantics used by * most "normal" filesystems (but you don't /have/ to use this: * the NFS filesystem used to do this differently, for example) */ #include <linux/module.h> #include <linux/compiler.h> #include <linux/fs.h> #include <linux/uaccess.h> #include <linux/aio.h> #include <linux/capability.h> #include <linux/kernel_stat.h> #include <linux/gfp.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/mman.h> #include <linux/pagemap.h> #include <linux/file.h> #include <linux/uio.h> #include <linux/hash.h> #include <linux/writeback.h> #include <linux/backing-dev.h> #include <linux/pagevec.h> #include <linux/blkdev.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/cpuset.h> #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */ #include <linux/memcontrol.h> #include <linux/mm_inline.h> /* for page_is_file_cache() */ #include "internal.h" /* * FIXME: remove all knowledge of the buffer layer from the core VM */ #include <linux/buffer_head.h> /* for try_to_free_buffers */ #include <asm/mman.h> /* * Shared mappings implemented 30.11.1994. It's not fully working yet, * though. * * Shared mappings now work. 15.8.1995 Bruno. * * finished 'unifying' the page and buffer cache and SMP-threaded the * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com> * * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de> */ /* * Lock ordering: * * ->i_mmap_lock (truncate_pagecache) * ->private_lock (__free_pte->__set_page_dirty_buffers) * ->swap_lock (exclusive_swap_page, others) * ->mapping->tree_lock * * ->i_mutex * ->i_mmap_lock (truncate->unmap_mapping_range) * * ->mmap_sem * ->i_mmap_lock * ->page_table_lock or pte_lock (various, mainly in memory.c) * ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock) * * ->mmap_sem * ->lock_page (access_process_vm) * * ->i_mutex (generic_file_buffered_write) * ->mmap_sem (fault_in_pages_readable->do_page_fault) * * ->i_mutex * ->i_alloc_sem (various) * * ->inode_lock * ->sb_lock (fs/fs-writeback.c) * ->mapping->tree_lock (__sync_single_inode) * * ->i_mmap_lock * ->anon_vma.lock (vma_adjust) * * ->anon_vma.lock * ->page_table_lock or pte_lock (anon_vma_prepare and various) * * ->page_table_lock or pte_lock * ->swap_lock (try_to_unmap_one) * ->private_lock (try_to_unmap_one) * ->tree_lock (try_to_unmap_one) * ->zone.lru_lock (follow_page->mark_page_accessed) * ->zone.lru_lock (check_pte_range->isolate_lru_page) * ->private_lock (page_remove_rmap->set_page_dirty) * ->tree_lock (page_remove_rmap->set_page_dirty) * ->inode_lock (page_remove_rmap->set_page_dirty) * ->inode_lock (zap_pte_range->set_page_dirty) * ->private_lock (zap_pte_range->__set_page_dirty_buffers) * * ->task->proc_lock * ->dcache_lock (proc_pid_lookup) * * (code doesn't rely on that order, so you could switch it around) * ->tasklist_lock (memory_failure, collect_procs_ao) * ->i_mmap_lock */ /* * Remove a page from the page cache and free it. Caller has to make * sure the page is locked and that nobody else uses it - or that usage * is safe. The caller must hold the mapping's tree_lock. 
*/ void __remove_from_page_cache(struct page *page) { struct address_space *mapping = page->mapping; radix_tree_delete(&mapping->page_tree, page->index); page->mapping = NULL; mapping->nrpages--; __dec_zone_page_state(page, NR_FILE_PAGES); if (PageSwapBacked(page)) __dec_zone_page_state(page, NR_SHMEM); BUG_ON(page_mapped(page)); /* * Some filesystems seem to re-dirty the page even after * the VM has canceled the dirty bit (eg ext3 journaling). * * Fix it up by doing a final dirty accounting check after * having removed the page entirely. */ if (PageDirty(page) && mapping_cap_account_dirty(mapping)) { dec_zone_page_state(page, NR_FILE_DIRTY); dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); } } void remove_from_page_cache(struct page *page) { struct address_space *mapping = page->mapping; BUG_ON(!PageLocked(page)); spin_lock_irq(&mapping->tree_lock); __remove_from_page_cache(page); spin_unlock_irq(&mapping->tree_lock); mem_cgroup_uncharge_cache_page(page); } EXPORT_SYMBOL(remove_from_page_cache); static int sync_page(void *word) { struct address_space *mapping; struct page *page; page = container_of((unsigned long *)word, struct page, flags); /* * page_mapping() is being called without PG_locked held. * Some knowledge of the state and use of the page is used to * reduce the requirements down to a memory barrier. * The danger here is of a stale page_mapping() return value * indicating a struct address_space different from the one it's * associated with when it is associated with one. * After smp_mb(), it's either the correct page_mapping() for * the page, or an old page_mapping() and the page's own * page_mapping() has gone NULL. * The ->sync_page() address_space operation must tolerate * page_mapping() going NULL. By an amazing coincidence, * this comes about because none of the users of the page * in the ->sync_page() methods make essential use of the * page_mapping(), merely passing the page down to the backing * device's unplug functions when it's non-NULL, which in turn * ignore it for all cases but swap, where only page_private(page) is * of interest. When page_mapping() does go NULL, the entire * call stack gracefully ignores the page and returns. * -- wli */ smp_mb(); mapping = page_mapping(page); if (mapping && mapping->a_ops && mapping->a_ops->sync_page) mapping->a_ops->sync_page(page); io_schedule(); return 0; } static int sync_page_killable(void *word) { sync_page(word); return fatal_signal_pending(current) ? -EINTR : 0; } /** * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range * @mapping: address space structure to write * @start: offset in bytes where the range starts * @end: offset in bytes where the range ends (inclusive) * @sync_mode: enable synchronous operation * * Start writeback against all of a mapping's dirty pages that lie * within the byte offsets <start, end> inclusive. * * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as * opposed to a regular memory cleansing writeback. The difference between * these two operations is that if a dirty page/buffer is encountered, it must * be waited upon, and not just skipped over. 
*/ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end, int sync_mode) { int ret; struct writeback_control wbc = { .sync_mode = sync_mode, .nr_to_write = LONG_MAX, .range_start = start, .range_end = end, }; if (!mapping_cap_writeback_dirty(mapping)) return 0; ret = do_writepages(mapping, &wbc); return ret; } static inline int __filemap_fdatawrite(struct address_space *mapping, int sync_mode) { return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode); } int filemap_fdatawrite(struct address_space *mapping) { return __filemap_fdatawrite(mapping, WB_SYNC_ALL); } EXPORT_SYMBOL(filemap_fdatawrite); int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end) { return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL); } EXPORT_SYMBOL(filemap_fdatawrite_range); /** * filemap_flush - mostly a non-blocking flush * @mapping: target address_space * * This is a mostly non-blocking flush. Not suitable for data-integrity * purposes - I/O may not be started against all dirty pages. */ int filemap_flush(struct address_space *mapping) { return __filemap_fdatawrite(mapping, WB_SYNC_NONE); } EXPORT_SYMBOL(filemap_flush); /** * filemap_fdatawait_range - wait for writeback to complete * @mapping: address space structure to wait for * @start_byte: offset in bytes where the range starts * @end_byte: offset in bytes where the range ends (inclusive) * * Walk the list of under-writeback pages of the given address space * in the given range and wait for all of them. */ int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte, loff_t end_byte) { pgoff_t index = start_byte >> PAGE_CACHE_SHIFT; pgoff_t end = end_byte >> PAGE_CACHE_SHIFT; struct pagevec pvec; int nr_pages; int ret = 0; if (end_byte < start_byte) return 0; pagevec_init(&pvec, 0); while ((index <= end) && (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_WRITEBACK, min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) { unsigned i; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; /* until radix tree lookup accepts end_index */ if (page->index > end) continue; wait_on_page_writeback(page); if (PageError(page)) ret = -EIO; } pagevec_release(&pvec); cond_resched(); } /* Check for outstanding write errors */ if (test_and_clear_bit(AS_ENOSPC, &mapping->flags)) ret = -ENOSPC; if (test_and_clear_bit(AS_EIO, &mapping->flags)) ret = -EIO; return ret; } EXPORT_SYMBOL(filemap_fdatawait_range); /** * filemap_fdatawait - wait for all under-writeback pages to complete * @mapping: address space structure to wait for * * Walk the list of under-writeback pages of the given address space * and wait for all of them. */ int filemap_fdatawait(struct address_space *mapping) { loff_t i_size = i_size_read(mapping->host); if (i_size == 0) return 0; return filemap_fdatawait_range(mapping, 0, i_size - 1); } EXPORT_SYMBOL(filemap_fdatawait); int filemap_write_and_wait(struct address_space *mapping) { int err = 0; if (mapping->nrpages) { err = filemap_fdatawrite(mapping); /* * Even if the above returned error, the pages may be * written partially (e.g. -ENOSPC), so we wait for it. * But the -EIO is special case, it may indicate the worst * thing (e.g. bug) happened, so we avoid waiting for it. 
*/ if (err != -EIO) { int err2 = filemap_fdatawait(mapping); if (!err) err = err2; } } return err; } EXPORT_SYMBOL(filemap_write_and_wait); /** * filemap_write_and_wait_range - write out & wait on a file range * @mapping: the address_space for the pages * @lstart: offset in bytes where the range starts * @lend: offset in bytes where the range ends (inclusive) * * Write out and wait upon file offsets lstart->lend, inclusive. * * Note that `lend' is inclusive (describes the last byte to be written) so * that this function can be used to write to the very end-of-file (end = -1). */ int filemap_write_and_wait_range(struct address_space *mapping, loff_t lstart, loff_t lend) { int err = 0; if (mapping->nrpages) { err = __filemap_fdatawrite_range(mapping, lstart, lend, WB_SYNC_ALL); /* See comment of filemap_write_and_wait() */ if (err != -EIO) { int err2 = filemap_fdatawait_range(mapping, lstart, lend); if (!err) err = err2; } } return err; } EXPORT_SYMBOL(filemap_write_and_wait_range); /** * add_to_page_cache_locked - add a locked page to the pagecache * @page: page to add * @mapping: the page's address_space * @offset: page index * @gfp_mask: page allocation mode * * This function is used to add a page to the pagecache. It must be locked. * This function does not add the page to the LRU. The caller must do that. */ int add_to_page_cache_locked(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) { int error; VM_BUG_ON(!PageLocked(page)); error = mem_cgroup_cache_charge(page, current->mm, gfp_mask & GFP_RECLAIM_MASK); if (error) goto out; error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); if (error == 0) { page_cache_get(page); page->mapping = mapping; page->index = offset; spin_lock_irq(&mapping->tree_lock); error = radix_tree_insert(&mapping->page_tree, offset, page); if (likely(!error)) { mapping->nrpages++; __inc_zone_page_state(page, NR_FILE_PAGES); if (PageSwapBacked(page)) __inc_zone_page_state(page, NR_SHMEM); spin_unlock_irq(&mapping->tree_lock); } else { page->mapping = NULL; spin_unlock_irq(&mapping->tree_lock); mem_cgroup_uncharge_cache_page(page); page_cache_release(page); } radix_tree_preload_end(); } else mem_cgroup_uncharge_cache_page(page); out: return error; } EXPORT_SYMBOL(add_to_page_cache_locked); int add_to_page_cache_lru(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) { int ret; /* * Splice_read and readahead add shmem/tmpfs pages into the page cache * before shmem_readpage has a chance to mark them as SwapBacked: they * need to go on the anon lru below, and mem_cgroup_cache_charge * (called in add_to_page_cache) needs to know where they're going too. */ if (mapping_cap_swap_backed(mapping)) SetPageSwapBacked(page); ret = add_to_page_cache(page, mapping, offset, gfp_mask); if (ret == 0) { if (page_is_file_cache(page)) lru_cache_add_file(page); else lru_cache_add_anon(page); } return ret; } EXPORT_SYMBOL_GPL(add_to_page_cache_lru); #ifdef CONFIG_NUMA struct page *__page_cache_alloc(gfp_t gfp) { int n; struct page *page; if (cpuset_do_page_mem_spread()) { get_mems_allowed(); n = cpuset_mem_spread_node(); page = alloc_pages_exact_node(n, gfp, 0); put_mems_allowed(); return page; } return alloc_pages(gfp, 0); } EXPORT_SYMBOL(__page_cache_alloc); #endif static int __sleep_on_page_lock(void *word) { io_schedule(); return 0; } /* * In order to wait for pages to become available there must be * waitqueues associated with pages. 
By using a hash table of * waitqueues where the bucket discipline is to maintain all * waiters on the same queue and wake all when any of the pages * become available, and for the woken contexts to check to be * sure the appropriate page became available, this saves space * at a cost of "thundering herd" phenomena during rare hash * collisions. */ static wait_queue_head_t *page_waitqueue(struct page *page) { const struct zone *zone = page_zone(page); return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)]; } static inline void wake_up_page(struct page *page, int bit) { __wake_up_bit(page_waitqueue(page), &page->flags, bit); } void wait_on_page_bit(struct page *page, int bit_nr) { DEFINE_WAIT_BIT(wait, &page->flags, bit_nr); if (test_bit(bit_nr, &page->flags)) __wait_on_bit(page_waitqueue(page), &wait, sync_page, TASK_UNINTERRUPTIBLE); } EXPORT_SYMBOL(wait_on_page_bit); /** * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue * @page: Page defining the wait queue of interest * @waiter: Waiter to add to the queue * * Add an arbitrary @waiter to the wait queue for the nominated @page. */ void add_page_wait_queue(struct page *page, wait_queue_t *waiter) { wait_queue_head_t *q = page_waitqueue(page); unsigned long flags; spin_lock_irqsave(&q->lock, flags); __add_wait_queue(q, waiter); spin_unlock_irqrestore(&q->lock, flags); } EXPORT_SYMBOL_GPL(add_page_wait_queue); /** * unlock_page - unlock a locked page * @page: the page * * Unlocks the page and wakes up sleepers in ___wait_on_page_locked(). * Also wakes sleepers in wait_on_page_writeback() because the wakeup * mechananism between PageLocked pages and PageWriteback pages is shared. * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep. * * The mb is necessary to enforce ordering between the clear_bit and the read * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()). */ void unlock_page(struct page *page) { VM_BUG_ON(!PageLocked(page)); clear_bit_unlock(PG_locked, &page->flags); smp_mb__after_clear_bit(); wake_up_page(page, PG_locked); } EXPORT_SYMBOL(unlock_page); /** * end_page_writeback - end writeback against a page * @page: the page */ void end_page_writeback(struct page *page) { if (TestClearPageReclaim(page)) rotate_reclaimable_page(page); if (!test_clear_page_writeback(page)) BUG(); smp_mb__after_clear_bit(); wake_up_page(page, PG_writeback); } EXPORT_SYMBOL(end_page_writeback); /** * __lock_page - get a lock on the page, assuming we need to sleep to get it * @page: the page to lock * * Ugly. Running sync_page() in state TASK_UNINTERRUPTIBLE is scary. If some * random driver's requestfn sets TASK_RUNNING, we could busywait. However * chances are that on the second loop, the block layer's plug list is empty, * so sync_page() will then return in state TASK_UNINTERRUPTIBLE. */ void __lock_page(struct page *page) { DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); __wait_on_bit_lock(page_waitqueue(page), &wait, sync_page, TASK_UNINTERRUPTIBLE); } EXPORT_SYMBOL(__lock_page); int __lock_page_killable(struct page *page) { DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); return __wait_on_bit_lock(page_waitqueue(page), &wait, sync_page_killable, TASK_KILLABLE); } EXPORT_SYMBOL_GPL(__lock_page_killable); /** * __lock_page_nosync - get a lock on the page, without calling sync_page() * @page: the page to lock * * Variant of lock_page that does not require the caller to hold a reference * on the page's mapping. 
*/ void __lock_page_nosync(struct page *page) { DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); __wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock, TASK_UNINTERRUPTIBLE); } /** * find_get_page - find and get a page reference * @mapping: the address_space to search * @offset: the page index * * Is there a pagecache struct page at the given (mapping, offset) tuple? * If yes, increment its refcount and return it; if no, return NULL. */ struct page *find_get_page(struct address_space *mapping, pgoff_t offset) { void **pagep; struct page *page; rcu_read_lock(); repeat: page = NULL; pagep = radix_tree_lookup_slot(&mapping->page_tree, offset); if (pagep) { page = radix_tree_deref_slot(pagep); if (unlikely(!page)) goto out; if (radix_tree_deref_retry(page)) goto repeat; if (!page_cache_get_speculative(page)) goto repeat; /* * Has the page moved? * This is part of the lockless pagecache protocol. See * include/linux/pagemap.h for details. */ if (unlikely(page != *pagep)) { page_cache_release(page); goto repeat; } } out: rcu_read_unlock(); return page; } EXPORT_SYMBOL(find_get_page); /** * find_lock_page - locate, pin and lock a pagecache page * @mapping: the address_space to search * @offset: the page index * * Locates the desired pagecache page, locks it, increments its reference * count and returns its address. * * Returns zero if the page was not present. find_lock_page() may sleep. */ struct page *find_lock_page(struct address_space *mapping, pgoff_t offset) { struct page *page; repeat: page = find_get_page(mapping, offset); if (page) { lock_page(page); /* Has the page been truncated? */ if (unlikely(page->mapping != mapping)) { unlock_page(page); page_cache_release(page); goto repeat; } VM_BUG_ON(page->index != offset); } return page; } EXPORT_SYMBOL(find_lock_page); /** * find_or_create_page - locate or add a pagecache page * @mapping: the page's address_space * @index: the page's index into the mapping * @gfp_mask: page allocation mode * * Locates a page in the pagecache. If the page is not present, a new page * is allocated using @gfp_mask and is added to the pagecache and to the VM's * LRU list. The returned page is locked and has its reference count * incremented. * * find_or_create_page() may sleep, even if @gfp_flags specifies an atomic * allocation! * * find_or_create_page() returns the desired page's address, or zero on * memory exhaustion. */ struct page *find_or_create_page(struct address_space *mapping, pgoff_t index, gfp_t gfp_mask) { struct page *page; int err; repeat: page = find_lock_page(mapping, index); if (!page) { page = __page_cache_alloc(gfp_mask); if (!page) return NULL; /* * We want a regular kernel memory (not highmem or DMA etc) * allocation for the radix tree nodes, but we need to honour * the context-specific requirements the caller has asked for. * GFP_RECLAIM_MASK collects those requirements. */ err = add_to_page_cache_lru(page, mapping, index, (gfp_mask & GFP_RECLAIM_MASK)); if (unlikely(err)) { page_cache_release(page); page = NULL; if (err == -EEXIST) goto repeat; } } return page; } EXPORT_SYMBOL(find_or_create_page); /** * find_get_pages - gang pagecache lookup * @mapping: The address_space to search * @start: The starting page index * @nr_pages: The maximum number of pages * @pages: Where the resulting pages are placed * * find_get_pages() will search for and return a group of up to * @nr_pages pages in the mapping. The pages are placed at @pages. * find_get_pages() takes a reference against the returned pages. 
* * The search returns a group of mapping-contiguous pages with ascending * indexes. There may be holes in the indices due to not-present pages. * * find_get_pages() returns the number of pages which were found. */ unsigned find_get_pages(struct address_space *mapping, pgoff_t start, unsigned int nr_pages, struct page **pages) { unsigned int i; unsigned int ret; unsigned int nr_found; rcu_read_lock(); restart: nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree, (void ***)pages, start, nr_pages); ret = 0; for (i = 0; i < nr_found; i++) { struct page *page; repeat: page = radix_tree_deref_slot((void **)pages[i]); if (unlikely(!page)) continue; if (radix_tree_deref_retry(page)) { if (ret) start = pages[ret-1]->index; goto restart; } if (!page_cache_get_speculative(page)) goto repeat; /* Has the page moved? */ if (unlikely(page != *((void **)pages[i]))) { page_cache_release(page); goto repeat; } pages[ret] = page; ret++; } rcu_read_unlock(); return ret; } /** * find_get_pages_contig - gang contiguous pagecache lookup * @mapping: The address_space to search * @index: The starting page index * @nr_pages: The maximum number of pages * @pages: Where the resulting pages are placed * * find_get_pages_contig() works exactly like find_get_pages(), except * that the returned number of pages are guaranteed to be contiguous. * * find_get_pages_contig() returns the number of pages which were found. */ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index, unsigned int nr_pages, struct page **pages) { unsigned int i; unsigned int ret; unsigned int nr_found; rcu_read_lock(); restart: nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree, (void ***)pages, index, nr_pages); ret = 0; for (i = 0; i < nr_found; i++) { struct page *page; repeat: page = radix_tree_deref_slot((void **)pages[i]); if (unlikely(!page)) continue; if (radix_tree_deref_retry(page)) goto restart; if (page->mapping == NULL || page->index != index) break; if (!page_cache_get_speculative(page)) goto repeat; /* Has the page moved? */ if (unlikely(page != *((void **)pages[i]))) { page_cache_release(page); goto repeat; } pages[ret] = page; ret++; index++; } rcu_read_unlock(); return ret; } EXPORT_SYMBOL(find_get_pages_contig); /** * find_get_pages_tag - find and return pages that match @tag * @mapping: the address_space to search * @index: the starting page index * @tag: the tag index * @nr_pages: the maximum number of pages * @pages: where the resulting pages are placed * * Like find_get_pages, except we only return pages which are tagged with * @tag. We update @index to index the next page for the traversal. */ unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index, int tag, unsigned int nr_pages, struct page **pages) { unsigned int i; unsigned int ret; unsigned int nr_found; rcu_read_lock(); restart: nr_found = radix_tree_gang_lookup_tag_slot(&mapping->page_tree, (void ***)pages, *index, nr_pages, tag); ret = 0; for (i = 0; i < nr_found; i++) { struct page *page; repeat: page = radix_tree_deref_slot((void **)pages[i]); if (unlikely(!page)) continue; if (radix_tree_deref_retry(page)) goto restart; if (!page_cache_get_speculative(page)) goto repeat; /* Has the page moved? 
*/ if (unlikely(page != *((void **)pages[i]))) { page_cache_release(page); goto repeat; } pages[ret] = page; ret++; } rcu_read_unlock(); if (ret) *index = pages[ret - 1]->index + 1; return ret; } EXPORT_SYMBOL(find_get_pages_tag); /** * grab_cache_page_nowait - returns locked page at given index in given cache * @mapping: target address_space * @index: the page index * * Same as grab_cache_page(), but do not wait if the page is unavailable. * This is intended for speculative data generators, where the data can * be regenerated if the page couldn't be grabbed. This routine should * be safe to call while holding the lock for another page. * * Clear __GFP_FS when allocating the page to avoid recursion into the fs * and deadlock against the caller's locked page. */ struct page * grab_cache_page_nowait(struct address_space *mapping, pgoff_t index) { struct page *page = find_get_page(mapping, index); if (page) { if (trylock_page(page)) return page; page_cache_release(page); return NULL; } page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS); if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) { page_cache_release(page); page = NULL; } return page; } EXPORT_SYMBOL(grab_cache_page_nowait); /* * CD/DVDs are error prone. When a medium error occurs, the driver may fail * a _large_ part of the i/o request. Imagine the worst scenario: * * ---R__________________________________________B__________ * ^ reading here ^ bad block(assume 4k) * * read(R) => miss => readahead(R...B) => media error => frustrating retries * => failing the whole request => read(R) => read(R+1) => * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) => * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) => * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ...... * * It is going insane. Fix it by quickly scaling down the readahead size. */ static void shrink_readahead_size_eio(struct file *filp, struct file_ra_state *ra) { ra->ra_pages /= 4; } /** * do_generic_file_read - generic file read routine * @filp: the file to read * @ppos: current file position * @desc: read_descriptor * @actor: read method * * This is a generic file read routine, and uses the * mapping->a_ops->readpage() function for the actual low-level stuff. * * This is really ugly. But the goto's actually try to clarify some * of the logic when it comes to error handling etc. 
*/ static void do_generic_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor) { struct address_space *mapping = filp->f_mapping; struct inode *inode = mapping->host; struct file_ra_state *ra = &filp->f_ra; pgoff_t index; pgoff_t last_index; pgoff_t prev_index; unsigned long offset; /* offset into pagecache page */ unsigned int prev_offset; int error; index = *ppos >> PAGE_CACHE_SHIFT; prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT; prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1); last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT; offset = *ppos & ~PAGE_CACHE_MASK; for (;;) { struct page *page; pgoff_t end_index; loff_t isize; unsigned long nr, ret; cond_resched(); find_page: page = find_get_page(mapping, index); if (!page) { page_cache_sync_readahead(mapping, ra, filp, index, last_index - index); page = find_get_page(mapping, index); if (unlikely(page == NULL)) goto no_cached_page; } if (PageReadahead(page)) { page_cache_async_readahead(mapping, ra, filp, page, index, last_index - index); } if (!PageUptodate(page)) { if (inode->i_blkbits == PAGE_CACHE_SHIFT || !mapping->a_ops->is_partially_uptodate) goto page_not_up_to_date; if (!trylock_page(page)) goto page_not_up_to_date; /* Did it get truncated before we got the lock? */ if (!page->mapping) goto page_not_up_to_date_locked; if (!mapping->a_ops->is_partially_uptodate(page, desc, offset)) goto page_not_up_to_date_locked; unlock_page(page); } page_ok: /* * i_size must be checked after we know the page is Uptodate. * * Checking i_size after the check allows us to calculate * the correct value for "nr", which means the zero-filled * part of the page is not copied back to userspace (unless * another truncate extends the file - this is desired though). */ isize = i_size_read(inode); end_index = (isize - 1) >> PAGE_CACHE_SHIFT; if (unlikely(!isize || index > end_index)) { page_cache_release(page); goto out; } /* nr is the maximum number of bytes to copy from this page */ nr = PAGE_CACHE_SIZE; if (index == end_index) { nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; if (nr <= offset) { page_cache_release(page); goto out; } } nr = nr - offset; /* If users can be writing to this page using arbitrary * virtual addresses, take care about potential aliasing * before reading the page on the kernel side. */ if (mapping_writably_mapped(mapping)) flush_dcache_page(page); /* * When a sequential read accesses a page several times, * only mark it as accessed the first time. */ if (prev_index != index || offset != prev_offset) mark_page_accessed(page); prev_index = index; /* * Ok, we have the page, and it's up-to-date, so * now we can copy it to user space... * * The actor routine returns how many bytes were actually used.. * NOTE! This may not be the same as how much of a user buffer * we filled up (we may be padding etc), so we can only update * "pos" here (the actor routine has to update the user buffer * pointers and the remaining count). */ ret = actor(desc, page, offset, nr); offset += ret; index += offset >> PAGE_CACHE_SHIFT; offset &= ~PAGE_CACHE_MASK; prev_offset = offset; page_cache_release(page); if (ret == nr && desc->count) continue; goto out; page_not_up_to_date: /* Get exclusive access to the page ... */ error = lock_page_killable(page); if (unlikely(error)) goto readpage_error; page_not_up_to_date_locked: /* Did it get truncated before we got the lock? */ if (!page->mapping) { unlock_page(page); page_cache_release(page); continue; } /* Did somebody else fill it already? 
*/ if (PageUptodate(page)) { unlock_page(page); goto page_ok; } readpage: /* * A previous I/O error may have been due to temporary * failures, eg. multipath errors. * PG_error will be set again if readpage fails. */ ClearPageError(page); /* Start the actual read. The read will unlock the page. */ error = mapping->a_ops->readpage(filp, page); if (unlikely(error)) { if (error == AOP_TRUNCATED_PAGE) { page_cache_release(page); goto find_page; } goto readpage_error; } if (!PageUptodate(page)) { error = lock_page_killable(page); if (unlikely(error)) goto readpage_error; if (!PageUptodate(page)) { if (page->mapping == NULL) { /* * invalidate_mapping_pages got it */ unlock_page(page); page_cache_release(page); goto find_page; } unlock_page(page); shrink_readahead_size_eio(filp, ra); error = -EIO; goto readpage_error; } unlock_page(page); } goto page_ok; readpage_error: /* UHHUH! A synchronous read error occurred. Report it */ desc->error = error; page_cache_release(page); goto out; no_cached_page: /* * Ok, it wasn't cached, so we need to create a new * page.. */ page = page_cache_alloc_cold(mapping); if (!page) { desc->error = -ENOMEM; goto out; } error = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL); if (error) { page_cache_release(page); if (error == -EEXIST) goto find_page; desc->error = error; goto out; } goto readpage; } out: ra->prev_pos = prev_index; ra->prev_pos <<= PAGE_CACHE_SHIFT; ra->prev_pos |= prev_offset; *ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset; file_accessed(filp); } int file_read_actor(read_descriptor_t *desc, struct page *page, unsigned long offset, unsigned long size) { char *kaddr; unsigned long left, count = desc->count; if (size > count) size = count; /* * Faults on the destination of a read are common, so do it before * taking the kmap. */ if (!fault_in_pages_writeable(desc->arg.buf, size)) { kaddr = kmap_atomic(page, KM_USER0); left = __copy_to_user_inatomic(desc->arg.buf, kaddr + offset, size); kunmap_atomic(kaddr, KM_USER0); if (left == 0) goto success; } /* Do it the slow way */ kaddr = kmap(page); left = __copy_to_user(desc->arg.buf, kaddr + offset, size); kunmap(page); if (left) { size -= left; desc->error = -EFAULT; } success: desc->count = count - size; desc->written += size; desc->arg.buf += size; return size; } /* * Performs necessary checks before doing a write * @iov: io vector request * @nr_segs: number of segments in the iovec * @count: number of bytes to write * @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE * * Adjust number of segments and amount of bytes to write (nr_segs should be * properly initialized first). Returns appropriate error code that caller * should return or zero in case that write should be allowed. */ int generic_segment_checks(const struct iovec *iov, unsigned long *nr_segs, size_t *count, int access_flags) { unsigned long seg; size_t cnt = 0; for (seg = 0; seg < *nr_segs; seg++) { const struct iovec *iv = &iov[seg]; /* * If any segment has a negative length, or the cumulative * length ever wraps negative then return -EINVAL. 
*/ cnt += iv->iov_len; if (unlikely((ssize_t)(cnt|iv->iov_len) < 0)) return -EINVAL; if (access_ok(access_flags, iv->iov_base, iv->iov_len)) continue; if (seg == 0) return -EFAULT; *nr_segs = seg; cnt -= iv->iov_len; /* This segment is no good */ break; } *count = cnt; return 0; } EXPORT_SYMBOL(generic_segment_checks); /** * generic_file_aio_read - generic filesystem read routine * @iocb: kernel I/O control block * @iov: io vector request * @nr_segs: number of segments in the iovec * @pos: current file position * * This is the "read()" routine for all filesystems * that can use the page cache directly. */ ssize_t generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct file *filp = iocb->ki_filp; ssize_t retval; unsigned long seg = 0; size_t count; loff_t *ppos = &iocb->ki_pos; count = 0; retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE); if (retval) return retval; /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */ if (filp->f_flags & O_DIRECT) { loff_t size; struct address_space *mapping; struct inode *inode; mapping = filp->f_mapping; inode = mapping->host; if (!count) goto out; /* skip atime */ size = i_size_read(inode); if (pos < size) { retval = filemap_write_and_wait_range(mapping, pos, pos + iov_length(iov, nr_segs) - 1); if (!retval) { retval = mapping->a_ops->direct_IO(READ, iocb, iov, pos, nr_segs); } if (retval > 0) { *ppos = pos + retval; count -= retval; } /* * Btrfs can have a short DIO read if we encounter * compressed extents, so if there was an error, or if * we've already read everything we wanted to, or if * there was a short read because we hit EOF, go ahead * and return. Otherwise fallthrough to buffered io for * the rest of the read. */ if (retval < 0 || !count || *ppos >= size) { file_accessed(filp); goto out; } } } count = retval; for (seg = 0; seg < nr_segs; seg++) { read_descriptor_t desc; loff_t offset = 0; /* * If we did a short DIO read we need to skip the section of the * iov that we've already read data into. 
*/ if (count) { if (count > iov[seg].iov_len) { count -= iov[seg].iov_len; continue; } offset = count; count = 0; } desc.written = 0; desc.arg.buf = iov[seg].iov_base + offset; desc.count = iov[seg].iov_len - offset; if (desc.count == 0) continue; desc.error = 0; do_generic_file_read(filp, ppos, &desc, file_read_actor); retval += desc.written; if (desc.error) { retval = retval ?: desc.error; break; } if (desc.count > 0) break; } out: return retval; } EXPORT_SYMBOL(generic_file_aio_read); static ssize_t do_readahead(struct address_space *mapping, struct file *filp, pgoff_t index, unsigned long nr) { if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage) return -EINVAL; force_page_cache_readahead(mapping, filp, index, nr); return 0; } SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count) { ssize_t ret; struct file *file; ret = -EBADF; file = fget(fd); if (file) { if (file->f_mode & FMODE_READ) { struct address_space *mapping = file->f_mapping; pgoff_t start = offset >> PAGE_CACHE_SHIFT; pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT; unsigned long len = end - start + 1; ret = do_readahead(mapping, file, start, len); } fput(file); } return ret; } #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS asmlinkage long SyS_readahead(long fd, loff_t offset, long count) { return SYSC_readahead((int) fd, offset, (size_t) count); } SYSCALL_ALIAS(sys_readahead, SyS_readahead); #endif #ifdef CONFIG_MMU /** * page_cache_read - adds requested page to the page cache if not already there * @file: file to read * @offset: page index * * This adds the requested page to the page cache if it isn't already there, * and schedules an I/O to read in its contents from disk. */ static int page_cache_read(struct file *file, pgoff_t offset) { struct address_space *mapping = file->f_mapping; struct page *page; int ret; do { page = page_cache_alloc_cold(mapping); if (!page) return -ENOMEM; ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL); if (ret == 0) ret = mapping->a_ops->readpage(file, page); else if (ret == -EEXIST) ret = 0; /* losing race to add is OK */ page_cache_release(page); } while (ret == AOP_TRUNCATED_PAGE); return ret; } #define MMAP_LOTSAMISS (100) /* * Synchronous readahead happens when we don't even find * a page in the page cache at all. */ static void do_sync_mmap_readahead(struct vm_area_struct *vma, struct file_ra_state *ra, struct file *file, pgoff_t offset) { unsigned long ra_pages; struct address_space *mapping = file->f_mapping; /* If we don't want any read-ahead, don't bother */ if (VM_RandomReadHint(vma)) return; if (VM_SequentialReadHint(vma) || offset - 1 == (ra->prev_pos >> PAGE_CACHE_SHIFT)) { page_cache_sync_readahead(mapping, ra, file, offset, ra->ra_pages); return; } if (ra->mmap_miss < INT_MAX) ra->mmap_miss++; /* * Do we miss much more than hit in this file? If so, * stop bothering with read-ahead. It will only hurt. */ if (ra->mmap_miss > MMAP_LOTSAMISS) return; /* * mmap read-around */ ra_pages = max_sane_readahead(ra->ra_pages); if (ra_pages) { ra->start = max_t(long, 0, offset - ra_pages/2); ra->size = ra_pages; ra->async_size = 0; ra_submit(ra, mapping, file); } } /* * Asynchronous readahead happens when we find the page and PG_readahead, * so we want to possibly extend the readahead further.. 
*/ static void do_async_mmap_readahead(struct vm_area_struct *vma, struct file_ra_state *ra, struct file *file, struct page *page, pgoff_t offset) { struct address_space *mapping = file->f_mapping; /* If we don't want any read-ahead, don't bother */ if (VM_RandomReadHint(vma)) return; if (ra->mmap_miss > 0) ra->mmap_miss--; if (PageReadahead(page)) page_cache_async_readahead(mapping, ra, file, page, offset, ra->ra_pages); } /** * filemap_fault - read in file data for page fault handling * @vma: vma in which the fault was taken * @vmf: struct vm_fault containing details of the fault * * filemap_fault() is invoked via the vma operations vector for a * mapped memory region to read in file data during a page fault. * * The goto's are kind of ugly, but this streamlines the normal case of having * it in the page cache, and handles the special cases reasonably without * having a lot of duplicated code. */ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { int error; struct file *file = vma->vm_file; struct address_space *mapping = file->f_mapping; struct file_ra_state *ra = &file->f_ra; struct inode *inode = mapping->host; pgoff_t offset = vmf->pgoff; struct page *page; pgoff_t size; int ret = 0; size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; if (offset >= size) return VM_FAULT_SIGBUS; /* * Do we have something in the page cache already? */ page = find_get_page(mapping, offset); if (likely(page)) { /* * We found the page, so try async readahead before * waiting for the lock. */ do_async_mmap_readahead(vma, ra, file, page, offset); lock_page(page); /* Did it get truncated? */ if (unlikely(page->mapping != mapping)) { unlock_page(page); put_page(page); goto no_cached_page; } } else { /* No page in the page cache at all */ do_sync_mmap_readahead(vma, ra, file, offset); count_vm_event(PGMAJFAULT); ret = VM_FAULT_MAJOR; retry_find: page = find_lock_page(mapping, offset); if (!page) goto no_cached_page; } /* * We have a locked page in the page cache, now we need to check * that it's up-to-date. If not, it is going to be due to an error. */ if (unlikely(!PageUptodate(page))) goto page_not_uptodate; /* * Found the page and have a reference on it. * We must recheck i_size under page lock. */ size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; if (unlikely(offset >= size)) { unlock_page(page); page_cache_release(page); return VM_FAULT_SIGBUS; } ra->prev_pos = (loff_t)offset << PAGE_CACHE_SHIFT; vmf->page = page; return ret | VM_FAULT_LOCKED; no_cached_page: /* * We're only likely to ever get here if MADV_RANDOM is in * effect. */ error = page_cache_read(file, offset); /* * The page we want has now been added to the page cache. * In the unlikely event that someone removed it in the * meantime, we'll just come back here and read it again. */ if (error >= 0) goto retry_find; /* * An error return from page_cache_read can result if the * system is low on memory, or a problem occurs while trying * to schedule I/O. */ if (error == -ENOMEM) return VM_FAULT_OOM; return VM_FAULT_SIGBUS; page_not_uptodate: /* * Umm, take care of errors if the page isn't up-to-date. * Try to re-read it _once_. We do this synchronously, * because there really aren't any performance issues here * and we need to check for errors. 
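 * (Editor's note: success -- or AOP_TRUNCATED_PAGE -- loops back to
 * retry_find; any other failure shrinks the readahead window via
 * shrink_readahead_size_eio() and the fault reports VM_FAULT_SIGBUS.)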
*/ ClearPageError(page); error = mapping->a_ops->readpage(file, page); if (!error) { wait_on_page_locked(page); if (!PageUptodate(page)) error = -EIO; } page_cache_release(page); if (!error || error == AOP_TRUNCATED_PAGE) goto retry_find; /* Things didn't work out. Return zero to tell the mm layer so. */ shrink_readahead_size_eio(file, ra); return VM_FAULT_SIGBUS; } EXPORT_SYMBOL(filemap_fault); const struct vm_operations_struct generic_file_vm_ops = { .fault = filemap_fault, }; /* This is used for a general mmap of a disk file */ int generic_file_mmap(struct file * file, struct vm_area_struct * vma) { struct address_space *mapping = file->f_mapping; if (!mapping->a_ops->readpage) return -ENOEXEC; file_accessed(file); vma->vm_ops = &generic_file_vm_ops; vma->vm_flags |= VM_CAN_NONLINEAR; return 0; } /* * This is for filesystems which do not implement ->writepage. */ int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma) { if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) return -EINVAL; return generic_file_mmap(file, vma); } #else int generic_file_mmap(struct file * file, struct vm_area_struct * vma) { return -ENOSYS; } int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma) { return -ENOSYS; } #endif /* CONFIG_MMU */ EXPORT_SYMBOL(generic_file_mmap); EXPORT_SYMBOL(generic_file_readonly_mmap); static struct page *__read_cache_page(struct address_space *mapping, pgoff_t index, int (*filler)(void *,struct page*), void *data, gfp_t gfp) { struct page *page; int err; repeat: page = find_get_page(mapping, index); if (!page) { page = __page_cache_alloc(gfp | __GFP_COLD); if (!page) return ERR_PTR(-ENOMEM); err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL); if (unlikely(err)) { page_cache_release(page); if (err == -EEXIST) goto repeat; /* Presumably ENOMEM for radix tree node */ return ERR_PTR(err); } err = filler(data, page); if (err < 0) { page_cache_release(page); page = ERR_PTR(err); } } return page; } static struct page *do_read_cache_page(struct address_space *mapping, pgoff_t index, int (*filler)(void *,struct page*), void *data, gfp_t gfp) { struct page *page; int err; retry: page = __read_cache_page(mapping, index, filler, data, gfp); if (IS_ERR(page)) return page; if (PageUptodate(page)) goto out; lock_page(page); if (!page->mapping) { unlock_page(page); page_cache_release(page); goto retry; } if (PageUptodate(page)) { unlock_page(page); goto out; } err = filler(data, page); if (err < 0) { page_cache_release(page); return ERR_PTR(err); } out: mark_page_accessed(page); return page; } /** * read_cache_page_async - read into page cache, fill it if needed * @mapping: the page's address_space * @index: the page index * @filler: function to perform the read * @data: destination for read data * * Same as read_cache_page, but don't wait for page to become unlocked * after submitting it to the filler. * * Read into the page cache. If a page already exists, and PageUptodate() is * not set, try to fill the page but don't wait for it to become unlocked. * * If the page does not get brought uptodate, return -EIO. 
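 * (Editor's note: "async" only means the caller is not made to wait on
 * the page lock once the filler has been kicked off; pair the result with
 * wait_on_page_read(), as read_cache_page() below does, to get the
 * synchronous behaviour.)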
*/ struct page *read_cache_page_async(struct address_space *mapping, pgoff_t index, int (*filler)(void *,struct page*), void *data) { return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping)); } EXPORT_SYMBOL(read_cache_page_async); static struct page *wait_on_page_read(struct page *page) { if (!IS_ERR(page)) { wait_on_page_locked(page); if (!PageUptodate(page)) { page_cache_release(page); page = ERR_PTR(-EIO); } } return page; } /** * read_cache_page_gfp - read into page cache, using specified page allocation flags. * @mapping: the page's address_space * @index: the page index * @gfp: the page allocator flags to use if allocating * * This is the same as "read_mapping_page(mapping, index, NULL)", but with * any new page allocations done using the specified allocation flags. Note * that the Radix tree operations will still use GFP_KERNEL, so you can't * expect to do this atomically or anything like that - but you can pass in * other page requirements. * * If the page does not get brought uptodate, return -EIO. */ struct page *read_cache_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp) { filler_t *filler = (filler_t *)mapping->a_ops->readpage; return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp)); } EXPORT_SYMBOL(read_cache_page_gfp); /** * read_cache_page - read into page cache, fill it if needed * @mapping: the page's address_space * @index: the page index * @filler: function to perform the read * @data: destination for read data * * Read into the page cache. If a page already exists, and PageUptodate() is * not set, try to fill the page then wait for it to become unlocked. * * If the page does not get brought uptodate, return -EIO. */ struct page *read_cache_page(struct address_space *mapping, pgoff_t index, int (*filler)(void *,struct page*), void *data) { return wait_on_page_read(read_cache_page_async(mapping, index, filler, data)); } EXPORT_SYMBOL(read_cache_page); /* * The logic we want is * * if suid or (sgid and xgrp) * remove privs */ int should_remove_suid(struct dentry *dentry) { mode_t mode = dentry->d_inode->i_mode; int kill = 0; /* suid always must be killed */ if (unlikely(mode & S_ISUID)) kill = ATTR_KILL_SUID; /* * sgid without any exec bits is just a mandatory locking mark; leave * it alone. If some exec bits are set, it's a real sgid; kill it. 
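 * (Editor's note: so mode 02644 -- setgid but no group execute -- is a
 * mandatory-locking mark and survives the write, while 02755 gets
 * ATTR_KILL_SGID applied below.)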
*/ if (unlikely((mode & S_ISGID) && (mode & S_IXGRP))) kill |= ATTR_KILL_SGID; if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode))) return kill; return 0; } EXPORT_SYMBOL(should_remove_suid); static int __remove_suid(struct dentry *dentry, int kill) { struct iattr newattrs; newattrs.ia_valid = ATTR_FORCE | kill; return notify_change(dentry, &newattrs); } int file_remove_suid(struct file *file) { struct dentry *dentry = file->f_path.dentry; int killsuid = should_remove_suid(dentry); int killpriv = security_inode_need_killpriv(dentry); int error = 0; if (killpriv < 0) return killpriv; if (killpriv) error = security_inode_killpriv(dentry); if (!error && killsuid) error = __remove_suid(dentry, killsuid); return error; } EXPORT_SYMBOL(file_remove_suid); static size_t __iovec_copy_from_user_inatomic(char *vaddr, const struct iovec *iov, size_t base, size_t bytes) { size_t copied = 0, left = 0; while (bytes) { char __user *buf = iov->iov_base + base; int copy = min(bytes, iov->iov_len - base); base = 0; left = __copy_from_user_inatomic(vaddr, buf, copy); copied += copy; bytes -= copy; vaddr += copy; iov++; if (unlikely(left)) break; } return copied - left; } /* * Copy as much as we can into the page and return the number of bytes which * were successfully copied. If a fault is encountered then return the number of * bytes which were copied. */ size_t iov_iter_copy_from_user_atomic(struct page *page, struct iov_iter *i, unsigned long offset, size_t bytes) { char *kaddr; size_t copied; BUG_ON(!in_atomic()); kaddr = kmap_atomic(page, KM_USER0); if (likely(i->nr_segs == 1)) { int left; char __user *buf = i->iov->iov_base + i->iov_offset; left = __copy_from_user_inatomic(kaddr + offset, buf, bytes); copied = bytes - left; } else { copied = __iovec_copy_from_user_inatomic(kaddr + offset, i->iov, i->iov_offset, bytes); } kunmap_atomic(kaddr, KM_USER0); return copied; } EXPORT_SYMBOL(iov_iter_copy_from_user_atomic); /* * This has the same sideeffects and return value as * iov_iter_copy_from_user_atomic(). * The difference is that it attempts to resolve faults. * Page must not be locked. */ size_t iov_iter_copy_from_user(struct page *page, struct iov_iter *i, unsigned long offset, size_t bytes) { char *kaddr; size_t copied; kaddr = kmap(page); if (likely(i->nr_segs == 1)) { int left; char __user *buf = i->iov->iov_base + i->iov_offset; left = __copy_from_user(kaddr + offset, buf, bytes); copied = bytes - left; } else { copied = __iovec_copy_from_user_inatomic(kaddr + offset, i->iov, i->iov_offset, bytes); } kunmap(page); return copied; } EXPORT_SYMBOL(iov_iter_copy_from_user); void iov_iter_advance(struct iov_iter *i, size_t bytes) { BUG_ON(i->count < bytes); if (likely(i->nr_segs == 1)) { i->iov_offset += bytes; i->count -= bytes; } else { const struct iovec *iov = i->iov; size_t base = i->iov_offset; /* * The !iov->iov_len check ensures we skip over unlikely * zero-length segments (without overruning the iovec). */ while (bytes || unlikely(i->count && !iov->iov_len)) { int copy; copy = min(bytes, iov->iov_len - base); BUG_ON(!i->count || i->count < copy); i->count -= copy; bytes -= copy; base += copy; if (iov->iov_len == base) { iov++; base = 0; } } i->iov = iov; i->iov_offset = base; } } EXPORT_SYMBOL(iov_iter_advance); /* * Fault in the first iovec of the given iov_iter, to a maximum length * of bytes. Returns 0 on success, or non-zero if the memory could not be * accessed (ie. because it is an invalid address). 
* * writev-intensive code may want this to prefault several iovecs -- that * would be possible (callers must not rely on the fact that _only_ the * first iovec will be faulted with the current implementation). */ int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes) { char __user *buf = i->iov->iov_base + i->iov_offset; bytes = min(bytes, i->iov->iov_len - i->iov_offset); return fault_in_pages_readable(buf, bytes); } EXPORT_SYMBOL(iov_iter_fault_in_readable); /* * Return the count of just the current iov_iter segment. */ size_t iov_iter_single_seg_count(struct iov_iter *i) { const struct iovec *iov = i->iov; if (i->nr_segs == 1) return i->count; else return min(i->count, iov->iov_len - i->iov_offset); } EXPORT_SYMBOL(iov_iter_single_seg_count); /* * Performs necessary checks before doing a write * * Can adjust writing position or amount of bytes to write. * Returns appropriate error code that caller should return or * zero in case that write should be allowed. */ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk) { struct inode *inode = file->f_mapping->host; unsigned long limit = rlimit(RLIMIT_FSIZE); if (unlikely(*pos < 0)) return -EINVAL; if (!isblk) { /* FIXME: this is for backwards compatibility with 2.4 */ if (file->f_flags & O_APPEND) *pos = i_size_read(inode); if (limit != RLIM_INFINITY) { if (*pos >= limit) { send_sig(SIGXFSZ, current, 0); return -EFBIG; } if (*count > limit - (typeof(limit))*pos) { *count = limit - (typeof(limit))*pos; } } } /* * LFS rule */ if (unlikely(*pos + *count > MAX_NON_LFS && !(file->f_flags & O_LARGEFILE))) { if (*pos >= MAX_NON_LFS) { return -EFBIG; } if (*count > MAX_NON_LFS - (unsigned long)*pos) { *count = MAX_NON_LFS - (unsigned long)*pos; } } /* * Are we about to exceed the fs block limit ? * * If we have written data it becomes a short write. If we have * exceeded without writing data we send a signal and return EFBIG. * Linus frestrict idea will clean these up nicely.. 
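 * (Editor's note: concretely, a write straddling s_maxbytes is shortened
 * below to end exactly at the limit, while one starting at or beyond it
 * fails with -EFBIG outright -- mirroring the RLIMIT_FSIZE handling
 * earlier in this function, minus the SIGXFSZ signal.)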
*/ if (likely(!isblk)) { if (unlikely(*pos >= inode->i_sb->s_maxbytes)) { if (*count || *pos > inode->i_sb->s_maxbytes) { return -EFBIG; } /* zero-length writes at ->s_maxbytes are OK */ } if (unlikely(*pos + *count > inode->i_sb->s_maxbytes)) *count = inode->i_sb->s_maxbytes - *pos; } else { #ifdef CONFIG_BLOCK loff_t isize; if (bdev_read_only(I_BDEV(inode))) return -EPERM; isize = i_size_read(inode); if (*pos >= isize) { if (*count || *pos > isize) return -ENOSPC; } if (*pos + *count > isize) *count = isize - *pos; #else return -EPERM; #endif } return 0; } EXPORT_SYMBOL(generic_write_checks); int pagecache_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { const struct address_space_operations *aops = mapping->a_ops; return aops->write_begin(file, mapping, pos, len, flags, pagep, fsdata); } EXPORT_SYMBOL(pagecache_write_begin); int pagecache_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { const struct address_space_operations *aops = mapping->a_ops; mark_page_accessed(page); return aops->write_end(file, mapping, pos, len, copied, page, fsdata); } EXPORT_SYMBOL(pagecache_write_end); ssize_t generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov, unsigned long *nr_segs, loff_t pos, loff_t *ppos, size_t count, size_t ocount) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; ssize_t written; size_t write_len; pgoff_t end; if (count != ocount) *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count); write_len = iov_length(iov, *nr_segs); end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT; written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1); if (written) goto out; /* * After a write we want buffered reads to be sure to go to disk to get * the new data. We invalidate clean cached page from the region we're * about to write. We do this *before* the write so that we can return * without clobbering -EIOCBQUEUED from ->direct_IO(). */ if (mapping->nrpages) { written = invalidate_inode_pages2_range(mapping, pos >> PAGE_CACHE_SHIFT, end); /* * If a page can not be invalidated, return 0 to fall back * to buffered write. */ if (written) { if (written == -EBUSY) return 0; goto out; } } written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs); /* * Finally, try again to invalidate clean pages which might have been * cached by non-direct readahead, or faulted in by get_user_pages() * if the source of the write was an mmap'ed region of the file * we're writing. Either one is a pretty crazy thing to do, * so we don't support it 100%. If this invalidation * fails, tough, the write still worked... */ if (mapping->nrpages) { invalidate_inode_pages2_range(mapping, pos >> PAGE_CACHE_SHIFT, end); } if (written > 0) { loff_t end = pos + written; if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) { i_size_write(inode, end); mark_inode_dirty(inode); } *ppos = end; } out: return written; } EXPORT_SYMBOL(generic_file_direct_write); /* * Find or create a page at the given pagecache position. Return the locked * page. This function is specifically for buffered writes. 
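 * (Editor's note: callers pass AOP_FLAG_NOFS when they already hold
 * filesystem locks; that masks __GFP_FS out of the page allocation below
 * so reclaim cannot recurse back into the filesystem.)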
*/ struct page *grab_cache_page_write_begin(struct address_space *mapping, pgoff_t index, unsigned flags) { int status; struct page *page; gfp_t gfp_notmask = 0; if (flags & AOP_FLAG_NOFS) gfp_notmask = __GFP_FS; repeat: page = find_lock_page(mapping, index); if (likely(page)) return page; page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask); if (!page) return NULL; status = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL & ~gfp_notmask); if (unlikely(status)) { page_cache_release(page); if (status == -EEXIST) goto repeat; return NULL; } return page; } EXPORT_SYMBOL(grab_cache_page_write_begin); static ssize_t generic_perform_write(struct file *file, struct iov_iter *i, loff_t pos) { struct address_space *mapping = file->f_mapping; const struct address_space_operations *a_ops = mapping->a_ops; long status = 0; ssize_t written = 0; unsigned int flags = 0; /* * Copies from kernel address space cannot fail (NFSD is a big user). */ if (segment_eq(get_fs(), KERNEL_DS)) flags |= AOP_FLAG_UNINTERRUPTIBLE; do { struct page *page; pgoff_t index; /* Pagecache index for current page */ unsigned long offset; /* Offset into pagecache page */ unsigned long bytes; /* Bytes to write to page */ size_t copied; /* Bytes copied from user */ void *fsdata; offset = (pos & (PAGE_CACHE_SIZE - 1)); index = pos >> PAGE_CACHE_SHIFT; bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, iov_iter_count(i)); again: /* * Bring in the user page that we will copy from _first_. * Otherwise there's a nasty deadlock on copying from the * same page as we're writing to, without it being marked * up-to-date. * * Not only is this an optimisation, but it is also required * to check that the address is actually valid, when atomic * usercopies are used, below. */ if (unlikely(iov_iter_fault_in_readable(i, bytes))) { status = -EFAULT; break; } status = a_ops->write_begin(file, mapping, pos, bytes, flags, &page, &fsdata); if (unlikely(status)) break; if (mapping_writably_mapped(mapping)) flush_dcache_page(page); pagefault_disable(); copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); pagefault_enable(); flush_dcache_page(page); mark_page_accessed(page); status = a_ops->write_end(file, mapping, pos, bytes, copied, page, fsdata); if (unlikely(status < 0)) break; copied = status; cond_resched(); iov_iter_advance(i, copied); if (unlikely(copied == 0)) { /* * If we were unable to copy any data at all, we must * fall back to a single segment length write. * * If we didn't fallback here, we could livelock * because not all segments in the iov can be copied at * once without a pagefault. */ bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, iov_iter_single_seg_count(i)); goto again; } pos += copied; written += copied; balance_dirty_pages_ratelimited(mapping); } while (iov_iter_count(i)); return written ? written : status; } ssize_t generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos, loff_t *ppos, size_t count, ssize_t written) { struct file *file = iocb->ki_filp; ssize_t status; struct iov_iter i; iov_iter_init(&i, iov, nr_segs, count, written); status = generic_perform_write(file, &i, pos); if (likely(status >= 0)) { written += status; *ppos = pos + status; } return written ? written : status; } EXPORT_SYMBOL(generic_file_buffered_write); /** * __generic_file_aio_write - write data to a file * @iocb: IO state structure (file, offset, etc.) 
* @iov: vector with data to write * @nr_segs: number of segments in the vector * @ppos: position where to write * * This function does all the work needed for actually writing data to a * file. It does all basic checks, removes SUID from the file, updates * modification times and calls proper subroutines depending on whether we * do direct IO or a standard buffered write. * * It expects i_mutex to be grabbed unless we work on a block device or similar * object which does not need locking at all. * * This function does *not* take care of syncing data in case of O_SYNC write. * A caller has to handle it. This is mainly due to the fact that we want to * avoid syncing under i_mutex. */ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t *ppos) { struct file *file = iocb->ki_filp; struct address_space * mapping = file->f_mapping; size_t ocount; /* original count */ size_t count; /* after file limit checks */ struct inode *inode = mapping->host; loff_t pos; ssize_t written; ssize_t err; ocount = 0; err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ); if (err) return err; count = ocount; pos = *ppos; vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); /* We can write back this queue in page reclaim */ current->backing_dev_info = mapping->backing_dev_info; written = 0; err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); if (err) goto out; if (count == 0) goto out; err = file_remove_suid(file); if (err) goto out; file_update_time(file); /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */ if (unlikely(file->f_flags & O_DIRECT)) { loff_t endbyte; ssize_t written_buffered; written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos, count, ocount); if (written < 0 || written == count) goto out; /* * direct-io write to a hole: fall through to buffered I/O * for completing the rest of the request. */ pos += written; count -= written; written_buffered = generic_file_buffered_write(iocb, iov, nr_segs, pos, ppos, count, written); /* * If generic_file_buffered_write() retuned a synchronous error * then we want to return the number of bytes which were * direct-written, or the error code if that was zero. Note * that this differs from normal direct-io semantics, which * will return -EFOO even if some bytes were written. */ if (written_buffered < 0) { err = written_buffered; goto out; } /* * We need to ensure that the page cache pages are written to * disk and invalidated to preserve the expected O_DIRECT * semantics. */ endbyte = pos + written_buffered - written - 1; err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte); if (err == 0) { written = written_buffered; invalidate_mapping_pages(mapping, pos >> PAGE_CACHE_SHIFT, endbyte >> PAGE_CACHE_SHIFT); } else { /* * We don't know how much we wrote, so just return * the number of bytes which were direct-written */ } } else { written = generic_file_buffered_write(iocb, iov, nr_segs, pos, ppos, count, written); } out: current->backing_dev_info = NULL; return written ? written : err; } EXPORT_SYMBOL(__generic_file_aio_write); /** * generic_file_aio_write - write data to a file * @iocb: IO state structure * @iov: vector with data to write * @nr_segs: number of segments in the vector * @pos: position in file where to write * * This is a wrapper around __generic_file_aio_write() to be used by most * filesystems. It takes care of syncing the file in case of O_SYNC file * and acquires i_mutex as needed. 
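 * (Editor's note: the split is deliberate -- the write itself runs under
 * i_mutex, but generic_write_sync() is called only after the mutex is
 * dropped, so O_SYNC writeback never blocks other writers on the mutex.)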
*/ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; ssize_t ret; BUG_ON(iocb->ki_pos != pos); mutex_lock(&inode->i_mutex); ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos); mutex_unlock(&inode->i_mutex); if (ret > 0 || ret == -EIOCBQUEUED) { ssize_t err; err = generic_write_sync(file, pos, ret); if (err < 0 && ret > 0) ret = err; } return ret; } EXPORT_SYMBOL(generic_file_aio_write); /** * try_to_release_page() - release old fs-specific metadata on a page * * @page: the page which the kernel is trying to free * @gfp_mask: memory allocation flags (and I/O mode) * * The address_space is to try to release any data against the page * (presumably at page->private). If the release was successful, return `1'. * Otherwise return zero. * * This may also be called if PG_fscache is set on a page, indicating that the * page is known to the local caching routines. * * The @gfp_mask argument specifies whether I/O may be performed to release * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS). * */ int try_to_release_page(struct page *page, gfp_t gfp_mask) { struct address_space * const mapping = page->mapping; BUG_ON(!PageLocked(page)); if (PageWriteback(page)) return 0; if (mapping && mapping->a_ops->releasepage) return mapping->a_ops->releasepage(page, gfp_mask); return try_to_free_buffers(page); } EXPORT_SYMBOL(try_to_release_page);
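/*
 * Editor's illustrative sketch -- not part of the original file. It shows
 * how a filesystem of this kernel generation would typically wire the
 * generic page-cache entry points defined above into its file_operations;
 * the examplefs_* name is hypothetical.
 */
static const struct file_operations examplefs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,		  /* synchronous wrapper over ->aio_read */
	.write		= do_sync_write,	  /* synchronous wrapper over ->aio_write */
	.aio_read	= generic_file_aio_read,  /* buffered and O_DIRECT reads */
	.aio_write	= generic_file_aio_write, /* takes i_mutex, handles O_SYNC */
	.mmap		= generic_file_mmap,	  /* faults serviced by filemap_fault */
};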
gpl-2.0
CM-Tab-S/android_kernel_samsung_exynos5420
net/sunrpc/svcsock.c
259
44290
/* * linux/net/sunrpc/svcsock.c * * These are the RPC server socket internals. * * The server scheduling algorithm does not always distribute the load * evenly when servicing a single client. May need to modify the * svc_xprt_enqueue procedure... * * TCP support is largely untested and may be a little slow. The problem * is that we currently do two separate recvfrom's, one for the 4-byte * record length, and the second for the actual record. This could possibly * be improved by always reading a minimum size of around 100 bytes and * tucking any superfluous bytes away in a temporary store. Still, that * leaves write requests out in the rain. An alternative may be to peek at * the first skb in the queue, and if it matches the next TCP sequence * number, to extract the record marker. Yuck. * * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/fcntl.h> #include <linux/net.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/udp.h> #include <linux/tcp.h> #include <linux/unistd.h> #include <linux/slab.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/file.h> #include <linux/freezer.h> #include <net/sock.h> #include <net/checksum.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/tcp.h> #include <net/tcp_states.h> #include <asm/uaccess.h> #include <asm/ioctls.h> #include <linux/sunrpc/types.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/msg_prot.h> #include <linux/sunrpc/svcsock.h> #include <linux/sunrpc/stats.h> #include <linux/sunrpc/xprt.h> #include "sunrpc.h" #define RPCDBG_FACILITY RPCDBG_SVCXPRT static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *, int *errp, int flags); static void svc_udp_data_ready(struct sock *, int); static int svc_udp_recvfrom(struct svc_rqst *); static int svc_udp_sendto(struct svc_rqst *); static void svc_sock_detach(struct svc_xprt *); static void svc_tcp_sock_detach(struct svc_xprt *); static void svc_sock_free(struct svc_xprt *); static struct svc_xprt *svc_create_socket(struct svc_serv *, int, struct net *, struct sockaddr *, int, int); #if defined(CONFIG_SUNRPC_BACKCHANNEL) static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int, struct net *, struct sockaddr *, int, int); static void svc_bc_sock_free(struct svc_xprt *xprt); #endif /* CONFIG_SUNRPC_BACKCHANNEL */ #ifdef CONFIG_DEBUG_LOCK_ALLOC static struct lock_class_key svc_key[2]; static struct lock_class_key svc_slock_key[2]; static void svc_reclassify_socket(struct socket *sock) { struct sock *sk = sock->sk; BUG_ON(sock_owned_by_user(sk)); switch (sk->sk_family) { case AF_INET: sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD", &svc_slock_key[0], "sk_xprt.xpt_lock-AF_INET-NFSD", &svc_key[0]); break; case AF_INET6: sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD", &svc_slock_key[1], "sk_xprt.xpt_lock-AF_INET6-NFSD", &svc_key[1]); break; default: BUG(); } } #else static void svc_reclassify_socket(struct socket *sock) { } #endif /* * Release an skbuff after use */ static void svc_release_skb(struct svc_rqst *rqstp) { struct sk_buff *skb = rqstp->rq_xprt_ctxt; if (skb) { struct svc_sock *svsk = container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); rqstp->rq_xprt_ctxt = NULL; dprintk("svc: service %p, releasing skb %p\n", rqstp, skb); skb_free_datagram_locked(svsk->sk_sk, skb); } } union svc_pktinfo_u { struct in_pktinfo pkti; struct in6_pktinfo pkti6; }; 
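/*
 * Editor's note (not in the original source): svc_pktinfo_u exists purely
 * for sizing.  SVC_PKTINFO_SPACE below reserves control-message room for
 * whichever pktinfo structure the address family needs, and callers
 * stack-allocate the buffer with long alignment:
 *
 *	union {
 *		struct cmsghdr	hdr;
 *		long		all[SVC_PKTINFO_SPACE / sizeof(long)];
 *	} buffer;
 *
 * exactly as svc_sendto() and svc_udp_recvfrom() do further down.
 */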
#define SVC_PKTINFO_SPACE \ CMSG_SPACE(sizeof(union svc_pktinfo_u)) static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh) { struct svc_sock *svsk = container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); switch (svsk->sk_sk->sk_family) { case AF_INET: { struct in_pktinfo *pki = CMSG_DATA(cmh); cmh->cmsg_level = SOL_IP; cmh->cmsg_type = IP_PKTINFO; pki->ipi_ifindex = 0; pki->ipi_spec_dst.s_addr = svc_daddr_in(rqstp)->sin_addr.s_addr; cmh->cmsg_len = CMSG_LEN(sizeof(*pki)); } break; case AF_INET6: { struct in6_pktinfo *pki = CMSG_DATA(cmh); struct sockaddr_in6 *daddr = svc_daddr_in6(rqstp); cmh->cmsg_level = SOL_IPV6; cmh->cmsg_type = IPV6_PKTINFO; pki->ipi6_ifindex = daddr->sin6_scope_id; pki->ipi6_addr = daddr->sin6_addr; cmh->cmsg_len = CMSG_LEN(sizeof(*pki)); } break; } } /* * send routine intended to be shared by the fore- and back-channel */ int svc_send_common(struct socket *sock, struct xdr_buf *xdr, struct page *headpage, unsigned long headoffset, struct page *tailpage, unsigned long tailoffset) { int result; int size; struct page **ppage = xdr->pages; size_t base = xdr->page_base; unsigned int pglen = xdr->page_len; unsigned int flags = MSG_MORE; int slen; int len = 0; slen = xdr->len; /* send head */ if (slen == xdr->head[0].iov_len) flags = 0; len = kernel_sendpage(sock, headpage, headoffset, xdr->head[0].iov_len, flags); if (len != xdr->head[0].iov_len) goto out; slen -= xdr->head[0].iov_len; if (slen == 0) goto out; /* send page data */ size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen; while (pglen > 0) { if (slen == size) flags = 0; result = kernel_sendpage(sock, *ppage, base, size, flags); if (result > 0) len += result; if (result != size) goto out; slen -= size; pglen -= size; size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen; base = 0; ppage++; } /* send tail */ if (xdr->tail[0].iov_len) { result = kernel_sendpage(sock, tailpage, tailoffset, xdr->tail[0].iov_len, 0); if (result > 0) len += result; } out: return len; } /* * Generic sendto routine */ static int svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr) { struct svc_sock *svsk = container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); struct socket *sock = svsk->sk_sock; union { struct cmsghdr hdr; long all[SVC_PKTINFO_SPACE / sizeof(long)]; } buffer; struct cmsghdr *cmh = &buffer.hdr; int len = 0; unsigned long tailoff; unsigned long headoff; RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]); if (rqstp->rq_prot == IPPROTO_UDP) { struct msghdr msg = { .msg_name = &rqstp->rq_addr, .msg_namelen = rqstp->rq_addrlen, .msg_control = cmh, .msg_controllen = sizeof(buffer), .msg_flags = MSG_MORE, }; svc_set_cmsg_data(rqstp, cmh); if (sock_sendmsg(sock, &msg, 0) < 0) goto out; } tailoff = ((unsigned long)xdr->tail[0].iov_base) & (PAGE_SIZE-1); headoff = 0; len = svc_send_common(sock, xdr, rqstp->rq_respages[0], headoff, rqstp->rq_respages[0], tailoff); out: dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %s)\n", svsk, xdr->head[0].iov_base, xdr->head[0].iov_len, xdr->len, len, svc_print_addr(rqstp, buf, sizeof(buf))); return len; } /* * Report socket names for nfsdfs */ static int svc_one_sock_name(struct svc_sock *svsk, char *buf, int remaining) { const struct sock *sk = svsk->sk_sk; const char *proto_name = sk->sk_protocol == IPPROTO_UDP ? 
"udp" : "tcp"; int len; switch (sk->sk_family) { case PF_INET: len = snprintf(buf, remaining, "ipv4 %s %pI4 %d\n", proto_name, &inet_sk(sk)->inet_rcv_saddr, inet_sk(sk)->inet_num); break; case PF_INET6: len = snprintf(buf, remaining, "ipv6 %s %pI6 %d\n", proto_name, &inet6_sk(sk)->rcv_saddr, inet_sk(sk)->inet_num); break; default: len = snprintf(buf, remaining, "*unknown-%d*\n", sk->sk_family); } if (len >= remaining) { *buf = '\0'; return -ENAMETOOLONG; } return len; } /** * svc_sock_names - construct a list of listener names in a string * @serv: pointer to RPC service * @buf: pointer to a buffer to fill in with socket names * @buflen: size of the buffer to be filled * @toclose: pointer to '\0'-terminated C string containing the name * of a listener to be closed * * Fills in @buf with a '\n'-separated list of names of listener * sockets. If @toclose is not NULL, the socket named by @toclose * is closed, and is not included in the output list. * * Returns positive length of the socket name string, or a negative * errno value on error. */ int svc_sock_names(struct svc_serv *serv, char *buf, const size_t buflen, const char *toclose) { struct svc_sock *svsk, *closesk = NULL; int len = 0; if (!serv) return 0; spin_lock_bh(&serv->sv_lock); list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list) { int onelen = svc_one_sock_name(svsk, buf + len, buflen - len); if (onelen < 0) { len = onelen; break; } if (toclose && strcmp(toclose, buf + len) == 0) { closesk = svsk; svc_xprt_get(&closesk->sk_xprt); } else len += onelen; } spin_unlock_bh(&serv->sv_lock); if (closesk) { /* Should unregister with portmap, but you cannot * unregister just one protocol... */ svc_close_xprt(&closesk->sk_xprt); svc_xprt_put(&closesk->sk_xprt); } else if (toclose) return -ENOENT; return len; } EXPORT_SYMBOL_GPL(svc_sock_names); /* * Check input queue length */ static int svc_recv_available(struct svc_sock *svsk) { struct socket *sock = svsk->sk_sock; int avail, err; err = kernel_sock_ioctl(sock, TIOCINQ, (unsigned long) &avail); return (err >= 0)? avail : err; } /* * Generic recvfrom routine. */ static int svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen) { struct svc_sock *svsk = container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); struct msghdr msg = { .msg_flags = MSG_DONTWAIT, }; int len; rqstp->rq_xprt_hlen = 0; len = kernel_recvmsg(svsk->sk_sock, &msg, iov, nr, buflen, msg.msg_flags); dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n", svsk, iov[0].iov_base, iov[0].iov_len, len); return len; } static int svc_partial_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen, unsigned int base) { size_t save_iovlen; void *save_iovbase; unsigned int i; int ret; if (base == 0) return svc_recvfrom(rqstp, iov, nr, buflen); for (i = 0; i < nr; i++) { if (iov[i].iov_len > base) break; base -= iov[i].iov_len; } save_iovlen = iov[i].iov_len; save_iovbase = iov[i].iov_base; iov[i].iov_len -= base; iov[i].iov_base += base; ret = svc_recvfrom(rqstp, &iov[i], nr - i, buflen); iov[i].iov_len = save_iovlen; iov[i].iov_base = save_iovbase; return ret; } /* * Set socket snd and rcv buffer lengths */ static void svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv) { #if 0 mm_segment_t oldfs; oldfs = get_fs(); set_fs(KERNEL_DS); sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF, (char*)&snd, sizeof(snd)); sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF, (char*)&rcv, sizeof(rcv)); #else /* sock_setsockopt limits use to sysctl_?mem_max, * which isn't acceptable. 
Until that is made conditional * on not having CAP_SYS_RESOURCE or similar, we go direct... * DaveM said I could! */ lock_sock(sock->sk); sock->sk->sk_sndbuf = snd * 2; sock->sk->sk_rcvbuf = rcv * 2; sock->sk->sk_write_space(sock->sk); release_sock(sock->sk); #endif } /* * INET callback when data has been received on the socket. */ static void svc_udp_data_ready(struct sock *sk, int count) { struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; wait_queue_head_t *wq = sk_sleep(sk); if (svsk) { dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n", svsk, sk, count, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)); set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); svc_xprt_enqueue(&svsk->sk_xprt); } if (wq && waitqueue_active(wq)) wake_up_interruptible(wq); } /* * INET callback when space is newly available on the socket. */ static void svc_write_space(struct sock *sk) { struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data); wait_queue_head_t *wq = sk_sleep(sk); if (svsk) { dprintk("svc: socket %p(inet %p), write_space busy=%d\n", svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)); svc_xprt_enqueue(&svsk->sk_xprt); } if (wq && waitqueue_active(wq)) { dprintk("RPC svc_write_space: someone sleeping on %p\n", svsk); wake_up_interruptible(wq); } } static void svc_tcp_write_space(struct sock *sk) { struct socket *sock = sk->sk_socket; if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) clear_bit(SOCK_NOSPACE, &sock->flags); svc_write_space(sk); } /* * See net/ipv6/ip_sockglue.c : ip_cmsg_recv_pktinfo */ static int svc_udp_get_dest_address4(struct svc_rqst *rqstp, struct cmsghdr *cmh) { struct in_pktinfo *pki = CMSG_DATA(cmh); struct sockaddr_in *daddr = svc_daddr_in(rqstp); if (cmh->cmsg_type != IP_PKTINFO) return 0; daddr->sin_family = AF_INET; daddr->sin_addr.s_addr = pki->ipi_spec_dst.s_addr; return 1; } /* * See net/ipv6/datagram.c : datagram_recv_ctl */ static int svc_udp_get_dest_address6(struct svc_rqst *rqstp, struct cmsghdr *cmh) { struct in6_pktinfo *pki = CMSG_DATA(cmh); struct sockaddr_in6 *daddr = svc_daddr_in6(rqstp); if (cmh->cmsg_type != IPV6_PKTINFO) return 0; daddr->sin6_family = AF_INET6; daddr->sin6_addr = pki->ipi6_addr; daddr->sin6_scope_id = pki->ipi6_ifindex; return 1; } /* * Copy the UDP datagram's destination address to the rqstp structure. * The 'destination' address in this case is the address to which the * peer sent the datagram, i.e. our local address. For multihomed * hosts, this can change from msg to msg. Note that only the IP * address changes, the port number should remain the same. */ static int svc_udp_get_dest_address(struct svc_rqst *rqstp, struct cmsghdr *cmh) { switch (cmh->cmsg_level) { case SOL_IP: return svc_udp_get_dest_address4(rqstp, cmh); case SOL_IPV6: return svc_udp_get_dest_address6(rqstp, cmh); } return 0; } /* * Receive a datagram from a UDP socket. */ static int svc_udp_recvfrom(struct svc_rqst *rqstp) { struct svc_sock *svsk = container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); struct svc_serv *serv = svsk->sk_xprt.xpt_server; struct sk_buff *skb; union { struct cmsghdr hdr; long all[SVC_PKTINFO_SPACE / sizeof(long)]; } buffer; struct cmsghdr *cmh = &buffer.hdr; struct msghdr msg = { .msg_name = svc_addr(rqstp), .msg_control = cmh, .msg_controllen = sizeof(buffer), .msg_flags = MSG_DONTWAIT, }; size_t len; int err; if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags)) /* udp sockets need large rcvbuf as all pending * requests are still in that buffer. 
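 * (Editor's note: a single UDP socket is shared by every client, with no
 * per-connection backpressure, so the socket buffer is the only queue.)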
sndbuf must * also be large enough that there is enough space * for one reply per thread. We count all threads * rather than threads in a particular pool, which * provides an upper bound on the number of threads * which will access the socket. */ svc_sock_setbufsize(svsk->sk_sock, (serv->sv_nrthreads+3) * serv->sv_max_mesg, (serv->sv_nrthreads+3) * serv->sv_max_mesg); clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); skb = NULL; err = kernel_recvmsg(svsk->sk_sock, &msg, NULL, 0, 0, MSG_PEEK | MSG_DONTWAIT); if (err >= 0) skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err); if (skb == NULL) { if (err != -EAGAIN) { /* possibly an icmp error */ dprintk("svc: recvfrom returned error %d\n", -err); set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); } return -EAGAIN; } len = svc_addr_len(svc_addr(rqstp)); if (len == 0) return -EAFNOSUPPORT; rqstp->rq_addrlen = len; if (skb->tstamp.tv64 == 0) { skb->tstamp = ktime_get_real(); /* Don't enable netstamp, sunrpc doesn't need that much accuracy */ } svsk->sk_sk->sk_stamp = skb->tstamp; set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */ len = skb->len - sizeof(struct udphdr); rqstp->rq_arg.len = len; rqstp->rq_prot = IPPROTO_UDP; if (!svc_udp_get_dest_address(rqstp, cmh)) { if (net_ratelimit()) printk(KERN_WARNING "svc: received unknown control message %d/%d; " "dropping RPC reply datagram\n", cmh->cmsg_level, cmh->cmsg_type); skb_free_datagram_locked(svsk->sk_sk, skb); return 0; } rqstp->rq_daddrlen = svc_addr_len(svc_daddr(rqstp)); if (skb_is_nonlinear(skb)) { /* we have to copy */ local_bh_disable(); if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) { local_bh_enable(); /* checksum error */ skb_free_datagram_locked(svsk->sk_sk, skb); return 0; } local_bh_enable(); skb_free_datagram_locked(svsk->sk_sk, skb); } else { /* we can use it in-place */ rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr); rqstp->rq_arg.head[0].iov_len = len; if (skb_checksum_complete(skb)) { skb_free_datagram_locked(svsk->sk_sk, skb); return 0; } rqstp->rq_xprt_ctxt = skb; } rqstp->rq_arg.page_base = 0; if (len <= rqstp->rq_arg.head[0].iov_len) { rqstp->rq_arg.head[0].iov_len = len; rqstp->rq_arg.page_len = 0; rqstp->rq_respages = rqstp->rq_pages+1; } else { rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len; rqstp->rq_respages = rqstp->rq_pages + 1 + DIV_ROUND_UP(rqstp->rq_arg.page_len, PAGE_SIZE); } if (serv->sv_stats) serv->sv_stats->netudpcnt++; return len; } static int svc_udp_sendto(struct svc_rqst *rqstp) { int error; error = svc_sendto(rqstp, &rqstp->rq_res); if (error == -ECONNREFUSED) /* ICMP error on earlier request. */ error = svc_sendto(rqstp, &rqstp->rq_res); return error; } static void svc_udp_prep_reply_hdr(struct svc_rqst *rqstp) { } static int svc_udp_has_wspace(struct svc_xprt *xprt) { struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); struct svc_serv *serv = xprt->xpt_server; unsigned long required; /* * Set the SOCK_NOSPACE flag before checking the available * sock space. 
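 * (Editor's note: the flag is set before the test so a concurrent
 * write-space callback cannot fire in the gap and be missed; if there
 * turns out to be room after all, the flag is simply cleared again.)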
*/ set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg; if (required*2 > sock_wspace(svsk->sk_sk)) return 0; clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); return 1; } static struct svc_xprt *svc_udp_accept(struct svc_xprt *xprt) { BUG(); return NULL; } static struct svc_xprt *svc_udp_create(struct svc_serv *serv, struct net *net, struct sockaddr *sa, int salen, int flags) { return svc_create_socket(serv, IPPROTO_UDP, net, sa, salen, flags); } static struct svc_xprt_ops svc_udp_ops = { .xpo_create = svc_udp_create, .xpo_recvfrom = svc_udp_recvfrom, .xpo_sendto = svc_udp_sendto, .xpo_release_rqst = svc_release_skb, .xpo_detach = svc_sock_detach, .xpo_free = svc_sock_free, .xpo_prep_reply_hdr = svc_udp_prep_reply_hdr, .xpo_has_wspace = svc_udp_has_wspace, .xpo_accept = svc_udp_accept, }; static struct svc_xprt_class svc_udp_class = { .xcl_name = "udp", .xcl_owner = THIS_MODULE, .xcl_ops = &svc_udp_ops, .xcl_max_payload = RPCSVC_MAXPAYLOAD_UDP, }; static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv) { int err, level, optname, one = 1; svc_xprt_init(sock_net(svsk->sk_sock->sk), &svc_udp_class, &svsk->sk_xprt, serv); clear_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags); svsk->sk_sk->sk_data_ready = svc_udp_data_ready; svsk->sk_sk->sk_write_space = svc_write_space; /* initialise setting must have enough space to * receive and respond to one request. * svc_udp_recvfrom will re-adjust if necessary */ svc_sock_setbufsize(svsk->sk_sock, 3 * svsk->sk_xprt.xpt_server->sv_max_mesg, 3 * svsk->sk_xprt.xpt_server->sv_max_mesg); /* data might have come in before data_ready set up */ set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags); /* make sure we get destination address info */ switch (svsk->sk_sk->sk_family) { case AF_INET: level = SOL_IP; optname = IP_PKTINFO; break; case AF_INET6: level = SOL_IPV6; optname = IPV6_RECVPKTINFO; break; default: BUG(); } err = kernel_setsockopt(svsk->sk_sock, level, optname, (char *)&one, sizeof(one)); dprintk("svc: kernel_setsockopt returned %d\n", err); } /* * A data_ready event on a listening socket means there's a connection * pending. Do not use state_change as a substitute for it. */ static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused) { struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; wait_queue_head_t *wq; dprintk("svc: socket %p TCP (listen) state change %d\n", sk, sk->sk_state); /* * This callback may called twice when a new connection * is established as a child socket inherits everything * from a parent LISTEN socket. * 1) data_ready method of the parent socket will be called * when one of child sockets become ESTABLISHED. * 2) data_ready method of the child socket may be called * when it receives data before the socket is accepted. * In case of 2, we should ignore it silently. */ if (sk->sk_state == TCP_LISTEN) { if (svsk) { set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags); svc_xprt_enqueue(&svsk->sk_xprt); } else printk("svc: socket %p: no user data\n", sk); } wq = sk_sleep(sk); if (wq && waitqueue_active(wq)) wake_up_interruptible_all(wq); } /* * A state change on a connected socket means it's dying or dead. 
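 * (Editor's note: either way, the handler below only marks XPT_CLOSE and
 * enqueues the transport -- the actual teardown is left to a server
 * thread, since this runs in the socket's callback context.)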
*/ static void svc_tcp_state_change(struct sock *sk) { struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; wait_queue_head_t *wq = sk_sleep(sk); dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n", sk, sk->sk_state, sk->sk_user_data); if (!svsk) printk("svc: socket %p: no user data\n", sk); else { set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); svc_xprt_enqueue(&svsk->sk_xprt); } if (wq && waitqueue_active(wq)) wake_up_interruptible_all(wq); } static void svc_tcp_data_ready(struct sock *sk, int count) { struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; wait_queue_head_t *wq = sk_sleep(sk); dprintk("svc: socket %p TCP data ready (svsk %p)\n", sk, sk->sk_user_data); if (svsk) { set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); svc_xprt_enqueue(&svsk->sk_xprt); } if (wq && waitqueue_active(wq)) wake_up_interruptible(wq); } /* * Accept a TCP connection */ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt) { struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); struct sockaddr_storage addr; struct sockaddr *sin = (struct sockaddr *) &addr; struct svc_serv *serv = svsk->sk_xprt.xpt_server; struct socket *sock = svsk->sk_sock; struct socket *newsock; struct svc_sock *newsvsk; int err, slen; RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]); dprintk("svc: tcp_accept %p sock %p\n", svsk, sock); if (!sock) return NULL; clear_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags); err = kernel_accept(sock, &newsock, O_NONBLOCK); if (err < 0) { if (err == -ENOMEM) printk(KERN_WARNING "%s: no more sockets!\n", serv->sv_name); else if (err != -EAGAIN && net_ratelimit()) printk(KERN_WARNING "%s: accept failed (err %d)!\n", serv->sv_name, -err); return NULL; } set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags); err = kernel_getpeername(newsock, sin, &slen); if (err < 0) { if (net_ratelimit()) printk(KERN_WARNING "%s: peername failed (err %d)!\n", serv->sv_name, -err); goto failed; /* aborted connection or whatever */ } /* Ideally, we would want to reject connections from unauthorized * hosts here, but when we get encryption, the IP of the host won't * tell us anything. For now just warn about unpriv connections. 
*/ if (!svc_port_is_privileged(sin)) { dprintk(KERN_WARNING "%s: connect from unprivileged port: %s\n", serv->sv_name, __svc_print_addr(sin, buf, sizeof(buf))); } dprintk("%s: connect from %s\n", serv->sv_name, __svc_print_addr(sin, buf, sizeof(buf))); /* make sure that a write doesn't block forever when * low on memory */ newsock->sk->sk_sndtimeo = HZ*30; if (!(newsvsk = svc_setup_socket(serv, newsock, &err, (SVC_SOCK_ANONYMOUS | SVC_SOCK_TEMPORARY)))) goto failed; svc_xprt_set_remote(&newsvsk->sk_xprt, sin, slen); err = kernel_getsockname(newsock, sin, &slen); if (unlikely(err < 0)) { dprintk("svc_tcp_accept: kernel_getsockname error %d\n", -err); slen = offsetof(struct sockaddr, sa_data); } svc_xprt_set_local(&newsvsk->sk_xprt, sin, slen); if (serv->sv_stats) serv->sv_stats->nettcpconn++; return &newsvsk->sk_xprt; failed: sock_release(newsock); return NULL; } static unsigned int svc_tcp_restore_pages(struct svc_sock *svsk, struct svc_rqst *rqstp) { unsigned int i, len, npages; if (svsk->sk_tcplen <= sizeof(rpc_fraghdr)) return 0; len = svsk->sk_tcplen - sizeof(rpc_fraghdr); npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; for (i = 0; i < npages; i++) { if (rqstp->rq_pages[i] != NULL) put_page(rqstp->rq_pages[i]); BUG_ON(svsk->sk_pages[i] == NULL); rqstp->rq_pages[i] = svsk->sk_pages[i]; svsk->sk_pages[i] = NULL; } rqstp->rq_arg.head[0].iov_base = page_address(rqstp->rq_pages[0]); return len; } static void svc_tcp_save_pages(struct svc_sock *svsk, struct svc_rqst *rqstp) { unsigned int i, len, npages; if (svsk->sk_tcplen <= sizeof(rpc_fraghdr)) return; len = svsk->sk_tcplen - sizeof(rpc_fraghdr); npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; for (i = 0; i < npages; i++) { svsk->sk_pages[i] = rqstp->rq_pages[i]; rqstp->rq_pages[i] = NULL; } } static void svc_tcp_clear_pages(struct svc_sock *svsk) { unsigned int i, len, npages; if (svsk->sk_tcplen <= sizeof(rpc_fraghdr)) goto out; len = svsk->sk_tcplen - sizeof(rpc_fraghdr); npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; for (i = 0; i < npages; i++) { BUG_ON(svsk->sk_pages[i] == NULL); put_page(svsk->sk_pages[i]); svsk->sk_pages[i] = NULL; } out: svsk->sk_tcplen = 0; } /* * Receive data. * If we haven't gotten the record length yet, get the next four bytes. * Otherwise try to gobble up as much as possible up to the complete * record length. */ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp) { struct svc_serv *serv = svsk->sk_xprt.xpt_server; unsigned int want; int len; clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); if (svsk->sk_tcplen < sizeof(rpc_fraghdr)) { struct kvec iov; want = sizeof(rpc_fraghdr) - svsk->sk_tcplen; iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen; iov.iov_len = want; if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0) goto error; svsk->sk_tcplen += len; if (len < want) { dprintk("svc: short recvfrom while reading record " "length (%d of %d)\n", len, want); return -EAGAIN; } svsk->sk_reclen = ntohl(svsk->sk_reclen); if (!(svsk->sk_reclen & RPC_LAST_STREAM_FRAGMENT)) { /* FIXME: technically, a record can be fragmented, * and non-terminal fragments will not have the top * bit set in the fragment length header. * But apparently no known nfs clients send fragmented * records. 
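 * (Editor's note: the test keys off RPC_LAST_STREAM_FRAGMENT, the top bit
 * of the 4-byte record marker; a record whose marker lacks it would need
 * reassembly, so it is rejected below instead.)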
*/ if (net_ratelimit()) printk(KERN_NOTICE "RPC: multiple fragments " "per record not supported\n"); goto err_delete; } svsk->sk_reclen &= RPC_FRAGMENT_SIZE_MASK; dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen); if (svsk->sk_reclen > serv->sv_max_mesg) { if (net_ratelimit()) printk(KERN_NOTICE "RPC: " "fragment too large: 0x%08lx\n", (unsigned long)svsk->sk_reclen); goto err_delete; } } if (svsk->sk_reclen < 8) goto err_delete; /* client is nuts. */ len = svsk->sk_reclen; return len; error: dprintk("RPC: TCP recv_record got %d\n", len); return len; err_delete: set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); return -EAGAIN; } static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp) { struct rpc_xprt *bc_xprt = svsk->sk_xprt.xpt_bc_xprt; struct rpc_rqst *req = NULL; struct kvec *src, *dst; __be32 *p = (__be32 *)rqstp->rq_arg.head[0].iov_base; __be32 xid; __be32 calldir; xid = *p++; calldir = *p; if (bc_xprt) req = xprt_lookup_rqst(bc_xprt, xid); if (!req) { printk(KERN_NOTICE "%s: Got unrecognized reply: " "calldir 0x%x xpt_bc_xprt %p xid %08x\n", __func__, ntohl(calldir), bc_xprt, xid); return -EAGAIN; } memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf)); /* * XXX!: cheating for now! Only copying HEAD. * But we know this is good enough for now (in fact, for any * callback reply in the forseeable future). */ dst = &req->rq_private_buf.head[0]; src = &rqstp->rq_arg.head[0]; if (dst->iov_len < src->iov_len) return -EAGAIN; /* whatever; just giving up. */ memcpy(dst->iov_base, src->iov_base, src->iov_len); xprt_complete_rqst(req->rq_task, svsk->sk_reclen); rqstp->rq_arg.len = 0; return 0; } static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len) { int i = 0; int t = 0; while (t < len) { vec[i].iov_base = page_address(pages[i]); vec[i].iov_len = PAGE_SIZE; i++; t += PAGE_SIZE; } return i; } /* * Receive data from a TCP socket. 
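 * (Editor's note: the flow is svc_tcp_recv_record() for the 4-byte
 * marker, svc_tcp_restore_pages() to resume any partial record, then
 * svc_partial_recvfrom(); an incomplete record is parked again with
 * svc_tcp_save_pages() and retried on the next data-ready event.)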
*/ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) { struct svc_sock *svsk = container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); struct svc_serv *serv = svsk->sk_xprt.xpt_server; int len; struct kvec *vec; unsigned int want, base; __be32 *p; __be32 calldir; int pnum; dprintk("svc: tcp_recv %p data %d conn %d close %d\n", svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags), test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags), test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags)); len = svc_tcp_recv_record(svsk, rqstp); if (len < 0) goto error; base = svc_tcp_restore_pages(svsk, rqstp); want = svsk->sk_reclen - base; vec = rqstp->rq_vec; pnum = copy_pages_to_kvecs(&vec[0], &rqstp->rq_pages[0], svsk->sk_reclen); rqstp->rq_respages = &rqstp->rq_pages[pnum]; /* Now receive data */ len = svc_partial_recvfrom(rqstp, vec, pnum, want, base); if (len >= 0) svsk->sk_tcplen += len; if (len != want) { svc_tcp_save_pages(svsk, rqstp); if (len < 0 && len != -EAGAIN) goto err_other; dprintk("svc: incomplete TCP record (%d of %d)\n", svsk->sk_tcplen, svsk->sk_reclen); goto err_noclose; } rqstp->rq_arg.len = svsk->sk_reclen; rqstp->rq_arg.page_base = 0; if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) { rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len; rqstp->rq_arg.page_len = 0; } else rqstp->rq_arg.page_len = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len; rqstp->rq_xprt_ctxt = NULL; rqstp->rq_prot = IPPROTO_TCP; p = (__be32 *)rqstp->rq_arg.head[0].iov_base; calldir = p[1]; if (calldir) len = receive_cb_reply(svsk, rqstp); /* Reset TCP read info */ svsk->sk_reclen = 0; svsk->sk_tcplen = 0; /* If we have more data, signal svc_xprt_enqueue() to try again */ if (svc_recv_available(svsk) > sizeof(rpc_fraghdr)) set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); if (len < 0) goto error; svc_xprt_copy_addrs(rqstp, &svsk->sk_xprt); if (serv->sv_stats) serv->sv_stats->nettcpcnt++; dprintk("svc: TCP complete record (%d bytes)\n", rqstp->rq_arg.len); return rqstp->rq_arg.len; error: if (len != -EAGAIN) goto err_other; dprintk("RPC: TCP recvfrom got EAGAIN\n"); return -EAGAIN; err_other: printk(KERN_NOTICE "%s: recvfrom returned errno %d\n", svsk->sk_xprt.xpt_server->sv_name, -len); set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); err_noclose: return -EAGAIN; /* record not complete */ } /* * Send out data on TCP socket. */ static int svc_tcp_sendto(struct svc_rqst *rqstp) { struct xdr_buf *xbufp = &rqstp->rq_res; int sent; __be32 reclen; /* Set up the first element of the reply kvec. * Any other kvecs that may be in use have been taken * care of by the server implementation itself. */ reclen = htonl(0x80000000|((xbufp->len ) - 4)); memcpy(xbufp->head[0].iov_base, &reclen, 4); sent = svc_sendto(rqstp, &rqstp->rq_res); if (sent != xbufp->len) { printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes " "- shutting down socket\n", rqstp->rq_xprt->xpt_server->sv_name, (sent<0)?"got error":"sent only", sent, xbufp->len); set_bit(XPT_CLOSE, &rqstp->rq_xprt->xpt_flags); svc_xprt_enqueue(rqstp->rq_xprt); sent = -EAGAIN; } return sent; } /* * Setup response header. TCP has a 4B record length field. */ static void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp) { struct kvec *resv = &rqstp->rq_res.head[0]; /* tcp needs a space for the record length... 
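 * (Editor's note: svc_putnl() below just reserves a zeroed placeholder;
 * svc_tcp_sendto() above overwrites it with 0x80000000 | (len - 4),
 * i.e. the last-fragment bit plus the record length.)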
*/ svc_putnl(resv, 0); } static int svc_tcp_has_wspace(struct svc_xprt *xprt) { struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); struct svc_serv *serv = svsk->sk_xprt.xpt_server; int required; if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) return 1; required = atomic_read(&xprt->xpt_reserved) + serv->sv_max_mesg; if (sk_stream_wspace(svsk->sk_sk) >= required) return 1; set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); return 0; } static struct svc_xprt *svc_tcp_create(struct svc_serv *serv, struct net *net, struct sockaddr *sa, int salen, int flags) { return svc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags); } #if defined(CONFIG_SUNRPC_BACKCHANNEL) static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int, struct net *, struct sockaddr *, int, int); static void svc_bc_sock_free(struct svc_xprt *xprt); static struct svc_xprt *svc_bc_tcp_create(struct svc_serv *serv, struct net *net, struct sockaddr *sa, int salen, int flags) { return svc_bc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags); } static void svc_bc_tcp_sock_detach(struct svc_xprt *xprt) { } static struct svc_xprt_ops svc_tcp_bc_ops = { .xpo_create = svc_bc_tcp_create, .xpo_detach = svc_bc_tcp_sock_detach, .xpo_free = svc_bc_sock_free, .xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr, }; static struct svc_xprt_class svc_tcp_bc_class = { .xcl_name = "tcp-bc", .xcl_owner = THIS_MODULE, .xcl_ops = &svc_tcp_bc_ops, .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP, }; static void svc_init_bc_xprt_sock(void) { svc_reg_xprt_class(&svc_tcp_bc_class); } static void svc_cleanup_bc_xprt_sock(void) { svc_unreg_xprt_class(&svc_tcp_bc_class); } #else /* CONFIG_SUNRPC_BACKCHANNEL */ static void svc_init_bc_xprt_sock(void) { } static void svc_cleanup_bc_xprt_sock(void) { } #endif /* CONFIG_SUNRPC_BACKCHANNEL */ static struct svc_xprt_ops svc_tcp_ops = { .xpo_create = svc_tcp_create, .xpo_recvfrom = svc_tcp_recvfrom, .xpo_sendto = svc_tcp_sendto, .xpo_release_rqst = svc_release_skb, .xpo_detach = svc_tcp_sock_detach, .xpo_free = svc_sock_free, .xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr, .xpo_has_wspace = svc_tcp_has_wspace, .xpo_accept = svc_tcp_accept, }; static struct svc_xprt_class svc_tcp_class = { .xcl_name = "tcp", .xcl_owner = THIS_MODULE, .xcl_ops = &svc_tcp_ops, .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP, }; void svc_init_xprt_sock(void) { svc_reg_xprt_class(&svc_tcp_class); svc_reg_xprt_class(&svc_udp_class); svc_init_bc_xprt_sock(); } void svc_cleanup_xprt_sock(void) { svc_unreg_xprt_class(&svc_tcp_class); svc_unreg_xprt_class(&svc_udp_class); svc_cleanup_bc_xprt_sock(); } static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv) { struct sock *sk = svsk->sk_sk; svc_xprt_init(sock_net(svsk->sk_sock->sk), &svc_tcp_class, &svsk->sk_xprt, serv); set_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags); if (sk->sk_state == TCP_LISTEN) { dprintk("setting up TCP socket for listening\n"); set_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags); sk->sk_data_ready = svc_tcp_listen_data_ready; set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags); } else { dprintk("setting up TCP socket for reading\n"); sk->sk_state_change = svc_tcp_state_change; sk->sk_data_ready = svc_tcp_data_ready; sk->sk_write_space = svc_tcp_write_space; svsk->sk_reclen = 0; svsk->sk_tcplen = 0; memset(&svsk->sk_pages[0], 0, sizeof(svsk->sk_pages)); tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF; set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); if (sk->sk_state != TCP_ESTABLISHED) set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); } } void svc_sock_update_bufs(struct 
svc_serv *serv) { /* * The number of server threads has changed. Update * rcvbuf and sndbuf accordingly on all sockets */ struct svc_sock *svsk; spin_lock_bh(&serv->sv_lock); list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list) set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags); spin_unlock_bh(&serv->sv_lock); } EXPORT_SYMBOL_GPL(svc_sock_update_bufs); /* * Initialize socket for RPC use and create svc_sock struct * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF. */ static struct svc_sock *svc_setup_socket(struct svc_serv *serv, struct socket *sock, int *errp, int flags) { struct svc_sock *svsk; struct sock *inet; int pmap_register = !(flags & SVC_SOCK_ANONYMOUS); dprintk("svc: svc_setup_socket %p\n", sock); if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) { *errp = -ENOMEM; return NULL; } inet = sock->sk; /* Register socket with portmapper */ if (*errp >= 0 && pmap_register) *errp = svc_register(serv, sock_net(sock->sk), inet->sk_family, inet->sk_protocol, ntohs(inet_sk(inet)->inet_sport)); if (*errp < 0) { kfree(svsk); return NULL; } inet->sk_user_data = svsk; svsk->sk_sock = sock; svsk->sk_sk = inet; svsk->sk_ostate = inet->sk_state_change; svsk->sk_odata = inet->sk_data_ready; svsk->sk_owspace = inet->sk_write_space; /* Initialize the socket */ if (sock->type == SOCK_DGRAM) svc_udp_init(svsk, serv); else { /* initial setting: must have enough space to receive and respond to one request. */ svc_sock_setbufsize(svsk->sk_sock, 4 * serv->sv_max_mesg, 4 * serv->sv_max_mesg); svc_tcp_init(svsk, serv); } dprintk("svc: svc_setup_socket created %p (inet %p)\n", svsk, svsk->sk_sk); return svsk; } bool svc_alien_sock(struct net *net, int fd) { int err; struct socket *sock = sockfd_lookup(fd, &err); bool ret = false; if (!sock) goto out; if (sock_net(sock->sk) != net) ret = true; sockfd_put(sock); out: return ret; } EXPORT_SYMBOL_GPL(svc_alien_sock); /** * svc_addsock - add a listener socket to an RPC service * @serv: pointer to RPC service to which to add a new listener * @fd: file descriptor of the new listener * @name_return: pointer to buffer to fill in with name of listener * @len: size of the buffer * * Fills in socket name and returns positive length of name if successful. * Name is terminated with '\n'. On error, returns a negative errno * value. */ int svc_addsock(struct svc_serv *serv, const int fd, char *name_return, const size_t len) { int err = 0; struct socket *so = sockfd_lookup(fd, &err); struct svc_sock *svsk = NULL; if (!so) return err; if ((so->sk->sk_family != PF_INET) && (so->sk->sk_family != PF_INET6)) err = -EAFNOSUPPORT; else if (so->sk->sk_protocol != IPPROTO_TCP && so->sk->sk_protocol != IPPROTO_UDP) err = -EPROTONOSUPPORT; else if (so->state > SS_UNCONNECTED) err = -EISCONN; else { if (!try_module_get(THIS_MODULE)) err = -ENOENT; else svsk = svc_setup_socket(serv, so, &err, SVC_SOCK_DEFAULTS); if (svsk) { struct sockaddr_storage addr; struct sockaddr *sin = (struct sockaddr *)&addr; int salen; if (kernel_getsockname(svsk->sk_sock, sin, &salen) == 0) svc_xprt_set_local(&svsk->sk_xprt, sin, salen); clear_bit(XPT_TEMP, &svsk->sk_xprt.xpt_flags); spin_lock_bh(&serv->sv_lock); list_add(&svsk->sk_xprt.xpt_list, &serv->sv_permsocks); spin_unlock_bh(&serv->sv_lock); svc_xprt_received(&svsk->sk_xprt); err = 0; } else module_put(THIS_MODULE); } if (err) { sockfd_put(so); return err; } return svc_one_sock_name(svsk, name_return, len); } EXPORT_SYMBOL_GPL(svc_addsock); /* * Create socket for RPC service. 
*/ static struct svc_xprt *svc_create_socket(struct svc_serv *serv, int protocol, struct net *net, struct sockaddr *sin, int len, int flags) { struct svc_sock *svsk; struct socket *sock; int error; int type; struct sockaddr_storage addr; struct sockaddr *newsin = (struct sockaddr *)&addr; int newlen; int family; int val; RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]); dprintk("svc: svc_create_socket(%s, %d, %s)\n", serv->sv_program->pg_name, protocol, __svc_print_addr(sin, buf, sizeof(buf))); if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) { printk(KERN_WARNING "svc: only UDP and TCP " "sockets supported\n"); return ERR_PTR(-EINVAL); } type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM; switch (sin->sa_family) { case AF_INET6: family = PF_INET6; break; case AF_INET: family = PF_INET; break; default: return ERR_PTR(-EINVAL); } error = __sock_create(net, family, type, protocol, &sock, 1); if (error < 0) return ERR_PTR(error); svc_reclassify_socket(sock); /* * If this is a PF_INET6 listener, we want to avoid * getting requests from IPv4 remotes. Those should * be shunted to a PF_INET listener via rpcbind. */ val = 1; if (family == PF_INET6) kernel_setsockopt(sock, SOL_IPV6, IPV6_V6ONLY, (char *)&val, sizeof(val)); if (type == SOCK_STREAM) sock->sk->sk_reuse = 1; /* allow address reuse */ error = kernel_bind(sock, sin, len); if (error < 0) goto bummer; newlen = len; error = kernel_getsockname(sock, newsin, &newlen); if (error < 0) goto bummer; if (protocol == IPPROTO_TCP) { if ((error = kernel_listen(sock, 64)) < 0) goto bummer; } if ((svsk = svc_setup_socket(serv, sock, &error, flags)) != NULL) { svc_xprt_set_local(&svsk->sk_xprt, newsin, newlen); return (struct svc_xprt *)svsk; } bummer: dprintk("svc: svc_create_socket error = %d\n", -error); sock_release(sock); return ERR_PTR(error); } /* * Detach the svc_sock from the socket so that no * more callbacks occur. */ static void svc_sock_detach(struct svc_xprt *xprt) { struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); struct sock *sk = svsk->sk_sk; wait_queue_head_t *wq; dprintk("svc: svc_sock_detach(%p)\n", svsk); /* put back the old socket callbacks */ sk->sk_state_change = svsk->sk_ostate; sk->sk_data_ready = svsk->sk_odata; sk->sk_write_space = svsk->sk_owspace; wq = sk_sleep(sk); if (wq && waitqueue_active(wq)) wake_up_interruptible(wq); } /* * Disconnect the socket, and reset the callbacks */ static void svc_tcp_sock_detach(struct svc_xprt *xprt) { struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); dprintk("svc: svc_tcp_sock_detach(%p)\n", svsk); svc_sock_detach(xprt); if (!test_bit(XPT_LISTENER, &xprt->xpt_flags)) { svc_tcp_clear_pages(svsk); kernel_sock_shutdown(svsk->sk_sock, SHUT_RDWR); } } /* * Free the svc_sock's socket resources and the svc_sock itself. */ static void svc_sock_free(struct svc_xprt *xprt) { struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); dprintk("svc: svc_sock_free(%p)\n", svsk); if (svsk->sk_sock->file) sockfd_put(svsk->sk_sock); else sock_release(svsk->sk_sock); kfree(svsk); } #if defined(CONFIG_SUNRPC_BACKCHANNEL) /* * Create a back channel svc_xprt which shares the fore channel socket. 
*/ static struct svc_xprt *svc_bc_create_socket(struct svc_serv *serv, int protocol, struct net *net, struct sockaddr *sin, int len, int flags) { struct svc_sock *svsk; struct svc_xprt *xprt; if (protocol != IPPROTO_TCP) { printk(KERN_WARNING "svc: only TCP sockets" " supported on shared back channel\n"); return ERR_PTR(-EINVAL); } svsk = kzalloc(sizeof(*svsk), GFP_KERNEL); if (!svsk) return ERR_PTR(-ENOMEM); xprt = &svsk->sk_xprt; svc_xprt_init(net, &svc_tcp_bc_class, xprt, serv); serv->sv_bc_xprt = xprt; return xprt; } /* * Free a back channel svc_sock. */ static void svc_bc_sock_free(struct svc_xprt *xprt) { if (xprt) kfree(container_of(xprt, struct svc_sock, sk_xprt)); } #endif /* CONFIG_SUNRPC_BACKCHANNEL */
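The record-marking logic above -- svc_tcp_recv_record() masking the length out of the header with RPC_FRAGMENT_SIZE_MASK, and svc_tcp_sendto() prepending htonl(0x80000000 | (len - 4)) -- is standard ONC RPC record marking over TCP (RFC 5531, section 11): each fragment is preceded by a 4-byte big-endian word whose top bit flags the last fragment and whose low 31 bits carry the fragment length. A minimal userspace sketch, with illustrative (non-kernel) function names:

/* Sketch of ONC RPC record marking over TCP (RFC 5531, s.11).
 * The constants mirror the kernel's msg_prot.h values; rpc_marker()
 * and rpc_parse_marker() are illustrative helpers, not kernel APIs. */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define RPC_LAST_STREAM_FRAGMENT 0x80000000u
#define RPC_FRAGMENT_SIZE_MASK   0x7fffffffu

/* Build the on-wire marker that precedes a fragment of frag_len bytes. */
static uint32_t rpc_marker(uint32_t frag_len, int is_last)
{
	uint32_t m = frag_len & RPC_FRAGMENT_SIZE_MASK;
	if (is_last)
		m |= RPC_LAST_STREAM_FRAGMENT;
	return htonl(m); /* markers are big endian on the wire */
}

/* Decode a marker read from the stream. */
static void rpc_parse_marker(uint32_t wire, uint32_t *frag_len, int *is_last)
{
	uint32_t m = ntohl(wire);
	*is_last = !!(m & RPC_LAST_STREAM_FRAGMENT);
	*frag_len = m & RPC_FRAGMENT_SIZE_MASK;
}

int main(void)
{
	uint32_t len; int last;
	rpc_parse_marker(rpc_marker(1432, 1), &len, &last);
	printf("len=%u last=%d\n", len, last); /* prints: len=1432 last=1 */
	return 0;
}

Note that the receive path above rejects markers without the last-fragment bit ("multiple fragments per record not supported"), so in this server every record is exactly one fragment.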
gpl-2.0
Nold360/GC-Linux-Kernel-2.6.32
arch/powerpc/sysdev/uic.c
515
8882
/* * arch/powerpc/sysdev/uic.c * * IBM PowerPC 4xx Universal Interrupt Controller * * Copyright 2007 David Gibson <dwg@au1.ibm.com>, IBM Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/reboot.h> #include <linux/slab.h> #include <linux/stddef.h> #include <linux/sched.h> #include <linux/signal.h> #include <linux/sysdev.h> #include <linux/device.h> #include <linux/bootmem.h> #include <linux/spinlock.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/dcr.h> #define NR_UIC_INTS 32 #define UIC_SR 0x0 #define UIC_ER 0x2 #define UIC_CR 0x3 #define UIC_PR 0x4 #define UIC_TR 0x5 #define UIC_MSR 0x6 #define UIC_VR 0x7 #define UIC_VCR 0x8 #define uic_irq_to_hw(virq) (irq_map[virq].hwirq) struct uic *primary_uic; struct uic { int index; int dcrbase; spinlock_t lock; /* The remapper for this UIC */ struct irq_host *irqhost; }; static void uic_unmask_irq(unsigned int virq) { struct irq_desc *desc = get_irq_desc(virq); struct uic *uic = get_irq_chip_data(virq); unsigned int src = uic_irq_to_hw(virq); unsigned long flags; u32 er, sr; sr = 1 << (31-src); spin_lock_irqsave(&uic->lock, flags); /* ack level-triggered interrupts here */ if (desc->status & IRQ_LEVEL) mtdcr(uic->dcrbase + UIC_SR, sr); er = mfdcr(uic->dcrbase + UIC_ER); er |= sr; mtdcr(uic->dcrbase + UIC_ER, er); spin_unlock_irqrestore(&uic->lock, flags); } static void uic_mask_irq(unsigned int virq) { struct uic *uic = get_irq_chip_data(virq); unsigned int src = uic_irq_to_hw(virq); unsigned long flags; u32 er; spin_lock_irqsave(&uic->lock, flags); er = mfdcr(uic->dcrbase + UIC_ER); er &= ~(1 << (31 - src)); mtdcr(uic->dcrbase + UIC_ER, er); spin_unlock_irqrestore(&uic->lock, flags); } static void uic_ack_irq(unsigned int virq) { struct uic *uic = get_irq_chip_data(virq); unsigned int src = uic_irq_to_hw(virq); unsigned long flags; spin_lock_irqsave(&uic->lock, flags); mtdcr(uic->dcrbase + UIC_SR, 1 << (31-src)); spin_unlock_irqrestore(&uic->lock, flags); } static void uic_mask_ack_irq(unsigned int virq) { struct irq_desc *desc = get_irq_desc(virq); struct uic *uic = get_irq_chip_data(virq); unsigned int src = uic_irq_to_hw(virq); unsigned long flags; u32 er, sr; sr = 1 << (31-src); spin_lock_irqsave(&uic->lock, flags); er = mfdcr(uic->dcrbase + UIC_ER); er &= ~sr; mtdcr(uic->dcrbase + UIC_ER, er); /* On the UIC, acking (i.e. clearing the SR bit) * a level irq will have no effect if the interrupt * is still asserted by the device, even if * the interrupt is already masked. 
Therefore * we only ack the edge interrupts here, while * level interrupts are ack'ed after the actual * isr call in the uic_unmask_irq() */ if (!(desc->status & IRQ_LEVEL)) mtdcr(uic->dcrbase + UIC_SR, sr); spin_unlock_irqrestore(&uic->lock, flags); } static int uic_set_irq_type(unsigned int virq, unsigned int flow_type) { struct uic *uic = get_irq_chip_data(virq); unsigned int src = uic_irq_to_hw(virq); struct irq_desc *desc = get_irq_desc(virq); unsigned long flags; int trigger, polarity; u32 tr, pr, mask; switch (flow_type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_NONE: uic_mask_irq(virq); return 0; case IRQ_TYPE_EDGE_RISING: trigger = 1; polarity = 1; break; case IRQ_TYPE_EDGE_FALLING: trigger = 1; polarity = 0; break; case IRQ_TYPE_LEVEL_HIGH: trigger = 0; polarity = 1; break; case IRQ_TYPE_LEVEL_LOW: trigger = 0; polarity = 0; break; default: return -EINVAL; } mask = ~(1 << (31 - src)); spin_lock_irqsave(&uic->lock, flags); tr = mfdcr(uic->dcrbase + UIC_TR); pr = mfdcr(uic->dcrbase + UIC_PR); tr = (tr & mask) | (trigger << (31-src)); pr = (pr & mask) | (polarity << (31-src)); mtdcr(uic->dcrbase + UIC_PR, pr); mtdcr(uic->dcrbase + UIC_TR, tr); desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); desc->status |= flow_type & IRQ_TYPE_SENSE_MASK; if (!trigger) desc->status |= IRQ_LEVEL; spin_unlock_irqrestore(&uic->lock, flags); return 0; } static struct irq_chip uic_irq_chip = { .typename = " UIC ", .unmask = uic_unmask_irq, .mask = uic_mask_irq, .mask_ack = uic_mask_ack_irq, .ack = uic_ack_irq, .set_type = uic_set_irq_type, }; static int uic_host_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw) { struct uic *uic = h->host_data; set_irq_chip_data(virq, uic); /* Despite the name, handle_level_irq() works for both level * and edge irqs on UIC. FIXME: check this is correct */ set_irq_chip_and_handler(virq, &uic_irq_chip, handle_level_irq); /* Set default irq type */ set_irq_type(virq, IRQ_TYPE_NONE); return 0; } static int uic_host_xlate(struct irq_host *h, struct device_node *ct, u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_type) { /* UIC intspecs must have 2 cells */ BUG_ON(intsize != 2); *out_hwirq = intspec[0]; *out_type = intspec[1]; return 0; } static struct irq_host_ops uic_host_ops = { .map = uic_host_map, .xlate = uic_host_xlate, }; void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) { struct uic *uic = get_irq_data(virq); u32 msr; int src; int subvirq; spin_lock(&desc->lock); if (desc->status & IRQ_LEVEL) desc->chip->mask(virq); else desc->chip->mask_ack(virq); spin_unlock(&desc->lock); msr = mfdcr(uic->dcrbase + UIC_MSR); if (!msr) /* spurious interrupt */ goto uic_irq_ret; src = 32 - ffs(msr); subvirq = irq_linear_revmap(uic->irqhost, src); generic_handle_irq(subvirq); uic_irq_ret: spin_lock(&desc->lock); if (desc->status & IRQ_LEVEL) desc->chip->ack(virq); if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) desc->chip->unmask(virq); spin_unlock(&desc->lock); } static struct uic * __init uic_init_one(struct device_node *node) { struct uic *uic; const u32 *indexp, *dcrreg; int len; BUG_ON(! of_device_is_compatible(node, "ibm,uic")); uic = kzalloc(sizeof(*uic), GFP_KERNEL); if (! uic) return NULL; /* FIXME: panic? 
*/ spin_lock_init(&uic->lock); indexp = of_get_property(node, "cell-index", &len); if (!indexp || (len != sizeof(u32))) { printk(KERN_ERR "uic: Device node %s has missing or invalid " "cell-index property\n", node->full_name); return NULL; } uic->index = *indexp; dcrreg = of_get_property(node, "dcr-reg", &len); if (!dcrreg || (len != 2*sizeof(u32))) { printk(KERN_ERR "uic: Device node %s has missing or invalid " "dcr-reg property\n", node->full_name); return NULL; } uic->dcrbase = *dcrreg; uic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, NR_UIC_INTS, &uic_host_ops, -1); if (! uic->irqhost) return NULL; /* FIXME: panic? */ uic->irqhost->host_data = uic; /* Start with all interrupts disabled, level and non-critical */ mtdcr(uic->dcrbase + UIC_ER, 0); mtdcr(uic->dcrbase + UIC_CR, 0); mtdcr(uic->dcrbase + UIC_TR, 0); /* Clear any pending interrupts, in case the firmware left some */ mtdcr(uic->dcrbase + UIC_SR, 0xffffffff); printk ("UIC%d (%d IRQ sources) at DCR 0x%x\n", uic->index, NR_UIC_INTS, uic->dcrbase); return uic; } void __init uic_init_tree(void) { struct device_node *np; struct uic *uic; const u32 *interrupts; /* First locate and initialize the top-level UIC */ for_each_compatible_node(np, NULL, "ibm,uic") { interrupts = of_get_property(np, "interrupts", NULL); if (!interrupts) break; } BUG_ON(!np); /* uic_init_tree() assumes there's a UIC as the * top-level interrupt controller */ primary_uic = uic_init_one(np); if (!primary_uic) panic("Unable to initialize primary UIC %s\n", np->full_name); irq_set_default_host(primary_uic->irqhost); of_node_put(np); /* Then scan again for cascaded UICs */ for_each_compatible_node(np, NULL, "ibm,uic") { interrupts = of_get_property(np, "interrupts", NULL); if (interrupts) { /* Secondary UIC */ int cascade_virq; uic = uic_init_one(np); if (! uic) panic("Unable to initialize a secondary UIC %s\n", np->full_name); cascade_virq = irq_of_parse_and_map(np, 0); set_irq_data(cascade_virq, uic); set_irq_chained_handler(cascade_virq, uic_irq_cascade); /* FIXME: setup critical cascade?? */ } } } /* Return an interrupt vector or NO_IRQ if no interrupt is pending. */ unsigned int uic_get_irq(void) { u32 msr; int src; BUG_ON(! primary_uic); msr = mfdcr(primary_uic->dcrbase + UIC_MSR); src = 32 - ffs(msr); return irq_linear_revmap(primary_uic->irqhost, src); }
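The 1 << (31 - src) masks and the src = 32 - ffs(msr) decode used throughout this file reflect the IBM register convention in which bit 0 is the most significant bit of the 32-bit word. A standalone sketch of both directions of that conversion (uic_bit and uic_src are illustrative names, not driver functions):

/* Sketch of IBM big-endian bit numbering (bit 0 = MSB), as used by the
 * UIC register accesses above.  Hosted C; ffs() comes from <strings.h>. */
#include <stdio.h>
#include <strings.h>

/* Mask selecting IBM-numbered bit 'src' of a 32-bit register. */
static unsigned int uic_bit(int src)
{
	return 1u << (31 - src);
}

/* Recover the source number from a single-bit status word, mirroring
 * 'src = 32 - ffs(msr)' in uic_irq_cascade(); undefined for msr == 0,
 * which the driver screens out as a spurious interrupt. */
static int uic_src(unsigned int msr)
{
	return 32 - ffs(msr);
}

int main(void)
{
	unsigned int msr = uic_bit(5); /* pretend IRQ source 5 is pending */
	printf("mask=0x%08x src=%d\n", msr, uic_src(msr)); /* mask=0x04000000 src=5 */
	return 0;
}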
gpl-2.0
friedrich420/N3-AEL-Kernel-Lollipop-Ed.
drivers/media/tdmb/fc8050/fc8050_tun.c
771
6857
/***************************************************************************** Copyright(c) 2009 FCI Inc. All Rights Reserved File name : fc8050_tun.c Description : fc8050 host interface This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA History : ---------------------------------------------------------------------- 2009/09/14 jason initial 2009/11/26 MPW Config1p0 2010/02/24 SLR Config1p0 2010/05/04 SLR Config1p2 *******************************************************************************/ #include "fci_types.h" #include "fci_oal.h" #include "fci_tun.h" #include "fci_hal.h" #include "fc8050_regs.h" #if !defined(CONFIG_TDMB_EBI) #define FEATURE_TUNER_BURST_MODE #endif static int fc8050_write(HANDLE hDevice, u8 addr, u8 data) { int res; u8 tmp; tmp = data; res = tuner_i2c_write(hDevice, addr, 1, &tmp, 1); return res; } static int fc8050_read(HANDLE hDevice, u8 addr, u8 *data) { int res; res = tuner_i2c_read(hDevice, addr, 1, data, 1); return res; } static int fc8050_set_filter(HANDLE hDevice) { int i; u8 cal_mon = 0; #if (FC8050_FREQ_XTAL == 19200) fc8050_write(hDevice, 0x02, 0x86); fc8050_write(hDevice, 0x3B, 0x52); fc8050_write(hDevice, 0x32, 0x09); #elif (FC8050_FREQ_XTAL == 16384) fc8050_write(hDevice, 0x02, 0x86); fc8050_write(hDevice, 0x3B, 0x45); fc8050_write(hDevice, 0x32, 0x09); #elif (FC8050_FREQ_XTAL == 24576) fc8050_write(hDevice, 0x02, 0x86); fc8050_write(hDevice, 0x3B, 0x68); fc8050_write(hDevice, 0x32, 0x09); #elif (FC8050_FREQ_XTAL == 27000) fc8050_write(hDevice, 0x02, 0x86); fc8050_write(hDevice, 0x3B, 0x71); fc8050_write(hDevice, 0x32, 0x09); #elif (FC8050_FREQ_XTAL == 27120) fc8050_write(hDevice, 0x02, 0x86); fc8050_write(hDevice, 0x3B, 0x74); fc8050_write(hDevice, 0x32, 0x09); #elif (FC8050_FREQ_XTAL == 38400) fc8050_write(hDevice, 0x02, 0x86); fc8050_write(hDevice, 0x3B, 0xA1); fc8050_write(hDevice, 0x32, 0x09); #else return BBM_NOK; #endif for (i = 0; i < 10; i++) { ms_wait(5); fc8050_read(hDevice, 0x33, &cal_mon); if ((cal_mon & 0xC0) == 0xC0) break; fc8050_write(hDevice, 0x32, 0x01); fc8050_write(hDevice, 0x32, 0x09); } fc8050_write(hDevice, 0x32, 0x01); return BBM_OK; } static int fc8050_lband_init(HANDLE hDevice) { print_log(hDevice, "fc8050_lband_init\n"); return BBM_NOK; } static int fc8050_band3_init(HANDLE hDevice) { print_log(hDevice, "fc8050_band3_init\n"); fc8050_write(hDevice, 0x00, 0x00); fc8050_write(hDevice, 0x00, 0x00); fc8050_write(hDevice, 0x02, 0x86); fc8050_write(hDevice, 0x05, 0xD8); fc8050_write(hDevice, 0x0A, 0x83); fc8050_write(hDevice, 0x16, 0x0d); fc8050_write(hDevice, 0x13, 0x88); fc8050_write(hDevice, 0x15, 0x00); fc8050_write(hDevice, 0x21, 0x73); fc8050_write(hDevice, 0x57, 0x40); fc8050_write(hDevice, 0x69, 0x8C); fc8050_write(hDevice, 0x51, 0x04); fc8050_write(hDevice, 0x53, 0x00); fc8050_write(hDevice, 0x54, 0x28); fc8050_write(hDevice, 0x45, 0x40); fc8050_write(hDevice, 0x46, 0x32); fc8050_write(hDevice, 0x48, 0x40); 
fc8050_write(hDevice, 0x49, 0x32); fc8050_write(hDevice, 0x7A, 0x88); fc8050_write(hDevice, 0x53, 0x01); fc8050_write(hDevice, 0x58, 0x34); fc8050_write(hDevice, 0x59, 0x2A); fc8050_write(hDevice, 0x5A, 0x1D); fc8050_write(hDevice, 0x5B, 0x14); fc8050_write(hDevice, 0x61, 0x64); fc8050_write(hDevice, 0x74, 0x3A); fc8050_write(hDevice, 0x75, 0x1E); fc8050_write(hDevice, 0x6A, 0x0C); fc8050_write(hDevice, 0x6C, 0x0C); fc8050_write(hDevice, 0x6E, 0x0C); fc8050_write(hDevice, 0x70, 0x0C); fc8050_write(hDevice, 0x72, 0x0C); fc8050_write(hDevice, 0x7C, 0x0C); fc8050_write(hDevice, 0x4E, 0x26); fc8050_write(hDevice, 0x31, 0x13); fc8050_write(hDevice, 0x34, 0x53); fc8050_write(hDevice, 0x43, 0x20); fc8050_write(hDevice, 0x2e, 0x70); fc8050_set_filter(hDevice); return BBM_OK; } int fc8050_tuner_init(HANDLE hDevice, u32 band) { int res = BBM_NOK; bbm_write(hDevice, BBM_QDD_COMMAN, 0x5C); bbm_write(hDevice, BBM_QDD_AGC_STEP, 0x03); bbm_write(hDevice, BBM_QDD_TUN_COMMA, 0x40); bbm_write(hDevice, BBM_QDD_TUN_GAIN, 0x24); bbm_write(hDevice, BBM_QDD_AGC_PERIOD, 0x14); bbm_write(hDevice, BBM_QDD_TRAGET_RMS, 0x60); bbm_write(hDevice, BBM_QDD_TUN_GAIN_LOC, 0x44); bbm_write(hDevice, BBM_QDD_GAIN_MAX, 0x38); if (band == LBAND_TYPE) res = fc8050_lband_init(hDevice); else if (band == BAND3_TYPE) res = fc8050_band3_init(hDevice); else return BBM_NOK; return res; } int fc8050_set_freq(HANDLE hDevice, u32 band, u32 f_lo) { u32 f_diff, f_diff_shifted, n_val, k_val; u32 f_vco = f_lo * 12; u32 r_val = (f_vco >= 25 * FC8050_FREQ_XTAL) ? 1 : 2; u32 f_comp = FC8050_FREQ_XTAL/r_val; u8 pre_shift_bits = 4; u8 data_0x0E; fc8050_write(hDevice, 0x0a, 0x85); fc8050_write(hDevice, 0x16, 0x0d); n_val = f_vco / f_comp; f_diff = f_vco - f_comp * n_val; f_diff_shifted = f_diff << (20 - pre_shift_bits); /* round to nearest: add half an LSB of the divisor before dividing */ k_val = (f_diff_shifted + (f_comp >> (pre_shift_bits+1))) / (f_comp >> pre_shift_bits); data_0x0E = ((r_val == 1) ? 0x40 : 0x50) + (unsigned char)(k_val >> 16); fc8050_write(hDevice, 0x0E, data_0x0E); fc8050_write(hDevice, 0x0F, (unsigned char)(k_val >> 8)); fc8050_write(hDevice, 0x10, (unsigned char)(k_val)); fc8050_write(hDevice, 0x11, (unsigned char)(n_val)); fc8050_write(hDevice, 0x0a, 0x83); return BBM_OK; } int fc8050_get_rssi(HANDLE hDevice, int *rssi) { int res = BBM_OK; u8 LNA, RFVGA, PREAMP_PGA, CSF = 0x00; int K = -66; #ifdef FEATURE_TUNER_BURST_MODE u32 burst_data; res = tuner_i2c_read(hDevice, 0x76, 1, (unsigned char *)&burst_data, 4); LNA = burst_data&0x000000ff; RFVGA = (burst_data&0x0000ff00)>>8; CSF = (burst_data&0x00ff0000)>>16; PREAMP_PGA = (burst_data&0xff000000)>>24; #else res = fc8050_read(hDevice, 0x76, &LNA); res |= fc8050_read(hDevice, 0x77, &RFVGA); res |= fc8050_read(hDevice, 0x78, &CSF); res |= fc8050_read(hDevice, 0x79, &PREAMP_PGA); #endif if (res != BBM_OK) return res; *rssi = (((LNA & 0x07) * 5) + (RFVGA * 7 / 10) + ((PREAMP_PGA >> 7) * 6) + ((CSF & 0x7) * 6) - ((PREAMP_PGA & 0x7F) >> 1) + K); return BBM_OK; }
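fc8050_set_freq() above is a fractional-N PLL computation: the VCO runs at 12x the requested LO frequency, the comparison frequency is the crystal divided by a reference divider r, and the divide ratio splits into an integer part n plus a 20-bit fraction k, rounded to nearest by adding half an LSB of the divisor before dividing (the 4-bit pre-shift keeps the intermediate product within 32 bits). A hedged standalone sketch of the same arithmetic, with illustrative names and frequencies in kHz:

/* Sketch of the fractional-N words computed by fc8050_set_freq().
 * fc8050_pll_words() is an illustrative helper, not a driver function;
 * 19200 matches the FC8050_FREQ_XTAL == 19200 build option above. */
#include <stdint.h>
#include <stdio.h>

static void fc8050_pll_words(uint32_t f_lo, uint32_t f_xtal,
			     uint32_t *n, uint32_t *k, uint32_t *r)
{
	const unsigned int pre_shift = 4;    /* guards against 32-bit overflow */
	uint32_t f_vco = f_lo * 12;          /* VCO at 12x the wanted LO */
	uint32_t f_comp, f_diff;

	*r = (f_vco >= 25 * f_xtal) ? 1 : 2; /* reference divider */
	f_comp = f_xtal / *r;

	*n = f_vco / f_comp;                 /* integer part of the ratio */
	f_diff = f_vco - f_comp * *n;
	/* 20-bit fraction, rounded: k = round((f_diff / f_comp) * 2^20) */
	*k = ((f_diff << (20 - pre_shift)) + (f_comp >> (pre_shift + 1)))
	     / (f_comp >> pre_shift);
}

int main(void)
{
	uint32_t n, k, r;
	fc8050_pll_words(225648, 19200, &n, &k, &r); /* DAB 12B, 225.648 MHz */
	printf("r=%u n=%u k=0x%05x\n", r, n, k);     /* r=1 n=141 k=0x07ae1 */
	return 0;
}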
gpl-2.0
reposte/android_kernel_xiaomi_msm8992
drivers/usb/serial/siemens_mpi.c
2307
1241
/* * Siemens USB-MPI Serial USB driver * * Copyright (C) 2005 Thomas Hergenhahn <thomas.hergenhahn@suse.de> * Copyright (C) 2005,2008 Greg Kroah-Hartman <gregkh@suse.de> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/tty.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/usb/serial.h> #define DRIVER_AUTHOR "Thomas Hergenhahn@web.de http://libnodave.sf.net" #define DRIVER_DESC "Driver for Siemens USB/MPI adapter" static const struct usb_device_id id_table[] = { /* Vendor and product id for 6ES7-972-0CB20-0XA0 */ { USB_DEVICE(0x908, 0x0004) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); static struct usb_serial_driver siemens_usb_mpi_device = { .driver = { .owner = THIS_MODULE, .name = "siemens_mpi", }, .id_table = id_table, .num_ports = 1, }; static struct usb_serial_driver * const serial_drivers[] = { &siemens_usb_mpi_device, NULL }; module_usb_serial_driver(serial_drivers, id_table); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
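For context, the USB_DEVICE(0x908, 0x0004) entry in the id_table above expands to a struct usb_device_id that matches on vendor and product ID only. A sketch of the equivalent explicit initializer (illustrative; the driver itself should keep using the macro):

/* What USB_DEVICE(0x908, 0x0004) amounts to: a vendor+product match.
 * USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT is the pair
 * the macro sets via USB_DEVICE_ID_MATCH_DEVICE. */
#include <linux/usb.h>

static const struct usb_device_id example_id = {
	.match_flags = USB_DEVICE_ID_MATCH_VENDOR |
		       USB_DEVICE_ID_MATCH_PRODUCT,
	.idVendor    = 0x0908, /* Siemens */
	.idProduct   = 0x0004, /* 6ES7-972-0CB20-0XA0 MPI adapter */
};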
gpl-2.0
bigzz/shamu_flar2
drivers/bluetooth/hci_bcsp.c
2563
18830
/* * * Bluetooth HCI UART driver * * Copyright (C) 2002-2003 Fabrizio Gennari <fabrizio.gennari@philips.com> * Copyright (C) 2004-2005 Marcel Holtmann <marcel@holtmann.org> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ptrace.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/signal.h> #include <linux/ioctl.h> #include <linux/skbuff.h> #include <linux/bitrev.h> #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "hci_uart.h" #define VERSION "0.3" static bool txcrc = 1; static bool hciextn = 1; #define BCSP_TXWINSIZE 4 #define BCSP_ACK_PKT 0x05 #define BCSP_LE_PKT 0x06 struct bcsp_struct { struct sk_buff_head unack; /* Unack'ed packets queue */ struct sk_buff_head rel; /* Reliable packets queue */ struct sk_buff_head unrel; /* Unreliable packets queue */ unsigned long rx_count; struct sk_buff *rx_skb; u8 rxseq_txack; /* rxseq == txack. */ u8 rxack; /* Last packet sent by us that the peer ack'ed */ struct timer_list tbcsp; enum { BCSP_W4_PKT_DELIMITER, BCSP_W4_PKT_START, BCSP_W4_BCSP_HDR, BCSP_W4_DATA, BCSP_W4_CRC } rx_state; enum { BCSP_ESCSTATE_NOESC, BCSP_ESCSTATE_ESC } rx_esc_state; u8 use_crc; u16 message_crc; u8 txack_req; /* Do we need to send ack's to the peer? */ /* Reliable packet sequence number - used to assign seq to each rel pkt. */ u8 msgq_txseq; }; /* ---- BCSP CRC calculation ---- */ /* Table for calculating CRC for polynomial 0x1021, LSB processed first, initial value 0xffff, bits shifted in reverse order. */ static const u16 crc_table[] = { 0x0000, 0x1081, 0x2102, 0x3183, 0x4204, 0x5285, 0x6306, 0x7387, 0x8408, 0x9489, 0xa50a, 0xb58b, 0xc60c, 0xd68d, 0xe70e, 0xf78f }; /* Initialise the crc calculator */ #define BCSP_CRC_INIT(x) x = 0xffff /* Update crc with next data byte Implementation note The data byte is treated as two nibbles. The crc is generated in reverse, i.e., bits are fed into the register from the top. 
*/ static void bcsp_crc_update(u16 *crc, u8 d) { u16 reg = *crc; reg = (reg >> 4) ^ crc_table[(reg ^ d) & 0x000f]; reg = (reg >> 4) ^ crc_table[(reg ^ (d >> 4)) & 0x000f]; *crc = reg; } /* ---- BCSP core ---- */ static void bcsp_slip_msgdelim(struct sk_buff *skb) { const char pkt_delim = 0xc0; memcpy(skb_put(skb, 1), &pkt_delim, 1); } static void bcsp_slip_one_byte(struct sk_buff *skb, u8 c) { const char esc_c0[2] = { 0xdb, 0xdc }; const char esc_db[2] = { 0xdb, 0xdd }; switch (c) { case 0xc0: memcpy(skb_put(skb, 2), &esc_c0, 2); break; case 0xdb: memcpy(skb_put(skb, 2), &esc_db, 2); break; default: memcpy(skb_put(skb, 1), &c, 1); } } static int bcsp_enqueue(struct hci_uart *hu, struct sk_buff *skb) { struct bcsp_struct *bcsp = hu->priv; if (skb->len > 0xFFF) { BT_ERR("Packet too long"); kfree_skb(skb); return 0; } switch (bt_cb(skb)->pkt_type) { case HCI_ACLDATA_PKT: case HCI_COMMAND_PKT: skb_queue_tail(&bcsp->rel, skb); break; case HCI_SCODATA_PKT: skb_queue_tail(&bcsp->unrel, skb); break; default: BT_ERR("Unknown packet type"); kfree_skb(skb); break; } return 0; } static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data, int len, int pkt_type) { struct sk_buff *nskb; u8 hdr[4], chan; u16 BCSP_CRC_INIT(bcsp_txmsg_crc); int rel, i; switch (pkt_type) { case HCI_ACLDATA_PKT: chan = 6; /* BCSP ACL channel */ rel = 1; /* reliable channel */ break; case HCI_COMMAND_PKT: chan = 5; /* BCSP cmd/evt channel */ rel = 1; /* reliable channel */ break; case HCI_SCODATA_PKT: chan = 7; /* BCSP SCO channel */ rel = 0; /* unreliable channel */ break; case BCSP_LE_PKT: chan = 1; /* BCSP LE channel */ rel = 0; /* unreliable channel */ break; case BCSP_ACK_PKT: chan = 0; /* BCSP internal channel */ rel = 0; /* unreliable channel */ break; default: BT_ERR("Unknown packet type"); return NULL; } if (hciextn && chan == 5) { __le16 opcode = ((struct hci_command_hdr *)data)->opcode; /* Vendor specific commands */ if (hci_opcode_ogf(__le16_to_cpu(opcode)) == 0x3f) { u8 desc = *(data + HCI_COMMAND_HDR_SIZE); if ((desc & 0xf0) == 0xc0) { data += HCI_COMMAND_HDR_SIZE + 1; len -= HCI_COMMAND_HDR_SIZE + 1; chan = desc & 0x0f; } } } /* Max len of packet: (original len +4(bcsp hdr) +2(crc))*2 (because bytes 0xc0 and 0xdb are escaped, worst case is when the packet is all made of 0xc0 and 0xdb :) ) + 2 (0xc0 delimiters at start and end). 
*/ nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC); if (!nskb) return NULL; bt_cb(nskb)->pkt_type = pkt_type; bcsp_slip_msgdelim(nskb); hdr[0] = bcsp->rxseq_txack << 3; bcsp->txack_req = 0; BT_DBG("We request packet no %u to card", bcsp->rxseq_txack); if (rel) { hdr[0] |= 0x80 + bcsp->msgq_txseq; BT_DBG("Sending packet with seqno %u", bcsp->msgq_txseq); bcsp->msgq_txseq = (bcsp->msgq_txseq + 1) & 0x07; } if (bcsp->use_crc) hdr[0] |= 0x40; hdr[1] = ((len << 4) & 0xff) | chan; hdr[2] = len >> 4; hdr[3] = ~(hdr[0] + hdr[1] + hdr[2]); /* Put BCSP header */ for (i = 0; i < 4; i++) { bcsp_slip_one_byte(nskb, hdr[i]); if (bcsp->use_crc) bcsp_crc_update(&bcsp_txmsg_crc, hdr[i]); } /* Put payload */ for (i = 0; i < len; i++) { bcsp_slip_one_byte(nskb, data[i]); if (bcsp->use_crc) bcsp_crc_update(&bcsp_txmsg_crc, data[i]); } /* Put CRC */ if (bcsp->use_crc) { bcsp_txmsg_crc = bitrev16(bcsp_txmsg_crc); bcsp_slip_one_byte(nskb, (u8) ((bcsp_txmsg_crc >> 8) & 0x00ff)); bcsp_slip_one_byte(nskb, (u8) (bcsp_txmsg_crc & 0x00ff)); } bcsp_slip_msgdelim(nskb); return nskb; } /* This is a rewrite of pkt_avail in ABCSP */ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu) { struct bcsp_struct *bcsp = hu->priv; unsigned long flags; struct sk_buff *skb; /* First of all, check for unreliable messages in the queue, since they have priority */ if ((skb = skb_dequeue(&bcsp->unrel)) != NULL) { struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type); if (nskb) { kfree_skb(skb); return nskb; } else { skb_queue_head(&bcsp->unrel, skb); BT_ERR("Could not dequeue pkt because alloc_skb failed"); } } /* Now, try to send a reliable pkt. We can only send a reliable packet if the number of packets sent but not yet ack'ed is less than the winsize */ spin_lock_irqsave_nested(&bcsp->unack.lock, flags, SINGLE_DEPTH_NESTING); if (bcsp->unack.qlen < BCSP_TXWINSIZE && (skb = skb_dequeue(&bcsp->rel)) != NULL) { struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type); if (nskb) { __skb_queue_tail(&bcsp->unack, skb); mod_timer(&bcsp->tbcsp, jiffies + HZ / 4); spin_unlock_irqrestore(&bcsp->unack.lock, flags); return nskb; } else { skb_queue_head(&bcsp->rel, skb); BT_ERR("Could not dequeue pkt because alloc_skb failed"); } } spin_unlock_irqrestore(&bcsp->unack.lock, flags); /* We could not send a reliable packet, either because there are none or because there are too many unack'ed pkts. Did we receive any packets we have not acknowledged yet? 
*/ if (bcsp->txack_req) { /* if so, craft an empty ACK pkt and send it on BCSP unreliable channel 0 */ struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, NULL, 0, BCSP_ACK_PKT); return nskb; } /* We have nothing to send */ return NULL; } static int bcsp_flush(struct hci_uart *hu) { BT_DBG("hu %p", hu); return 0; } /* Remove ack'ed packets */ static void bcsp_pkt_cull(struct bcsp_struct *bcsp) { struct sk_buff *skb, *tmp; unsigned long flags; int i, pkts_to_be_removed; u8 seqno; spin_lock_irqsave(&bcsp->unack.lock, flags); pkts_to_be_removed = skb_queue_len(&bcsp->unack); seqno = bcsp->msgq_txseq; while (pkts_to_be_removed) { if (bcsp->rxack == seqno) break; pkts_to_be_removed--; seqno = (seqno - 1) & 0x07; } if (bcsp->rxack != seqno) BT_ERR("Peer acked invalid packet"); BT_DBG("Removing %u pkts out of %u, up to seqno %u", pkts_to_be_removed, skb_queue_len(&bcsp->unack), (seqno - 1) & 0x07); i = 0; skb_queue_walk_safe(&bcsp->unack, skb, tmp) { if (i >= pkts_to_be_removed) break; i++; __skb_unlink(skb, &bcsp->unack); kfree_skb(skb); } if (skb_queue_empty(&bcsp->unack)) del_timer(&bcsp->tbcsp); spin_unlock_irqrestore(&bcsp->unack.lock, flags); if (i != pkts_to_be_removed) BT_ERR("Removed only %u out of %u pkts", i, pkts_to_be_removed); } /* Handle BCSP link-establishment packets. When we detect a "sync" packet, a symptom that the BT module has reset, we do nothing :) (yet) */ static void bcsp_handle_le_pkt(struct hci_uart *hu) { struct bcsp_struct *bcsp = hu->priv; u8 conf_pkt[4] = { 0xad, 0xef, 0xac, 0xed }; u8 conf_rsp_pkt[4] = { 0xde, 0xad, 0xd0, 0xd0 }; u8 sync_pkt[4] = { 0xda, 0xdc, 0xed, 0xed }; /* spot "conf" pkts and reply with a "conf rsp" pkt */ if (bcsp->rx_skb->data[1] >> 4 == 4 && bcsp->rx_skb->data[2] == 0 && !memcmp(&bcsp->rx_skb->data[4], conf_pkt, 4)) { struct sk_buff *nskb = alloc_skb(4, GFP_ATOMIC); BT_DBG("Found a LE conf pkt"); if (!nskb) return; memcpy(skb_put(nskb, 4), conf_rsp_pkt, 4); bt_cb(nskb)->pkt_type = BCSP_LE_PKT; skb_queue_head(&bcsp->unrel, nskb); hci_uart_tx_wakeup(hu); } /* Spot "sync" pkts. If we find one...disaster! 
*/ else if (bcsp->rx_skb->data[1] >> 4 == 4 && bcsp->rx_skb->data[2] == 0 && !memcmp(&bcsp->rx_skb->data[4], sync_pkt, 4)) { BT_ERR("Found a LE sync pkt, card has reset"); } } static inline void bcsp_unslip_one_byte(struct bcsp_struct *bcsp, unsigned char byte) { const u8 c0 = 0xc0, db = 0xdb; switch (bcsp->rx_esc_state) { case BCSP_ESCSTATE_NOESC: switch (byte) { case 0xdb: bcsp->rx_esc_state = BCSP_ESCSTATE_ESC; break; default: memcpy(skb_put(bcsp->rx_skb, 1), &byte, 1); if ((bcsp->rx_skb-> data[0] & 0x40) != 0 && bcsp->rx_state != BCSP_W4_CRC) bcsp_crc_update(&bcsp->message_crc, byte); bcsp->rx_count--; } break; case BCSP_ESCSTATE_ESC: switch (byte) { case 0xdc: memcpy(skb_put(bcsp->rx_skb, 1), &c0, 1); if ((bcsp->rx_skb-> data[0] & 0x40) != 0 && bcsp->rx_state != BCSP_W4_CRC) bcsp_crc_update(&bcsp-> message_crc, 0xc0); bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC; bcsp->rx_count--; break; case 0xdd: memcpy(skb_put(bcsp->rx_skb, 1), &db, 1); if ((bcsp->rx_skb-> data[0] & 0x40) != 0 && bcsp->rx_state != BCSP_W4_CRC) bcsp_crc_update(&bcsp-> message_crc, 0xdb); bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC; bcsp->rx_count--; break; default: BT_ERR ("Invalid byte %02x after esc byte", byte); kfree_skb(bcsp->rx_skb); bcsp->rx_skb = NULL; bcsp->rx_state = BCSP_W4_PKT_DELIMITER; bcsp->rx_count = 0; } } } static void bcsp_complete_rx_pkt(struct hci_uart *hu) { struct bcsp_struct *bcsp = hu->priv; int pass_up; if (bcsp->rx_skb->data[0] & 0x80) { /* reliable pkt */ BT_DBG("Received seqno %u from card", bcsp->rxseq_txack); bcsp->rxseq_txack++; bcsp->rxseq_txack %= 0x8; bcsp->txack_req = 1; /* If needed, transmit an ack pkt */ hci_uart_tx_wakeup(hu); } bcsp->rxack = (bcsp->rx_skb->data[0] >> 3) & 0x07; BT_DBG("Request for pkt %u from card", bcsp->rxack); bcsp_pkt_cull(bcsp); if ((bcsp->rx_skb->data[1] & 0x0f) == 6 && bcsp->rx_skb->data[0] & 0x80) { bt_cb(bcsp->rx_skb)->pkt_type = HCI_ACLDATA_PKT; pass_up = 1; } else if ((bcsp->rx_skb->data[1] & 0x0f) == 5 && bcsp->rx_skb->data[0] & 0x80) { bt_cb(bcsp->rx_skb)->pkt_type = HCI_EVENT_PKT; pass_up = 1; } else if ((bcsp->rx_skb->data[1] & 0x0f) == 7) { bt_cb(bcsp->rx_skb)->pkt_type = HCI_SCODATA_PKT; pass_up = 1; } else if ((bcsp->rx_skb->data[1] & 0x0f) == 1 && !(bcsp->rx_skb->data[0] & 0x80)) { bcsp_handle_le_pkt(hu); pass_up = 0; } else pass_up = 0; if (!pass_up) { struct hci_event_hdr hdr; u8 desc = (bcsp->rx_skb->data[1] & 0x0f); if (desc != 0 && desc != 1) { if (hciextn) { desc |= 0xc0; skb_pull(bcsp->rx_skb, 4); memcpy(skb_push(bcsp->rx_skb, 1), &desc, 1); hdr.evt = 0xff; hdr.plen = bcsp->rx_skb->len; memcpy(skb_push(bcsp->rx_skb, HCI_EVENT_HDR_SIZE), &hdr, HCI_EVENT_HDR_SIZE); bt_cb(bcsp->rx_skb)->pkt_type = HCI_EVENT_PKT; hci_recv_frame(bcsp->rx_skb); } else { BT_ERR ("Packet for unknown channel (%u %s)", bcsp->rx_skb->data[1] & 0x0f, bcsp->rx_skb->data[0] & 0x80 ? 
"reliable" : "unreliable"); kfree_skb(bcsp->rx_skb); } } else kfree_skb(bcsp->rx_skb); } else { /* Pull out BCSP hdr */ skb_pull(bcsp->rx_skb, 4); hci_recv_frame(bcsp->rx_skb); } bcsp->rx_state = BCSP_W4_PKT_DELIMITER; bcsp->rx_skb = NULL; } static u16 bscp_get_crc(struct bcsp_struct *bcsp) { return get_unaligned_be16(&bcsp->rx_skb->data[bcsp->rx_skb->len - 2]); } /* Recv data */ static int bcsp_recv(struct hci_uart *hu, void *data, int count) { struct bcsp_struct *bcsp = hu->priv; unsigned char *ptr; BT_DBG("hu %p count %d rx_state %d rx_count %ld", hu, count, bcsp->rx_state, bcsp->rx_count); ptr = data; while (count) { if (bcsp->rx_count) { if (*ptr == 0xc0) { BT_ERR("Short BCSP packet"); kfree_skb(bcsp->rx_skb); bcsp->rx_state = BCSP_W4_PKT_START; bcsp->rx_count = 0; } else bcsp_unslip_one_byte(bcsp, *ptr); ptr++; count--; continue; } switch (bcsp->rx_state) { case BCSP_W4_BCSP_HDR: if ((0xff & (u8) ~ (bcsp->rx_skb->data[0] + bcsp->rx_skb->data[1] + bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) { BT_ERR("Error in BCSP hdr checksum"); kfree_skb(bcsp->rx_skb); bcsp->rx_state = BCSP_W4_PKT_DELIMITER; bcsp->rx_count = 0; continue; } if (bcsp->rx_skb->data[0] & 0x80 /* reliable pkt */ && (bcsp->rx_skb->data[0] & 0x07) != bcsp->rxseq_txack) { BT_ERR ("Out-of-order packet arrived, got %u expected %u", bcsp->rx_skb->data[0] & 0x07, bcsp->rxseq_txack); kfree_skb(bcsp->rx_skb); bcsp->rx_state = BCSP_W4_PKT_DELIMITER; bcsp->rx_count = 0; continue; } bcsp->rx_state = BCSP_W4_DATA; bcsp->rx_count = (bcsp->rx_skb->data[1] >> 4) + (bcsp->rx_skb->data[2] << 4); /* May be 0 */ continue; case BCSP_W4_DATA: if (bcsp->rx_skb->data[0] & 0x40) { /* pkt with crc */ bcsp->rx_state = BCSP_W4_CRC; bcsp->rx_count = 2; } else bcsp_complete_rx_pkt(hu); continue; case BCSP_W4_CRC: if (bitrev16(bcsp->message_crc) != bscp_get_crc(bcsp)) { BT_ERR ("Checksum failed: computed %04x received %04x", bitrev16(bcsp->message_crc), bscp_get_crc(bcsp)); kfree_skb(bcsp->rx_skb); bcsp->rx_state = BCSP_W4_PKT_DELIMITER; bcsp->rx_count = 0; continue; } skb_trim(bcsp->rx_skb, bcsp->rx_skb->len - 2); bcsp_complete_rx_pkt(hu); continue; case BCSP_W4_PKT_DELIMITER: switch (*ptr) { case 0xc0: bcsp->rx_state = BCSP_W4_PKT_START; break; default: /*BT_ERR("Ignoring byte %02x", *ptr);*/ break; } ptr++; count--; break; case BCSP_W4_PKT_START: switch (*ptr) { case 0xc0: ptr++; count--; break; default: bcsp->rx_state = BCSP_W4_BCSP_HDR; bcsp->rx_count = 4; bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC; BCSP_CRC_INIT(bcsp->message_crc); /* Do not increment ptr or decrement count * Allocate packet. Max len of a BCSP pkt= * 0xFFF (payload) +4 (header) +2 (crc) */ bcsp->rx_skb = bt_skb_alloc(0x1005, GFP_ATOMIC); if (!bcsp->rx_skb) { BT_ERR("Can't allocate mem for new packet"); bcsp->rx_state = BCSP_W4_PKT_DELIMITER; bcsp->rx_count = 0; return 0; } bcsp->rx_skb->dev = (void *) hu->hdev; break; } break; } } return count; } /* Arrange to retransmit all messages in the relq. 
*/ static void bcsp_timed_event(unsigned long arg) { struct hci_uart *hu = (struct hci_uart *) arg; struct bcsp_struct *bcsp = hu->priv; struct sk_buff *skb; unsigned long flags; BT_DBG("hu %p retransmitting %u pkts", hu, bcsp->unack.qlen); spin_lock_irqsave_nested(&bcsp->unack.lock, flags, SINGLE_DEPTH_NESTING); while ((skb = __skb_dequeue_tail(&bcsp->unack)) != NULL) { bcsp->msgq_txseq = (bcsp->msgq_txseq - 1) & 0x07; skb_queue_head(&bcsp->rel, skb); } spin_unlock_irqrestore(&bcsp->unack.lock, flags); hci_uart_tx_wakeup(hu); } static int bcsp_open(struct hci_uart *hu) { struct bcsp_struct *bcsp; BT_DBG("hu %p", hu); bcsp = kzalloc(sizeof(*bcsp), GFP_KERNEL); if (!bcsp) return -ENOMEM; hu->priv = bcsp; skb_queue_head_init(&bcsp->unack); skb_queue_head_init(&bcsp->rel); skb_queue_head_init(&bcsp->unrel); init_timer(&bcsp->tbcsp); bcsp->tbcsp.function = bcsp_timed_event; bcsp->tbcsp.data = (u_long) hu; bcsp->rx_state = BCSP_W4_PKT_DELIMITER; if (txcrc) bcsp->use_crc = 1; return 0; } static int bcsp_close(struct hci_uart *hu) { struct bcsp_struct *bcsp = hu->priv; hu->priv = NULL; BT_DBG("hu %p", hu); skb_queue_purge(&bcsp->unack); skb_queue_purge(&bcsp->rel); skb_queue_purge(&bcsp->unrel); del_timer(&bcsp->tbcsp); kfree(bcsp); return 0; } static struct hci_uart_proto bcsp = { .id = HCI_UART_BCSP, .open = bcsp_open, .close = bcsp_close, .enqueue = bcsp_enqueue, .dequeue = bcsp_dequeue, .recv = bcsp_recv, .flush = bcsp_flush }; int __init bcsp_init(void) { int err = hci_uart_register_proto(&bcsp); if (!err) BT_INFO("HCI BCSP protocol initialized"); else BT_ERR("HCI BCSP protocol registration failed"); return err; } int __exit bcsp_deinit(void) { return hci_uart_unregister_proto(&bcsp); } module_param(txcrc, bool, 0644); MODULE_PARM_DESC(txcrc, "Transmit CRC with every BCSP packet"); module_param(hciextn, bool, 0644); MODULE_PARM_DESC(hciextn, "Convert HCI Extensions into BCSP packets");
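Two details of the driver above are easier to see in isolation: the SLIP byte-stuffing (0xc0 and 0xdb are sent as the pairs 0xdb 0xdc and 0xdb 0xdd, which is why bcsp_prepare_pkt() sizes its skb at (len + 6) * 2 + 2), and the reflected CRC that is passed through bitrev16() before being transmitted or compared. A standalone userspace sketch of the CRC path, reusing the nibble table from the driver (the local bitrev16() stands in for the kernel helper):

/* Sketch of the BCSP CRC: polynomial 0x1021 processed LSB-first,
 * initial value 0xffff, final value bit-reversed for the wire. */
#include <stdint.h>
#include <stdio.h>

static const uint16_t crc_table[16] = {
	0x0000, 0x1081, 0x2102, 0x3183, 0x4204, 0x5285, 0x6306, 0x7387,
	0x8408, 0x9489, 0xa50a, 0xb58b, 0xc60c, 0xd68d, 0xe70e, 0xf78f
};

/* One byte, two nibbles, exactly as bcsp_crc_update() does above. */
static void crc_update(uint16_t *crc, uint8_t d)
{
	uint16_t reg = *crc;
	reg = (reg >> 4) ^ crc_table[(reg ^ d) & 0x000f];
	reg = (reg >> 4) ^ crc_table[(reg ^ (d >> 4)) & 0x000f];
	*crc = reg;
}

/* Stand-in for the kernel's bitrev16(). */
static uint16_t bitrev16(uint16_t x)
{
	uint16_t r = 0;
	for (int i = 0; i < 16; i++)
		r = (uint16_t)((r << 1) | ((x >> i) & 1));
	return r;
}

int main(void)
{
	const uint8_t hdr[4] = { 0x40, 0x41, 0x00, 0x7e }; /* sample bytes */
	uint16_t crc = 0xffff; /* BCSP_CRC_INIT */
	for (unsigned int i = 0; i < sizeof(hdr); i++)
		crc_update(&crc, hdr[i]);
	printf("crc=0x%04x on-wire=0x%04x\n", crc, bitrev16(crc));
	return 0;
}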
gpl-2.0
zoobab/vzkernel
drivers/bluetooth/hci_bcsp.c
2563
18830
/* * * Bluetooth HCI UART driver * * Copyright (C) 2002-2003 Fabrizio Gennari <fabrizio.gennari@philips.com> * Copyright (C) 2004-2005 Marcel Holtmann <marcel@holtmann.org> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ptrace.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/signal.h> #include <linux/ioctl.h> #include <linux/skbuff.h> #include <linux/bitrev.h> #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "hci_uart.h" #define VERSION "0.3" static bool txcrc = 1; static bool hciextn = 1; #define BCSP_TXWINSIZE 4 #define BCSP_ACK_PKT 0x05 #define BCSP_LE_PKT 0x06 struct bcsp_struct { struct sk_buff_head unack; /* Unack'ed packets queue */ struct sk_buff_head rel; /* Reliable packets queue */ struct sk_buff_head unrel; /* Unreliable packets queue */ unsigned long rx_count; struct sk_buff *rx_skb; u8 rxseq_txack; /* rxseq == txack. */ u8 rxack; /* Last packet sent by us that the peer ack'ed */ struct timer_list tbcsp; enum { BCSP_W4_PKT_DELIMITER, BCSP_W4_PKT_START, BCSP_W4_BCSP_HDR, BCSP_W4_DATA, BCSP_W4_CRC } rx_state; enum { BCSP_ESCSTATE_NOESC, BCSP_ESCSTATE_ESC } rx_esc_state; u8 use_crc; u16 message_crc; u8 txack_req; /* Do we need to send ack's to the peer? */ /* Reliable packet sequence number - used to assign seq to each rel pkt. */ u8 msgq_txseq; }; /* ---- BCSP CRC calculation ---- */ /* Table for calculating CRC for polynomial 0x1021, LSB processed first, initial value 0xffff, bits shifted in reverse order. */ static const u16 crc_table[] = { 0x0000, 0x1081, 0x2102, 0x3183, 0x4204, 0x5285, 0x6306, 0x7387, 0x8408, 0x9489, 0xa50a, 0xb58b, 0xc60c, 0xd68d, 0xe70e, 0xf78f }; /* Initialise the crc calculator */ #define BCSP_CRC_INIT(x) x = 0xffff /* Update crc with next data byte Implementation note The data byte is treated as two nibbles. The crc is generated in reverse, i.e., bits are fed into the register from the top. 
*/ static void bcsp_crc_update(u16 *crc, u8 d) { u16 reg = *crc; reg = (reg >> 4) ^ crc_table[(reg ^ d) & 0x000f]; reg = (reg >> 4) ^ crc_table[(reg ^ (d >> 4)) & 0x000f]; *crc = reg; } /* ---- BCSP core ---- */ static void bcsp_slip_msgdelim(struct sk_buff *skb) { const char pkt_delim = 0xc0; memcpy(skb_put(skb, 1), &pkt_delim, 1); } static void bcsp_slip_one_byte(struct sk_buff *skb, u8 c) { const char esc_c0[2] = { 0xdb, 0xdc }; const char esc_db[2] = { 0xdb, 0xdd }; switch (c) { case 0xc0: memcpy(skb_put(skb, 2), &esc_c0, 2); break; case 0xdb: memcpy(skb_put(skb, 2), &esc_db, 2); break; default: memcpy(skb_put(skb, 1), &c, 1); } } static int bcsp_enqueue(struct hci_uart *hu, struct sk_buff *skb) { struct bcsp_struct *bcsp = hu->priv; if (skb->len > 0xFFF) { BT_ERR("Packet too long"); kfree_skb(skb); return 0; } switch (bt_cb(skb)->pkt_type) { case HCI_ACLDATA_PKT: case HCI_COMMAND_PKT: skb_queue_tail(&bcsp->rel, skb); break; case HCI_SCODATA_PKT: skb_queue_tail(&bcsp->unrel, skb); break; default: BT_ERR("Unknown packet type"); kfree_skb(skb); break; } return 0; } static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data, int len, int pkt_type) { struct sk_buff *nskb; u8 hdr[4], chan; u16 BCSP_CRC_INIT(bcsp_txmsg_crc); int rel, i; switch (pkt_type) { case HCI_ACLDATA_PKT: chan = 6; /* BCSP ACL channel */ rel = 1; /* reliable channel */ break; case HCI_COMMAND_PKT: chan = 5; /* BCSP cmd/evt channel */ rel = 1; /* reliable channel */ break; case HCI_SCODATA_PKT: chan = 7; /* BCSP SCO channel */ rel = 0; /* unreliable channel */ break; case BCSP_LE_PKT: chan = 1; /* BCSP LE channel */ rel = 0; /* unreliable channel */ break; case BCSP_ACK_PKT: chan = 0; /* BCSP internal channel */ rel = 0; /* unreliable channel */ break; default: BT_ERR("Unknown packet type"); return NULL; } if (hciextn && chan == 5) { __le16 opcode = ((struct hci_command_hdr *)data)->opcode; /* Vendor specific commands */ if (hci_opcode_ogf(__le16_to_cpu(opcode)) == 0x3f) { u8 desc = *(data + HCI_COMMAND_HDR_SIZE); if ((desc & 0xf0) == 0xc0) { data += HCI_COMMAND_HDR_SIZE + 1; len -= HCI_COMMAND_HDR_SIZE + 1; chan = desc & 0x0f; } } } /* Max len of packet: (original len +4(bcsp hdr) +2(crc))*2 (because bytes 0xc0 and 0xdb are escaped, worst case is when the packet is all made of 0xc0 and 0xdb :) ) + 2 (0xc0 delimiters at start and end). 
*/ nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC); if (!nskb) return NULL; bt_cb(nskb)->pkt_type = pkt_type; bcsp_slip_msgdelim(nskb); hdr[0] = bcsp->rxseq_txack << 3; bcsp->txack_req = 0; BT_DBG("We request packet no %u to card", bcsp->rxseq_txack); if (rel) { hdr[0] |= 0x80 + bcsp->msgq_txseq; BT_DBG("Sending packet with seqno %u", bcsp->msgq_txseq); bcsp->msgq_txseq = (bcsp->msgq_txseq + 1) & 0x07; } if (bcsp->use_crc) hdr[0] |= 0x40; hdr[1] = ((len << 4) & 0xff) | chan; hdr[2] = len >> 4; hdr[3] = ~(hdr[0] + hdr[1] + hdr[2]); /* Put BCSP header */ for (i = 0; i < 4; i++) { bcsp_slip_one_byte(nskb, hdr[i]); if (bcsp->use_crc) bcsp_crc_update(&bcsp_txmsg_crc, hdr[i]); } /* Put payload */ for (i = 0; i < len; i++) { bcsp_slip_one_byte(nskb, data[i]); if (bcsp->use_crc) bcsp_crc_update(&bcsp_txmsg_crc, data[i]); } /* Put CRC */ if (bcsp->use_crc) { bcsp_txmsg_crc = bitrev16(bcsp_txmsg_crc); bcsp_slip_one_byte(nskb, (u8) ((bcsp_txmsg_crc >> 8) & 0x00ff)); bcsp_slip_one_byte(nskb, (u8) (bcsp_txmsg_crc & 0x00ff)); } bcsp_slip_msgdelim(nskb); return nskb; } /* This is a rewrite of pkt_avail in ABCSP */ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu) { struct bcsp_struct *bcsp = hu->priv; unsigned long flags; struct sk_buff *skb; /* First of all, check for unreliable messages in the queue, since they have priority */ if ((skb = skb_dequeue(&bcsp->unrel)) != NULL) { struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type); if (nskb) { kfree_skb(skb); return nskb; } else { skb_queue_head(&bcsp->unrel, skb); BT_ERR("Could not dequeue pkt because alloc_skb failed"); } } /* Now, try to send a reliable pkt. We can only send a reliable packet if the number of packets sent but not yet ack'ed is < than the winsize */ spin_lock_irqsave_nested(&bcsp->unack.lock, flags, SINGLE_DEPTH_NESTING); if (bcsp->unack.qlen < BCSP_TXWINSIZE && (skb = skb_dequeue(&bcsp->rel)) != NULL) { struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type); if (nskb) { __skb_queue_tail(&bcsp->unack, skb); mod_timer(&bcsp->tbcsp, jiffies + HZ / 4); spin_unlock_irqrestore(&bcsp->unack.lock, flags); return nskb; } else { skb_queue_head(&bcsp->rel, skb); BT_ERR("Could not dequeue pkt because alloc_skb failed"); } } spin_unlock_irqrestore(&bcsp->unack.lock, flags); /* We could not send a reliable packet, either because there are none or because there are too many unack'ed pkts. Did we receive any packets we have not acknowledged yet ? 
*/ if (bcsp->txack_req) { /* if so, craft an empty ACK pkt and send it on BCSP unreliable channel 0 */ struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, NULL, 0, BCSP_ACK_PKT); return nskb; } /* We have nothing to send */ return NULL; } static int bcsp_flush(struct hci_uart *hu) { BT_DBG("hu %p", hu); return 0; } /* Remove ack'ed packets */ static void bcsp_pkt_cull(struct bcsp_struct *bcsp) { struct sk_buff *skb, *tmp; unsigned long flags; int i, pkts_to_be_removed; u8 seqno; spin_lock_irqsave(&bcsp->unack.lock, flags); pkts_to_be_removed = skb_queue_len(&bcsp->unack); seqno = bcsp->msgq_txseq; while (pkts_to_be_removed) { if (bcsp->rxack == seqno) break; pkts_to_be_removed--; seqno = (seqno - 1) & 0x07; } if (bcsp->rxack != seqno) BT_ERR("Peer acked invalid packet"); BT_DBG("Removing %u pkts out of %u, up to seqno %u", pkts_to_be_removed, skb_queue_len(&bcsp->unack), (seqno - 1) & 0x07); i = 0; skb_queue_walk_safe(&bcsp->unack, skb, tmp) { if (i >= pkts_to_be_removed) break; i++; __skb_unlink(skb, &bcsp->unack); kfree_skb(skb); } if (skb_queue_empty(&bcsp->unack)) del_timer(&bcsp->tbcsp); spin_unlock_irqrestore(&bcsp->unack.lock, flags); if (i != pkts_to_be_removed) BT_ERR("Removed only %u out of %u pkts", i, pkts_to_be_removed); } /* Handle BCSP link-establishment packets. When we detect a "sync" packet, symptom that the BT module has reset, we do nothing :) (yet) */ static void bcsp_handle_le_pkt(struct hci_uart *hu) { struct bcsp_struct *bcsp = hu->priv; u8 conf_pkt[4] = { 0xad, 0xef, 0xac, 0xed }; u8 conf_rsp_pkt[4] = { 0xde, 0xad, 0xd0, 0xd0 }; u8 sync_pkt[4] = { 0xda, 0xdc, 0xed, 0xed }; /* spot "conf" pkts and reply with a "conf rsp" pkt */ if (bcsp->rx_skb->data[1] >> 4 == 4 && bcsp->rx_skb->data[2] == 0 && !memcmp(&bcsp->rx_skb->data[4], conf_pkt, 4)) { struct sk_buff *nskb = alloc_skb(4, GFP_ATOMIC); BT_DBG("Found a LE conf pkt"); if (!nskb) return; memcpy(skb_put(nskb, 4), conf_rsp_pkt, 4); bt_cb(nskb)->pkt_type = BCSP_LE_PKT; skb_queue_head(&bcsp->unrel, nskb); hci_uart_tx_wakeup(hu); } /* Spot "sync" pkts. If we find one...disaster! 
*/ else if (bcsp->rx_skb->data[1] >> 4 == 4 && bcsp->rx_skb->data[2] == 0 && !memcmp(&bcsp->rx_skb->data[4], sync_pkt, 4)) { BT_ERR("Found a LE sync pkt, card has reset"); } } static inline void bcsp_unslip_one_byte(struct bcsp_struct *bcsp, unsigned char byte) { const u8 c0 = 0xc0, db = 0xdb; switch (bcsp->rx_esc_state) { case BCSP_ESCSTATE_NOESC: switch (byte) { case 0xdb: bcsp->rx_esc_state = BCSP_ESCSTATE_ESC; break; default: memcpy(skb_put(bcsp->rx_skb, 1), &byte, 1); if ((bcsp->rx_skb-> data[0] & 0x40) != 0 && bcsp->rx_state != BCSP_W4_CRC) bcsp_crc_update(&bcsp->message_crc, byte); bcsp->rx_count--; } break; case BCSP_ESCSTATE_ESC: switch (byte) { case 0xdc: memcpy(skb_put(bcsp->rx_skb, 1), &c0, 1); if ((bcsp->rx_skb-> data[0] & 0x40) != 0 && bcsp->rx_state != BCSP_W4_CRC) bcsp_crc_update(&bcsp-> message_crc, 0xc0); bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC; bcsp->rx_count--; break; case 0xdd: memcpy(skb_put(bcsp->rx_skb, 1), &db, 1); if ((bcsp->rx_skb-> data[0] & 0x40) != 0 && bcsp->rx_state != BCSP_W4_CRC) bcsp_crc_update(&bcsp-> message_crc, 0xdb); bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC; bcsp->rx_count--; break; default: BT_ERR ("Invalid byte %02x after esc byte", byte); kfree_skb(bcsp->rx_skb); bcsp->rx_skb = NULL; bcsp->rx_state = BCSP_W4_PKT_DELIMITER; bcsp->rx_count = 0; } } } static void bcsp_complete_rx_pkt(struct hci_uart *hu) { struct bcsp_struct *bcsp = hu->priv; int pass_up; if (bcsp->rx_skb->data[0] & 0x80) { /* reliable pkt */ BT_DBG("Received seqno %u from card", bcsp->rxseq_txack); bcsp->rxseq_txack++; bcsp->rxseq_txack %= 0x8; bcsp->txack_req = 1; /* If needed, transmit an ack pkt */ hci_uart_tx_wakeup(hu); } bcsp->rxack = (bcsp->rx_skb->data[0] >> 3) & 0x07; BT_DBG("Request for pkt %u from card", bcsp->rxack); bcsp_pkt_cull(bcsp); if ((bcsp->rx_skb->data[1] & 0x0f) == 6 && bcsp->rx_skb->data[0] & 0x80) { bt_cb(bcsp->rx_skb)->pkt_type = HCI_ACLDATA_PKT; pass_up = 1; } else if ((bcsp->rx_skb->data[1] & 0x0f) == 5 && bcsp->rx_skb->data[0] & 0x80) { bt_cb(bcsp->rx_skb)->pkt_type = HCI_EVENT_PKT; pass_up = 1; } else if ((bcsp->rx_skb->data[1] & 0x0f) == 7) { bt_cb(bcsp->rx_skb)->pkt_type = HCI_SCODATA_PKT; pass_up = 1; } else if ((bcsp->rx_skb->data[1] & 0x0f) == 1 && !(bcsp->rx_skb->data[0] & 0x80)) { bcsp_handle_le_pkt(hu); pass_up = 0; } else pass_up = 0; if (!pass_up) { struct hci_event_hdr hdr; u8 desc = (bcsp->rx_skb->data[1] & 0x0f); if (desc != 0 && desc != 1) { if (hciextn) { desc |= 0xc0; skb_pull(bcsp->rx_skb, 4); memcpy(skb_push(bcsp->rx_skb, 1), &desc, 1); hdr.evt = 0xff; hdr.plen = bcsp->rx_skb->len; memcpy(skb_push(bcsp->rx_skb, HCI_EVENT_HDR_SIZE), &hdr, HCI_EVENT_HDR_SIZE); bt_cb(bcsp->rx_skb)->pkt_type = HCI_EVENT_PKT; hci_recv_frame(bcsp->rx_skb); } else { BT_ERR ("Packet for unknown channel (%u %s)", bcsp->rx_skb->data[1] & 0x0f, bcsp->rx_skb->data[0] & 0x80 ? 
"reliable" : "unreliable"); kfree_skb(bcsp->rx_skb); } } else kfree_skb(bcsp->rx_skb); } else { /* Pull out BCSP hdr */ skb_pull(bcsp->rx_skb, 4); hci_recv_frame(bcsp->rx_skb); } bcsp->rx_state = BCSP_W4_PKT_DELIMITER; bcsp->rx_skb = NULL; } static u16 bscp_get_crc(struct bcsp_struct *bcsp) { return get_unaligned_be16(&bcsp->rx_skb->data[bcsp->rx_skb->len - 2]); } /* Recv data */ static int bcsp_recv(struct hci_uart *hu, void *data, int count) { struct bcsp_struct *bcsp = hu->priv; unsigned char *ptr; BT_DBG("hu %p count %d rx_state %d rx_count %ld", hu, count, bcsp->rx_state, bcsp->rx_count); ptr = data; while (count) { if (bcsp->rx_count) { if (*ptr == 0xc0) { BT_ERR("Short BCSP packet"); kfree_skb(bcsp->rx_skb); bcsp->rx_state = BCSP_W4_PKT_START; bcsp->rx_count = 0; } else bcsp_unslip_one_byte(bcsp, *ptr); ptr++; count--; continue; } switch (bcsp->rx_state) { case BCSP_W4_BCSP_HDR: if ((0xff & (u8) ~ (bcsp->rx_skb->data[0] + bcsp->rx_skb->data[1] + bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) { BT_ERR("Error in BCSP hdr checksum"); kfree_skb(bcsp->rx_skb); bcsp->rx_state = BCSP_W4_PKT_DELIMITER; bcsp->rx_count = 0; continue; } if (bcsp->rx_skb->data[0] & 0x80 /* reliable pkt */ && (bcsp->rx_skb->data[0] & 0x07) != bcsp->rxseq_txack) { BT_ERR ("Out-of-order packet arrived, got %u expected %u", bcsp->rx_skb->data[0] & 0x07, bcsp->rxseq_txack); kfree_skb(bcsp->rx_skb); bcsp->rx_state = BCSP_W4_PKT_DELIMITER; bcsp->rx_count = 0; continue; } bcsp->rx_state = BCSP_W4_DATA; bcsp->rx_count = (bcsp->rx_skb->data[1] >> 4) + (bcsp->rx_skb->data[2] << 4); /* May be 0 */ continue; case BCSP_W4_DATA: if (bcsp->rx_skb->data[0] & 0x40) { /* pkt with crc */ bcsp->rx_state = BCSP_W4_CRC; bcsp->rx_count = 2; } else bcsp_complete_rx_pkt(hu); continue; case BCSP_W4_CRC: if (bitrev16(bcsp->message_crc) != bscp_get_crc(bcsp)) { BT_ERR ("Checksum failed: computed %04x received %04x", bitrev16(bcsp->message_crc), bscp_get_crc(bcsp)); kfree_skb(bcsp->rx_skb); bcsp->rx_state = BCSP_W4_PKT_DELIMITER; bcsp->rx_count = 0; continue; } skb_trim(bcsp->rx_skb, bcsp->rx_skb->len - 2); bcsp_complete_rx_pkt(hu); continue; case BCSP_W4_PKT_DELIMITER: switch (*ptr) { case 0xc0: bcsp->rx_state = BCSP_W4_PKT_START; break; default: /*BT_ERR("Ignoring byte %02x", *ptr);*/ break; } ptr++; count--; break; case BCSP_W4_PKT_START: switch (*ptr) { case 0xc0: ptr++; count--; break; default: bcsp->rx_state = BCSP_W4_BCSP_HDR; bcsp->rx_count = 4; bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC; BCSP_CRC_INIT(bcsp->message_crc); /* Do not increment ptr or decrement count * Allocate packet. Max len of a BCSP pkt= * 0xFFF (payload) +4 (header) +2 (crc) */ bcsp->rx_skb = bt_skb_alloc(0x1005, GFP_ATOMIC); if (!bcsp->rx_skb) { BT_ERR("Can't allocate mem for new packet"); bcsp->rx_state = BCSP_W4_PKT_DELIMITER; bcsp->rx_count = 0; return 0; } bcsp->rx_skb->dev = (void *) hu->hdev; break; } break; } } return count; } /* Arrange to retransmit all messages in the relq. 
*/ static void bcsp_timed_event(unsigned long arg) { struct hci_uart *hu = (struct hci_uart *) arg; struct bcsp_struct *bcsp = hu->priv; struct sk_buff *skb; unsigned long flags; BT_DBG("hu %p retransmitting %u pkts", hu, bcsp->unack.qlen); spin_lock_irqsave_nested(&bcsp->unack.lock, flags, SINGLE_DEPTH_NESTING); while ((skb = __skb_dequeue_tail(&bcsp->unack)) != NULL) { bcsp->msgq_txseq = (bcsp->msgq_txseq - 1) & 0x07; skb_queue_head(&bcsp->rel, skb); } spin_unlock_irqrestore(&bcsp->unack.lock, flags); hci_uart_tx_wakeup(hu); } static int bcsp_open(struct hci_uart *hu) { struct bcsp_struct *bcsp; BT_DBG("hu %p", hu); bcsp = kzalloc(sizeof(*bcsp), GFP_KERNEL); if (!bcsp) return -ENOMEM; hu->priv = bcsp; skb_queue_head_init(&bcsp->unack); skb_queue_head_init(&bcsp->rel); skb_queue_head_init(&bcsp->unrel); init_timer(&bcsp->tbcsp); bcsp->tbcsp.function = bcsp_timed_event; bcsp->tbcsp.data = (u_long) hu; bcsp->rx_state = BCSP_W4_PKT_DELIMITER; if (txcrc) bcsp->use_crc = 1; return 0; } static int bcsp_close(struct hci_uart *hu) { struct bcsp_struct *bcsp = hu->priv; hu->priv = NULL; BT_DBG("hu %p", hu); skb_queue_purge(&bcsp->unack); skb_queue_purge(&bcsp->rel); skb_queue_purge(&bcsp->unrel); del_timer(&bcsp->tbcsp); kfree(bcsp); return 0; } static struct hci_uart_proto bcsp = { .id = HCI_UART_BCSP, .open = bcsp_open, .close = bcsp_close, .enqueue = bcsp_enqueue, .dequeue = bcsp_dequeue, .recv = bcsp_recv, .flush = bcsp_flush }; int __init bcsp_init(void) { int err = hci_uart_register_proto(&bcsp); if (!err) BT_INFO("HCI BCSP protocol initialized"); else BT_ERR("HCI BCSP protocol registration failed"); return err; } int __exit bcsp_deinit(void) { return hci_uart_unregister_proto(&bcsp); } module_param(txcrc, bool, 0644); MODULE_PARM_DESC(txcrc, "Transmit CRC with every BCSP packet"); module_param(hciextn, bool, 0644); MODULE_PARM_DESC(hciextn, "Convert HCI Extensions into BCSP packets");
gpl-2.0
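A minimal, self-contained sketch (userspace C, not part of the driver above) of the four-byte BCSP header that bcsp_recv() validates: seq/ack and flags packed into byte 0, the channel and 12-bit payload length split across bytes 1 and 2, and a one's-complement checksum in byte 3.

#include <stdint.h>
#include <stdio.h>

/* Pack a reliable-channel BCSP header the way the driver expects it:
 * byte 0: seq (bits 0-2), ack (bits 3-5), CRC flag (bit 6), reliable (bit 7)
 * byte 1: channel (bits 0-3), low nibble of payload length (bits 4-7)
 * byte 2: high eight bits of the 12-bit payload length
 * byte 3: one's complement of the sum of bytes 0-2 */
static void bcsp_build_hdr(uint8_t hdr[4], unsigned seq, unsigned ack,
			   int use_crc, unsigned chan, unsigned len)
{
	hdr[0] = (seq & 0x07) | ((ack & 0x07) << 3) |
		 (use_crc ? 0x40 : 0x00) | 0x80;
	hdr[1] = (chan & 0x0f) | ((len & 0x0f) << 4);
	hdr[2] = (len >> 4) & 0xff;
	hdr[3] = ~(hdr[0] + hdr[1] + hdr[2]);
}

/* Same test as the BCSP_W4_BCSP_HDR case in bcsp_recv() */
static int bcsp_hdr_ok(const uint8_t hdr[4])
{
	return (0xff & (uint8_t)~(hdr[0] + hdr[1] + hdr[2])) == hdr[3];
}

int main(void)
{
	uint8_t hdr[4];

	bcsp_build_hdr(hdr, 2, 5, 1, 6, 0x1f3); /* ACL data, 499-byte payload */
	printf("hdr %02x %02x %02x %02x ok=%d\n",
	       hdr[0], hdr[1], hdr[2], hdr[3], bcsp_hdr_ok(hdr));
	return 0;
}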
fivestars/ubuntu-nexus7
drivers/pcmcia/sa1100_simpad.c
3075
2973
/* * drivers/pcmcia/sa1100_simpad.c * * PCMCIA implementation routines for simpad * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/init.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <mach/simpad.h> #include "sa1100_generic.h" extern long get_cs3_shadow(void); extern void set_cs3_bit(int value); extern void clear_cs3_bit(int value); static struct pcmcia_irqs irqs[] = { { 1, IRQ_GPIO_CF_CD, "CF_CD" }, }; static int simpad_pcmcia_hw_init(struct soc_pcmcia_socket *skt) { clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1); skt->socket.pci_irq = IRQ_GPIO_CF_IRQ; return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); } static void simpad_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) { soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs)); /* Disable CF bus: */ //set_cs3_bit(PCMCIA_BUFF_DIS); clear_cs3_bit(PCMCIA_RESET); } static void simpad_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state) { unsigned long levels = GPLR; long cs3reg = get_cs3_shadow(); state->detect = ((levels & GPIO_CF_CD) == 0) ? 1 : 0; state->ready = (levels & GPIO_CF_IRQ) ? 1 : 0; state->bvd1 = 1; /* Not available on Simpad. */ state->bvd2 = 1; /* Not available on Simpad. */ state->wrprot = 0; /* Not available on Simpad. */ if ((cs3reg & 0x0c) == 0x0c) { state->vs_3v = 0; state->vs_Xv = 0; } else { state->vs_3v = 1; state->vs_Xv = 0; } } static int simpad_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) { unsigned long flags; local_irq_save(flags); /* Murphy: see table of MIC2562a-1 */ switch (state->Vcc) { case 0: clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1); break; case 33: clear_cs3_bit(VCC_3V_EN|EN1); set_cs3_bit(VCC_5V_EN|EN0); break; case 50: clear_cs3_bit(VCC_5V_EN|EN1); set_cs3_bit(VCC_3V_EN|EN0); break; default: printk(KERN_ERR "%s(): unrecognized Vcc %u\n", __func__, state->Vcc); clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1); local_irq_restore(flags); return -1; } local_irq_restore(flags); return 0; } static void simpad_pcmcia_socket_init(struct soc_pcmcia_socket *skt) { soc_pcmcia_enable_irqs(skt, irqs, ARRAY_SIZE(irqs)); } static void simpad_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt) { soc_pcmcia_disable_irqs(skt, irqs, ARRAY_SIZE(irqs)); set_cs3_bit(PCMCIA_RESET); } static struct pcmcia_low_level simpad_pcmcia_ops = { .owner = THIS_MODULE, .hw_init = simpad_pcmcia_hw_init, .hw_shutdown = simpad_pcmcia_hw_shutdown, .socket_state = simpad_pcmcia_socket_state, .configure_socket = simpad_pcmcia_configure_socket, .socket_init = simpad_pcmcia_socket_init, .socket_suspend = simpad_pcmcia_socket_suspend, }; int __devinit pcmcia_simpad_init(struct device *dev) { int ret = -ENODEV; if (machine_is_simpad()) ret = sa11xx_drv_pcmcia_probe(dev, &simpad_pcmcia_ops, 1, 1); return ret; }
gpl-2.0
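The CS3 lines driven above sit behind a write-only latch, which is why the driver leans on get_cs3_shadow()/set_cs3_bit()/clear_cs3_bit() from the machine support code. A minimal sketch of that shadow-register pattern (plain C with a stand-in variable instead of the real latch; not the actual mach-simpad implementation):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the write-only CS3 hardware latch: the real register
 * cannot be read back, so every update goes through a software shadow
 * and the whole word is rewritten each time. */
static uint32_t fake_latch;
static uint32_t cs3_shadow;

static uint32_t get_cs3_shadow(void)
{
	return cs3_shadow;
}

static void set_cs3_bit(uint32_t mask)
{
	cs3_shadow |= mask;
	fake_latch = cs3_shadow;	/* push the full latch contents out */
}

static void clear_cs3_bit(uint32_t mask)
{
	cs3_shadow &= ~mask;
	fake_latch = cs3_shadow;
}

int main(void)
{
	set_cs3_bit(0x01 | 0x04);	/* bit values here are hypothetical */
	clear_cs3_bit(0x04);
	printf("shadow now 0x%02x (latch 0x%02x)\n", get_cs3_shadow(), fake_latch);
	return 0;
}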
yytang2012/linux-kvm-arm
drivers/hid/hid-lg3ff.c
4355
4464
/* * Force feedback support for Logitech Flight System G940 * * Copyright (c) 2009 Gary Stein <LordCnidarian@gmail.com> */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/input.h> #include <linux/hid.h> #include "hid-lg.h" /* * G940 Theory of Operation (from experimentation) * * There are 63 fields (only 3 of them currently used) * 0 - seems to be command field * 1 - 30 deal with the x axis * 31 -60 deal with the y axis * * Field 1 is x axis constant force * Field 31 is y axis constant force * * other interesting fields 1,2,3,4 on x axis * (same for 31,32,33,34 on y axis) * * 0 0 127 127 makes the joystick autocenter hard * * 127 0 127 127 makes the joystick loose on the right, * but stops all movement left * * -127 0 -127 -127 makes the joystick loose on the left, * but stops all movement right * * 0 0 -127 -127 makes the joystick rattle very hard * * There are surely more effects here that I don't yet understand */ struct lg3ff_device { struct hid_report *report; }; static int hid_lg3ff_play(struct input_dev *dev, void *data, struct ff_effect *effect) { struct hid_device *hid = input_get_drvdata(dev); struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct hid_report *report = list_entry(report_list->next, struct hid_report, list); int x, y; /* * Available values in the field should always be 63, but we only use up to * 35. Instead, clear the entire area, however big it is. 
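 * report_count below is whatever the report descriptor declared, so the * memset stays correct even if a firmware revision resizes the field.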
*/ memset(report->field[0]->value, 0, sizeof(__s32) * report->field[0]->report_count); switch (effect->type) { case FF_CONSTANT: /* * Already clamped in ff_memless * 0 is center (different than other Logitech devices) */ x = effect->u.ramp.start_level; y = effect->u.ramp.end_level; /* send command byte */ report->field[0]->value[0] = 0x51; /* * Signs are backwards from the other Force3d Pro, * so they get recast here into two's-complement 8 bits */ report->field[0]->value[1] = (unsigned char)(-x); report->field[0]->value[31] = (unsigned char)(-y); hid_hw_request(hid, report, HID_REQ_SET_REPORT); break; } return 0; } static void hid_lg3ff_set_autocenter(struct input_dev *dev, u16 magnitude) { struct hid_device *hid = input_get_drvdata(dev); struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct hid_report *report = list_entry(report_list->next, struct hid_report, list); /* * Auto Centering probed from device * NOTE: deadman's switch on G940 must be covered * for effects to work */ report->field[0]->value[0] = 0x51; report->field[0]->value[1] = 0x00; report->field[0]->value[2] = 0x00; report->field[0]->value[3] = 0x7F; report->field[0]->value[4] = 0x7F; report->field[0]->value[31] = 0x00; report->field[0]->value[32] = 0x00; report->field[0]->value[33] = 0x7F; report->field[0]->value[34] = 0x7F; hid_hw_request(hid, report, HID_REQ_SET_REPORT); } static const signed short ff3_joystick_ac[] = { FF_CONSTANT, FF_AUTOCENTER, -1 }; int lg3ff_init(struct hid_device *hid) { struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); struct input_dev *dev = hidinput->input; const signed short *ff_bits = ff3_joystick_ac; int error; int i; /* Check that the report looks ok */ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 35)) return -ENODEV; /* Assume single fixed device G940 */ for (i = 0; ff_bits[i] >= 0; i++) set_bit(ff_bits[i], dev->ffbit); error = input_ff_create_memless(dev, NULL, hid_lg3ff_play); if (error) return error; if (test_bit(FF_AUTOCENTER, dev->ffbit)) dev->ff->set_autocenter = hid_lg3ff_set_autocenter; hid_info(hid, "Force feedback for Logitech Flight System G940 by Gary Stein <LordCnidarian@gmail.com>\n"); return 0; }
gpl-2.0
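A small standalone illustration (plain C, not driver code) of the sign handling in hid_lg3ff_play() above: the G940 wants the negated level recast as an 8-bit two's-complement byte.

#include <stdint.h>
#include <stdio.h>

/* hid_lg3ff_play() stores -level as an unsigned byte; two's complement
 * makes e.g. level -127 come out as 0x7f and +127 as 0x81. */
static uint8_t g940_level(int8_t level)
{
	return (uint8_t)(-level);
}

int main(void)
{
	int8_t samples[] = { -127, -1, 0, 1, 127 };

	for (unsigned i = 0; i < sizeof(samples); i++)
		printf("%4d -> 0x%02x\n", samples[i], g940_level(samples[i]));
	return 0;
}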
slz/delidded-kernel-n900j-note3
drivers/net/ethernet/neterion/vxge/vxge-config.c
5123
137152
/****************************************************************************** * This software may be used and distributed according to the terms of * the GNU General Public License (GPL), incorporated herein by reference. * Drivers based on or derived from this code fall under the GPL and must * retain the authorship, copyright and license notice. This file is not * a complete program and may only be used when the entire operating * system is licensed under the GPL. * See the file COPYING in this distribution for more information. * * vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O * Virtualized Server Adapter. * Copyright(c) 2002-2010 Exar Corp. ******************************************************************************/ #include <linux/vmalloc.h> #include <linux/etherdevice.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/slab.h> #include "vxge-traffic.h" #include "vxge-config.h" #include "vxge-main.h" #define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \ status = __vxge_hw_vpath_stats_access(vpath, \ VXGE_HW_STATS_OP_READ, \ offset, \ &val64); \ if (status != VXGE_HW_OK) \ return status; \ } static void vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg) { u64 val64; val64 = readq(&vp_reg->rxmac_vcfg0); val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff); writeq(val64, &vp_reg->rxmac_vcfg0); val64 = readq(&vp_reg->rxmac_vcfg0); } /* * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle */ int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id) { struct vxge_hw_vpath_reg __iomem *vp_reg; struct __vxge_hw_virtualpath *vpath; u64 val64, rxd_count, rxd_spat; int count = 0, total_count = 0; vpath = &hldev->virtual_paths[vp_id]; vp_reg = vpath->vp_reg; vxge_hw_vpath_set_zero_rx_frm_len(vp_reg); /* Check that the ring controller for this vpath has enough free RxDs * to send frames to the host. This is done by reading the * PRC_RXD_DOORBELL_VPn register and comparing the read value to the * RXD_SPAT value for the vpath. */ val64 = readq(&vp_reg->prc_cfg6); rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1; /* Use a factor of 2 when comparing rxd_count against rxd_spat for some * leg room. */ rxd_spat *= 2; do { mdelay(1); rxd_count = readq(&vp_reg->prc_rxd_doorbell); /* Check that the ring controller for this vpath does * not have any frame in its pipeline. */ val64 = readq(&vp_reg->frm_in_progress_cnt); if ((rxd_count <= rxd_spat) || (val64 > 0)) count = 0; else count++; total_count++; } while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) && (total_count < VXGE_HW_MAX_POLLING_COUNT)); if (total_count >= VXGE_HW_MAX_POLLING_COUNT) printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n", __func__); return total_count; } /* vxge_hw_device_wait_receive_idle - This function waits until all frames * stored in the frame buffer for each vpath assigned to the given * function (hldev) have been sent to the host. */ void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev) { int i, total_count = 0; for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { if (!(hldev->vpaths_deployed & vxge_mBIT(i))) continue; total_count += vxge_hw_vpath_wait_receive_idle(hldev, i); if (total_count >= VXGE_HW_MAX_POLLING_COUNT) break; } } /* * __vxge_hw_device_register_poll * Will poll certain register for specified amount of time. * Will poll until masked bit is not cleared. 
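 * That is, the call returns VXGE_HW_OK as soon as the masked bits read back * as zero: a 10us settle, ten 100us spins, then 1ms steps for up to * max_millis.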
*/ static enum vxge_hw_status __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis) { u64 val64; u32 i = 0; enum vxge_hw_status ret = VXGE_HW_FAIL; udelay(10); do { val64 = readq(reg); if (!(val64 & mask)) return VXGE_HW_OK; udelay(100); } while (++i <= 9); i = 0; do { val64 = readq(reg); if (!(val64 & mask)) return VXGE_HW_OK; mdelay(1); } while (++i <= max_millis); return ret; } static inline enum vxge_hw_status __vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr, u64 mask, u32 max_millis) { __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr); wmb(); __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr); wmb(); return __vxge_hw_device_register_poll(addr, mask, max_millis); } static enum vxge_hw_status vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action, u32 fw_memo, u32 offset, u64 *data0, u64 *data1, u64 *steer_ctrl) { struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg; enum vxge_hw_status status; u64 val64; u32 retry = 0, max_retry = 3; spin_lock(&vpath->lock); if (!vpath->vp_open) { spin_unlock(&vpath->lock); max_retry = 100; } writeq(*data0, &vp_reg->rts_access_steer_data0); writeq(*data1, &vp_reg->rts_access_steer_data1); wmb(); val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | *steer_ctrl; status = __vxge_hw_pio_mem_write64(val64, &vp_reg->rts_access_steer_ctrl, VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, VXGE_HW_DEF_DEVICE_POLL_MILLIS); /* The __vxge_hw_device_register_poll can udelay for a significant * amount of time, blocking other processes from the CPU. If it delays * for ~5secs, an NMI error can occur. A way around this is to give up * the processor via msleep, but this is not allowed while under lock. * So, only allow it to sleep for ~4secs if open. Otherwise, delay for * 1sec and sleep for 10ms until the firmware operation has completed * or timed out. 
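 * Concretely: with the vpath open the lock stays held and the loop below * never sleeps; on the not-open path the lock was dropped above, max_retry * rises to 100 and each retry msleeps 20ms between polls.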
*/ while ((status != VXGE_HW_OK) && retry++ < max_retry) { if (!vpath->vp_open) msleep(20); status = __vxge_hw_device_register_poll( &vp_reg->rts_access_steer_ctrl, VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, VXGE_HW_DEF_DEVICE_POLL_MILLIS); } if (status != VXGE_HW_OK) goto out; val64 = readq(&vp_reg->rts_access_steer_ctrl); if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { *data0 = readq(&vp_reg->rts_access_steer_data0); *data1 = readq(&vp_reg->rts_access_steer_data1); *steer_ctrl = val64; } else status = VXGE_HW_FAIL; out: if (vpath->vp_open) spin_unlock(&vpath->lock); return status; } enum vxge_hw_status vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major, u32 *minor, u32 *build) { u64 data0 = 0, data1 = 0, steer_ctrl = 0; struct __vxge_hw_virtualpath *vpath; enum vxge_hw_status status; vpath = &hldev->virtual_paths[hldev->first_vp_id]; status = vxge_hw_vpath_fw_api(vpath, VXGE_HW_FW_UPGRADE_ACTION, VXGE_HW_FW_UPGRADE_MEMO, VXGE_HW_FW_UPGRADE_OFFSET_READ, &data0, &data1, &steer_ctrl); if (status != VXGE_HW_OK) return status; *major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0); *minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0); *build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0); return status; } enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev) { u64 data0 = 0, data1 = 0, steer_ctrl = 0; struct __vxge_hw_virtualpath *vpath; enum vxge_hw_status status; u32 ret; vpath = &hldev->virtual_paths[hldev->first_vp_id]; status = vxge_hw_vpath_fw_api(vpath, VXGE_HW_FW_UPGRADE_ACTION, VXGE_HW_FW_UPGRADE_MEMO, VXGE_HW_FW_UPGRADE_OFFSET_COMMIT, &data0, &data1, &steer_ctrl); if (status != VXGE_HW_OK) { vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__); goto exit; } ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F; if (ret != 1) { vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d", __func__, ret); status = VXGE_HW_FAIL; } exit: return status; } enum vxge_hw_status vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size) { u64 data0 = 0, data1 = 0, steer_ctrl = 0; struct __vxge_hw_virtualpath *vpath; enum vxge_hw_status status; int ret_code, sec_code; vpath = &hldev->virtual_paths[hldev->first_vp_id]; /* send upgrade start command */ status = vxge_hw_vpath_fw_api(vpath, VXGE_HW_FW_UPGRADE_ACTION, VXGE_HW_FW_UPGRADE_MEMO, VXGE_HW_FW_UPGRADE_OFFSET_START, &data0, &data1, &steer_ctrl); if (status != VXGE_HW_OK) { vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed", __func__); return status; } /* Transfer fw image to adapter 16 bytes at a time */ for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) { steer_ctrl = 0; /* The next 128bits of fwdata to be loaded onto the adapter */ data0 = *((u64 *)fwdata); data1 = *((u64 *)fwdata + 1); status = vxge_hw_vpath_fw_api(vpath, VXGE_HW_FW_UPGRADE_ACTION, VXGE_HW_FW_UPGRADE_MEMO, VXGE_HW_FW_UPGRADE_OFFSET_SEND, &data0, &data1, &steer_ctrl); if (status != VXGE_HW_OK) { vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed", __func__); goto out; } ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0); switch (ret_code) { case VXGE_HW_FW_UPGRADE_OK: /* All OK, send next 16 bytes. 
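 * (data0/data1 carry two 64-bit words per call, i.e. exactly one * VXGE_HW_FW_UPGRADE_BLK_SIZE chunk of the image.)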
*/ break; case VXGE_FW_UPGRADE_BYTES2SKIP: /* skip bytes in the stream */ fwdata += (data0 >> 8) & 0xFFFFFFFF; break; case VXGE_HW_FW_UPGRADE_DONE: goto out; case VXGE_HW_FW_UPGRADE_ERR: sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0); switch (sec_code) { case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: printk(KERN_ERR "corrupted data from .ncf file\n"); break; case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: printk(KERN_ERR "invalid .ncf file\n"); break; case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: printk(KERN_ERR "buffer overflow\n"); break; case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: printk(KERN_ERR "failed to flash the image\n"); break; case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: printk(KERN_ERR "generic error. Unknown error type\n"); break; default: printk(KERN_ERR "Unknown error of type %d\n", sec_code); break; } status = VXGE_HW_FAIL; goto out; default: printk(KERN_ERR "Unknown FW error: %d\n", ret_code); status = VXGE_HW_FAIL; goto out; } /* point to next 16 bytes */ fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE; } out: return status; } enum vxge_hw_status vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev, struct eprom_image *img) { u64 data0 = 0, data1 = 0, steer_ctrl = 0; struct __vxge_hw_virtualpath *vpath; enum vxge_hw_status status; int i; vpath = &hldev->virtual_paths[hldev->first_vp_id]; for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) { data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i); data1 = steer_ctrl = 0; status = vxge_hw_vpath_fw_api(vpath, VXGE_HW_FW_API_GET_EPROM_REV, VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, 0, &data0, &data1, &steer_ctrl); if (status != VXGE_HW_OK) break; img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0); img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0); img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0); img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0); } return status; } /* * __vxge_hw_channel_free - Free memory allocated for channel * This function deallocates memory from the channel and various arrays * in the channel */ static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel) { kfree(channel->work_arr); kfree(channel->free_arr); kfree(channel->reserve_arr); kfree(channel->orig_arr); kfree(channel); } /* * __vxge_hw_channel_initialize - Initialize a channel * This function initializes a channel by properly setting the * various references */ static enum vxge_hw_status __vxge_hw_channel_initialize(struct __vxge_hw_channel *channel) { u32 i; struct __vxge_hw_virtualpath *vpath; vpath = channel->vph->vpath; if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) { for (i = 0; i < channel->length; i++) channel->orig_arr[i] = channel->reserve_arr[i]; } switch (channel->type) { case VXGE_HW_CHANNEL_TYPE_FIFO: vpath->fifoh = (struct __vxge_hw_fifo *)channel; channel->stats = &((struct __vxge_hw_fifo *) channel)->stats->common_stats; break; case VXGE_HW_CHANNEL_TYPE_RING: vpath->ringh = (struct __vxge_hw_ring *)channel; channel->stats = &((struct __vxge_hw_ring *) channel)->stats->common_stats; break; default: break; } return VXGE_HW_OK; } /* * __vxge_hw_channel_reset - Resets a channel * This function resets a channel by properly setting the various references */ static enum vxge_hw_status __vxge_hw_channel_reset(struct __vxge_hw_channel *channel) { u32 i; for (i = 0; i < 
channel->length; i++) { if (channel->reserve_arr != NULL) channel->reserve_arr[i] = channel->orig_arr[i]; if (channel->free_arr != NULL) channel->free_arr[i] = NULL; if (channel->work_arr != NULL) channel->work_arr[i] = NULL; } channel->free_ptr = channel->length; channel->reserve_ptr = channel->length; channel->reserve_top = 0; channel->post_index = 0; channel->compl_index = 0; return VXGE_HW_OK; } /* * __vxge_hw_device_pci_e_init * Initialize certain PCI/PCI-X configuration registers * with recommended values. Save config space for future hw resets. */ static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev) { u16 cmd = 0; /* Set the PErr Response bit and SERR in the PCI command register. */ pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd); cmd |= 0x140; pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd); pci_save_state(hldev->pdev); } /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset * in progress * This routine checks that the vpath reset in progress register has turned zero */ static enum vxge_hw_status __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog) { enum vxge_hw_status status; status = __vxge_hw_device_register_poll(vpath_rst_in_prog, VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff), VXGE_HW_DEF_DEVICE_POLL_MILLIS); return status; } /* * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section. * Set the swapper bits appropriately for the legacy section. */ static enum vxge_hw_status __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg) { u64 val64; enum vxge_hw_status status = VXGE_HW_OK; val64 = readq(&legacy_reg->toc_swapper_fb); wmb(); switch (val64) { case VXGE_HW_SWAPPER_INITIAL_VALUE: return status; case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED: writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE, &legacy_reg->pifm_rd_swap_en); writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE, &legacy_reg->pifm_rd_flip_en); writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE, &legacy_reg->pifm_wr_swap_en); writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE, &legacy_reg->pifm_wr_flip_en); break; case VXGE_HW_SWAPPER_BYTE_SWAPPED: writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE, &legacy_reg->pifm_rd_swap_en); writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE, &legacy_reg->pifm_wr_swap_en); break; case VXGE_HW_SWAPPER_BIT_FLIPPED: writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE, &legacy_reg->pifm_rd_flip_en); writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE, &legacy_reg->pifm_wr_flip_en); break; } wmb(); val64 = readq(&legacy_reg->toc_swapper_fb); if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE) status = VXGE_HW_ERR_SWAPPER_CTRL; return status; } /* * __vxge_hw_device_toc_get * This routine sets the swapper and reads the toc pointer and returns the * memory mapped address of the toc */ static struct vxge_hw_toc_reg __iomem * __vxge_hw_device_toc_get(void __iomem *bar0) { u64 val64; struct vxge_hw_toc_reg __iomem *toc = NULL; enum vxge_hw_status status; struct vxge_hw_legacy_reg __iomem *legacy_reg = (struct vxge_hw_legacy_reg __iomem *)bar0; status = __vxge_hw_legacy_swapper_set(legacy_reg); if (status != VXGE_HW_OK) goto exit; val64 = readq(&legacy_reg->toc_first_pointer); toc = bar0 + val64; exit: return toc; } /* * __vxge_hw_device_reg_addr_get * This routine sets the swapper and reads the toc pointer and initializes the * register location pointers in the device object. It waits until the ric has * completed initializing registers. 
*/ static enum vxge_hw_status __vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev) { u64 val64; u32 i; enum vxge_hw_status status = VXGE_HW_OK; hldev->legacy_reg = hldev->bar0; hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0); if (hldev->toc_reg == NULL) { status = VXGE_HW_FAIL; goto exit; } val64 = readq(&hldev->toc_reg->toc_common_pointer); hldev->common_reg = hldev->bar0 + val64; val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer); hldev->mrpcim_reg = hldev->bar0 + val64; for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) { val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]); hldev->srpcim_reg[i] = hldev->bar0 + val64; } for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) { val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]); hldev->vpmgmt_reg[i] = hldev->bar0 + val64; } for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) { val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]); hldev->vpath_reg[i] = hldev->bar0 + val64; } val64 = readq(&hldev->toc_reg->toc_kdfc); switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) { case 0: hldev->kdfc = hldev->bar0 + VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64) ; break; default: break; } status = __vxge_hw_device_vpath_reset_in_prog_check( (u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog); exit: return status; } /* * __vxge_hw_device_access_rights_get: Get Access Rights of the driver * This routine returns the Access Rights of the driver */ static u32 __vxge_hw_device_access_rights_get(u32 host_type, u32 func_id) { u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH; switch (host_type) { case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION: if (func_id == 0) { access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM | VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM; } break; case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION: access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM | VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM; break; case VXGE_HW_NO_MR_SR_VH0_FUNCTION0: access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM | VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM; break; case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION: case VXGE_HW_SR_VH_VIRTUAL_FUNCTION: case VXGE_HW_MR_SR_VH0_INVALID_CONFIG: break; case VXGE_HW_SR_VH_FUNCTION0: case VXGE_HW_VH_NORMAL_FUNCTION: access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM; break; } return access_rights; } /* * __vxge_hw_device_is_privilaged * This routine checks if the device function is privilaged or not */ enum vxge_hw_status __vxge_hw_device_is_privilaged(u32 host_type, u32 func_id) { if (__vxge_hw_device_access_rights_get(host_type, func_id) & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) return VXGE_HW_OK; else return VXGE_HW_ERR_PRIVILAGED_OPEARATION; } /* * __vxge_hw_vpath_func_id_get - Get the function id of the vpath. * Returns the function number of the vpath. 
*/ static u32 __vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg) { u64 val64; val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1); return (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64); } /* * __vxge_hw_device_host_info_get * This routine returns the host type assignments */ static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev) { u64 val64; u32 i; val64 = readq(&hldev->common_reg->host_type_assignments); hldev->host_type = (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64); hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments); for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { if (!(hldev->vpath_assignments & vxge_mBIT(i))) continue; hldev->func_id = __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]); hldev->access_rights = __vxge_hw_device_access_rights_get( hldev->host_type, hldev->func_id); hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN; hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i]; hldev->first_vp_id = i; break; } } /* * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as * link width and signalling rate. */ static enum vxge_hw_status __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev) { struct pci_dev *dev = hldev->pdev; u16 lnk; /* Get the negotiated link width and speed from PCI config space */ pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_LNKSTA, &lnk); if ((lnk & PCI_EXP_LNKSTA_CLS) != 1) return VXGE_HW_ERR_INVALID_PCI_INFO; switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) { case PCIE_LNK_WIDTH_RESRV: case PCIE_LNK_X1: case PCIE_LNK_X2: case PCIE_LNK_X4: case PCIE_LNK_X8: break; default: return VXGE_HW_ERR_INVALID_PCI_INFO; } return VXGE_HW_OK; } /* * __vxge_hw_device_initialize * Initialize Titan-V hardware. 
*/ static enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev) { enum vxge_hw_status status = VXGE_HW_OK; if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type, hldev->func_id)) { /* Validate the pci-e link width and speed */ status = __vxge_hw_verify_pci_e_info(hldev); if (status != VXGE_HW_OK) goto exit; } exit: return status; } /* * __vxge_hw_vpath_fw_ver_get - Get the fw version * Returns FW Version */ static enum vxge_hw_status __vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath, struct vxge_hw_device_hw_info *hw_info) { struct vxge_hw_device_version *fw_version = &hw_info->fw_version; struct vxge_hw_device_date *fw_date = &hw_info->fw_date; struct vxge_hw_device_version *flash_version = &hw_info->flash_version; struct vxge_hw_device_date *flash_date = &hw_info->flash_date; u64 data0, data1 = 0, steer_ctrl = 0; enum vxge_hw_status status; status = vxge_hw_vpath_fw_api(vpath, VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY, VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, 0, &data0, &data1, &steer_ctrl); if (status != VXGE_HW_OK) goto exit; fw_date->day = (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0); fw_date->month = (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0); fw_date->year = (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0); snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d", fw_date->month, fw_date->day, fw_date->year); fw_version->major = (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0); fw_version->minor = (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0); fw_version->build = (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0); snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d", fw_version->major, fw_version->minor, fw_version->build); flash_date->day = (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1); flash_date->month = (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1); flash_date->year = (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1); snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d", flash_date->month, flash_date->day, flash_date->year); flash_version->major = (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1); flash_version->minor = (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1); flash_version->build = (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1); snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d", flash_version->major, flash_version->minor, flash_version->build); exit: return status; } /* * __vxge_hw_vpath_card_info_get - Get the serial numbers, * part number and product description. 
*/ static enum vxge_hw_status __vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath, struct vxge_hw_device_hw_info *hw_info) { enum vxge_hw_status status; u64 data0, data1 = 0, steer_ctrl = 0; u8 *serial_number = hw_info->serial_number; u8 *part_number = hw_info->part_number; u8 *product_desc = hw_info->product_desc; u32 i, j = 0; data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER; status = vxge_hw_vpath_fw_api(vpath, VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY, VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, 0, &data0, &data1, &steer_ctrl); if (status != VXGE_HW_OK) return status; ((u64 *)serial_number)[0] = be64_to_cpu(data0); ((u64 *)serial_number)[1] = be64_to_cpu(data1); data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER; data1 = steer_ctrl = 0; status = vxge_hw_vpath_fw_api(vpath, VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY, VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, 0, &data0, &data1, &steer_ctrl); if (status != VXGE_HW_OK) return status; ((u64 *)part_number)[0] = be64_to_cpu(data0); ((u64 *)part_number)[1] = be64_to_cpu(data1); for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0; i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) { data0 = i; data1 = steer_ctrl = 0; status = vxge_hw_vpath_fw_api(vpath, VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY, VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, 0, &data0, &data1, &steer_ctrl); if (status != VXGE_HW_OK) return status; ((u64 *)product_desc)[j++] = be64_to_cpu(data0); ((u64 *)product_desc)[j++] = be64_to_cpu(data1); } return status; } /* * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode * Returns pci function mode */ static enum vxge_hw_status __vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath, struct vxge_hw_device_hw_info *hw_info) { u64 data0, data1 = 0, steer_ctrl = 0; enum vxge_hw_status status; data0 = 0; status = vxge_hw_vpath_fw_api(vpath, VXGE_HW_FW_API_GET_FUNC_MODE, VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, 0, &data0, &data1, &steer_ctrl); if (status != VXGE_HW_OK) return status; hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0); return status; } /* * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath * from MAC address table. 
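 * The DA table is walked with LIST_FIRST/LIST_NEXT actions until an entry * with a valid unicast address turns up; the hardware hands bytes back * low-order first, hence the reversed copy loop below.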
*/ static enum vxge_hw_status __vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath, u8 *macaddr, u8 *macaddr_mask) { u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY, data0 = 0, data1 = 0, steer_ctrl = 0; enum vxge_hw_status status; int i; do { status = vxge_hw_vpath_fw_api(vpath, action, VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA, 0, &data0, &data1, &steer_ctrl); if (status != VXGE_HW_OK) goto exit; data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0); data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK( data1); for (i = ETH_ALEN; i > 0; i--) { macaddr[i - 1] = (u8) (data0 & 0xFF); data0 >>= 8; macaddr_mask[i - 1] = (u8) (data1 & 0xFF); data1 >>= 8; } action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY; data0 = 0, data1 = 0, steer_ctrl = 0; } while (!is_valid_ether_addr(macaddr)); exit: return status; } /** * vxge_hw_device_hw_info_get - Get the hw information * Returns the vpath mask that has the bits set for each vpath allocated * for the driver, FW version information, and the first mac address for * each vpath */ enum vxge_hw_status __devinit vxge_hw_device_hw_info_get(void __iomem *bar0, struct vxge_hw_device_hw_info *hw_info) { u32 i; u64 val64; struct vxge_hw_toc_reg __iomem *toc; struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg; struct vxge_hw_common_reg __iomem *common_reg; struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg; enum vxge_hw_status status; struct __vxge_hw_virtualpath vpath; memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info)); toc = __vxge_hw_device_toc_get(bar0); if (toc == NULL) { status = VXGE_HW_ERR_CRITICAL; goto exit; } val64 = readq(&toc->toc_common_pointer); common_reg = bar0 + val64; status = __vxge_hw_device_vpath_reset_in_prog_check( (u64 __iomem *)&common_reg->vpath_rst_in_prog); if (status != VXGE_HW_OK) goto exit; hw_info->vpath_mask = readq(&common_reg->vpath_assignments); val64 = readq(&common_reg->host_type_assignments); hw_info->host_type = (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64); for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { if (!((hw_info->vpath_mask) & vxge_mBIT(i))) continue; val64 = readq(&toc->toc_vpmgmt_pointer[i]); vpmgmt_reg = bar0 + val64; hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg); if (__vxge_hw_device_access_rights_get(hw_info->host_type, hw_info->func_id) & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) { val64 = readq(&toc->toc_mrpcim_pointer); mrpcim_reg = bar0 + val64; writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask); wmb(); } val64 = readq(&toc->toc_vpath_pointer[i]); spin_lock_init(&vpath.lock); vpath.vp_reg = bar0 + val64; vpath.vp_open = VXGE_HW_VP_NOT_OPEN; status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info); if (status != VXGE_HW_OK) goto exit; status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info); if (status != VXGE_HW_OK) goto exit; status = __vxge_hw_vpath_card_info_get(&vpath, hw_info); if (status != VXGE_HW_OK) goto exit; break; } for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { if (!((hw_info->vpath_mask) & vxge_mBIT(i))) continue; val64 = readq(&toc->toc_vpath_pointer[i]); vpath.vp_reg = bar0 + val64; vpath.vp_open = VXGE_HW_VP_NOT_OPEN; status = __vxge_hw_vpath_addr_get(&vpath, hw_info->mac_addrs[i], hw_info->mac_addr_masks[i]); if (status != VXGE_HW_OK) goto exit; } exit: return status; } /* * __vxge_hw_blockpool_destroy - Deallocates the block pool */ static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool) { struct __vxge_hw_device *hldev; struct list_head *p, *n; u16 ret; if (blockpool == NULL) { 
ret = 1; goto exit; } hldev = blockpool->hldev; list_for_each_safe(p, n, &blockpool->free_block_list) { pci_unmap_single(hldev->pdev, ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, ((struct __vxge_hw_blockpool_entry *)p)->length, PCI_DMA_BIDIRECTIONAL); vxge_os_dma_free(hldev->pdev, ((struct __vxge_hw_blockpool_entry *)p)->memblock, &((struct __vxge_hw_blockpool_entry *)p)->acc_handle); list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); kfree(p); blockpool->pool_size--; } list_for_each_safe(p, n, &blockpool->free_entry_list) { list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); kfree((void *)p); } ret = 0; exit: return; } /* * __vxge_hw_blockpool_create - Create block pool */ static enum vxge_hw_status __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev, struct __vxge_hw_blockpool *blockpool, u32 pool_size, u32 pool_max) { u32 i; struct __vxge_hw_blockpool_entry *entry = NULL; void *memblock; dma_addr_t dma_addr; struct pci_dev *dma_handle; struct pci_dev *acc_handle; enum vxge_hw_status status = VXGE_HW_OK; if (blockpool == NULL) { status = VXGE_HW_FAIL; goto blockpool_create_exit; } blockpool->hldev = hldev; blockpool->block_size = VXGE_HW_BLOCK_SIZE; blockpool->pool_size = 0; blockpool->pool_max = pool_max; blockpool->req_out = 0; INIT_LIST_HEAD(&blockpool->free_block_list); INIT_LIST_HEAD(&blockpool->free_entry_list); for (i = 0; i < pool_size + pool_max; i++) { entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry), GFP_KERNEL); if (entry == NULL) { __vxge_hw_blockpool_destroy(blockpool); status = VXGE_HW_ERR_OUT_OF_MEMORY; goto blockpool_create_exit; } list_add(&entry->item, &blockpool->free_entry_list); } for (i = 0; i < pool_size; i++) { memblock = vxge_os_dma_malloc( hldev->pdev, VXGE_HW_BLOCK_SIZE, &dma_handle, &acc_handle); if (memblock == NULL) { __vxge_hw_blockpool_destroy(blockpool); status = VXGE_HW_ERR_OUT_OF_MEMORY; goto blockpool_create_exit; } dma_addr = pci_map_single(hldev->pdev, memblock, VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL); if (unlikely(pci_dma_mapping_error(hldev->pdev, dma_addr))) { vxge_os_dma_free(hldev->pdev, memblock, &acc_handle); __vxge_hw_blockpool_destroy(blockpool); status = VXGE_HW_ERR_OUT_OF_MEMORY; goto blockpool_create_exit; } if (!list_empty(&blockpool->free_entry_list)) entry = (struct __vxge_hw_blockpool_entry *) list_first_entry(&blockpool->free_entry_list, struct __vxge_hw_blockpool_entry, item); if (entry == NULL) entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry), GFP_KERNEL); if (entry != NULL) { list_del(&entry->item); entry->length = VXGE_HW_BLOCK_SIZE; entry->memblock = memblock; entry->dma_addr = dma_addr; entry->acc_handle = acc_handle; entry->dma_handle = dma_handle; list_add(&entry->item, &blockpool->free_block_list); blockpool->pool_size++; } else { __vxge_hw_blockpool_destroy(blockpool); status = VXGE_HW_ERR_OUT_OF_MEMORY; goto blockpool_create_exit; } } blockpool_create_exit: return status; } /* * __vxge_hw_device_fifo_config_check - Check fifo configuration. * Check the fifo configuration */ static enum vxge_hw_status __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config) { if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) || (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS)) return VXGE_HW_BADCFG_FIFO_BLOCKS; return VXGE_HW_OK; } /* * __vxge_hw_device_vpath_config_check - Check vpath configuration. 
* Check the vpath configuration */ static enum vxge_hw_status __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config) { enum vxge_hw_status status; if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) || (vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX)) return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH; status = __vxge_hw_device_fifo_config_check(&vp_config->fifo); if (status != VXGE_HW_OK) return status; if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) && ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) || (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU))) return VXGE_HW_BADCFG_VPATH_MTU; if ((vp_config->rpa_strip_vlan_tag != VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) && (vp_config->rpa_strip_vlan_tag != VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) && (vp_config->rpa_strip_vlan_tag != VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE)) return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG; return VXGE_HW_OK; } /* * __vxge_hw_device_config_check - Check device configuration. * Check the device configuration */ static enum vxge_hw_status __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config) { u32 i; enum vxge_hw_status status; if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) && (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) && (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) && (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF)) return VXGE_HW_BADCFG_INTR_MODE; if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) && (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE)) return VXGE_HW_BADCFG_RTS_MAC_EN; for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { status = __vxge_hw_device_vpath_config_check( &new_config->vp_config[i]); if (status != VXGE_HW_OK) return status; } return VXGE_HW_OK; } /* * vxge_hw_device_initialize - Initialize Titan device. * Initialize Titan device. Note that all the arguments of this public API * are 'IN', including @hldev. Driver cooperates with * OS to find new Titan device, locate its PCI and memory spaces. * * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW * to enable the latter to perform Titan hardware initialization. 
*/ enum vxge_hw_status __devinit vxge_hw_device_initialize( struct __vxge_hw_device **devh, struct vxge_hw_device_attr *attr, struct vxge_hw_device_config *device_config) { u32 i; u32 nblocks = 0; struct __vxge_hw_device *hldev = NULL; enum vxge_hw_status status = VXGE_HW_OK; status = __vxge_hw_device_config_check(device_config); if (status != VXGE_HW_OK) goto exit; hldev = vzalloc(sizeof(struct __vxge_hw_device)); if (hldev == NULL) { status = VXGE_HW_ERR_OUT_OF_MEMORY; goto exit; } hldev->magic = VXGE_HW_DEVICE_MAGIC; vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL); /* apply config */ memcpy(&hldev->config, device_config, sizeof(struct vxge_hw_device_config)); hldev->bar0 = attr->bar0; hldev->pdev = attr->pdev; hldev->uld_callbacks = attr->uld_callbacks; __vxge_hw_device_pci_e_init(hldev); status = __vxge_hw_device_reg_addr_get(hldev); if (status != VXGE_HW_OK) { vfree(hldev); goto exit; } __vxge_hw_device_host_info_get(hldev); /* Incrementing for stats blocks */ nblocks++; for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { if (!(hldev->vpath_assignments & vxge_mBIT(i))) continue; if (device_config->vp_config[i].ring.enable == VXGE_HW_RING_ENABLE) nblocks += device_config->vp_config[i].ring.ring_blocks; if (device_config->vp_config[i].fifo.enable == VXGE_HW_FIFO_ENABLE) nblocks += device_config->vp_config[i].fifo.fifo_blocks; nblocks++; } if (__vxge_hw_blockpool_create(hldev, &hldev->block_pool, device_config->dma_blockpool_initial + nblocks, device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) { vxge_hw_device_terminate(hldev); status = VXGE_HW_ERR_OUT_OF_MEMORY; goto exit; } status = __vxge_hw_device_initialize(hldev); if (status != VXGE_HW_OK) { vxge_hw_device_terminate(hldev); goto exit; } *devh = hldev; exit: return status; } /* * vxge_hw_device_terminate - Terminate Titan device. * Terminate HW device. 
*/ void vxge_hw_device_terminate(struct __vxge_hw_device *hldev) { vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC); hldev->magic = VXGE_HW_DEVICE_DEAD; __vxge_hw_blockpool_destroy(&hldev->block_pool); vfree(hldev); } /* * __vxge_hw_vpath_stats_access - Get the statistics from the given location * and offset and perform an operation */ static enum vxge_hw_status __vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath, u32 operation, u32 offset, u64 *stat) { u64 val64; enum vxge_hw_status status = VXGE_HW_OK; struct vxge_hw_vpath_reg __iomem *vp_reg; if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { status = VXGE_HW_ERR_VPATH_NOT_OPEN; goto vpath_stats_access_exit; } vp_reg = vpath->vp_reg; val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) | VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE | VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset); status = __vxge_hw_pio_mem_write64(val64, &vp_reg->xmac_stats_access_cmd, VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE, vpath->hldev->config.device_poll_millis); if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ)) *stat = readq(&vp_reg->xmac_stats_access_data); else *stat = 0; vpath_stats_access_exit: return status; } /* * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath */ static enum vxge_hw_status __vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath, struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats) { u64 *val64; int i; u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET; enum vxge_hw_status status = VXGE_HW_OK; val64 = (u64 *)vpath_tx_stats; if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { status = VXGE_HW_ERR_VPATH_NOT_OPEN; goto exit; } for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) { status = __vxge_hw_vpath_stats_access(vpath, VXGE_HW_STATS_OP_READ, offset, val64); if (status != VXGE_HW_OK) goto exit; offset++; val64++; } exit: return status; } /* * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath */ static enum vxge_hw_status __vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath, struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats) { u64 *val64; enum vxge_hw_status status = VXGE_HW_OK; int i; u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET; val64 = (u64 *) vpath_rx_stats; if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { status = VXGE_HW_ERR_VPATH_NOT_OPEN; goto exit; } for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) { status = __vxge_hw_vpath_stats_access(vpath, VXGE_HW_STATS_OP_READ, offset >> 3, val64); if (status != VXGE_HW_OK) goto exit; offset += 8; val64++; } exit: return status; } /* * __vxge_hw_vpath_stats_get - Get the vpath hw statistics. 
*/ static enum vxge_hw_status __vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath, struct vxge_hw_vpath_stats_hw_info *hw_stats) { u64 val64; enum vxge_hw_status status = VXGE_HW_OK; struct vxge_hw_vpath_reg __iomem *vp_reg; if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { status = VXGE_HW_ERR_VPATH_NOT_OPEN; goto exit; } vp_reg = vpath->vp_reg; val64 = readq(&vp_reg->vpath_debug_stats0); hw_stats->ini_num_mwr_sent = (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64); val64 = readq(&vp_reg->vpath_debug_stats1); hw_stats->ini_num_mrd_sent = (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64); val64 = readq(&vp_reg->vpath_debug_stats2); hw_stats->ini_num_cpl_rcvd = (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64); val64 = readq(&vp_reg->vpath_debug_stats3); hw_stats->ini_num_mwr_byte_sent = VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64); val64 = readq(&vp_reg->vpath_debug_stats4); hw_stats->ini_num_cpl_byte_rcvd = VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64); val64 = readq(&vp_reg->vpath_debug_stats5); hw_stats->wrcrdtarb_xoff = (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64); val64 = readq(&vp_reg->vpath_debug_stats6); hw_stats->rdcrdtarb_xoff = (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64); val64 = readq(&vp_reg->vpath_genstats_count01); hw_stats->vpath_genstats_count0 = (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0( val64); val64 = readq(&vp_reg->vpath_genstats_count01); hw_stats->vpath_genstats_count1 = (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1( val64); val64 = readq(&vp_reg->vpath_genstats_count23); hw_stats->vpath_genstats_count2 = (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2( val64); val64 = readq(&vp_reg->vpath_genstats_count01); hw_stats->vpath_genstats_count3 = (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3( val64); val64 = readq(&vp_reg->vpath_genstats_count4); hw_stats->vpath_genstats_count4 = (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4( val64); val64 = readq(&vp_reg->vpath_genstats_count5); hw_stats->vpath_genstats_count5 = (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5( val64); status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats); if (status != VXGE_HW_OK) goto exit; status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats); if (status != VXGE_HW_OK) goto exit; VXGE_HW_VPATH_STATS_PIO_READ( VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET); hw_stats->prog_event_vnum0 = (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64); hw_stats->prog_event_vnum1 = (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64); VXGE_HW_VPATH_STATS_PIO_READ( VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET); hw_stats->prog_event_vnum2 = (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64); hw_stats->prog_event_vnum3 = (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64); val64 = readq(&vp_reg->rx_multi_cast_stats); hw_stats->rx_multi_cast_frame_discard = (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64); val64 = readq(&vp_reg->rx_frm_transferred); hw_stats->rx_frm_transferred = (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64); val64 = readq(&vp_reg->rxd_returned); hw_stats->rxd_returned = (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64); val64 = readq(&vp_reg->dbg_stats_rx_mpa); hw_stats->rx_mpa_len_fail_frms = (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64); hw_stats->rx_mpa_mrk_fail_frms = 
(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64); hw_stats->rx_mpa_crc_fail_frms = (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64); val64 = readq(&vp_reg->dbg_stats_rx_fau); hw_stats->rx_permitted_frms = (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64); hw_stats->rx_vp_reset_discarded_frms = (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64); hw_stats->rx_wol_frms = (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64); val64 = readq(&vp_reg->tx_vp_reset_discarded_frms); hw_stats->tx_vp_reset_discarded_frms = (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS( val64); exit: return status; } /* * vxge_hw_device_stats_get - Get the device hw statistics. * Returns the vpath h/w stats for the device. */ enum vxge_hw_status vxge_hw_device_stats_get(struct __vxge_hw_device *hldev, struct vxge_hw_device_stats_hw_info *hw_stats) { u32 i; enum vxge_hw_status status = VXGE_HW_OK; for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { if (!(hldev->vpaths_deployed & vxge_mBIT(i)) || (hldev->virtual_paths[i].vp_open == VXGE_HW_VP_NOT_OPEN)) continue; memcpy(hldev->virtual_paths[i].hw_stats_sav, hldev->virtual_paths[i].hw_stats, sizeof(struct vxge_hw_vpath_stats_hw_info)); status = __vxge_hw_vpath_stats_get( &hldev->virtual_paths[i], hldev->virtual_paths[i].hw_stats); } memcpy(hw_stats, &hldev->stats.hw_dev_info_stats, sizeof(struct vxge_hw_device_stats_hw_info)); return status; } /* * vxge_hw_driver_stats_get - Get the device sw statistics. * Returns the vpath s/w stats for the device. */ enum vxge_hw_status vxge_hw_driver_stats_get( struct __vxge_hw_device *hldev, struct vxge_hw_device_stats_sw_info *sw_stats) { enum vxge_hw_status status = VXGE_HW_OK; memcpy(sw_stats, &hldev->stats.sw_dev_info_stats, sizeof(struct vxge_hw_device_stats_sw_info)); return status; } /* * vxge_hw_mrpcim_stats_access - Access the statistics from the given location * and offset and perform an operation * Get the statistics from the given location and offset. 
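 * Only a function with mrpcim access rights may issue this; anything else * fails up front with VXGE_HW_ERR_PRIVILAGED_OPEARATION.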
*/ enum vxge_hw_status vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev, u32 operation, u32 location, u32 offset, u64 *stat) { u64 val64; enum vxge_hw_status status = VXGE_HW_OK; status = __vxge_hw_device_is_privilaged(hldev->host_type, hldev->func_id); if (status != VXGE_HW_OK) goto exit; val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) | VXGE_HW_XMAC_STATS_SYS_CMD_STROBE | VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) | VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset); status = __vxge_hw_pio_mem_write64(val64, &hldev->mrpcim_reg->xmac_stats_sys_cmd, VXGE_HW_XMAC_STATS_SYS_CMD_STROBE, hldev->config.device_poll_millis); if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ)) *stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data); else *stat = 0; exit: return status; } /* * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port * Get the Statistics on aggregate port */ static enum vxge_hw_status vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port, struct vxge_hw_xmac_aggr_stats *aggr_stats) { u64 *val64; int i; u32 offset = VXGE_HW_STATS_AGGRn_OFFSET; enum vxge_hw_status status = VXGE_HW_OK; val64 = (u64 *)aggr_stats; status = __vxge_hw_device_is_privilaged(hldev->host_type, hldev->func_id); if (status != VXGE_HW_OK) goto exit; for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) { status = vxge_hw_mrpcim_stats_access(hldev, VXGE_HW_STATS_OP_READ, VXGE_HW_STATS_LOC_AGGR, ((offset + (104 * port)) >> 3), val64); if (status != VXGE_HW_OK) goto exit; offset += 8; val64++; } exit: return status; } /* * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port * Get the Statistics on port */ static enum vxge_hw_status vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port, struct vxge_hw_xmac_port_stats *port_stats) { u64 *val64; enum vxge_hw_status status = VXGE_HW_OK; int i; u32 offset = 0x0; val64 = (u64 *) port_stats; status = __vxge_hw_device_is_privilaged(hldev->host_type, hldev->func_id); if (status != VXGE_HW_OK) goto exit; for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) { status = vxge_hw_mrpcim_stats_access(hldev, VXGE_HW_STATS_OP_READ, VXGE_HW_STATS_LOC_AGGR, ((offset + (608 * port)) >> 3), val64); if (status != VXGE_HW_OK) goto exit; offset += 8; val64++; } exit: return status; } /* * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics * Get the XMAC Statistics */ enum vxge_hw_status vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev, struct vxge_hw_xmac_stats *xmac_stats) { enum vxge_hw_status status = VXGE_HW_OK; u32 i; status = vxge_hw_device_xmac_aggr_stats_get(hldev, 0, &xmac_stats->aggr_stats[0]); if (status != VXGE_HW_OK) goto exit; status = vxge_hw_device_xmac_aggr_stats_get(hldev, 1, &xmac_stats->aggr_stats[1]); if (status != VXGE_HW_OK) goto exit; for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) { status = vxge_hw_device_xmac_port_stats_get(hldev, i, &xmac_stats->port_stats[i]); if (status != VXGE_HW_OK) goto exit; } for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { if (!(hldev->vpaths_deployed & vxge_mBIT(i))) continue; status = __vxge_hw_vpath_xmac_tx_stats_get( &hldev->virtual_paths[i], &xmac_stats->vpath_tx_stats[i]); if (status != VXGE_HW_OK) goto exit; status = __vxge_hw_vpath_xmac_rx_stats_get( &hldev->virtual_paths[i], &xmac_stats->vpath_rx_stats[i]); if (status != VXGE_HW_OK) goto exit; } exit: return status; } /* * vxge_hw_device_debug_set - Set the debug module, level and timestamp * This routine is used to dynamically change the 
debug output.
 */
void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
			      enum vxge_debug_level level, u32 mask)
{
	if (hldev == NULL)
		return;

#if defined(VXGE_DEBUG_TRACE_MASK) || \
	defined(VXGE_DEBUG_ERR_MASK)
	hldev->debug_module_mask = mask;
	hldev->debug_level = level;
#endif

#if defined(VXGE_DEBUG_ERR_MASK)
	hldev->level_err = level & VXGE_ERR;
#endif

#if defined(VXGE_DEBUG_TRACE_MASK)
	hldev->level_trace = level & VXGE_TRACE;
#endif
}

/*
 * vxge_hw_device_error_level_get - Get the error level
 * This routine returns the current error level set
 */
u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_ERR_MASK)
	if (hldev == NULL)
		return VXGE_ERR;
	else
		return hldev->level_err;
#else
	return 0;
#endif
}

/*
 * vxge_hw_device_trace_level_get - Get the trace level
 * This routine returns the current trace level set
 */
u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_TRACE_MASK)
	if (hldev == NULL)
		return VXGE_TRACE;
	else
		return hldev->level_trace;
#else
	return 0;
#endif
}

/*
 * vxge_hw_device_getpause_data - Pause frame generation and reception.
 * Returns the Pause frame generation and reception capability of the NIC.
 */
enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
						 u32 port, u32 *tx, u32 *rx)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
		status = VXGE_HW_ERR_INVALID_PORT;
		goto exit;
	}

	if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
		status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
		goto exit;
	}

	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
		*tx = 1;
	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
		*rx = 1;
exit:
	return status;
}

/*
 * vxge_hw_device_setpause_data - Set/reset pause frame generation.
 * It can be used to set or reset Pause frame generation or reception
 * support of the NIC.
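 *
 * An illustrative call (not taken from the driver) that enables both
 * pause-frame generation and reception on MAC port 0 would be:
 *
 *	status = vxge_hw_device_setpause_data(hldev, 0, 1, 1);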
 */
enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
						 u32 port, u32 tx, u32 rx)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
		status = VXGE_HW_ERR_INVALID_PORT;
		goto exit;
	}

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
	if (tx)
		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
	else
		val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
	if (rx)
		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
	else
		val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;

	writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
exit:
	return status;
}

u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
{
	struct pci_dev *dev = hldev->pdev;
	u16 lnk;

	pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_LNKSTA, &lnk);
	return (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
}

/*
 * __vxge_hw_ring_block_memblock_idx - Return the memblock index
 * This function returns the index of memory block
 */
static inline u32 __vxge_hw_ring_block_memblock_idx(u8 *block)
{
	return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
}

/*
 * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
 * This function sets index to a memory block
 */
static inline void
__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
{
	*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
}

/*
 * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
 * in RxD block
 * Sets the next block pointer in RxD block
 */
static inline void
__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
{
	*((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
}

/*
 * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
 * first block
 * Returns the dma address of the first RxD block
 */
static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
{
	struct vxge_hw_mempool_dma *dma_object;

	dma_object = ring->mempool->memblocks_dma_arr;
	vxge_assert(dma_object != NULL);

	return dma_object->addr;
}

/*
 * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
 * This function returns the dma address of a given item
 */
static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
					       void *item)
{
	u32 memblock_idx;
	void *memblock;
	struct vxge_hw_mempool_dma *memblock_dma_object;
	ptrdiff_t dma_item_offset;

	/* get owner memblock index */
	memblock_idx = __vxge_hw_ring_block_memblock_idx(item);

	/* get owner memblock by memblock index */
	memblock = mempoolh->memblocks_arr[memblock_idx];

	/* get memblock DMA object by memblock index */
	memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;

	/* calculate offset in the memblock of this item */
	dma_item_offset = (u8 *)item - (u8 *)memblock;

	return memblock_dma_object->addr + dma_item_offset;
}

/*
 * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
 * This function links the "from" RxD block to the "to" RxD block
 */
static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
					 struct __vxge_hw_ring *ring, u32 from,
					 u32 to)
{
	u8 *to_item, *from_item;
	dma_addr_t to_dma;

	/* get "from" RxD block */
	from_item = mempoolh->items_arr[from];
	vxge_assert(from_item);

	/* get "to" RxD block */
	to_item = mempoolh->items_arr[to];
	vxge_assert(to_item);

	/* return address of the beginning of
previous RxD block */ to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item); /* set next pointer for this RxD block to point on * previous item's DMA start address */ __vxge_hw_ring_block_next_pointer_set(from_item, to_dma); } /* * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD * block callback * This function is callback passed to __vxge_hw_mempool_create to create memory * pool for RxD block */ static void __vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh, u32 memblock_index, struct vxge_hw_mempool_dma *dma_object, u32 index, u32 is_last) { u32 i; void *item = mempoolh->items_arr[index]; struct __vxge_hw_ring *ring = (struct __vxge_hw_ring *)mempoolh->userdata; /* format rxds array */ for (i = 0; i < ring->rxds_per_block; i++) { void *rxdblock_priv; void *uld_priv; struct vxge_hw_ring_rxd_1 *rxdp; u32 reserve_index = ring->channel.reserve_ptr - (index * ring->rxds_per_block + i + 1); u32 memblock_item_idx; ring->channel.reserve_arr[reserve_index] = ((u8 *)item) + i * ring->rxd_size; /* Note: memblock_item_idx is index of the item within * the memblock. For instance, in case of three RxD-blocks * per memblock this value can be 0, 1 or 2. */ rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh, memblock_index, item, &memblock_item_idx); rxdp = ring->channel.reserve_arr[reserve_index]; uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i); /* pre-format Host_Control */ rxdp->host_control = (u64)(size_t)uld_priv; } __vxge_hw_ring_block_memblock_idx_set(item, memblock_index); if (is_last) { /* link last one with first one */ __vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0); } if (index > 0) { /* link this RxD block with previous one */ __vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index); } } /* * __vxge_hw_ring_replenish - Initial replenish of RxDs * This function replenishes the RxDs from reserve array to work array */ enum vxge_hw_status vxge_hw_ring_replenish(struct __vxge_hw_ring *ring) { void *rxd; struct __vxge_hw_channel *channel; enum vxge_hw_status status = VXGE_HW_OK; channel = &ring->channel; while (vxge_hw_channel_dtr_count(channel) > 0) { status = vxge_hw_ring_rxd_reserve(ring, &rxd); vxge_assert(status == VXGE_HW_OK); if (ring->rxd_init) { status = ring->rxd_init(rxd, channel->userdata); if (status != VXGE_HW_OK) { vxge_hw_ring_rxd_free(ring, rxd); goto exit; } } vxge_hw_ring_rxd_post(ring, rxd); } status = VXGE_HW_OK; exit: return status; } /* * __vxge_hw_channel_allocate - Allocate memory for channel * This function allocates required memory for the channel and various arrays * in the channel */ static struct __vxge_hw_channel * __vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph, enum __vxge_hw_channel_type type, u32 length, u32 per_dtr_space, void *userdata) { struct __vxge_hw_channel *channel; struct __vxge_hw_device *hldev; int size = 0; u32 vp_id; hldev = vph->vpath->hldev; vp_id = vph->vpath->vp_id; switch (type) { case VXGE_HW_CHANNEL_TYPE_FIFO: size = sizeof(struct __vxge_hw_fifo); break; case VXGE_HW_CHANNEL_TYPE_RING: size = sizeof(struct __vxge_hw_ring); break; default: break; } channel = kzalloc(size, GFP_KERNEL); if (channel == NULL) goto exit0; INIT_LIST_HEAD(&channel->item); channel->common_reg = hldev->common_reg; channel->first_vp_id = hldev->first_vp_id; channel->type = type; channel->devh = hldev; channel->vph = vph; channel->userdata = userdata; channel->per_dtr_space = per_dtr_space; channel->length = length; channel->vp_id = vp_id; channel->work_arr = kzalloc(sizeof(void *)*length, 
GFP_KERNEL); if (channel->work_arr == NULL) goto exit1; channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); if (channel->free_arr == NULL) goto exit1; channel->free_ptr = length; channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); if (channel->reserve_arr == NULL) goto exit1; channel->reserve_ptr = length; channel->reserve_top = 0; channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); if (channel->orig_arr == NULL) goto exit1; return channel; exit1: __vxge_hw_channel_free(channel); exit0: return NULL; } /* * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async * Adds a block to block pool */ static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh, void *block_addr, u32 length, struct pci_dev *dma_h, struct pci_dev *acc_handle) { struct __vxge_hw_blockpool *blockpool; struct __vxge_hw_blockpool_entry *entry = NULL; dma_addr_t dma_addr; enum vxge_hw_status status = VXGE_HW_OK; u32 req_out; blockpool = &devh->block_pool; if (block_addr == NULL) { blockpool->req_out--; status = VXGE_HW_FAIL; goto exit; } dma_addr = pci_map_single(devh->pdev, block_addr, length, PCI_DMA_BIDIRECTIONAL); if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) { vxge_os_dma_free(devh->pdev, block_addr, &acc_handle); blockpool->req_out--; status = VXGE_HW_FAIL; goto exit; } if (!list_empty(&blockpool->free_entry_list)) entry = (struct __vxge_hw_blockpool_entry *) list_first_entry(&blockpool->free_entry_list, struct __vxge_hw_blockpool_entry, item); if (entry == NULL) entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry)); else list_del(&entry->item); if (entry != NULL) { entry->length = length; entry->memblock = block_addr; entry->dma_addr = dma_addr; entry->acc_handle = acc_handle; entry->dma_handle = dma_h; list_add(&entry->item, &blockpool->free_block_list); blockpool->pool_size++; status = VXGE_HW_OK; } else status = VXGE_HW_ERR_OUT_OF_MEMORY; blockpool->req_out--; req_out = blockpool->req_out; exit: return; } static inline void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size) { gfp_t flags; void *vaddr; if (in_interrupt()) flags = GFP_ATOMIC | GFP_DMA; else flags = GFP_KERNEL | GFP_DMA; vaddr = kmalloc((size), flags); vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev); } /* * __vxge_hw_blockpool_blocks_add - Request additional blocks */ static void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool) { u32 nreq = 0, i; if ((blockpool->pool_size + blockpool->req_out) < VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) { nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE; blockpool->req_out += nreq; } for (i = 0; i < nreq; i++) vxge_os_dma_malloc_async( ((struct __vxge_hw_device *)blockpool->hldev)->pdev, blockpool->hldev, VXGE_HW_BLOCK_SIZE); } /* * __vxge_hw_blockpool_malloc - Allocate a memory block from pool * Allocates a block of memory of given size, either from block pool * or by calling vxge_os_dma_malloc() */ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size, struct vxge_hw_mempool_dma *dma_object) { struct __vxge_hw_blockpool_entry *entry = NULL; struct __vxge_hw_blockpool *blockpool; void *memblock = NULL; enum vxge_hw_status status = VXGE_HW_OK; blockpool = &devh->block_pool; if (size != blockpool->block_size) { memblock = vxge_os_dma_malloc(devh->pdev, size, &dma_object->handle, &dma_object->acc_handle); if (memblock == NULL) { status = VXGE_HW_ERR_OUT_OF_MEMORY; goto exit; } dma_object->addr = pci_map_single(devh->pdev, memblock, size, PCI_DMA_BIDIRECTIONAL); if 
(unlikely(pci_dma_mapping_error(devh->pdev, dma_object->addr))) {
			vxge_os_dma_free(devh->pdev, memblock,
				&dma_object->acc_handle);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto exit;
		}
	} else {
		if (!list_empty(&blockpool->free_block_list))
			entry = (struct __vxge_hw_blockpool_entry *)
				list_first_entry(&blockpool->free_block_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry != NULL) {
			list_del(&entry->item);
			dma_object->addr = entry->dma_addr;
			dma_object->handle = entry->dma_handle;
			dma_object->acc_handle = entry->acc_handle;
			memblock = entry->memblock;

			list_add(&entry->item,
				&blockpool->free_entry_list);
			blockpool->pool_size--;
		}

		if (memblock != NULL)
			__vxge_hw_blockpool_blocks_add(blockpool);
	}
exit:
	return memblock;
}

/*
 * __vxge_hw_blockpool_blocks_remove - Free additional blocks
 */
static void
__vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &blockpool->free_block_list) {
		if (blockpool->pool_size < blockpool->pool_max)
			break;

		pci_unmap_single(
			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
			((struct __vxge_hw_blockpool_entry *)p)->length,
			PCI_DMA_BIDIRECTIONAL);

		vxge_os_dma_free(
			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->memblock,
			&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);

		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);

		list_add(p, &blockpool->free_entry_list);

		blockpool->pool_size--;
	}
}

/*
 * __vxge_hw_blockpool_free - Frees the memory allocated with
 * __vxge_hw_blockpool_malloc
 */
static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
				     void *memblock, u32 size,
				     struct vxge_hw_mempool_dma *dma_object)
{
	struct __vxge_hw_blockpool_entry *entry = NULL;
	struct __vxge_hw_blockpool *blockpool;
	enum vxge_hw_status status = VXGE_HW_OK;

	blockpool = &devh->block_pool;

	if (size != blockpool->block_size) {
		pci_unmap_single(devh->pdev, dma_object->addr, size,
			PCI_DMA_BIDIRECTIONAL);
		vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
	} else {

		if (!list_empty(&blockpool->free_entry_list))
			entry = (struct __vxge_hw_blockpool_entry *)
				list_first_entry(&blockpool->free_entry_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry == NULL)
			entry = vmalloc(sizeof(
					struct __vxge_hw_blockpool_entry));
		else
			list_del(&entry->item);

		if (entry != NULL) {
			entry->length = size;
			entry->memblock = memblock;
			entry->dma_addr = dma_object->addr;
			entry->acc_handle = dma_object->acc_handle;
			entry->dma_handle = dma_object->handle;
			list_add(&entry->item,
					&blockpool->free_block_list);
			blockpool->pool_size++;
			status = VXGE_HW_OK;
		} else
			status = VXGE_HW_ERR_OUT_OF_MEMORY;

		if (status == VXGE_HW_OK)
			__vxge_hw_blockpool_blocks_remove(blockpool);
	}
}

/*
 * __vxge_hw_mempool_destroy - Tear down a memory pool
 */
static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
{
	u32 i, j;
	struct __vxge_hw_device *devh = mempool->devh;

	for (i = 0; i < mempool->memblocks_allocated; i++) {
		struct vxge_hw_mempool_dma *dma_object;

		vxge_assert(mempool->memblocks_arr[i]);
		vxge_assert(mempool->memblocks_dma_arr + i);

		dma_object = mempool->memblocks_dma_arr + i;

		for (j = 0; j < mempool->items_per_memblock; j++) {
			u32 index = i * mempool->items_per_memblock + j;

			/* to skip last partially filled(if any) memblock */
			if (index >= mempool->items_current)
				break;
		}

		vfree(mempool->memblocks_priv_arr[i]);

		__vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
				mempool->memblock_size, dma_object);
	}

	vfree(mempool->items_arr);
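
	/* release the remaining bookkeeping arrays in the reverse order
	 * of their allocation in __vxge_hw_mempool_create()
	 */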
vfree(mempool->memblocks_dma_arr); vfree(mempool->memblocks_priv_arr); vfree(mempool->memblocks_arr); vfree(mempool); } /* * __vxge_hw_mempool_grow * Will resize mempool up to %num_allocate value. */ static enum vxge_hw_status __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate, u32 *num_allocated) { u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0; u32 n_items = mempool->items_per_memblock; u32 start_block_idx = mempool->memblocks_allocated; u32 end_block_idx = mempool->memblocks_allocated + num_allocate; enum vxge_hw_status status = VXGE_HW_OK; *num_allocated = 0; if (end_block_idx > mempool->memblocks_max) { status = VXGE_HW_ERR_OUT_OF_MEMORY; goto exit; } for (i = start_block_idx; i < end_block_idx; i++) { u32 j; u32 is_last = ((end_block_idx - 1) == i); struct vxge_hw_mempool_dma *dma_object = mempool->memblocks_dma_arr + i; void *the_memblock; /* allocate memblock's private part. Each DMA memblock * has a space allocated for item's private usage upon * mempool's user request. Each time mempool grows, it will * allocate new memblock and its private part at once. * This helps to minimize memory usage a lot. */ mempool->memblocks_priv_arr[i] = vzalloc(mempool->items_priv_size * n_items); if (mempool->memblocks_priv_arr[i] == NULL) { status = VXGE_HW_ERR_OUT_OF_MEMORY; goto exit; } /* allocate DMA-capable memblock */ mempool->memblocks_arr[i] = __vxge_hw_blockpool_malloc(mempool->devh, mempool->memblock_size, dma_object); if (mempool->memblocks_arr[i] == NULL) { vfree(mempool->memblocks_priv_arr[i]); status = VXGE_HW_ERR_OUT_OF_MEMORY; goto exit; } (*num_allocated)++; mempool->memblocks_allocated++; memset(mempool->memblocks_arr[i], 0, mempool->memblock_size); the_memblock = mempool->memblocks_arr[i]; /* fill the items hash array */ for (j = 0; j < n_items; j++) { u32 index = i * n_items + j; if (first_time && index >= mempool->items_initial) break; mempool->items_arr[index] = ((char *)the_memblock + j*mempool->item_size); /* let caller to do more job on each item */ if (mempool->item_func_alloc != NULL) mempool->item_func_alloc(mempool, i, dma_object, index, is_last); mempool->items_current = index + 1; } if (first_time && mempool->items_current == mempool->items_initial) break; } exit: return status; } /* * vxge_hw_mempool_create * This function will create memory pool object. Pool may grow but will * never shrink. Pool consists of number of dynamically allocated blocks * with size enough to hold %items_initial number of items. Memory is * DMA-able but client must map/unmap before interoperating with the device. 
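 *
 * Illustrative sizing (numbers are examples only): with
 * memblock_size = 4096 and item_size = 256, items_per_memblock is 16,
 * so items_max = 100 gives memblocks_max = (100 + 16 - 1) / 16 = 7
 * blocks, of which only enough to cover items_initial are allocated up
 * front by __vxge_hw_mempool_grow().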
*/ static struct vxge_hw_mempool * __vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size, u32 item_size, u32 items_priv_size, u32 items_initial, u32 items_max, const struct vxge_hw_mempool_cbs *mp_callback, void *userdata) { enum vxge_hw_status status = VXGE_HW_OK; u32 memblocks_to_allocate; struct vxge_hw_mempool *mempool = NULL; u32 allocated; if (memblock_size < item_size) { status = VXGE_HW_FAIL; goto exit; } mempool = vzalloc(sizeof(struct vxge_hw_mempool)); if (mempool == NULL) { status = VXGE_HW_ERR_OUT_OF_MEMORY; goto exit; } mempool->devh = devh; mempool->memblock_size = memblock_size; mempool->items_max = items_max; mempool->items_initial = items_initial; mempool->item_size = item_size; mempool->items_priv_size = items_priv_size; mempool->item_func_alloc = mp_callback->item_func_alloc; mempool->userdata = userdata; mempool->memblocks_allocated = 0; mempool->items_per_memblock = memblock_size / item_size; mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) / mempool->items_per_memblock; /* allocate array of memblocks */ mempool->memblocks_arr = vzalloc(sizeof(void *) * mempool->memblocks_max); if (mempool->memblocks_arr == NULL) { __vxge_hw_mempool_destroy(mempool); status = VXGE_HW_ERR_OUT_OF_MEMORY; mempool = NULL; goto exit; } /* allocate array of private parts of items per memblocks */ mempool->memblocks_priv_arr = vzalloc(sizeof(void *) * mempool->memblocks_max); if (mempool->memblocks_priv_arr == NULL) { __vxge_hw_mempool_destroy(mempool); status = VXGE_HW_ERR_OUT_OF_MEMORY; mempool = NULL; goto exit; } /* allocate array of memblocks DMA objects */ mempool->memblocks_dma_arr = vzalloc(sizeof(struct vxge_hw_mempool_dma) * mempool->memblocks_max); if (mempool->memblocks_dma_arr == NULL) { __vxge_hw_mempool_destroy(mempool); status = VXGE_HW_ERR_OUT_OF_MEMORY; mempool = NULL; goto exit; } /* allocate hash array of items */ mempool->items_arr = vzalloc(sizeof(void *) * mempool->items_max); if (mempool->items_arr == NULL) { __vxge_hw_mempool_destroy(mempool); status = VXGE_HW_ERR_OUT_OF_MEMORY; mempool = NULL; goto exit; } /* calculate initial number of memblocks */ memblocks_to_allocate = (mempool->items_initial + mempool->items_per_memblock - 1) / mempool->items_per_memblock; /* pre-allocate the mempool */ status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate, &allocated); if (status != VXGE_HW_OK) { __vxge_hw_mempool_destroy(mempool); status = VXGE_HW_ERR_OUT_OF_MEMORY; mempool = NULL; goto exit; } exit: return mempool; } /* * __vxge_hw_ring_abort - Returns the RxD * This function terminates the RxDs of ring */ static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring) { void *rxdh; struct __vxge_hw_channel *channel; channel = &ring->channel; for (;;) { vxge_hw_channel_dtr_try_complete(channel, &rxdh); if (rxdh == NULL) break; vxge_hw_channel_dtr_complete(channel); if (ring->rxd_term) ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED, channel->userdata); vxge_hw_channel_dtr_free(channel, rxdh); } return VXGE_HW_OK; } /* * __vxge_hw_ring_reset - Resets the ring * This function resets the ring during vpath reset operation */ static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring) { enum vxge_hw_status status = VXGE_HW_OK; struct __vxge_hw_channel *channel; channel = &ring->channel; __vxge_hw_ring_abort(ring); status = __vxge_hw_channel_reset(channel); if (status != VXGE_HW_OK) goto exit; if (ring->rxd_init) { status = vxge_hw_ring_replenish(ring); if (status != VXGE_HW_OK) goto exit; } exit: 
return status; } /* * __vxge_hw_ring_delete - Removes the ring * This function freeup the memory pool and removes the ring */ static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp) { struct __vxge_hw_ring *ring = vp->vpath->ringh; __vxge_hw_ring_abort(ring); if (ring->mempool) __vxge_hw_mempool_destroy(ring->mempool); vp->vpath->ringh = NULL; __vxge_hw_channel_free(&ring->channel); return VXGE_HW_OK; } /* * __vxge_hw_ring_create - Create a Ring * This function creates Ring and initializes it. */ static enum vxge_hw_status __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp, struct vxge_hw_ring_attr *attr) { enum vxge_hw_status status = VXGE_HW_OK; struct __vxge_hw_ring *ring; u32 ring_length; struct vxge_hw_ring_config *config; struct __vxge_hw_device *hldev; u32 vp_id; static const struct vxge_hw_mempool_cbs ring_mp_callback = { .item_func_alloc = __vxge_hw_ring_mempool_item_alloc, }; if ((vp == NULL) || (attr == NULL)) { status = VXGE_HW_FAIL; goto exit; } hldev = vp->vpath->hldev; vp_id = vp->vpath->vp_id; config = &hldev->config.vp_config[vp_id].ring; ring_length = config->ring_blocks * vxge_hw_ring_rxds_per_block_get(config->buffer_mode); ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp, VXGE_HW_CHANNEL_TYPE_RING, ring_length, attr->per_rxd_space, attr->userdata); if (ring == NULL) { status = VXGE_HW_ERR_OUT_OF_MEMORY; goto exit; } vp->vpath->ringh = ring; ring->vp_id = vp_id; ring->vp_reg = vp->vpath->vp_reg; ring->common_reg = hldev->common_reg; ring->stats = &vp->vpath->sw_stats->ring_stats; ring->config = config; ring->callback = attr->callback; ring->rxd_init = attr->rxd_init; ring->rxd_term = attr->rxd_term; ring->buffer_mode = config->buffer_mode; ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved; ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved; ring->rxds_limit = config->rxds_limit; ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode); ring->rxd_priv_size = sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space; ring->per_rxd_space = attr->per_rxd_space; ring->rxd_priv_size = ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) / VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE; /* how many RxDs can fit into one block. Depends on configured * buffer_mode. */ ring->rxds_per_block = vxge_hw_ring_rxds_per_block_get(config->buffer_mode); /* calculate actual RxD block private size */ ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block; ring->mempool = __vxge_hw_mempool_create(hldev, VXGE_HW_BLOCK_SIZE, VXGE_HW_BLOCK_SIZE, ring->rxdblock_priv_size, ring->config->ring_blocks, ring->config->ring_blocks, &ring_mp_callback, ring); if (ring->mempool == NULL) { __vxge_hw_ring_delete(vp); return VXGE_HW_ERR_OUT_OF_MEMORY; } status = __vxge_hw_channel_initialize(&ring->channel); if (status != VXGE_HW_OK) { __vxge_hw_ring_delete(vp); goto exit; } /* Note: * Specifying rxd_init callback means two things: * 1) rxds need to be initialized by driver at channel-open time; * 2) rxds need to be posted at channel-open time * (that's what the initial_replenish() below does) * Currently we don't have a case when the 1) is done without the 2). */ if (ring->rxd_init) { status = vxge_hw_ring_replenish(ring); if (status != VXGE_HW_OK) { __vxge_hw_ring_delete(vp); goto exit; } } /* initial replenish will increment the counter in its post() routine, * we have to reset it */ ring->stats->common_stats.usage_cnt = 0; exit: return status; } /* * vxge_hw_device_config_default_get - Initialize device config with defaults. 
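 *
 * A typical caller (illustrative; the override shown is an example
 * only) obtains the defaults first and then adjusts selected fields:
 *
 *	struct vxge_hw_device_config config;
 *	vxge_hw_device_config_default_get(&config);
 *	config.device_poll_millis = 2000;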
* Initialize Titan device config with default values. */ enum vxge_hw_status __devinit vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config) { u32 i; device_config->dma_blockpool_initial = VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE; device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE; device_config->intr_mode = VXGE_HW_INTR_MODE_DEF; device_config->rth_en = VXGE_HW_RTH_DEFAULT; device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT; device_config->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS; device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT; for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { device_config->vp_config[i].vp_id = i; device_config->vp_config[i].min_bandwidth = VXGE_HW_VPATH_BANDWIDTH_DEFAULT; device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT; device_config->vp_config[i].ring.ring_blocks = VXGE_HW_DEF_RING_BLOCKS; device_config->vp_config[i].ring.buffer_mode = VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT; device_config->vp_config[i].ring.scatter_mode = VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT; device_config->vp_config[i].ring.rxds_limit = VXGE_HW_DEF_RING_RXDS_LIMIT; device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE; device_config->vp_config[i].fifo.fifo_blocks = VXGE_HW_MIN_FIFO_BLOCKS; device_config->vp_config[i].fifo.max_frags = VXGE_HW_MAX_FIFO_FRAGS; device_config->vp_config[i].fifo.memblock_size = VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE; device_config->vp_config[i].fifo.alignment_size = VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE; device_config->vp_config[i].fifo.intr = VXGE_HW_FIFO_QUEUE_INTR_DEFAULT; device_config->vp_config[i].fifo.no_snoop_bits = VXGE_HW_FIFO_NO_SNOOP_DEFAULT; device_config->vp_config[i].tti.intr_enable = VXGE_HW_TIM_INTR_DEFAULT; device_config->vp_config[i].tti.btimer_val = VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].tti.timer_ac_en = VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].tti.timer_ci_en = VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].tti.timer_ri_en = VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].tti.rtimer_val = VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].tti.util_sel = VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].tti.ltimer_val = VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].tti.urange_a = VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].tti.uec_a = VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].tti.urange_b = VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].tti.uec_b = VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].tti.urange_c = VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].tti.uec_c = VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].tti.uec_d = VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].rti.intr_enable = VXGE_HW_TIM_INTR_DEFAULT; device_config->vp_config[i].rti.btimer_val = VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].rti.timer_ac_en = VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].rti.timer_ci_en = VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].rti.timer_ri_en = VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].rti.rtimer_val = VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].rti.util_sel = VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].rti.ltimer_val = VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].rti.urange_a = VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].rti.uec_a = VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].rti.urange_b = VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].rti.uec_b = 
VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].rti.urange_c = VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].rti.uec_c = VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].rti.uec_d = VXGE_HW_USE_FLASH_DEFAULT; device_config->vp_config[i].mtu = VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU; device_config->vp_config[i].rpa_strip_vlan_tag = VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT; } return VXGE_HW_OK; } /* * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath. * Set the swapper bits appropriately for the vpath. */ static enum vxge_hw_status __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg) { #ifndef __BIG_ENDIAN u64 val64; val64 = readq(&vpath_reg->vpath_general_cfg1); wmb(); val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN; writeq(val64, &vpath_reg->vpath_general_cfg1); wmb(); #endif return VXGE_HW_OK; } /* * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc. * Set the swapper bits appropriately for the vpath. */ static enum vxge_hw_status __vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg, struct vxge_hw_vpath_reg __iomem *vpath_reg) { u64 val64; val64 = readq(&legacy_reg->pifm_wr_swap_en); if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) { val64 = readq(&vpath_reg->kdfcctl_cfg0); wmb(); val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 | VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 | VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2; writeq(val64, &vpath_reg->kdfcctl_cfg0); wmb(); } return VXGE_HW_OK; } /* * vxge_hw_mgmt_reg_read - Read Titan register. */ enum vxge_hw_status vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev, enum vxge_hw_mgmt_reg_type type, u32 index, u32 offset, u64 *value) { enum vxge_hw_status status = VXGE_HW_OK; if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) { status = VXGE_HW_ERR_INVALID_DEVICE; goto exit; } switch (type) { case vxge_hw_mgmt_reg_type_legacy: if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) { status = VXGE_HW_ERR_INVALID_OFFSET; break; } *value = readq((void __iomem *)hldev->legacy_reg + offset); break; case vxge_hw_mgmt_reg_type_toc: if (offset > sizeof(struct vxge_hw_toc_reg) - 8) { status = VXGE_HW_ERR_INVALID_OFFSET; break; } *value = readq((void __iomem *)hldev->toc_reg + offset); break; case vxge_hw_mgmt_reg_type_common: if (offset > sizeof(struct vxge_hw_common_reg) - 8) { status = VXGE_HW_ERR_INVALID_OFFSET; break; } *value = readq((void __iomem *)hldev->common_reg + offset); break; case vxge_hw_mgmt_reg_type_mrpcim: if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) { status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; break; } if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) { status = VXGE_HW_ERR_INVALID_OFFSET; break; } *value = readq((void __iomem *)hldev->mrpcim_reg + offset); break; case vxge_hw_mgmt_reg_type_srpcim: if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) { status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; break; } if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) { status = VXGE_HW_ERR_INVALID_INDEX; break; } if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) { status = VXGE_HW_ERR_INVALID_OFFSET; break; } *value = readq((void __iomem *)hldev->srpcim_reg[index] + offset); break; case vxge_hw_mgmt_reg_type_vpmgmt: if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) || (!(hldev->vpath_assignments & vxge_mBIT(index)))) { status = VXGE_HW_ERR_INVALID_INDEX; break; } if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) { status = VXGE_HW_ERR_INVALID_OFFSET; break; } *value = readq((void __iomem 
*)hldev->vpmgmt_reg[index] + offset);
		break;
	case vxge_hw_mgmt_reg_type_vpath:
		if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
		   (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->vpath_reg[index] +
				offset);
		break;
	default:
		status = VXGE_HW_ERR_INVALID_TYPE;
		break;
	}
exit:
	return status;
}

/*
 * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
 */
enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
{
	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
	enum vxge_hw_status status = VXGE_HW_OK;
	int i = 0, j = 0;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!(vpath_mask & vxge_mBIT(i)))
			continue;
		vpmgmt_reg = hldev->vpmgmt_reg[i];
		for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
			if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
			& VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
				return VXGE_HW_FAIL;
		}
	}
	return status;
}

/*
 * vxge_hw_mgmt_reg_write - Write Titan register.
 */
enum vxge_hw_status
vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
		      enum vxge_hw_mgmt_reg_type type,
		      u32 index, u32 offset, u64 value)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	switch (type) {
	case vxge_hw_mgmt_reg_type_legacy:
		if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->legacy_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_toc:
		if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->toc_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_common:
		if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->common_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_mrpcim:
		if (!(hldev->access_rights &
			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
			break;
		}
		if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_srpcim:
		if (!(hldev->access_rights &
			VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
			break;
		}
		if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
			offset);
		break;
	case vxge_hw_mgmt_reg_type_vpmgmt:
		if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
		   (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
			offset);
		break;
	case vxge_hw_mgmt_reg_type_vpath:
		if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
		   (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem
*)hldev->vpath_reg[index] + offset);
		break;
	default:
		status = VXGE_HW_ERR_INVALID_TYPE;
		break;
	}
exit:
	return status;
}

/*
 * __vxge_hw_fifo_abort - Terminates the TxDs of the fifo
 * This function terminates all outstanding TxDs of the fifo
 */
static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
{
	void *txdlh;

	for (;;) {
		vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);

		if (txdlh == NULL)
			break;

		vxge_hw_channel_dtr_complete(&fifo->channel);

		if (fifo->txdl_term) {
			fifo->txdl_term(txdlh,
			VXGE_HW_TXDL_STATE_POSTED,
			fifo->channel.userdata);
		}

		vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
	}

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_fifo_reset - Resets the fifo
 * This function resets the fifo during vpath reset operation
 */
static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	__vxge_hw_fifo_abort(fifo);
	status = __vxge_hw_channel_reset(&fifo->channel);

	return status;
}

/*
 * __vxge_hw_fifo_delete - Removes the FIFO
 * This function frees up the memory pool and removes the FIFO
 */
static enum vxge_hw_status
__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;

	__vxge_hw_fifo_abort(fifo);

	if (fifo->mempool)
		__vxge_hw_mempool_destroy(fifo->mempool);

	vp->vpath->fifoh = NULL;

	__vxge_hw_channel_free(&fifo->channel);

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD
 * list callback
 * This function is callback passed to __vxge_hw_mempool_create to create memory
 * pool for TxD list
 */
static void
__vxge_hw_fifo_mempool_item_alloc(
	struct vxge_hw_mempool *mempoolh,
	u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
	u32 index, u32 is_last)
{
	u32 memblock_item_idx;
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp =
		(struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
	struct __vxge_hw_fifo *fifo =
		(struct __vxge_hw_fifo *)mempoolh->userdata;
	void *memblock = mempoolh->memblocks_arr[memblock_index];

	vxge_assert(txdp);

	txdp->host_control = (u64) (size_t)
	__vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
					&memblock_item_idx);

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);

	vxge_assert(txdl_priv);

	fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;

	/* pre-format HW's TxDL's private */
	txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
	txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
	txdl_priv->dma_handle = dma_object->handle;
	txdl_priv->memblock   = memblock;
	txdl_priv->first_txdp = txdp;
	txdl_priv->next_txdl_priv = NULL;
	txdl_priv->alloc_frags = 0;
}

/*
 * __vxge_hw_fifo_create - Create a FIFO
 * This function creates FIFO and initializes it.
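 *
 * Illustrative TxDL sizing, assuming the four-quadword (32-byte) TxD
 * layout used by this driver: with config->max_frags = 16, one TxDL is
 * 16 * 32 = 512 bytes, so a 4096-byte memblock yields
 * txdl_per_memblock = 4096 / 512 = 8 TxDLs.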
 */
static enum vxge_hw_status
__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
		      struct vxge_hw_fifo_attr *attr)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_fifo *fifo;
	struct vxge_hw_fifo_config *config;
	u32 txdl_size, txdl_per_memblock;
	struct vxge_hw_mempool_cbs fifo_mp_callback;
	struct __vxge_hw_virtualpath *vpath;

	if ((vp == NULL) || (attr == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}
	vpath = vp->vpath;
	config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;

	txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);

	txdl_per_memblock = config->memblock_size / txdl_size;

	fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
					VXGE_HW_CHANNEL_TYPE_FIFO,
					config->fifo_blocks * txdl_per_memblock,
					attr->per_txdl_space, attr->userdata);

	if (fifo == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	vpath->fifoh = fifo;
	fifo->nofl_db = vpath->nofl_db;

	fifo->vp_id = vpath->vp_id;
	fifo->vp_reg = vpath->vp_reg;
	fifo->stats = &vpath->sw_stats->fifo_stats;

	fifo->config = config;

	/* apply "interrupts per txdl" attribute */
	fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
	fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved;
	fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved;

	if (fifo->config->intr)
		fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;

	fifo->no_snoop_bits = config->no_snoop_bits;

	/*
	 * FIFO memory management strategy:
	 *
	 * TxDL split into three independent parts:
	 *	- set of TxD's
	 *	- TxD HW private part
	 *	- driver private part
	 *
	 * Adaptive memory allocation is used, i.e. memory is allocated on
	 * demand with a size that fits into one memory block.
	 * One memory block may contain more than one TxDL.
	 *
	 * During "reserve" operations more memory can be allocated on demand,
	 * for example due to a FIFO full condition.
	 *
	 * Pool of memory memblocks never shrinks except in
	 * __vxge_hw_fifo_close routine which will essentially stop the
	 * channel and free resources.
	 */

	/* TxDL common private size == TxDL private + driver private */
	fifo->priv_size = sizeof(struct __vxge_hw_fifo_txdl_priv) +
				attr->per_txdl_space;
	fifo->priv_size = ((fifo->priv_size + VXGE_CACHE_LINE_SIZE - 1) /
			VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;

	fifo->per_txdl_space = attr->per_txdl_space;

	/* TxDL size and per-memblock count as computed above */
	fifo->txdl_size = txdl_size;
	fifo->txdl_per_memblock = txdl_per_memblock;

	fifo->txdl_term = attr->txdl_term;
	fifo->callback = attr->callback;

	if (fifo->txdl_per_memblock == 0) {
		__vxge_hw_fifo_delete(vp);
		status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
		goto exit;
	}

	fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;

	fifo->mempool =
		__vxge_hw_mempool_create(vpath->hldev,
			fifo->config->memblock_size,
			fifo->txdl_size,
			fifo->priv_size,
			(fifo->config->fifo_blocks * fifo->txdl_per_memblock),
			(fifo->config->fifo_blocks * fifo->txdl_per_memblock),
			&fifo_mp_callback,
			fifo);

	if (fifo->mempool == NULL) {
		__vxge_hw_fifo_delete(vp);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	status = __vxge_hw_channel_initialize(&fifo->channel);
	if (status != VXGE_HW_OK) {
		__vxge_hw_fifo_delete(vp);
		goto exit;
	}

	vxge_assert(fifo->channel.reserve_ptr);
exit:
	return status;
}

/*
 * __vxge_hw_vpath_pci_read - Read the content of given address
 * in pci config space.
 * Read from the vpath pci config space.
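 *
 * Illustrative use (arguments are examples only): reading the 32-bit
 * word at config-space offset 0 of physical function 0 fetches the
 * vendor/device ID pair:
 *
 *	u32 id;
 *	status = __vxge_hw_vpath_pci_read(vpath, 1, 0, &id);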
*/ static enum vxge_hw_status __vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath, u32 phy_func_0, u32 offset, u32 *val) { u64 val64; enum vxge_hw_status status = VXGE_HW_OK; struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg; val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset); if (phy_func_0) val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0; writeq(val64, &vp_reg->pci_config_access_cfg1); wmb(); writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ, &vp_reg->pci_config_access_cfg2); wmb(); status = __vxge_hw_device_register_poll( &vp_reg->pci_config_access_cfg2, VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS); if (status != VXGE_HW_OK) goto exit; val64 = readq(&vp_reg->pci_config_access_status); if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) { status = VXGE_HW_FAIL; *val = 0; } else *val = (u32)vxge_bVALn(val64, 32, 32); exit: return status; } /** * vxge_hw_device_flick_link_led - Flick (blink) link LED. * @hldev: HW device. * @on_off: TRUE if flickering to be on, FALSE to be off * * Flicker the link LED. */ enum vxge_hw_status vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off) { struct __vxge_hw_virtualpath *vpath; u64 data0, data1 = 0, steer_ctrl = 0; enum vxge_hw_status status; if (hldev == NULL) { status = VXGE_HW_ERR_INVALID_DEVICE; goto exit; } vpath = &hldev->virtual_paths[hldev->first_vp_id]; data0 = on_off; status = vxge_hw_vpath_fw_api(vpath, VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL, VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, 0, &data0, &data1, &steer_ctrl); exit: return status; } /* * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables */ enum vxge_hw_status __vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table, u32 offset, u64 *data0, u64 *data1) { enum vxge_hw_status status; u64 steer_ctrl = 0; if (vp == NULL) { status = VXGE_HW_ERR_INVALID_HANDLE; goto exit; } if ((rts_table == VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) || (rts_table == VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) || (rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) || (rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) { steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL; } status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset, data0, data1, &steer_ctrl); if (status != VXGE_HW_OK) goto exit; if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) && (rts_table != VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) *data1 = 0; exit: return status; } /* * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables */ enum vxge_hw_status __vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table, u32 offset, u64 steer_data0, u64 steer_data1) { u64 data0, data1 = 0, steer_ctrl = 0; enum vxge_hw_status status; if (vp == NULL) { status = VXGE_HW_ERR_INVALID_HANDLE; goto exit; } data0 = steer_data0; if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || (rts_table == VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) data1 = steer_data1; status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset, &data0, &data1, &steer_ctrl); exit: return status; } /* * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing. 
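 *
 * An illustrative call (not taken from the driver; RTH_ALG_JENKINS is
 * assumed from the driver's algorithm enum) enabling Jenkins hashing
 * over TCP/IPv4 flows with a 2^8-entry bucket space:
 *
 *	struct vxge_hw_rth_hash_types ht = { .hash_type_tcpipv4_en = 1 };
 *	status = vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS, &ht, 8);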
 */
enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
			struct __vxge_hw_vpath_handle *vp,
			enum vxge_hw_rth_algoritms algorithm,
			struct vxge_hw_rth_hash_types *hash_type,
			u16 bucket_size)
{
	u64 data0, data1;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
		     VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
		     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
			0, &data0, &data1);
	if (status != VXGE_HW_OK)
		goto exit;

	data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));

	data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
	VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
	VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);

	if (hash_type->hash_type_tcpipv4_en)
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;

	if (hash_type->hash_type_ipv4_en)
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;

	if (hash_type->hash_type_tcpipv6_en)
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;

	if (hash_type->hash_type_ipv6_en)
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;

	if (hash_type->hash_type_tcpipv6ex_en)
		data0 |=
		VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;

	if (hash_type->hash_type_ipv6ex_en)
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;

	if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
		data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
	else
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;

	status = __vxge_hw_vpath_rts_table_set(vp,
		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
		0, data0, 0);
exit:
	return status;
}

/* Each call fills exactly one indirection-table item, so every case
 * must break; without the breaks, a call with a low flag value would
 * fall through and clobber data0/data1 with entries for the wrong
 * bucket numbers.
 */
static void
vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
				u16 flag, u8 *itable)
{
	switch (flag) {
	case 1:
		*data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
			itable[j]);
		break;
	case 2:
		*data0 |=
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
			itable[j]);
		break;
	case 3:
		*data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
			itable[j]);
		break;
	case 4:
		*data1 |=
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
			itable[j]);
		break;
	default:
		return;
	}
}

/*
 * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
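 *
 * itable[] holds the per-bucket steering data and mtable[] maps that
 * data to the vpath handle in vpath_handles[] which must program the
 * entry; in multi-IT mode up to four bucket entries are packed into one
 * (data0, data1) pair by vxge_hw_rts_rth_data0_data1_get() before being
 * written out.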
*/ enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set( struct __vxge_hw_vpath_handle **vpath_handles, u32 vpath_count, u8 *mtable, u8 *itable, u32 itable_size) { u32 i, j, action, rts_table; u64 data0; u64 data1; u32 max_entries; enum vxge_hw_status status = VXGE_HW_OK; struct __vxge_hw_vpath_handle *vp = vpath_handles[0]; if (vp == NULL) { status = VXGE_HW_ERR_INVALID_HANDLE; goto exit; } max_entries = (((u32)1) << itable_size); if (vp->vpath->hldev->config.rth_it_type == VXGE_HW_RTH_IT_TYPE_SOLO_IT) { action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY; rts_table = VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT; for (j = 0; j < max_entries; j++) { data1 = 0; data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA( itable[j]); status = __vxge_hw_vpath_rts_table_set(vpath_handles[0], action, rts_table, j, data0, data1); if (status != VXGE_HW_OK) goto exit; } for (j = 0; j < max_entries; j++) { data1 = 0; data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA( itable[j]); status = __vxge_hw_vpath_rts_table_set( vpath_handles[mtable[itable[j]]], action, rts_table, j, data0, data1); if (status != VXGE_HW_OK) goto exit; } } else { action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY; rts_table = VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT; for (i = 0; i < vpath_count; i++) { for (j = 0; j < max_entries;) { data0 = 0; data1 = 0; while (j < max_entries) { if (mtable[itable[j]] != i) { j++; continue; } vxge_hw_rts_rth_data0_data1_get(j, &data0, &data1, 1, itable); j++; break; } while (j < max_entries) { if (mtable[itable[j]] != i) { j++; continue; } vxge_hw_rts_rth_data0_data1_get(j, &data0, &data1, 2, itable); j++; break; } while (j < max_entries) { if (mtable[itable[j]] != i) { j++; continue; } vxge_hw_rts_rth_data0_data1_get(j, &data0, &data1, 3, itable); j++; break; } while (j < max_entries) { if (mtable[itable[j]] != i) { j++; continue; } vxge_hw_rts_rth_data0_data1_get(j, &data0, &data1, 4, itable); j++; break; } if (data0 != 0) { status = __vxge_hw_vpath_rts_table_set( vpath_handles[i], action, rts_table, 0, data0, data1); if (status != VXGE_HW_OK) goto exit; } } } } exit: return status; } /** * vxge_hw_vpath_check_leak - Check for memory leak * @ringh: Handle to the ring object used for receive * * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is larger or equal to * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred. * Returns: VXGE_HW_FAIL, if leak has occurred. 
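 *
 * For example, with PRC_CFG6_VPn.RXD_SPAT programmed to 64, a
 * NEW_QW_CNT reading of 64 or more means RxDs were posted that the
 * hardware never consumed.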
* */ enum vxge_hw_status vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring) { enum vxge_hw_status status = VXGE_HW_OK; u64 rxd_new_count, rxd_spat; if (ring == NULL) return status; rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell); rxd_spat = readq(&ring->vp_reg->prc_cfg6); rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat); if (rxd_new_count >= rxd_spat) status = VXGE_HW_FAIL; return status; } /* * __vxge_hw_vpath_mgmt_read * This routine reads the vpath_mgmt registers */ static enum vxge_hw_status __vxge_hw_vpath_mgmt_read( struct __vxge_hw_device *hldev, struct __vxge_hw_virtualpath *vpath) { u32 i, mtu = 0, max_pyld = 0; u64 val64; enum vxge_hw_status status = VXGE_HW_OK; for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) { val64 = readq(&vpath->vpmgmt_reg-> rxmac_cfg0_port_vpmgmt_clone[i]); max_pyld = (u32) VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN (val64); if (mtu < max_pyld) mtu = max_pyld; } vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE; val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp); for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { if (val64 & vxge_mBIT(i)) vpath->vsport_number = i; } val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone); if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK) VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP); else VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN); return status; } /* * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed * This routine checks the vpath_rst_in_prog register to see if * adapter completed the reset process for the vpath */ static enum vxge_hw_status __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath) { enum vxge_hw_status status; status = __vxge_hw_device_register_poll( &vpath->hldev->common_reg->vpath_rst_in_prog, VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG( 1 << (16 - vpath->vp_id)), vpath->hldev->config.device_poll_millis); return status; } /* * __vxge_hw_vpath_reset * This routine resets the vpath on the device */ static enum vxge_hw_status __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id) { u64 val64; enum vxge_hw_status status = VXGE_HW_OK; val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id)); __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), &hldev->common_reg->cmn_rsthdlr_cfg0); return status; } /* * __vxge_hw_vpath_sw_reset * This routine resets the vpath structures */ static enum vxge_hw_status __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id) { enum vxge_hw_status status = VXGE_HW_OK; struct __vxge_hw_virtualpath *vpath; vpath = (struct __vxge_hw_virtualpath *)&hldev->virtual_paths[vp_id]; if (vpath->ringh) { status = __vxge_hw_ring_reset(vpath->ringh); if (status != VXGE_HW_OK) goto exit; } if (vpath->fifoh) status = __vxge_hw_fifo_reset(vpath->fifoh); exit: return status; } /* * __vxge_hw_vpath_prc_configure * This routine configures the prc registers of virtual path using the config * passed */ static void __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id) { u64 val64; struct __vxge_hw_virtualpath *vpath; struct vxge_hw_vp_config *vp_config; struct vxge_hw_vpath_reg __iomem *vp_reg; vpath = &hldev->virtual_paths[vp_id]; vp_reg = vpath->vp_reg; vp_config = vpath->vp_config; if (vp_config->ring.enable == VXGE_HW_RING_DISABLE) return; val64 = readq(&vp_reg->prc_cfg1); val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE; writeq(val64, &vp_reg->prc_cfg1); val64 = readq(&vpath->vp_reg->prc_cfg6); val64 |= 
VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN; writeq(val64, &vpath->vp_reg->prc_cfg6); val64 = readq(&vp_reg->prc_cfg7); if (vpath->vp_config->ring.scatter_mode != VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) { val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3); switch (vpath->vp_config->ring.scatter_mode) { case VXGE_HW_RING_SCATTER_MODE_A: val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE( VXGE_HW_PRC_CFG7_SCATTER_MODE_A); break; case VXGE_HW_RING_SCATTER_MODE_B: val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE( VXGE_HW_PRC_CFG7_SCATTER_MODE_B); break; case VXGE_HW_RING_SCATTER_MODE_C: val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE( VXGE_HW_PRC_CFG7_SCATTER_MODE_C); break; } } writeq(val64, &vp_reg->prc_cfg7); writeq(VXGE_HW_PRC_CFG5_RXD0_ADD( __vxge_hw_ring_first_block_address_get( vpath->ringh) >> 3), &vp_reg->prc_cfg5); val64 = readq(&vp_reg->prc_cfg4); val64 |= VXGE_HW_PRC_CFG4_IN_SVC; val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3); val64 |= VXGE_HW_PRC_CFG4_RING_MODE( VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER); if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE) val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE; else val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE; writeq(val64, &vp_reg->prc_cfg4); } /* * __vxge_hw_vpath_kdfc_configure * This routine configures the kdfc registers of virtual path using the * config passed */ static enum vxge_hw_status __vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id) { u64 val64; u64 vpath_stride; enum vxge_hw_status status = VXGE_HW_OK; struct __vxge_hw_virtualpath *vpath; struct vxge_hw_vpath_reg __iomem *vp_reg; vpath = &hldev->virtual_paths[vp_id]; vp_reg = vpath->vp_reg; status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg); if (status != VXGE_HW_OK) goto exit; val64 = readq(&vp_reg->kdfc_drbl_triplet_total); vpath->max_kdfc_db = (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE( val64+1)/2; if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) { vpath->max_nofl_db = vpath->max_kdfc_db; if (vpath->max_nofl_db < ((vpath->vp_config->fifo.memblock_size / (vpath->vp_config->fifo.max_frags * sizeof(struct vxge_hw_fifo_txd))) * vpath->vp_config->fifo.fifo_blocks)) { return VXGE_HW_BADCFG_FIFO_BLOCKS; } val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0( (vpath->max_nofl_db*2)-1); } writeq(val64, &vp_reg->kdfc_fifo_trpl_partition); writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE, &vp_reg->kdfc_fifo_trpl_ctrl); val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl); val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) | VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF)); val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE( VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) | #ifndef __BIG_ENDIAN VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN | #endif VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0); writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl); writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address); wmb(); vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride); vpath->nofl_db = (struct __vxge_hw_non_offload_db_wrapper __iomem *) (hldev->kdfc + (vp_id * VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE( vpath_stride))); exit: return status; } /* * __vxge_hw_vpath_mac_configure * This routine configures the mac of virtual path using the config passed */ static enum vxge_hw_status __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id) { u64 val64; enum vxge_hw_status status = VXGE_HW_OK; struct __vxge_hw_virtualpath *vpath; struct vxge_hw_vp_config *vp_config; struct vxge_hw_vpath_reg __iomem *vp_reg; vpath = &hldev->virtual_paths[vp_id]; vp_reg = vpath->vp_reg; vp_config = vpath->vp_config; 
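	/* bind the vpath to its virtual-switch port before programming
	 * the receive-side MAC settings below
	 */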
writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER( vpath->vsport_number), &vp_reg->xmac_vsport_choice); if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) { val64 = readq(&vp_reg->xmac_rpa_vcfg); if (vp_config->rpa_strip_vlan_tag != VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) { if (vp_config->rpa_strip_vlan_tag) val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG; else val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG; } writeq(val64, &vp_reg->xmac_rpa_vcfg); val64 = readq(&vp_reg->rxmac_vcfg0); if (vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) { val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff); if ((vp_config->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu) val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN( vp_config->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE); else val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN( vpath->max_mtu); } writeq(val64, &vp_reg->rxmac_vcfg0); val64 = readq(&vp_reg->rxmac_vcfg1); val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) | VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE); if (hldev->config.rth_it_type == VXGE_HW_RTH_IT_TYPE_MULTI_IT) { val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE( 0x2) | VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE; } writeq(val64, &vp_reg->rxmac_vcfg1); } return status; } /* * __vxge_hw_vpath_tim_configure * This routine configures the tim registers of virtual path using the config * passed */ static enum vxge_hw_status __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) { u64 val64; enum vxge_hw_status status = VXGE_HW_OK; struct __vxge_hw_virtualpath *vpath; struct vxge_hw_vpath_reg __iomem *vp_reg; struct vxge_hw_vp_config *config; vpath = &hldev->virtual_paths[vp_id]; vp_reg = vpath->vp_reg; config = vpath->vp_config; writeq(0, &vp_reg->tim_dest_addr); writeq(0, &vp_reg->tim_vpath_map); writeq(0, &vp_reg->tim_bitmap); writeq(0, &vp_reg->tim_remap); if (config->ring.enable == VXGE_HW_RING_ENABLE) writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM( (vp_id * VXGE_HW_MAX_INTR_PER_VP) + VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn); val64 = readq(&vp_reg->tim_pci_cfg); val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD; writeq(val64, &vp_reg->tim_pci_cfg); if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) { val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) { val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL( 0x3ffffff); val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL( config->tti.btimer_val); } val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN; if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) { if (config->tti.timer_ac_en) val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC; else val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC; } if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) { if (config->tti.timer_ci_en) val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; else val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; } if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) { val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f); val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A( config->tti.urange_a); } if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) { val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f); val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B( config->tti.urange_b); } if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) { val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f); val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C( config->tti.urange_c); } writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); vpath->tim_tti_cfg1_saved = val64; val64 = 
readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]); if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff); val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A( config->tti.uec_a); } if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) { val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff); val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B( config->tti.uec_b); } if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) { val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff); val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C( config->tti.uec_c); } if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) { val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff); val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D( config->tti.uec_d); } writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]); val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) { if (config->tti.timer_ri_en) val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI; else val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI; } if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) { val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL( 0x3ffffff); val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL( config->tti.rtimer_val); } if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) { val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f); val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id); } if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) { val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL( 0x3ffffff); val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL( config->tti.ltimer_val); } writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); vpath->tim_tti_cfg3_saved = val64; } if (config->ring.enable == VXGE_HW_RING_ENABLE) { val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) { val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL( 0x3ffffff); val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL( config->rti.btimer_val); } val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN; if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) { if (config->rti.timer_ac_en) val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC; else val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC; } if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) { if (config->rti.timer_ci_en) val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; else val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; } if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) { val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f); val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A( config->rti.urange_a); } if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) { val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f); val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B( config->rti.urange_b); } if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) { val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f); val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C( config->rti.urange_c); } writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); vpath->tim_rti_cfg1_saved = val64; val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]); if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff); val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A( config->rti.uec_a); } if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) { val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff); val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B( config->rti.uec_b); } if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) { val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff); val64 |= 
VXGE_HW_TIM_CFG2_INT_NUM_UEC_C( config->rti.uec_c); } if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) { val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff); val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D( config->rti.uec_d); } writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]); val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) { if (config->rti.timer_ri_en) val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI; else val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI; } if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) { val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL( 0x3ffffff); val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL( config->rti.rtimer_val); } if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) { val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f); val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id); } if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) { val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL( 0x3ffffff); val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL( config->rti.ltimer_val); } writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); vpath->tim_rti_cfg3_saved = val64; } val64 = 0; writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]); writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]); writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]); writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]); writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]); writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]); val64 = VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(150); val64 |= VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(0); val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3); writeq(val64, &vp_reg->tim_wrkld_clc); return status; } /* * __vxge_hw_vpath_initialize * This routine is the final phase of init which initializes the * registers of the vpath using the configuration passed. 
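* It applies the swapper, MAC, KDFC and TIM settings in turn, then programs * the read-optimization control from the PCIe maximum read request size.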
*/ static enum vxge_hw_status __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id) { u64 val64; u32 val32; enum vxge_hw_status status = VXGE_HW_OK; struct __vxge_hw_virtualpath *vpath; struct vxge_hw_vpath_reg __iomem *vp_reg; vpath = &hldev->virtual_paths[vp_id]; if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) { status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE; goto exit; } vp_reg = vpath->vp_reg; status = __vxge_hw_vpath_swapper_set(vpath->vp_reg); if (status != VXGE_HW_OK) goto exit; status = __vxge_hw_vpath_mac_configure(hldev, vp_id); if (status != VXGE_HW_OK) goto exit; status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id); if (status != VXGE_HW_OK) goto exit; status = __vxge_hw_vpath_tim_configure(hldev, vp_id); if (status != VXGE_HW_OK) goto exit; val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl); /* Get MRRS value from device control */ status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32); if (status == VXGE_HW_OK) { val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12; val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7)); val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32); val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE; } val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7)); val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY( VXGE_HW_MAX_PAYLOAD_SIZE_512); val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN; writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl); exit: return status; } /* * __vxge_hw_vp_terminate - Terminate Virtual Path structure * This routine closes all channels it opened and freeup memory */ static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id) { struct __vxge_hw_virtualpath *vpath; vpath = &hldev->virtual_paths[vp_id]; if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) goto exit; VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0, vpath->hldev->tim_int_mask1, vpath->vp_id); hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL; /* If the whole struct __vxge_hw_virtualpath is zeroed, nothing will * work after the interface is brought down. */ spin_lock(&vpath->lock); vpath->vp_open = VXGE_HW_VP_NOT_OPEN; spin_unlock(&vpath->lock); vpath->vpmgmt_reg = NULL; vpath->nofl_db = NULL; vpath->max_mtu = 0; vpath->vsport_number = 0; vpath->max_kdfc_db = 0; vpath->max_nofl_db = 0; vpath->ringh = NULL; vpath->fifoh = NULL; memset(&vpath->vpath_handles, 0, sizeof(struct list_head)); vpath->stats_block = 0; vpath->hw_stats = NULL; vpath->hw_stats_sav = NULL; vpath->sw_stats = NULL; exit: return; } /* * __vxge_hw_vp_initialize - Initialize Virtual Path structure * This routine is the initial phase of init which resets the vpath and * initializes the software support structures. 
*/ static enum vxge_hw_status __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id, struct vxge_hw_vp_config *config) { struct __vxge_hw_virtualpath *vpath; enum vxge_hw_status status = VXGE_HW_OK; if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) { status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE; goto exit; } vpath = &hldev->virtual_paths[vp_id]; spin_lock_init(&vpath->lock); vpath->vp_id = vp_id; vpath->vp_open = VXGE_HW_VP_OPEN; vpath->hldev = hldev; vpath->vp_config = config; vpath->vp_reg = hldev->vpath_reg[vp_id]; vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id]; __vxge_hw_vpath_reset(hldev, vp_id); status = __vxge_hw_vpath_reset_check(vpath); if (status != VXGE_HW_OK) { memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); goto exit; } status = __vxge_hw_vpath_mgmt_read(hldev, vpath); if (status != VXGE_HW_OK) { memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); goto exit; } INIT_LIST_HEAD(&vpath->vpath_handles); vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id]; VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0, hldev->tim_int_mask1, vp_id); status = __vxge_hw_vpath_initialize(hldev, vp_id); if (status != VXGE_HW_OK) __vxge_hw_vp_terminate(hldev, vp_id); exit: return status; } /* * vxge_hw_vpath_mtu_set - Set MTU. * Set new MTU value. Example, to use jumbo frames: * vxge_hw_vpath_mtu_set(my_device, 9600); */ enum vxge_hw_status vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu) { u64 val64; enum vxge_hw_status status = VXGE_HW_OK; struct __vxge_hw_virtualpath *vpath; if (vp == NULL) { status = VXGE_HW_ERR_INVALID_HANDLE; goto exit; } vpath = vp->vpath; new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE; if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) { status = VXGE_HW_ERR_INVALID_MTU_SIZE; goto exit; } val64 = readq(&vpath->vp_reg->rxmac_vcfg0); val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff); val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu); writeq(val64, &vpath->vp_reg->rxmac_vcfg0); vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE; exit: return status; } /* * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics. * Enable the DMA vpath statistics. 
The function is to be called to re-enable * the adapter to update stats into the host memory */ static enum vxge_hw_status vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp) { enum vxge_hw_status status = VXGE_HW_OK; struct __vxge_hw_virtualpath *vpath; vpath = vp->vpath; if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { status = VXGE_HW_ERR_VPATH_NOT_OPEN; goto exit; } memcpy(vpath->hw_stats_sav, vpath->hw_stats, sizeof(struct vxge_hw_vpath_stats_hw_info)); status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats); exit: return status; } /* * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool * This function allocates a block from block pool or from the system */ static struct __vxge_hw_blockpool_entry * __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size) { struct __vxge_hw_blockpool_entry *entry = NULL; struct __vxge_hw_blockpool *blockpool; blockpool = &devh->block_pool; if (size == blockpool->block_size) { if (!list_empty(&blockpool->free_block_list)) entry = (struct __vxge_hw_blockpool_entry *) list_first_entry(&blockpool->free_block_list, struct __vxge_hw_blockpool_entry, item); if (entry != NULL) { list_del(&entry->item); blockpool->pool_size--; } } if (entry != NULL) __vxge_hw_blockpool_blocks_add(blockpool); return entry; } /* * vxge_hw_vpath_open - Open a virtual path on a given adapter * This function is used to open access to virtual path of an * adapter for offload, GRO operations. This function returns * synchronously. */ enum vxge_hw_status vxge_hw_vpath_open(struct __vxge_hw_device *hldev, struct vxge_hw_vpath_attr *attr, struct __vxge_hw_vpath_handle **vpath_handle) { struct __vxge_hw_virtualpath *vpath; struct __vxge_hw_vpath_handle *vp; enum vxge_hw_status status; vpath = &hldev->virtual_paths[attr->vp_id]; if (vpath->vp_open == VXGE_HW_VP_OPEN) { status = VXGE_HW_ERR_INVALID_STATE; goto vpath_open_exit1; } status = __vxge_hw_vp_initialize(hldev, attr->vp_id, &hldev->config.vp_config[attr->vp_id]); if (status != VXGE_HW_OK) goto vpath_open_exit1; vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle)); if (vp == NULL) { status = VXGE_HW_ERR_OUT_OF_MEMORY; goto vpath_open_exit2; } vp->vpath = vpath; if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) { status = __vxge_hw_fifo_create(vp, &attr->fifo_attr); if (status != VXGE_HW_OK) goto vpath_open_exit6; } if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) { status = __vxge_hw_ring_create(vp, &attr->ring_attr); if (status != VXGE_HW_OK) goto vpath_open_exit7; __vxge_hw_vpath_prc_configure(hldev, attr->vp_id); } vpath->fifoh->tx_intr_num = (attr->vp_id * VXGE_HW_MAX_INTR_PER_VP) + VXGE_HW_VPATH_INTR_TX; vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev, VXGE_HW_BLOCK_SIZE); if (vpath->stats_block == NULL) { status = VXGE_HW_ERR_OUT_OF_MEMORY; goto vpath_open_exit8; } vpath->hw_stats = vpath->stats_block->memblock; memset(vpath->hw_stats, 0, sizeof(struct vxge_hw_vpath_stats_hw_info)); hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] = vpath->hw_stats; vpath->hw_stats_sav = &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id]; memset(vpath->hw_stats_sav, 0, sizeof(struct vxge_hw_vpath_stats_hw_info)); writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg); status = vxge_hw_vpath_stats_enable(vp); if (status != VXGE_HW_OK) goto vpath_open_exit8; list_add(&vp->item, &vpath->vpath_handles); hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id); *vpath_handle = vp; attr->fifo_attr.userdata = vpath->fifoh; attr->ring_attr.userdata = 
vpath->ringh; return VXGE_HW_OK; vpath_open_exit8: if (vpath->ringh != NULL) __vxge_hw_ring_delete(vp); vpath_open_exit7: if (vpath->fifoh != NULL) __vxge_hw_fifo_delete(vp); vpath_open_exit6: vfree(vp); vpath_open_exit2: __vxge_hw_vp_terminate(hldev, attr->vp_id); vpath_open_exit1: return status; } /** * vxge_hw_vpath_rx_doorbell_init - Post the initial RxD count to the doorbell * @vp: Handle got from previous vpath open * * This function posts the number of receive descriptors initially available * to the vpath doorbell register and derives the ring's rxds_limit from it. */ void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp) { struct __vxge_hw_virtualpath *vpath = vp->vpath; struct __vxge_hw_ring *ring = vpath->ringh; struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev); u64 new_count, val64, val164; if (vdev->titan1) { new_count = readq(&vpath->vp_reg->rxdmem_size); new_count &= 0x1fff; } else new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8; val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count); writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164), &vpath->vp_reg->prc_rxd_doorbell); readl(&vpath->vp_reg->prc_rxd_doorbell); val164 /= 2; val64 = readq(&vpath->vp_reg->prc_cfg6); val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64); val64 &= 0x1ff; /* * Each RxD is of 4 qwords */ new_count -= (val64 + 1); val64 = min(val164, new_count) / 4; ring->rxds_limit = min(ring->rxds_limit, val64); if (ring->rxds_limit < 4) ring->rxds_limit = 4; } /* * __vxge_hw_blockpool_block_free - Frees a block from block pool * @devh: Hal device * @entry: Entry of block to be freed * * This function frees a block from block pool */ static void __vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh, struct __vxge_hw_blockpool_entry *entry) { struct __vxge_hw_blockpool *blockpool; blockpool = &devh->block_pool; if (entry->length == blockpool->block_size) { list_add(&entry->item, &blockpool->free_block_list); blockpool->pool_size++; } __vxge_hw_blockpool_blocks_remove(blockpool); } /* * vxge_hw_vpath_close - Close the handle got from previous vpath (vpath) open * This function is used to close access to virtual path opened * earlier. 
*/ enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp) { struct __vxge_hw_virtualpath *vpath = NULL; struct __vxge_hw_device *devh = NULL; u32 vp_id = vp->vpath->vp_id; u32 is_empty = TRUE; enum vxge_hw_status status = VXGE_HW_OK; vpath = vp->vpath; devh = vpath->hldev; if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { status = VXGE_HW_ERR_VPATH_NOT_OPEN; goto vpath_close_exit; } list_del(&vp->item); if (!list_empty(&vpath->vpath_handles)) { list_add(&vp->item, &vpath->vpath_handles); is_empty = FALSE; } if (!is_empty) { status = VXGE_HW_FAIL; goto vpath_close_exit; } devh->vpaths_deployed &= ~vxge_mBIT(vp_id); if (vpath->ringh != NULL) __vxge_hw_ring_delete(vp); if (vpath->fifoh != NULL) __vxge_hw_fifo_delete(vp); if (vpath->stats_block != NULL) __vxge_hw_blockpool_block_free(devh, vpath->stats_block); vfree(vp); __vxge_hw_vp_terminate(devh, vp_id); vpath_close_exit: return status; } /* * vxge_hw_vpath_reset - Resets vpath * This function is used to request a reset of vpath */ enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp) { enum vxge_hw_status status; u32 vp_id; struct __vxge_hw_virtualpath *vpath = vp->vpath; vp_id = vpath->vp_id; if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { status = VXGE_HW_ERR_VPATH_NOT_OPEN; goto exit; } status = __vxge_hw_vpath_reset(vpath->hldev, vp_id); if (status == VXGE_HW_OK) vpath->sw_stats->soft_reset_cnt++; exit: return status; } /* * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize. * This function poll's for the vpath reset completion and re initializes * the vpath. */ enum vxge_hw_status vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp) { struct __vxge_hw_virtualpath *vpath = NULL; enum vxge_hw_status status; struct __vxge_hw_device *hldev; u32 vp_id; vp_id = vp->vpath->vp_id; vpath = vp->vpath; hldev = vpath->hldev; if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { status = VXGE_HW_ERR_VPATH_NOT_OPEN; goto exit; } status = __vxge_hw_vpath_reset_check(vpath); if (status != VXGE_HW_OK) goto exit; status = __vxge_hw_vpath_sw_reset(hldev, vp_id); if (status != VXGE_HW_OK) goto exit; status = __vxge_hw_vpath_initialize(hldev, vp_id); if (status != VXGE_HW_OK) goto exit; if (vpath->ringh != NULL) __vxge_hw_vpath_prc_configure(hldev, vp_id); memset(vpath->hw_stats, 0, sizeof(struct vxge_hw_vpath_stats_hw_info)); memset(vpath->hw_stats_sav, 0, sizeof(struct vxge_hw_vpath_stats_hw_info)); writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg); status = vxge_hw_vpath_stats_enable(vp); exit: return status; } /* * vxge_hw_vpath_enable - Enable vpath. * This routine clears the vpath reset thereby enabling a vpath * to start forwarding frames and generating interrupts. */ void vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp) { struct __vxge_hw_device *hldev; u64 val64; hldev = vp->vpath->hldev; val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET( 1 << (16 - vp->vpath->vp_id)); __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), &hldev->common_reg->cmn_rsthdlr_cfg1); }
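/*
 * Illustrative userspace model (an assumption-laden sketch, not driver
 * code) of the rxds_limit arithmetic in vxge_hw_vpath_rx_doorbell_init()
 * above: the doorbell count is expressed in qwords, each RxD occupies
 * four qwords, and the limit is floored at four descriptors.  The input
 * values in main() are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t model_rxds_limit(uint64_t qw_cnt, uint64_t rxd_spat,
				 uint64_t cur_limit)
{
	uint64_t avail = qw_cnt - (rxd_spat + 1);	/* qwords beyond the SPAT threshold */
	uint64_t half  = qw_cnt / 2;			/* half the posted qword count */
	uint64_t rxds  = ((half < avail) ? half : avail) / 4;	/* 4 qwords per RxD */

	if (rxds < cur_limit)
		cur_limit = rxds;
	return (cur_limit < 4) ? 4 : cur_limit;		/* never drop below 4 RxDs */
}

int main(void)
{
	/* hypothetical inputs: 512 qwords posted, SPAT of 63, current limit 256 */
	printf("rxds_limit = %llu\n",
	       (unsigned long long)model_rxds_limit(512, 63, 256));
	return 0;
}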
gpl-2.0
ShikharArvind/myriad_eye
drivers/staging/nvec/nvec_kbd.c
5123
4162
/* * nvec_kbd: keyboard driver for a NVIDIA compliant embedded controller * * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.launchpad.net> * * Authors: Pierre-Hugues Husson <phhusson@free.fr> * Marc Dietrich <marvin24@gmx.de> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * */ #include <linux/module.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/delay.h> #include <linux/platform_device.h> #include "nvec-keytable.h" #include "nvec.h" #define ACK_KBD_EVENT {'\x05', '\xed', '\x01'} static const char led_on[3] = "\x05\xed\x07"; static const char led_off[3] = "\x05\xed\x00"; static unsigned char keycodes[ARRAY_SIZE(code_tab_102us) + ARRAY_SIZE(extcode_tab_us102)]; struct nvec_keys { struct input_dev *input; struct notifier_block notifier; struct nvec_chip *nvec; bool caps_lock; }; static struct nvec_keys keys_dev; static void nvec_kbd_toggle_led(void) { keys_dev.caps_lock = !keys_dev.caps_lock; if (keys_dev.caps_lock) nvec_write_async(keys_dev.nvec, led_on, sizeof(led_on)); else nvec_write_async(keys_dev.nvec, led_off, sizeof(led_off)); } static int nvec_keys_notifier(struct notifier_block *nb, unsigned long event_type, void *data) { int code, state; unsigned char *msg = (unsigned char *)data; if (event_type == NVEC_KB_EVT) { int _size = (msg[0] & (3 << 5)) >> 5; /* power on/off button */ if (_size == NVEC_VAR_SIZE) return NOTIFY_STOP; if (_size == NVEC_3BYTES) msg++; code = msg[1] & 0x7f; state = msg[1] & 0x80; if (code_tabs[_size][code] == KEY_CAPSLOCK && state) nvec_kbd_toggle_led(); input_report_key(keys_dev.input, code_tabs[_size][code], !state); input_sync(keys_dev.input); return NOTIFY_STOP; } return NOTIFY_DONE; } static int nvec_kbd_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) { unsigned char buf[] = ACK_KBD_EVENT; struct nvec_chip *nvec = keys_dev.nvec; if (type == EV_REP) return 0; if (type != EV_LED) return -1; if (code != LED_CAPSL) return -1; buf[2] = !!value; nvec_write_async(nvec, buf, sizeof(buf)); return 0; } static int __devinit nvec_kbd_probe(struct platform_device *pdev) { struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent); int i, j, err; struct input_dev *idev; j = 0; for (i = 0; i < ARRAY_SIZE(code_tab_102us); ++i) keycodes[j++] = code_tab_102us[i]; for (i = 0; i < ARRAY_SIZE(extcode_tab_us102); ++i) keycodes[j++] = extcode_tab_us102[i]; idev = input_allocate_device(); idev->name = "nvec keyboard"; idev->phys = "nvec"; idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) | BIT_MASK(EV_LED); idev->ledbit[0] = BIT_MASK(LED_CAPSL); idev->event = nvec_kbd_event; idev->keycode = keycodes; idev->keycodesize = sizeof(unsigned char); idev->keycodemax = ARRAY_SIZE(keycodes); for (i = 0; i < ARRAY_SIZE(keycodes); ++i) set_bit(keycodes[i], idev->keybit); clear_bit(0, idev->keybit); err = input_register_device(idev); if (err) goto fail; keys_dev.input = idev; keys_dev.notifier.notifier_call = nvec_keys_notifier; keys_dev.nvec = nvec; nvec_register_notifier(nvec, &keys_dev.notifier, 0); /* Enable keyboard */ nvec_write_async(nvec, "\x05\xf4", 2); /* keyboard reset? 
*/ nvec_write_async(nvec, "\x05\x03\x01\x01", 4); nvec_write_async(nvec, "\x05\x04\x01", 3); nvec_write_async(nvec, "\x06\x01\xff\x03", 4); /* FIXME wait until keyboard reset is finished or until we have a sync write */ mdelay(1000); /* Disable caps lock LED */ nvec_write_async(nvec, led_off, sizeof(led_off)); return 0; fail: input_free_device(idev); return err; } static struct platform_driver nvec_kbd_driver = { .probe = nvec_kbd_probe, .driver = { .name = "nvec-kbd", .owner = THIS_MODULE, }, }; static int __init nvec_kbd_init(void) { return platform_driver_register(&nvec_kbd_driver); } module_init(nvec_kbd_init); MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>"); MODULE_DESCRIPTION("NVEC keyboard driver"); MODULE_LICENSE("GPL");
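/*
 * Minimal userspace sketch of the field extraction done in
 * nvec_keys_notifier() above.  The sample message bytes and the meaning
 * of size class 2 (taken here as a plain two-byte event) are assumptions
 * for illustration only.
 */
#include <stdio.h>

int main(void)
{
	unsigned char msg[] = { 0x40, 0x9e };	/* hypothetical keyboard event */
	int size  = (msg[0] & (3 << 5)) >> 5;	/* bits 6:5 select the size class */
	int code  = msg[1] & 0x7f;		/* 7-bit scan code */
	int state = msg[1] & 0x80;		/* bit 7 set means key released */

	printf("size=%d code=0x%02x %s\n", size, code,
	       state ? "released" : "pressed");
	return 0;
}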
gpl-2.0
BOOTMGR/GT-I9070_kernel
lib/syscall.c
5123
2475
#include <linux/ptrace.h> #include <linux/sched.h> #include <linux/module.h> #include <asm/syscall.h> static int collect_syscall(struct task_struct *target, long *callno, unsigned long args[6], unsigned int maxargs, unsigned long *sp, unsigned long *pc) { struct pt_regs *regs = task_pt_regs(target); if (unlikely(!regs)) return -EAGAIN; *sp = user_stack_pointer(regs); *pc = instruction_pointer(regs); *callno = syscall_get_nr(target, regs); if (*callno != -1L && maxargs > 0) syscall_get_arguments(target, regs, 0, maxargs, args); return 0; } /** * task_current_syscall - Discover what a blocked task is doing. * @target: thread to examine * @callno: filled with system call number or -1 * @args: filled with @maxargs system call arguments * @maxargs: number of elements in @args to fill * @sp: filled with user stack pointer * @pc: filled with user PC * * If @target is blocked in a system call, returns zero with *@callno * set to the call's number and @args filled in with its arguments. * Registers not used for system call arguments may not be available and * it is not kosher to use &struct user_regset calls while the system * call is still in progress. Note we may get this result if @target * has finished its system call but not yet returned to user mode, such * as when it's stopped for signal handling or syscall exit tracing. * * If @target is blocked in the kernel during a fault or exception, * returns zero with *@callno set to -1 and does not fill in @args. * If so, it's now safe to examine @target using &struct user_regset * get() calls as long as we're sure @target won't return to user mode. * * Returns -%EAGAIN if @target does not remain blocked. * * Returns -%EINVAL if @maxargs is too large (maximum is six). */ int task_current_syscall(struct task_struct *target, long *callno, unsigned long args[6], unsigned int maxargs, unsigned long *sp, unsigned long *pc) { long state; unsigned long ncsw; if (unlikely(maxargs > 6)) return -EINVAL; if (target == current) return collect_syscall(target, callno, args, maxargs, sp, pc); state = target->state; if (unlikely(!state)) return -EAGAIN; ncsw = wait_task_inactive(target, state); if (unlikely(!ncsw) || unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) || unlikely(wait_task_inactive(target, state) != ncsw)) return -EAGAIN; return 0; } EXPORT_SYMBOL_GPL(task_current_syscall);
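/*
 * Hedged usage sketch (not part of this file): how an in-kernel caller
 * might report what a blocked task is doing.  The function name, the
 * caller's context and how @task is kept blocked are assumptions; the
 * task_current_syscall() signature matches the definition above.
 */
static void example_report_syscall(struct task_struct *task)
{
	long nr;
	unsigned long args[6], sp, pc;

	if (task_current_syscall(task, &nr, args, 6, &sp, &pc))
		return;		/* task did not remain blocked */

	if (nr != -1L)
		printk(KERN_DEBUG "pid %d blocked in syscall %ld, arg0=%#lx\n",
		       task->pid, nr, args[0]);
	else
		printk(KERN_DEBUG "pid %d blocked in a fault or exception\n",
		       task->pid);
}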
gpl-2.0
Kurre/kernel_msm
arch/arm/mach-ixp4xx/ixdp425-pci.c
5379
1914
/* * arch/arm/mach-ixp4xx/ixdp425-pci.c * * IXDP425 board-level PCI initialization * * Copyright (C) 2002 Intel Corporation. * Copyright (C) 2003-2004 MontaVista Software, Inc. * * Maintainer: Deepak Saxena <dsaxena@plexity.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/delay.h> #include <asm/mach/pci.h> #include <asm/irq.h> #include <mach/hardware.h> #include <asm/mach-types.h> #define MAX_DEV 4 #define IRQ_LINES 4 /* PCI controller GPIO to IRQ pin mappings */ #define INTA 11 #define INTB 10 #define INTC 9 #define INTD 8 void __init ixdp425_pci_preinit(void) { irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW); ixp4xx_pci_preinit(); } static int __init ixdp425_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static int pci_irq_table[IRQ_LINES] = { IXP4XX_GPIO_IRQ(INTA), IXP4XX_GPIO_IRQ(INTB), IXP4XX_GPIO_IRQ(INTC), IXP4XX_GPIO_IRQ(INTD) }; if (slot >= 1 && slot <= MAX_DEV && pin >= 1 && pin <= IRQ_LINES) return pci_irq_table[(slot + pin - 2) % 4]; return -1; } struct hw_pci ixdp425_pci __initdata = { .nr_controllers = 1, .preinit = ixdp425_pci_preinit, .swizzle = pci_std_swizzle, .setup = ixp4xx_setup, .scan = ixp4xx_scan_bus, .map_irq = ixdp425_map_irq, }; int __init ixdp425_pci_init(void) { if (machine_is_ixdp425() || machine_is_ixcdp1100() || machine_is_ixdp465() || machine_is_kixrp435()) pci_common_init(&ixdp425_pci); return 0; } subsys_initcall(ixdp425_pci_init);
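/*
 * Standalone sketch (not board code) that tabulates the slot/pin to
 * IRQ-line rotation computed by ixdp425_map_irq() above: INTA..INTD
 * rotate by one line per slot, the conventional PCI interrupt swizzle.
 */
#include <stdio.h>

int main(void)
{
	static const char *line[4] = { "INTA", "INTB", "INTC", "INTD" };
	int slot, pin;

	for (slot = 1; slot <= 4; slot++)
		for (pin = 1; pin <= 4; pin++)
			printf("slot %d pin %d -> GPIO %s\n",
			       slot, pin, line[(slot + pin - 2) % 4]);
	return 0;
}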
gpl-2.0
ninjablocks/VAR-SOM-AM33-SDK7-Kernel
arch/powerpc/platforms/powermac/nvram.c
7683
15386
/* * Copyright (C) 2002 Benjamin Herrenschmidt (benh@kernel.crashing.org) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Todo: - add support for the OF persistent properties */ #include <linux/export.h> #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/string.h> #include <linux/nvram.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/adb.h> #include <linux/pmu.h> #include <linux/bootmem.h> #include <linux/completion.h> #include <linux/spinlock.h> #include <asm/sections.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/nvram.h> #include "pmac.h" #define DEBUG #ifdef DEBUG #define DBG(x...) printk(x) #else #define DBG(x...) #endif #define NVRAM_SIZE 0x2000 /* 8kB of non-volatile RAM */ #define CORE99_SIGNATURE 0x5a #define CORE99_ADLER_START 0x14 /* On Core99, nvram is either a sharp, a micron or an AMD flash */ #define SM_FLASH_STATUS_DONE 0x80 #define SM_FLASH_STATUS_ERR 0x38 #define SM_FLASH_CMD_ERASE_CONFIRM 0xd0 #define SM_FLASH_CMD_ERASE_SETUP 0x20 #define SM_FLASH_CMD_RESET 0xff #define SM_FLASH_CMD_WRITE_SETUP 0x40 #define SM_FLASH_CMD_CLEAR_STATUS 0x50 #define SM_FLASH_CMD_READ_STATUS 0x70 /* CHRP NVRAM header */ struct chrp_header { u8 signature; u8 cksum; u16 len; char name[12]; u8 data[0]; }; struct core99_header { struct chrp_header hdr; u32 adler; u32 generation; u32 reserved[2]; }; /* * Read and write the non-volatile RAM on PowerMacs and CHRP machines. */ static int nvram_naddrs; static volatile unsigned char __iomem *nvram_data; static int is_core_99; static int core99_bank = 0; static int nvram_partitions[3]; // XXX Turn that into a sem static DEFINE_RAW_SPINLOCK(nv_lock); static int (*core99_write_bank)(int bank, u8* datas); static int (*core99_erase_bank)(int bank); static char *nvram_image; static unsigned char core99_nvram_read_byte(int addr) { if (nvram_image == NULL) return 0xff; return nvram_image[addr]; } static void core99_nvram_write_byte(int addr, unsigned char val) { if (nvram_image == NULL) return; nvram_image[addr] = val; } static ssize_t core99_nvram_read(char *buf, size_t count, loff_t *index) { int i; if (nvram_image == NULL) return -ENODEV; if (*index > NVRAM_SIZE) return 0; i = *index; if (i + count > NVRAM_SIZE) count = NVRAM_SIZE - i; memcpy(buf, &nvram_image[i], count); *index = i + count; return count; } static ssize_t core99_nvram_write(char *buf, size_t count, loff_t *index) { int i; if (nvram_image == NULL) return -ENODEV; if (*index > NVRAM_SIZE) return 0; i = *index; if (i + count > NVRAM_SIZE) count = NVRAM_SIZE - i; memcpy(&nvram_image[i], buf, count); *index = i + count; return count; } static ssize_t core99_nvram_size(void) { if (nvram_image == NULL) return -ENODEV; return NVRAM_SIZE; } #ifdef CONFIG_PPC32 static volatile unsigned char __iomem *nvram_addr; static int nvram_mult; static unsigned char direct_nvram_read_byte(int addr) { return in_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]); } static void direct_nvram_write_byte(int addr, unsigned char val) { out_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult], val); } static unsigned char indirect_nvram_read_byte(int addr) { unsigned char val; unsigned long flags; raw_spin_lock_irqsave(&nv_lock, flags); out_8(nvram_addr, addr >> 5); val = in_8(&nvram_data[(addr & 0x1f) << 4]); 
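/* Indirect scheme (used just above and by the write path below): the latched high address bits select one 32-byte page of the NVRAM, the low five bits index within it, and consecutive bytes sit 16 apart in the data window. */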
raw_spin_unlock_irqrestore(&nv_lock, flags); return val; } static void indirect_nvram_write_byte(int addr, unsigned char val) { unsigned long flags; raw_spin_lock_irqsave(&nv_lock, flags); out_8(nvram_addr, addr >> 5); out_8(&nvram_data[(addr & 0x1f) << 4], val); raw_spin_unlock_irqrestore(&nv_lock, flags); } #ifdef CONFIG_ADB_PMU static void pmu_nvram_complete(struct adb_request *req) { if (req->arg) complete((struct completion *)req->arg); } static unsigned char pmu_nvram_read_byte(int addr) { struct adb_request req; DECLARE_COMPLETION_ONSTACK(req_complete); req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL; if (pmu_request(&req, pmu_nvram_complete, 3, PMU_READ_NVRAM, (addr >> 8) & 0xff, addr & 0xff)) return 0xff; if (system_state == SYSTEM_RUNNING) wait_for_completion(&req_complete); while (!req.complete) pmu_poll(); return req.reply[0]; } static void pmu_nvram_write_byte(int addr, unsigned char val) { struct adb_request req; DECLARE_COMPLETION_ONSTACK(req_complete); req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL; if (pmu_request(&req, pmu_nvram_complete, 4, PMU_WRITE_NVRAM, (addr >> 8) & 0xff, addr & 0xff, val)) return; if (system_state == SYSTEM_RUNNING) wait_for_completion(&req_complete); while (!req.complete) pmu_poll(); } #endif /* CONFIG_ADB_PMU */ #endif /* CONFIG_PPC32 */ static u8 chrp_checksum(struct chrp_header* hdr) { u8 *ptr; u16 sum = hdr->signature; for (ptr = (u8 *)&hdr->len; ptr < hdr->data; ptr++) sum += *ptr; while (sum > 0xFF) sum = (sum & 0xFF) + (sum>>8); return sum; } static u32 core99_calc_adler(u8 *buffer) { int cnt; u32 low, high; buffer += CORE99_ADLER_START; low = 1; high = 0; for (cnt=0; cnt<(NVRAM_SIZE-CORE99_ADLER_START); cnt++) { if ((cnt % 5000) == 0) { high %= 65521UL; high %= 65521UL; } low += buffer[cnt]; high += low; } low %= 65521UL; high %= 65521UL; return (high << 16) | low; } static u32 core99_check(u8* datas) { struct core99_header* hdr99 = (struct core99_header*)datas; if (hdr99->hdr.signature != CORE99_SIGNATURE) { DBG("Invalid signature\n"); return 0; } if (hdr99->hdr.cksum != chrp_checksum(&hdr99->hdr)) { DBG("Invalid checksum\n"); return 0; } if (hdr99->adler != core99_calc_adler(datas)) { DBG("Invalid adler\n"); return 0; } return hdr99->generation; } static int sm_erase_bank(int bank) { int stat; unsigned long timeout; u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE; DBG("nvram: Sharp/Micron Erasing bank %d...\n", bank); out_8(base, SM_FLASH_CMD_ERASE_SETUP); out_8(base, SM_FLASH_CMD_ERASE_CONFIRM); timeout = 0; do { if (++timeout > 1000000) { printk(KERN_ERR "nvram: Sharp/Micron flash erase timeout !\n"); break; } out_8(base, SM_FLASH_CMD_READ_STATUS); stat = in_8(base); } while (!(stat & SM_FLASH_STATUS_DONE)); out_8(base, SM_FLASH_CMD_CLEAR_STATUS); out_8(base, SM_FLASH_CMD_RESET); if (memchr_inv(base, 0xff, NVRAM_SIZE)) { printk(KERN_ERR "nvram: Sharp/Micron flash erase failed !\n"); return -ENXIO; } return 0; } static int sm_write_bank(int bank, u8* datas) { int i, stat = 0; unsigned long timeout; u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE; DBG("nvram: Sharp/Micron Writing bank %d...\n", bank); for (i=0; i<NVRAM_SIZE; i++) { out_8(base+i, SM_FLASH_CMD_WRITE_SETUP); udelay(1); out_8(base+i, datas[i]); timeout = 0; do { if (++timeout > 1000000) { printk(KERN_ERR "nvram: Sharp/Micron flash write timeout !\n"); break; } out_8(base, SM_FLASH_CMD_READ_STATUS); stat = in_8(base); } while (!(stat & SM_FLASH_STATUS_DONE)); if (!(stat & SM_FLASH_STATUS_DONE)) break; 
} out_8(base, SM_FLASH_CMD_CLEAR_STATUS); out_8(base, SM_FLASH_CMD_RESET); if (memcmp(base, datas, NVRAM_SIZE)) { printk(KERN_ERR "nvram: Sharp/Micron flash write failed !\n"); return -ENXIO; } return 0; } static int amd_erase_bank(int bank) { int stat = 0; unsigned long timeout; u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE; DBG("nvram: AMD Erasing bank %d...\n", bank); /* Unlock 1 */ out_8(base+0x555, 0xaa); udelay(1); /* Unlock 2 */ out_8(base+0x2aa, 0x55); udelay(1); /* Sector-Erase */ out_8(base+0x555, 0x80); udelay(1); out_8(base+0x555, 0xaa); udelay(1); out_8(base+0x2aa, 0x55); udelay(1); out_8(base, 0x30); udelay(1); timeout = 0; do { if (++timeout > 1000000) { printk(KERN_ERR "nvram: AMD flash erase timeout !\n"); break; } stat = in_8(base) ^ in_8(base); } while (stat != 0); /* Reset */ out_8(base, 0xf0); udelay(1); if (memchr_inv(base, 0xff, NVRAM_SIZE)) { printk(KERN_ERR "nvram: AMD flash erase failed !\n"); return -ENXIO; } return 0; } static int amd_write_bank(int bank, u8* datas) { int i, stat = 0; unsigned long timeout; u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE; DBG("nvram: AMD Writing bank %d...\n", bank); for (i=0; i<NVRAM_SIZE; i++) { /* Unlock 1 */ out_8(base+0x555, 0xaa); udelay(1); /* Unlock 2 */ out_8(base+0x2aa, 0x55); udelay(1); /* Write single word */ out_8(base+0x555, 0xa0); udelay(1); out_8(base+i, datas[i]); timeout = 0; do { if (++timeout > 1000000) { printk(KERN_ERR "nvram: AMD flash write timeout !\n"); break; } stat = in_8(base) ^ in_8(base); } while (stat != 0); if (stat != 0) break; } /* Reset */ out_8(base, 0xf0); udelay(1); if (memcmp(base, datas, NVRAM_SIZE)) { printk(KERN_ERR "nvram: AMD flash write failed !\n"); return -ENXIO; } return 0; } static void __init lookup_partitions(void) { u8 buffer[17]; int i, offset; struct chrp_header* hdr; if (pmac_newworld) { nvram_partitions[pmac_nvram_OF] = -1; nvram_partitions[pmac_nvram_XPRAM] = -1; nvram_partitions[pmac_nvram_NR] = -1; hdr = (struct chrp_header *)buffer; offset = 0; buffer[16] = 0; do { for (i=0;i<16;i++) buffer[i] = ppc_md.nvram_read_val(offset+i); if (!strcmp(hdr->name, "common")) nvram_partitions[pmac_nvram_OF] = offset + 0x10; if (!strcmp(hdr->name, "APL,MacOS75")) { nvram_partitions[pmac_nvram_XPRAM] = offset + 0x10; nvram_partitions[pmac_nvram_NR] = offset + 0x110; } offset += (hdr->len * 0x10); } while(offset < NVRAM_SIZE); } else { nvram_partitions[pmac_nvram_OF] = 0x1800; nvram_partitions[pmac_nvram_XPRAM] = 0x1300; nvram_partitions[pmac_nvram_NR] = 0x1400; } DBG("nvram: OF partition at 0x%x\n", nvram_partitions[pmac_nvram_OF]); DBG("nvram: XP partition at 0x%x\n", nvram_partitions[pmac_nvram_XPRAM]); DBG("nvram: NR partition at 0x%x\n", nvram_partitions[pmac_nvram_NR]); } static void core99_nvram_sync(void) { struct core99_header* hdr99; unsigned long flags; if (!is_core_99 || !nvram_data || !nvram_image) return; raw_spin_lock_irqsave(&nv_lock, flags); if (!memcmp(nvram_image, (u8*)nvram_data + core99_bank*NVRAM_SIZE, NVRAM_SIZE)) goto bail; DBG("Updating nvram...\n"); hdr99 = (struct core99_header*)nvram_image; hdr99->generation++; hdr99->hdr.signature = CORE99_SIGNATURE; hdr99->hdr.cksum = chrp_checksum(&hdr99->hdr); hdr99->adler = core99_calc_adler(nvram_image); core99_bank = core99_bank ? 
0 : 1; if (core99_erase_bank) if (core99_erase_bank(core99_bank)) { printk("nvram: Error erasing bank %d\n", core99_bank); goto bail; } if (core99_write_bank) if (core99_write_bank(core99_bank, nvram_image)) printk("nvram: Error writing bank %d\n", core99_bank); bail: raw_spin_unlock_irqrestore(&nv_lock, flags); #ifdef DEBUG mdelay(2000); #endif } static int __init core99_nvram_setup(struct device_node *dp, unsigned long addr) { int i; u32 gen_bank0, gen_bank1; if (nvram_naddrs < 1) { printk(KERN_ERR "nvram: no address\n"); return -EINVAL; } nvram_image = alloc_bootmem(NVRAM_SIZE); if (nvram_image == NULL) { printk(KERN_ERR "nvram: can't allocate ram image\n"); return -ENOMEM; } nvram_data = ioremap(addr, NVRAM_SIZE*2); nvram_naddrs = 1; /* Make sure we get the correct case */ DBG("nvram: Checking bank 0...\n"); gen_bank0 = core99_check((u8 *)nvram_data); gen_bank1 = core99_check((u8 *)nvram_data + NVRAM_SIZE); core99_bank = (gen_bank0 < gen_bank1) ? 1 : 0; DBG("nvram: gen0=%d, gen1=%d\n", gen_bank0, gen_bank1); DBG("nvram: Active bank is: %d\n", core99_bank); for (i=0; i<NVRAM_SIZE; i++) nvram_image[i] = nvram_data[i + core99_bank*NVRAM_SIZE]; ppc_md.nvram_read_val = core99_nvram_read_byte; ppc_md.nvram_write_val = core99_nvram_write_byte; ppc_md.nvram_read = core99_nvram_read; ppc_md.nvram_write = core99_nvram_write; ppc_md.nvram_size = core99_nvram_size; ppc_md.nvram_sync = core99_nvram_sync; ppc_md.machine_shutdown = core99_nvram_sync; /* * Maybe we could be smarter here though making an exclusive list * of known flash chips is a bit nasty as older OF didn't provide us * with a useful "compatible" entry. A solution would be to really * identify the chip using flash id commands and base ourselves on * a list of known chips IDs */ if (of_device_is_compatible(dp, "amd-0137")) { core99_erase_bank = amd_erase_bank; core99_write_bank = amd_write_bank; } else { core99_erase_bank = sm_erase_bank; core99_write_bank = sm_write_bank; } return 0; } int __init pmac_nvram_init(void) { struct device_node *dp; struct resource r1, r2; unsigned int s1 = 0, s2 = 0; int err = 0; nvram_naddrs = 0; dp = of_find_node_by_name(NULL, "nvram"); if (dp == NULL) { printk(KERN_ERR "Can't find NVRAM device\n"); return -ENODEV; } /* Try to obtain an address */ if (of_address_to_resource(dp, 0, &r1) == 0) { nvram_naddrs = 1; s1 = resource_size(&r1); if (of_address_to_resource(dp, 1, &r2) == 0) { nvram_naddrs = 2; s2 = resource_size(&r2); } } is_core_99 = of_device_is_compatible(dp, "nvram,flash"); if (is_core_99) { err = core99_nvram_setup(dp, r1.start); goto bail; } #ifdef CONFIG_PPC32 if (machine_is(chrp) && nvram_naddrs == 1) { nvram_data = ioremap(r1.start, s1); nvram_mult = 1; ppc_md.nvram_read_val = direct_nvram_read_byte; ppc_md.nvram_write_val = direct_nvram_write_byte; } else if (nvram_naddrs == 1) { nvram_data = ioremap(r1.start, s1); nvram_mult = (s1 + NVRAM_SIZE - 1) / NVRAM_SIZE; ppc_md.nvram_read_val = direct_nvram_read_byte; ppc_md.nvram_write_val = direct_nvram_write_byte; } else if (nvram_naddrs == 2) { nvram_addr = ioremap(r1.start, s1); nvram_data = ioremap(r2.start, s2); ppc_md.nvram_read_val = indirect_nvram_read_byte; ppc_md.nvram_write_val = indirect_nvram_write_byte; } else if (nvram_naddrs == 0 && sys_ctrler == SYS_CTRLER_PMU) { #ifdef CONFIG_ADB_PMU nvram_naddrs = -1; ppc_md.nvram_read_val = pmu_nvram_read_byte; ppc_md.nvram_write_val = pmu_nvram_write_byte; #endif /* CONFIG_ADB_PMU */ } else { printk(KERN_ERR "Incompatible type of NVRAM\n"); err = -ENXIO; } #endif /* CONFIG_PPC32 */ bail: 
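/* common exit: drop the device-node reference and, on success, locate the OF/XPRAM/NR partitions within the image */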
of_node_put(dp); if (err == 0) lookup_partitions(); return err; } int pmac_get_partition(int partition) { return nvram_partitions[partition]; } u8 pmac_xpram_read(int xpaddr) { int offset = pmac_get_partition(pmac_nvram_XPRAM); if (offset < 0 || xpaddr < 0 || xpaddr > 0x100) return 0xff; return ppc_md.nvram_read_val(xpaddr + offset); } void pmac_xpram_write(int xpaddr, u8 data) { int offset = pmac_get_partition(pmac_nvram_XPRAM); if (offset < 0 || xpaddr < 0 || xpaddr > 0x100) return; ppc_md.nvram_write_val(xpaddr + offset, data); } EXPORT_SYMBOL(pmac_get_partition); EXPORT_SYMBOL(pmac_xpram_read); EXPORT_SYMBOL(pmac_xpram_write);
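/*
 * Userspace sketch of the CHRP partition-header checksum computed by
 * chrp_checksum() above: sum the signature plus the length and name
 * bytes, then fold the carries back until the value fits in one byte.
 * The struct layout mirrors struct chrp_header; the sample values in
 * main() are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hdr {
	uint8_t  signature;
	uint8_t  cksum;
	uint16_t len;
	char     name[12];
};

static uint8_t checksum(const struct hdr *h)
{
	const uint8_t *p   = (const uint8_t *)&h->len;
	const uint8_t *end = (const uint8_t *)(h + 1);
	uint16_t sum = h->signature;

	while (p < end)
		sum += *p++;
	while (sum > 0xff)		/* fold the carries back in */
		sum = (sum & 0xff) + (sum >> 8);
	return (uint8_t)sum;
}

int main(void)
{
	struct hdr h = { .signature = 0x5a, .len = 0x10 };

	strncpy(h.name, "common", sizeof(h.name));
	printf("cksum = 0x%02x\n", checksum(&h));
	return 0;
}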
gpl-2.0
rettigs/linux-yocto-3.14
net/wireless/lib80211.c
8963
7256
/* * lib80211 -- common bits for IEEE802.11 drivers * * Copyright(c) 2008 John W. Linville <linville@tuxdriver.com> * * Portions copied from old ieee80211 component, w/ original copyright * notices below: * * Host AP crypto routines * * Copyright (c) 2002-2003, Jouni Malinen <j@w1.fi> * Portions Copyright (C) 2004, Intel Corporation <jketreno@linux.intel.com> * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/ctype.h> #include <linux/ieee80211.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/string.h> #include <net/lib80211.h> #define DRV_NAME "lib80211" #define DRV_DESCRIPTION "common routines for IEEE802.11 drivers" MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_AUTHOR("John W. Linville <linville@tuxdriver.com>"); MODULE_LICENSE("GPL"); struct lib80211_crypto_alg { struct list_head list; struct lib80211_crypto_ops *ops; }; static LIST_HEAD(lib80211_crypto_algs); static DEFINE_SPINLOCK(lib80211_crypto_lock); static void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info, int force); static void lib80211_crypt_quiescing(struct lib80211_crypt_info *info); static void lib80211_crypt_deinit_handler(unsigned long data); const char *print_ssid(char *buf, const char *ssid, u8 ssid_len) { const char *s = ssid; char *d = buf; ssid_len = min_t(u8, ssid_len, IEEE80211_MAX_SSID_LEN); while (ssid_len--) { if (isprint(*s)) { *d++ = *s++; continue; } *d++ = '\\'; if (*s == '\0') *d++ = '0'; else if (*s == '\n') *d++ = 'n'; else if (*s == '\r') *d++ = 'r'; else if (*s == '\t') *d++ = 't'; else if (*s == '\\') *d++ = '\\'; else d += snprintf(d, 3, "%03o", *s); s++; } *d = '\0'; return buf; } EXPORT_SYMBOL(print_ssid); int lib80211_crypt_info_init(struct lib80211_crypt_info *info, char *name, spinlock_t *lock) { memset(info, 0, sizeof(*info)); info->name = name; info->lock = lock; INIT_LIST_HEAD(&info->crypt_deinit_list); setup_timer(&info->crypt_deinit_timer, lib80211_crypt_deinit_handler, (unsigned long)info); return 0; } EXPORT_SYMBOL(lib80211_crypt_info_init); void lib80211_crypt_info_free(struct lib80211_crypt_info *info) { int i; lib80211_crypt_quiescing(info); del_timer_sync(&info->crypt_deinit_timer); lib80211_crypt_deinit_entries(info, 1); for (i = 0; i < NUM_WEP_KEYS; i++) { struct lib80211_crypt_data *crypt = info->crypt[i]; if (crypt) { if (crypt->ops) { crypt->ops->deinit(crypt->priv); module_put(crypt->ops->owner); } kfree(crypt); info->crypt[i] = NULL; } } } EXPORT_SYMBOL(lib80211_crypt_info_free); static void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info, int force) { struct lib80211_crypt_data *entry, *next; unsigned long flags; spin_lock_irqsave(info->lock, flags); list_for_each_entry_safe(entry, next, &info->crypt_deinit_list, list) { if (atomic_read(&entry->refcnt) != 0 && !force) continue; list_del(&entry->list); if (entry->ops) { entry->ops->deinit(entry->priv); module_put(entry->ops->owner); } kfree(entry); } spin_unlock_irqrestore(info->lock, flags); } /* After this, crypt_deinit_list won't accept new members */ static void lib80211_crypt_quiescing(struct lib80211_crypt_info *info) { unsigned long flags; spin_lock_irqsave(info->lock, flags); info->crypt_quiesced = 1; spin_unlock_irqrestore(info->lock, flags); } static void lib80211_crypt_deinit_handler(unsigned long data) { struct lib80211_crypt_info *info = (struct lib80211_crypt_info *)data; unsigned long flags; lib80211_crypt_deinit_entries(info, 0); spin_lock_irqsave(info->lock, flags); if 
(!list_empty(&info->crypt_deinit_list) && !info->crypt_quiesced) { printk(KERN_DEBUG "%s: entries remaining in delayed crypt " "deletion list\n", info->name); info->crypt_deinit_timer.expires = jiffies + HZ; add_timer(&info->crypt_deinit_timer); } spin_unlock_irqrestore(info->lock, flags); } void lib80211_crypt_delayed_deinit(struct lib80211_crypt_info *info, struct lib80211_crypt_data **crypt) { struct lib80211_crypt_data *tmp; unsigned long flags; if (*crypt == NULL) return; tmp = *crypt; *crypt = NULL; /* must not run ops->deinit() while there may be pending encrypt or * decrypt operations. Use a list of delayed deinits to avoid needing * locking. */ spin_lock_irqsave(info->lock, flags); if (!info->crypt_quiesced) { list_add(&tmp->list, &info->crypt_deinit_list); if (!timer_pending(&info->crypt_deinit_timer)) { info->crypt_deinit_timer.expires = jiffies + HZ; add_timer(&info->crypt_deinit_timer); } } spin_unlock_irqrestore(info->lock, flags); } EXPORT_SYMBOL(lib80211_crypt_delayed_deinit); int lib80211_register_crypto_ops(struct lib80211_crypto_ops *ops) { unsigned long flags; struct lib80211_crypto_alg *alg; alg = kzalloc(sizeof(*alg), GFP_KERNEL); if (alg == NULL) return -ENOMEM; alg->ops = ops; spin_lock_irqsave(&lib80211_crypto_lock, flags); list_add(&alg->list, &lib80211_crypto_algs); spin_unlock_irqrestore(&lib80211_crypto_lock, flags); printk(KERN_DEBUG "lib80211_crypt: registered algorithm '%s'\n", ops->name); return 0; } EXPORT_SYMBOL(lib80211_register_crypto_ops); int lib80211_unregister_crypto_ops(struct lib80211_crypto_ops *ops) { struct lib80211_crypto_alg *alg; unsigned long flags; spin_lock_irqsave(&lib80211_crypto_lock, flags); list_for_each_entry(alg, &lib80211_crypto_algs, list) { if (alg->ops == ops) goto found; } spin_unlock_irqrestore(&lib80211_crypto_lock, flags); return -EINVAL; found: printk(KERN_DEBUG "lib80211_crypt: unregistered algorithm '%s'\n", ops->name); list_del(&alg->list); spin_unlock_irqrestore(&lib80211_crypto_lock, flags); kfree(alg); return 0; } EXPORT_SYMBOL(lib80211_unregister_crypto_ops); struct lib80211_crypto_ops *lib80211_get_crypto_ops(const char *name) { struct lib80211_crypto_alg *alg; unsigned long flags; spin_lock_irqsave(&lib80211_crypto_lock, flags); list_for_each_entry(alg, &lib80211_crypto_algs, list) { if (strcmp(alg->ops->name, name) == 0) goto found; } spin_unlock_irqrestore(&lib80211_crypto_lock, flags); return NULL; found: spin_unlock_irqrestore(&lib80211_crypto_lock, flags); return alg->ops; } EXPORT_SYMBOL(lib80211_get_crypto_ops); static void *lib80211_crypt_null_init(int keyidx) { return (void *)1; } static void lib80211_crypt_null_deinit(void *priv) { } static struct lib80211_crypto_ops lib80211_crypt_null = { .name = "NULL", .init = lib80211_crypt_null_init, .deinit = lib80211_crypt_null_deinit, .owner = THIS_MODULE, }; static int __init lib80211_init(void) { pr_info(DRV_DESCRIPTION "\n"); return lib80211_register_crypto_ops(&lib80211_crypt_null); } static void __exit lib80211_exit(void) { lib80211_unregister_crypto_ops(&lib80211_crypt_null); BUG_ON(!list_empty(&lib80211_crypto_algs)); } module_init(lib80211_init); module_exit(lib80211_exit);
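/*
 * Hedged usage sketch (not part of this file): how a driver might
 * resolve a cipher by name and instantiate it for key index 0.  The
 * function name, the error handling and the surrounding driver context
 * are assumptions; the ops lookup and init/owner handling follow the
 * interface used above.  kzalloc() leaves refcnt at zero, which is what
 * the delayed-deinit list above expects for an idle entry.
 */
static struct lib80211_crypt_data *example_new_crypt(const char *name)
{
	struct lib80211_crypto_ops *ops = lib80211_get_crypto_ops(name);
	struct lib80211_crypt_data *crypt;

	if (!ops || !try_module_get(ops->owner))
		return NULL;

	crypt = kzalloc(sizeof(*crypt), GFP_KERNEL);
	if (!crypt) {
		module_put(ops->owner);
		return NULL;
	}
	crypt->ops = ops;
	crypt->priv = crypt->ops->init(0);	/* key index 0 */
	if (!crypt->priv) {
		module_put(ops->owner);
		kfree(crypt);
		return NULL;
	}
	return crypt;
}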
gpl-2.0
Savaged-Zen/Savaged-Zen-Speedy
drivers/auxdisplay/cfag12864bfb.c
9219
4664
/* * Filename: cfag12864bfb.c * Version: 0.1.0 * Description: cfag12864b LCD framebuffer driver * License: GPLv2 * Depends: cfag12864b * * Author: Copyright (C) Miguel Ojeda Sandonis * Date: 2006-10-31 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/fb.h> #include <linux/mm.h> #include <linux/platform_device.h> #include <linux/string.h> #include <linux/uaccess.h> #include <linux/cfag12864b.h> #define CFAG12864BFB_NAME "cfag12864bfb" static struct fb_fix_screeninfo cfag12864bfb_fix __devinitdata = { .id = "cfag12864b", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_MONO10, .xpanstep = 0, .ypanstep = 0, .ywrapstep = 0, .line_length = CFAG12864B_WIDTH / 8, .accel = FB_ACCEL_NONE, }; static struct fb_var_screeninfo cfag12864bfb_var __devinitdata = { .xres = CFAG12864B_WIDTH, .yres = CFAG12864B_HEIGHT, .xres_virtual = CFAG12864B_WIDTH, .yres_virtual = CFAG12864B_HEIGHT, .bits_per_pixel = 1, .red = { 0, 1, 0 }, .green = { 0, 1, 0 }, .blue = { 0, 1, 0 }, .left_margin = 0, .right_margin = 0, .upper_margin = 0, .lower_margin = 0, .vmode = FB_VMODE_NONINTERLACED, }; static int cfag12864bfb_mmap(struct fb_info *info, struct vm_area_struct *vma) { return vm_insert_page(vma, vma->vm_start, virt_to_page(cfag12864b_buffer)); } static struct fb_ops cfag12864bfb_ops = { .owner = THIS_MODULE, .fb_read = fb_sys_read, .fb_write = fb_sys_write, .fb_fillrect = sys_fillrect, .fb_copyarea = sys_copyarea, .fb_imageblit = sys_imageblit, .fb_mmap = cfag12864bfb_mmap, }; static int __devinit cfag12864bfb_probe(struct platform_device *device) { int ret = -EINVAL; struct fb_info *info = framebuffer_alloc(0, &device->dev); if (!info) goto none; info->screen_base = (char __iomem *) cfag12864b_buffer; info->screen_size = CFAG12864B_SIZE; info->fbops = &cfag12864bfb_ops; info->fix = cfag12864bfb_fix; info->var = cfag12864bfb_var; info->pseudo_palette = NULL; info->par = NULL; info->flags = FBINFO_FLAG_DEFAULT; if (register_framebuffer(info) < 0) goto fballoced; platform_set_drvdata(device, info); printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, info->fix.id); return 0; fballoced: framebuffer_release(info); none: return ret; } static int __devexit cfag12864bfb_remove(struct platform_device *device) { struct fb_info *info = platform_get_drvdata(device); if (info) { unregister_framebuffer(info); framebuffer_release(info); } return 0; } static struct platform_driver cfag12864bfb_driver = { .probe = cfag12864bfb_probe, .remove = __devexit_p(cfag12864bfb_remove), .driver = { .name = CFAG12864BFB_NAME, }, }; static struct platform_device *cfag12864bfb_device; static int __init cfag12864bfb_init(void) { int ret = -EINVAL; /* cfag12864b_init() must be called first */ if (!cfag12864b_isinited()) { printk(KERN_ERR CFAG12864BFB_NAME ": ERROR: " "cfag12864b is not initialized\n"); goto none; } if 
(cfag12864b_enable()) { printk(KERN_ERR CFAG12864BFB_NAME ": ERROR: " "can't enable cfag12864b refreshing (being used)\n"); return -ENODEV; } ret = platform_driver_register(&cfag12864bfb_driver); if (!ret) { cfag12864bfb_device = platform_device_alloc(CFAG12864BFB_NAME, 0); if (cfag12864bfb_device) ret = platform_device_add(cfag12864bfb_device); else ret = -ENOMEM; if (ret) { platform_device_put(cfag12864bfb_device); platform_driver_unregister(&cfag12864bfb_driver); } } none: return ret; } static void __exit cfag12864bfb_exit(void) { platform_device_unregister(cfag12864bfb_device); platform_driver_unregister(&cfag12864bfb_driver); cfag12864b_disable(); } module_init(cfag12864bfb_init); module_exit(cfag12864bfb_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>"); MODULE_DESCRIPTION("cfag12864b LCD framebuffer driver");
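/*
 * Standalone sketch (not driver code) of the packed 1-bpp addressing
 * implied by the fix/var screeninfo above: one byte carries eight
 * horizontally adjacent pixels and line_length is width / 8.  The
 * 128x64 geometry mirrors CFAG12864B_WIDTH/HEIGHT; treating the most
 * significant bit as the leftmost pixel is an assumption.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WIDTH  128
#define HEIGHT 64

static uint8_t fb[WIDTH / 8 * HEIGHT];

static void set_pixel(unsigned int x, unsigned int y)
{
	fb[y * (WIDTH / 8) + x / 8] |= 0x80 >> (x % 8);
}

int main(void)
{
	unsigned int x;

	memset(fb, 0, sizeof(fb));
	for (x = 0; x < WIDTH; x++)	/* draw one horizontal line */
		set_pixel(x, HEIGHT / 2);
	printf("first byte of row %d: 0x%02x\n", HEIGHT / 2,
	       fb[(HEIGHT / 2) * (WIDTH / 8)]);
	return 0;
}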
gpl-2.0
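A note on the mmap path above: cfag12864bfb_mmap() inserts the single page behind cfag12864b_buffer (128x64 pixels at 1 bpp is exactly 1024 bytes) straight into the caller's mapping, so userspace can draw by flipping bits directly. The sketch below is a hypothetical client, not part of the driver; it assumes the device registered as /dev/fb0 (the node name depends on the system) and MSB-first bit packing within each byte, matching line_length = CFAG12864B_WIDTH / 8 in cfag12864bfb_fix.

/* Hedged userspace sketch, not part of the driver: map the cfag12864b
 * framebuffer and set a single pixel. Assumes /dev/fb0 and MSB-first
 * packing within each byte. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define WIDTH        128                /* CFAG12864B_WIDTH */
#define HEIGHT       64                 /* CFAG12864B_HEIGHT */
#define LINE_LENGTH  (WIDTH / 8)        /* 1 bpp -> 16 bytes per line */

int main(void)
{
	int fd = open("/dev/fb0", O_RDWR);
	uint8_t *fb;
	unsigned int x = 10, y = 20;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	fb = mmap(NULL, LINE_LENGTH * HEIGHT, PROT_READ | PROT_WRITE,
		  MAP_SHARED, fd, 0);
	if (fb == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}
	fb[y * LINE_LENGTH + x / 8] |= 0x80 >> (x % 8);	/* set pixel (x, y) */
	munmap(fb, LINE_LENGTH * HEIGHT);
	close(fd);
	return 0;
}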
ItachiSan/android_kernel_msm_caf
arch/powerpc/kernel/vecemu.c
11779
8612
/* * Routines to emulate some Altivec/VMX instructions, specifically * those that can trap when given denormalized operands in Java mode. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/sched.h> #include <asm/ptrace.h> #include <asm/processor.h> #include <asm/uaccess.h> /* Functions in vector.S */ extern void vaddfp(vector128 *dst, vector128 *a, vector128 *b); extern void vsubfp(vector128 *dst, vector128 *a, vector128 *b); extern void vmaddfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c); extern void vnmsubfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c); extern void vrefp(vector128 *dst, vector128 *src); extern void vrsqrtefp(vector128 *dst, vector128 *src); extern void vexptep(vector128 *dst, vector128 *src); static unsigned int exp2s[8] = { 0x800000, 0x8b95c2, 0x9837f0, 0xa5fed7, 0xb504f3, 0xc5672a, 0xd744fd, 0xeac0c7 }; /* * Computes an estimate of 2^x. The `s' argument is the 32-bit * single-precision floating-point representation of x. */ static unsigned int eexp2(unsigned int s) { int exp, pwr; unsigned int mant, frac; /* extract exponent field from input */ exp = ((s >> 23) & 0xff) - 127; if (exp > 7) { /* check for NaN input */ if (exp == 128 && (s & 0x7fffff) != 0) return s | 0x400000; /* return QNaN */ /* 2^-big = 0, 2^+big = +Inf */ return (s & 0x80000000)? 0: 0x7f800000; /* 0 or +Inf */ } if (exp < -23) return 0x3f800000; /* 1.0 */ /* convert to fixed point integer in 9.23 representation */ pwr = (s & 0x7fffff) | 0x800000; if (exp > 0) pwr <<= exp; else pwr >>= -exp; if (s & 0x80000000) pwr = -pwr; /* extract integer part, which becomes exponent part of result */ exp = (pwr >> 23) + 126; if (exp >= 254) return 0x7f800000; if (exp < -23) return 0; /* table lookup on top 3 bits of fraction to get mantissa */ mant = exp2s[(pwr >> 20) & 7]; /* linear interpolation using remaining 20 bits of fraction */ asm("mulhwu %0,%1,%2" : "=r" (frac) : "r" (pwr << 12), "r" (0x172b83ff)); asm("mulhwu %0,%1,%2" : "=r" (frac) : "r" (frac), "r" (mant)); mant += frac; if (exp >= 0) return mant + (exp << 23); /* denormalized result */ exp = -exp; mant += 1 << (exp - 1); return mant >> exp; } /* * Computes an estimate of log_2(x). The `s' argument is the 32-bit * single-precision floating-point representation of x. 
*/ static unsigned int elog2(unsigned int s) { int exp, mant, lz, frac; exp = s & 0x7f800000; mant = s & 0x7fffff; if (exp == 0x7f800000) { /* Inf or NaN */ if (mant != 0) s |= 0x400000; /* turn NaN into QNaN */ return s; } if ((exp | mant) == 0) /* +0 or -0 */ return 0xff800000; /* return -Inf */ if (exp == 0) { /* denormalized */ asm("cntlzw %0,%1" : "=r" (lz) : "r" (mant)); mant <<= lz - 8; exp = (-118 - lz) << 23; } else { mant |= 0x800000; exp -= 127 << 23; } if (mant >= 0xb504f3) { /* 2^0.5 * 2^23 */ exp |= 0x400000; /* 0.5 * 2^23 */ asm("mulhwu %0,%1,%2" : "=r" (mant) : "r" (mant), "r" (0xb504f334)); /* 2^-0.5 * 2^32 */ } if (mant >= 0x9837f0) { /* 2^0.25 * 2^23 */ exp |= 0x200000; /* 0.25 * 2^23 */ asm("mulhwu %0,%1,%2" : "=r" (mant) : "r" (mant), "r" (0xd744fccb)); /* 2^-0.25 * 2^32 */ } if (mant >= 0x8b95c2) { /* 2^0.125 * 2^23 */ exp |= 0x100000; /* 0.125 * 2^23 */ asm("mulhwu %0,%1,%2" : "=r" (mant) : "r" (mant), "r" (0xeac0c6e8)); /* 2^-0.125 * 2^32 */ } if (mant > 0x800000) { /* 1.0 * 2^23 */ /* calculate (mant - 1) * 1.381097463 */ /* 1.381097463 == 0.125 / (2^0.125 - 1) */ asm("mulhwu %0,%1,%2" : "=r" (frac) : "r" ((mant - 0x800000) << 1), "r" (0xb0c7cd3a)); exp += frac; } s = exp & 0x80000000; if (exp != 0) { if (s) exp = -exp; asm("cntlzw %0,%1" : "=r" (lz) : "r" (exp)); lz = 8 - lz; if (lz > 0) exp >>= lz; else if (lz < 0) exp <<= -lz; s += ((lz + 126) << 23) + exp; } return s; } #define VSCR_SAT 1 static int ctsxs(unsigned int x, int scale, unsigned int *vscrp) { int exp, mant; exp = (x >> 23) & 0xff; mant = x & 0x7fffff; if (exp == 255 && mant != 0) return 0; /* NaN -> 0 */ exp = exp - 127 + scale; if (exp < 0) return 0; /* round towards zero */ if (exp >= 31) { /* saturate, unless the result would be -2^31 */ if (x + (scale << 23) != 0xcf000000) *vscrp |= VSCR_SAT; return (x & 0x80000000)? 0x80000000: 0x7fffffff; } mant |= 0x800000; mant = (mant << 7) >> (30 - exp); return (x & 0x80000000)? 
-mant: mant; } static unsigned int ctuxs(unsigned int x, int scale, unsigned int *vscrp) { int exp; unsigned int mant; exp = (x >> 23) & 0xff; mant = x & 0x7fffff; if (exp == 255 && mant != 0) return 0; /* NaN -> 0 */ exp = exp - 127 + scale; if (exp < 0) return 0; /* round towards zero */ if (x & 0x80000000) { /* negative => saturate to 0 */ *vscrp |= VSCR_SAT; return 0; } if (exp >= 32) { /* saturate */ *vscrp |= VSCR_SAT; return 0xffffffff; } mant |= 0x800000; mant = (mant << 8) >> (31 - exp); return mant; } /* Round to floating integer, towards 0 */ static unsigned int rfiz(unsigned int x) { int exp; exp = ((x >> 23) & 0xff) - 127; if (exp == 128 && (x & 0x7fffff) != 0) return x | 0x400000; /* NaN -> make it a QNaN */ if (exp >= 23) return x; /* it's an integer already (or Inf) */ if (exp < 0) return x & 0x80000000; /* |x| < 1.0 rounds to 0 */ return x & ~(0x7fffff >> exp); } /* Round to floating integer, towards +/- Inf */ static unsigned int rfii(unsigned int x) { int exp, mask; exp = ((x >> 23) & 0xff) - 127; if (exp == 128 && (x & 0x7fffff) != 0) return x | 0x400000; /* NaN -> make it a QNaN */ if (exp >= 23) return x; /* it's an integer already (or Inf) */ if ((x & 0x7fffffff) == 0) return x; /* +/-0 -> +/-0 */ if (exp < 0) /* 0 < |x| < 1.0 rounds to +/- 1.0 */ return (x & 0x80000000) | 0x3f800000; mask = 0x7fffff >> exp; /* mantissa overflows into exponent - that's OK, it can't overflow into the sign bit */ return (x + mask) & ~mask; } /* Round to floating integer, to nearest */ static unsigned int rfin(unsigned int x) { int exp, half; exp = ((x >> 23) & 0xff) - 127; if (exp == 128 && (x & 0x7fffff) != 0) return x | 0x400000; /* NaN -> make it a QNaN */ if (exp >= 23) return x; /* it's an integer already (or Inf) */ if (exp < -1) return x & 0x80000000; /* |x| < 0.5 -> +/-0 */ if (exp == -1) /* 0.5 <= |x| < 1.0 rounds to +/- 1.0 */ return (x & 0x80000000) | 0x3f800000; half = 0x400000 >> exp; /* add 0.5 to the magnitude and chop off the fraction bits */ return (x + half) & ~(0x7fffff >> exp); } int emulate_altivec(struct pt_regs *regs) { unsigned int instr, i; unsigned int va, vb, vc, vd; vector128 *vrs; if (get_user(instr, (unsigned int __user *) regs->nip)) return -EFAULT; if ((instr >> 26) != 4) return -EINVAL; /* not an altivec instruction */ vd = (instr >> 21) & 0x1f; va = (instr >> 16) & 0x1f; vb = (instr >> 11) & 0x1f; vc = (instr >> 6) & 0x1f; vrs = current->thread.vr; switch (instr & 0x3f) { case 10: switch (vc) { case 0: /* vaddfp */ vaddfp(&vrs[vd], &vrs[va], &vrs[vb]); break; case 1: /* vsubfp */ vsubfp(&vrs[vd], &vrs[va], &vrs[vb]); break; case 4: /* vrefp */ vrefp(&vrs[vd], &vrs[vb]); break; case 5: /* vrsqrtefp */ vrsqrtefp(&vrs[vd], &vrs[vb]); break; case 6: /* vexptefp */ for (i = 0; i < 4; ++i) vrs[vd].u[i] = eexp2(vrs[vb].u[i]); break; case 7: /* vlogefp */ for (i = 0; i < 4; ++i) vrs[vd].u[i] = elog2(vrs[vb].u[i]); break; case 8: /* vrfin */ for (i = 0; i < 4; ++i) vrs[vd].u[i] = rfin(vrs[vb].u[i]); break; case 9: /* vrfiz */ for (i = 0; i < 4; ++i) vrs[vd].u[i] = rfiz(vrs[vb].u[i]); break; case 10: /* vrfip */ for (i = 0; i < 4; ++i) { u32 x = vrs[vb].u[i]; x = (x & 0x80000000)? rfiz(x): rfii(x); vrs[vd].u[i] = x; } break; case 11: /* vrfim */ for (i = 0; i < 4; ++i) { u32 x = vrs[vb].u[i]; x = (x & 0x80000000)? 
rfii(x): rfiz(x); vrs[vd].u[i] = x; } break; case 14: /* vctuxs */ for (i = 0; i < 4; ++i) vrs[vd].u[i] = ctuxs(vrs[vb].u[i], va, &current->thread.vscr.u[3]); break; case 15: /* vctsxs */ for (i = 0; i < 4; ++i) vrs[vd].u[i] = ctsxs(vrs[vb].u[i], va, &current->thread.vscr.u[3]); break; default: return -EINVAL; } break; case 46: /* vmaddfp */ vmaddfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]); break; case 47: /* vnmsubfp */ vnmsubfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]); break; default: return -EINVAL; } return 0; }
gpl-2.0
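The estimation scheme inside eexp2() above is the interesting part of this file: the input is converted to 9.23 fixed point, the integer part becomes the result exponent, the top three fraction bits index exp2s[], and the remaining 20 bits drive a linear interpolation done with two mulhwu (high-word multiply) instructions. What follows is a portable host-side sketch of the common path only; NaN, overflow, and denormal handling are deliberately omitted, and mulhwu() here is a plain C stand-in for the PowerPC instruction. Compile with -lm to compare against libm.

/* Portable illustration of the eexp2() estimation above (common path
 * only; special-case handling omitted). */
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const uint32_t exp2s[8] = {
	0x800000, 0x8b95c2, 0x9837f0, 0xa5fed7,
	0xb504f3, 0xc5672a, 0xd744fd, 0xeac0c7
};

static uint32_t mulhwu(uint32_t a, uint32_t b)
{
	return (uint32_t)(((uint64_t)a * b) >> 32);	/* high word of a * b */
}

static float eexp2_estimate(float x)
{
	int32_t pwr = (int32_t)lrintf(x * (float)(1 << 23)); /* 9.23 fixed point */
	int exp = (pwr >> 23) + 126;            /* integer part -> biased exponent */
	uint32_t mant = exp2s[(pwr >> 20) & 7]; /* table lookup, top 3 fraction bits */
	uint32_t frac = mulhwu((uint32_t)pwr << 12, 0x172b83ff);
	uint32_t bits;
	float r;

	mant += mulhwu(frac, mant);             /* interpolate on the low 20 bits */
	bits = mant + ((uint32_t)exp << 23);    /* assemble the IEEE754 result */
	memcpy(&r, &bits, sizeof(r));
	return r;
}

int main(void)
{
	float x;

	for (x = -2.0f; x <= 2.0f; x += 0.5f)
		printf("2^%+.2f ~= %.6f (libm: %.6f)\n",
		       x, eexp2_estimate(x), exp2f(x));
	return 0;
}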
TheWolfer22/android_kernel_lge_g3
drivers/media/video/sn9c102/sn9c102_pas106b.c
12803
8105
/*************************************************************************** * Plug-in for PAS106B image sensor connected to the SN9C1xx PC Camera * * Controllers * * * * Copyright (C) 2004-2007 by Luca Risolia <luca.risolia@studio.unibo.it> * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the Free Software * * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ***************************************************************************/ #include <linux/delay.h> #include "sn9c102_sensor.h" #include "sn9c102_devtable.h" static int pas106b_init(struct sn9c102_device* cam) { int err = 0; err = sn9c102_write_const_regs(cam, {0x00, 0x10}, {0x00, 0x11}, {0x00, 0x14}, {0x20, 0x17}, {0x20, 0x19}, {0x09, 0x18}); err += sn9c102_i2c_write(cam, 0x02, 0x0c); err += sn9c102_i2c_write(cam, 0x05, 0x5a); err += sn9c102_i2c_write(cam, 0x06, 0x88); err += sn9c102_i2c_write(cam, 0x07, 0x80); err += sn9c102_i2c_write(cam, 0x10, 0x06); err += sn9c102_i2c_write(cam, 0x11, 0x06); err += sn9c102_i2c_write(cam, 0x12, 0x00); err += sn9c102_i2c_write(cam, 0x14, 0x02); err += sn9c102_i2c_write(cam, 0x13, 0x01); msleep(400); return err; } static int pas106b_get_ctrl(struct sn9c102_device* cam, struct v4l2_control* ctrl) { switch (ctrl->id) { case V4L2_CID_EXPOSURE: { int r1 = sn9c102_i2c_read(cam, 0x03), r2 = sn9c102_i2c_read(cam, 0x04); if (r1 < 0 || r2 < 0) return -EIO; ctrl->value = (r1 << 4) | (r2 & 0x0f); } return 0; case V4L2_CID_RED_BALANCE: if ((ctrl->value = sn9c102_i2c_read(cam, 0x0c)) < 0) return -EIO; ctrl->value &= 0x1f; return 0; case V4L2_CID_BLUE_BALANCE: if ((ctrl->value = sn9c102_i2c_read(cam, 0x09)) < 0) return -EIO; ctrl->value &= 0x1f; return 0; case V4L2_CID_GAIN: if ((ctrl->value = sn9c102_i2c_read(cam, 0x0e)) < 0) return -EIO; ctrl->value &= 0x1f; return 0; case V4L2_CID_CONTRAST: if ((ctrl->value = sn9c102_i2c_read(cam, 0x0f)) < 0) return -EIO; ctrl->value &= 0x07; return 0; case SN9C102_V4L2_CID_GREEN_BALANCE: if ((ctrl->value = sn9c102_i2c_read(cam, 0x0a)) < 0) return -EIO; ctrl->value = (ctrl->value & 0x1f) << 1; return 0; case SN9C102_V4L2_CID_DAC_MAGNITUDE: if ((ctrl->value = sn9c102_i2c_read(cam, 0x08)) < 0) return -EIO; ctrl->value &= 0xf8; return 0; default: return -EINVAL; } } static int pas106b_set_ctrl(struct sn9c102_device* cam, const struct v4l2_control* ctrl) { int err = 0; switch (ctrl->id) { case V4L2_CID_EXPOSURE: err += sn9c102_i2c_write(cam, 0x03, ctrl->value >> 4); err += sn9c102_i2c_write(cam, 0x04, ctrl->value & 0x0f); break; case V4L2_CID_RED_BALANCE: err += sn9c102_i2c_write(cam, 0x0c, ctrl->value); break; case V4L2_CID_BLUE_BALANCE: err += sn9c102_i2c_write(cam, 0x09, ctrl->value); break; case V4L2_CID_GAIN: err += sn9c102_i2c_write(cam, 0x0e, ctrl->value); break; case V4L2_CID_CONTRAST: err += sn9c102_i2c_write(cam, 0x0f, ctrl->value); break; case SN9C102_V4L2_CID_GREEN_BALANCE: err += sn9c102_i2c_write(cam, 0x0a, ctrl->value >> 1); err += sn9c102_i2c_write(cam, 0x0b, ctrl->value >> 1); 
break; case SN9C102_V4L2_CID_DAC_MAGNITUDE: err += sn9c102_i2c_write(cam, 0x08, ctrl->value << 3); break; default: return -EINVAL; } err += sn9c102_i2c_write(cam, 0x13, 0x01); return err ? -EIO : 0; } static int pas106b_set_crop(struct sn9c102_device* cam, const struct v4l2_rect* rect) { struct sn9c102_sensor* s = sn9c102_get_sensor(cam); int err = 0; u8 h_start = (u8)(rect->left - s->cropcap.bounds.left) + 4, v_start = (u8)(rect->top - s->cropcap.bounds.top) + 3; err += sn9c102_write_reg(cam, h_start, 0x12); err += sn9c102_write_reg(cam, v_start, 0x13); return err; } static int pas106b_set_pix_format(struct sn9c102_device* cam, const struct v4l2_pix_format* pix) { int err = 0; if (pix->pixelformat == V4L2_PIX_FMT_SN9C10X) err += sn9c102_write_reg(cam, 0x2c, 0x17); else err += sn9c102_write_reg(cam, 0x20, 0x17); return err; } static const struct sn9c102_sensor pas106b = { .name = "PAS106B", .maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>", .supported_bridge = BRIDGE_SN9C101 | BRIDGE_SN9C102, .sysfs_ops = SN9C102_I2C_READ | SN9C102_I2C_WRITE, .frequency = SN9C102_I2C_400KHZ | SN9C102_I2C_100KHZ, .interface = SN9C102_I2C_2WIRES, .i2c_slave_id = 0x40, .init = &pas106b_init, .qctrl = { { .id = V4L2_CID_EXPOSURE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "exposure", .minimum = 0x125, .maximum = 0xfff, .step = 0x001, .default_value = 0x140, .flags = 0, }, { .id = V4L2_CID_GAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "global gain", .minimum = 0x00, .maximum = 0x1f, .step = 0x01, .default_value = 0x0d, .flags = 0, }, { .id = V4L2_CID_CONTRAST, .type = V4L2_CTRL_TYPE_INTEGER, .name = "contrast", .minimum = 0x00, .maximum = 0x07, .step = 0x01, .default_value = 0x00, /* 0x00~0x03 have same effect */ .flags = 0, }, { .id = V4L2_CID_RED_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "red balance", .minimum = 0x00, .maximum = 0x1f, .step = 0x01, .default_value = 0x04, .flags = 0, }, { .id = V4L2_CID_BLUE_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "blue balance", .minimum = 0x00, .maximum = 0x1f, .step = 0x01, .default_value = 0x06, .flags = 0, }, { .id = SN9C102_V4L2_CID_GREEN_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "green balance", .minimum = 0x00, .maximum = 0x3e, .step = 0x02, .default_value = 0x02, .flags = 0, }, { .id = SN9C102_V4L2_CID_DAC_MAGNITUDE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "DAC magnitude", .minimum = 0x00, .maximum = 0x1f, .step = 0x01, .default_value = 0x01, .flags = 0, }, }, .get_ctrl = &pas106b_get_ctrl, .set_ctrl = &pas106b_set_ctrl, .cropcap = { .bounds = { .left = 0, .top = 0, .width = 352, .height = 288, }, .defrect = { .left = 0, .top = 0, .width = 352, .height = 288, }, }, .set_crop = &pas106b_set_crop, .pix_format = { .width = 352, .height = 288, .pixelformat = V4L2_PIX_FMT_SBGGR8, .priv = 8, /* we use this field as 'bits per pixel' */ }, .set_pix_format = &pas106b_set_pix_format }; int sn9c102_probe_pas106b(struct sn9c102_device* cam) { int r0 = 0, r1 = 0; unsigned int pid = 0; /* Minimal initialization to enable the I2C communication NOTE: do NOT change the values! */ if (sn9c102_write_const_regs(cam, {0x01, 0x01}, /* sensor power down */ {0x00, 0x01}, /* sensor power on */ {0x28, 0x17})) /* sensor clock at 24 MHz */ return -EIO; r0 = sn9c102_i2c_try_read(cam, &pas106b, 0x00); r1 = sn9c102_i2c_try_read(cam, &pas106b, 0x01); if (r0 < 0 || r1 < 0) return -EIO; pid = (r0 << 11) | ((r1 & 0xf0) >> 4); if (pid != 0x007) return -ENODEV; sn9c102_attach_sensor(cam, &pas106b); return 0; }
gpl-2.0
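Worth noting in the get/set pair above: the PAS106B exposure is a 12-bit value split across two registers, with 0x03 holding bits 11..4 and the low nibble of 0x04 holding bits 3..0, and pas106b_set_ctrl() kicks register 0x13 afterwards so the sensor latches the new settings. A tiny arithmetic-only sketch of that packing (no I2C traffic; the default value comes from the qctrl table above):

/* Pure-arithmetic sketch of the PAS106B exposure register split. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int exposure = 0x140;          /* driver default */
	unsigned int reg03 = exposure >> 4;     /* bits 11..4 */
	unsigned int reg04 = exposure & 0x0f;   /* bits 3..0  */

	/* pas106b_get_ctrl() reassembles the value the same way */
	assert(((reg03 << 4) | (reg04 & 0x0f)) == exposure);
	printf("exposure 0x%03x -> reg 0x03 = 0x%02x, reg 0x04 = 0x%02x\n",
	       exposure, reg03, reg04);
	return 0;
}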
edoko/Air_Kernel_for_GN
fs/afs/misc.c
13315
2353
/* miscellaneous bits * * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/errno.h> #include <rxrpc/packet.h> #include "internal.h" #include "afs_fs.h" /* * convert an AFS abort code to a Linux error number */ int afs_abort_to_error(u32 abort_code) { switch (abort_code) { case 13: return -EACCES; case 27: return -EFBIG; case 30: return -EROFS; case VSALVAGE: return -EIO; case VNOVNODE: return -ENOENT; case VNOVOL: return -ENOMEDIUM; case VVOLEXISTS: return -EEXIST; case VNOSERVICE: return -EIO; case VOFFLINE: return -ENOENT; case VONLINE: return -EEXIST; case VDISKFULL: return -ENOSPC; case VOVERQUOTA: return -EDQUOT; case VBUSY: return -EBUSY; case VMOVED: return -ENXIO; case 0x2f6df0a: return -EWOULDBLOCK; case 0x2f6df0c: return -EACCES; case 0x2f6df0f: return -EBUSY; case 0x2f6df10: return -EEXIST; case 0x2f6df11: return -EXDEV; case 0x2f6df13: return -ENOTDIR; case 0x2f6df14: return -EISDIR; case 0x2f6df15: return -EINVAL; case 0x2f6df1a: return -EFBIG; case 0x2f6df1b: return -ENOSPC; case 0x2f6df1d: return -EROFS; case 0x2f6df1e: return -EMLINK; case 0x2f6df20: return -EDOM; case 0x2f6df21: return -ERANGE; case 0x2f6df22: return -EDEADLK; case 0x2f6df23: return -ENAMETOOLONG; case 0x2f6df24: return -ENOLCK; case 0x2f6df26: return -ENOTEMPTY; case 0x2f6df78: return -EDQUOT; case RXKADINCONSISTENCY: return -EPROTO; case RXKADPACKETSHORT: return -EPROTO; case RXKADLEVELFAIL: return -EKEYREJECTED; case RXKADTICKETLEN: return -EKEYREJECTED; case RXKADOUTOFSEQUENCE: return -EPROTO; case RXKADNOAUTH: return -EKEYREJECTED; case RXKADBADKEY: return -EKEYREJECTED; case RXKADBADTICKET: return -EKEYREJECTED; case RXKADUNKNOWNKEY: return -EKEYREJECTED; case RXKADEXPIRED: return -EKEYEXPIRED; case RXKADSEALEDINCON: return -EKEYREJECTED; case RXKADDATALEN: return -EKEYREJECTED; case RXKADILLEGALLEVEL: return -EKEYREJECTED; default: return -EREMOTEIO; } }
gpl-2.0
MinimumLaw/uccu-kernel
arch/arm/plat-mxc/clock.c
4
13659
/* * Based on arch/arm/plat-omap/clock.c * * Copyright (C) 2004 - 2005 Nokia corporation * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com> * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com> * Copyright 2007-2010 Freescale Semiconductor, Inc. * Copyright 2008 Juergen Beisert, kernel@pengutronix.de * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, * MA 02110-1301, USA. */ /* #define DEBUG */ #include <linux/clk.h> #include <linux/cpufreq.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/semaphore.h> #include <linux/string.h> #include <mach/clock.h> #include <mach/hardware.h> #if (defined(CONFIG_ARCH_MX5) || defined(CONFIG_ARCH_MX37)) extern int dvfs_core_is_active; extern int lp_high_freq; extern int lp_med_freq; extern int low_bus_freq_mode; extern int high_bus_freq_mode; extern int set_high_bus_freq(int high_freq); extern int set_low_bus_freq(void); extern int low_freq_bus_used(void); #else int dvfs_core_is_active; #endif static LIST_HEAD(clocks); static DEFINE_MUTEX(clocks_mutex); static DEFINE_SPINLOCK(clockfw_lock); /*------------------------------------------------------------------------- * Standard clock functions defined in include/linux/clk.h *-------------------------------------------------------------------------*/ /* * All the code inside #ifndef CONFIG_COMMON_CLKDEV can be removed once all * MXC architectures have switched to using clkdev. */ #ifndef CONFIG_COMMON_CLKDEV /* * Retrieve a clock by name. * * Note that we first try to use device id on the bus * and clock name. If this fails, we try to use "<name>.<id>". If this fails, * we try to use clock name only. * The reference count to the clock's module owner ref count is incremented. 
*/ struct clk *clk_get(struct device *dev, const char *id) { struct clk *p, *clk = ERR_PTR(-ENOENT); int idno; const char *str; if (id == NULL) return clk; if (dev == NULL || dev->bus != &platform_bus_type) idno = -1; else idno = to_platform_device(dev)->id; mutex_lock(&clocks_mutex); list_for_each_entry(p, &clocks, node) { if (p->id == idno && strcmp(id, p->name) == 0 && try_module_get(p->owner)) { clk = p; goto found; } } str = strrchr(id, '.'); if (str) { int cnt = str - id; str++; idno = simple_strtol(str, NULL, 10); list_for_each_entry(p, &clocks, node) { if (p->id == idno && strlen(p->name) == cnt && strncmp(id, p->name, cnt) == 0 && try_module_get(p->owner)) { clk = p; goto found; } } } list_for_each_entry(p, &clocks, node) { if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) { clk = p; goto found; } } printk(KERN_WARNING "clk: Unable to get requested clock: %s\n", id); found: mutex_unlock(&clocks_mutex); return clk; } EXPORT_SYMBOL(clk_get); #endif static void __clk_disable(struct clk *clk) { if (clk == NULL || IS_ERR(clk) || !clk->usecount) return; if (!(--clk->usecount)) { __clk_disable(clk->parent); __clk_disable(clk->secondary); if (clk->disable) clk->disable(clk); } } static int __clk_enable(struct clk *clk) { if (clk == NULL || IS_ERR(clk)) return -EINVAL; if (clk->usecount++ == 0) { __clk_enable(clk->parent); __clk_enable(clk->secondary); if (clk->enable) clk->enable(clk); } return 0; } /* This function increments the reference count on the clock and enables the * clock if not already enabled. The parent clock tree is recursively enabled */ int clk_enable(struct clk *clk) { unsigned long flags; int ret = 0; if (clk == NULL || IS_ERR(clk)) return -EINVAL; if ((clk->flags & CPU_FREQ_TRIG_UPDATE) && (clk_get_usecount(clk) == 0)) { #if (defined(CONFIG_ARCH_MX5) || defined(CONFIG_ARCH_MX37)) if (low_freq_bus_used() && !low_bus_freq_mode) { if (!(clk->flags & (AHB_HIGH_SET_POINT | AHB_MED_SET_POINT))) set_low_bus_freq(); } else { if (!high_bus_freq_mode) /* Currently at low or medium set point, * need to set to high setpoint */ set_high_bus_freq(0); else if (high_bus_freq_mode || low_bus_freq_mode) /* Currently at low or high set point, * need to set to medium setpoint */ set_high_bus_freq(0); } #endif } spin_lock_irqsave(&clockfw_lock, flags); ret = __clk_enable(clk); spin_unlock_irqrestore(&clockfw_lock, flags); return ret; } EXPORT_SYMBOL(clk_enable); /* This function decrements the reference count on the clock and disables * the clock when reference count is 0. The parent clock tree is * recursively disabled */ void clk_disable(struct clk *clk) { unsigned long flags; if (clk == NULL || IS_ERR(clk)) return; spin_lock_irqsave(&clockfw_lock, flags); __clk_disable(clk); spin_unlock_irqrestore(&clockfw_lock, flags); if ((clk->flags & CPU_FREQ_TRIG_UPDATE) && (clk_get_usecount(clk) == 0)) { #if (defined(CONFIG_ARCH_MX5) || defined(CONFIG_ARCH_MX37)) if (low_freq_bus_used() && !low_bus_freq_mode) set_low_bus_freq(); else if (!high_bus_freq_mode) set_high_bus_freq(0); #endif } } EXPORT_SYMBOL(clk_disable); /*! * @brief Function to get the usage count for the requested clock. * * This function returns the reference count for the clock. * * @param clk Handle to the clock to query. * * @return Returns the usage count for the requested clock. */ int clk_get_usecount(struct clk *clk) { if (clk == NULL || IS_ERR(clk)) return 0; return clk->usecount; } EXPORT_SYMBOL(clk_get_usecount); /* Retrieve the *current* clock rate.
If the clock itself * does not provide a special calculation routine, ask * its parent and so on, until one is able to return * a valid clock rate */ unsigned long clk_get_rate(struct clk *clk) { if (clk == NULL || IS_ERR(clk)) return 0UL; if (clk->get_rate) return clk->get_rate(clk); return clk_get_rate(clk->parent); } EXPORT_SYMBOL(clk_get_rate); #ifndef CONFIG_COMMON_CLKDEV /* Decrement the clock's module reference count */ void clk_put(struct clk *clk) { if (clk && !IS_ERR(clk)) module_put(clk->owner); } EXPORT_SYMBOL(clk_put); #endif /* Round the requested clock rate to the nearest supported * rate that is less than or equal to the requested rate. * This is dependent on the clock's current parent. */ long clk_round_rate(struct clk *clk, unsigned long rate) { if (clk == NULL || IS_ERR(clk) || !clk->round_rate) return 0; return clk->round_rate(clk, rate); } EXPORT_SYMBOL(clk_round_rate); /* Set the clock to the requested clock rate. The rate must * match a supported rate exactly based on what clk_round_rate returns */ int clk_set_rate(struct clk *clk, unsigned long rate) { unsigned long flags; int ret = -EINVAL; if (clk == NULL || IS_ERR(clk) || clk->set_rate == NULL || rate == 0) return ret; spin_lock_irqsave(&clockfw_lock, flags); ret = clk->set_rate(clk, rate); spin_unlock_irqrestore(&clockfw_lock, flags); return ret; } EXPORT_SYMBOL(clk_set_rate); /* Set the clock's parent to another clock source */ int clk_set_parent(struct clk *clk, struct clk *parent) { unsigned long flags; int ret = -EINVAL; struct clk *prev_parent; if (clk == NULL || IS_ERR(clk) || parent == NULL || IS_ERR(parent) || clk->set_parent == NULL) return ret; prev_parent = clk->parent; if (clk->usecount != 0) { clk_enable(parent); } spin_lock_irqsave(&clockfw_lock, flags); ret = clk->set_parent(clk, parent); if (ret == 0) clk->parent = parent; spin_unlock_irqrestore(&clockfw_lock, flags); if (clk->usecount != 0) { clk_disable(prev_parent); } return ret; } EXPORT_SYMBOL(clk_set_parent); /* Retrieve the clock's parent clock source */ struct clk *clk_get_parent(struct clk *clk) { struct clk *ret = NULL; if (clk == NULL || IS_ERR(clk)) return ret; return clk->parent; } EXPORT_SYMBOL(clk_get_parent); /* * Add a new clock to the clock tree. */ int clk_register(struct mxc_clk *clk) { if (clk == NULL || IS_ERR(clk)) return -EINVAL; mutex_lock(&clocks_mutex); list_add(&clk->node, &clocks); mutex_unlock(&clocks_mutex); return 0; } EXPORT_SYMBOL(clk_register); /* Remove a clock from the clock tree */ void clk_unregister(struct mxc_clk *clk) { if (clk == NULL || IS_ERR(clk)) return; mutex_lock(&clocks_mutex); list_del(&clk->node); mutex_unlock(&clocks_mutex); } EXPORT_SYMBOL(clk_unregister); #ifdef CONFIG_PROC_FS static void *mxc_proc_clocks_seq_start(struct seq_file *file, loff_t *index) { unsigned int i; unsigned int name_length; unsigned int longest_length = 0; struct mxc_clk *current_clock = NULL; struct mxc_clk *clock; /* Examine the clock list. */ i = 0; list_for_each_entry(clock, &clocks, node) { if (i++ == *index) current_clock = clock; name_length = strlen(clock->name); if (name_length > longest_length) longest_length = name_length; } /* Check if we found the indicated clock. */ if (!current_clock) return NULL; /* Stash the length of the longest clock name for later use. */ file->private = (void *) longest_length; /* Return success. */ return current_clock; } static void *mxc_proc_clocks_seq_next(struct seq_file *file, void *data, loff_t *index) { struct mxc_clk *current_clock = (struct mxc_clk *) data; /* Check for nonsense.
*/ if (!current_clock) return NULL; /* Check if the current clock is the last. */ if (list_is_last(&current_clock->node, &clocks)) return NULL; /* Move to the next clock structure. */ current_clock = list_entry(current_clock->node.next, typeof(*current_clock), node); (*index)++; /* Return the new current clock. */ return current_clock; } static void mxc_proc_clocks_seq_stop(struct seq_file *file, void *data) { } static int mxc_proc_clocks_seq_show(struct seq_file *file, void *data) { int result; struct mxc_clk *clock = (struct mxc_clk *) data; unsigned int longest_length = (unsigned int) file->private; unsigned long range_divisor; const char *range_units; unsigned long rate = clk_get_rate(clock->reg_clk); if (rate >= 1000000) { range_divisor = 1000000; range_units = "MHz"; } else if (rate >= 1000) { range_divisor = 1000; range_units = "KHz"; } else { range_divisor = 1; range_units = "Hz"; } result = seq_printf(file, "%s-%-d%*s %*s %c%c%c%c%c%c %3d", clock->name, clock->reg_clk->id, longest_length - strlen(clock->name), "", longest_length + 2, "", (clock->reg_clk->flags & RATE_PROPAGATES) ? 'P' : '_', (clock->reg_clk->flags & ALWAYS_ENABLED) ? 'A' : '_', (clock->reg_clk->flags & RATE_FIXED) ? 'F' : '_', (clock->reg_clk->flags & CPU_FREQ_TRIG_UPDATE) ? 'T' : '_', (clock->reg_clk->flags & AHB_HIGH_SET_POINT) ? 'H' : '_', (clock->reg_clk->flags & AHB_MED_SET_POINT) ? 'M' : '_', clock->reg_clk->usecount); if (result) return result; result = seq_printf(file, " %10lu (%lu%s)\n", rate, rate / range_divisor, range_units); return result; } static const struct seq_operations mxc_proc_clocks_seq_ops = { .start = mxc_proc_clocks_seq_start, .next = mxc_proc_clocks_seq_next, .stop = mxc_proc_clocks_seq_stop, .show = mxc_proc_clocks_seq_show }; static int mxc_proc_clocks_open(struct inode *inode, struct file *file) { return seq_open(file, &mxc_proc_clocks_seq_ops); } static const struct file_operations mxc_proc_clocks_ops = { .open = mxc_proc_clocks_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int __init mxc_setup_proc_entry(void) { struct proc_dir_entry *res; res = create_proc_entry("cpu/clocks", 0, NULL); if (!res) { printk(KERN_ERR "Failed to create proc/cpu/clocks\n"); return -ENOMEM; } res->proc_fops = &mxc_proc_clocks_ops; return 0; } late_initcall(mxc_setup_proc_entry); #endif /* CONFIG_PROC_FS */ /* * Get the resulting clock rate from a PLL register value and the input * frequency. PLLs with this register layout can at least be found on * MX1, MX21, MX27 and MX31 * * mfi + mfn / (mfd + 1) * f = 2 * f_ref * -------------------- * pd + 1 */ unsigned long mxc_decode_pll(unsigned int reg_val, u32 freq) { long long ll; int mfn_abs; unsigned int mfi, mfn, mfd, pd; mfi = (reg_val >> 10) & 0xf; mfn = reg_val & 0x3ff; mfd = (reg_val >> 16) & 0x3ff; pd = (reg_val >> 26) & 0xf; mfi = mfi <= 5 ? 5 : mfi; mfn_abs = mfn; /* On all i.MXs except i.MX1 and i.MX21 mfn is a 10bit * 2's complement number */ if (!cpu_is_mx1() && !cpu_is_mx21() && mfn >= 0x200) mfn_abs = 0x400 - mfn; freq *= 2; freq /= pd + 1; ll = (unsigned long long)freq * mfn_abs; do_div(ll, mfd + 1); if (!cpu_is_mx1() && !cpu_is_mx21() && mfn >= 0x200) ll = -ll; ll = (freq * mfi) + ll; return ll; }
gpl-2.0
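The formula in the comment above mxc_decode_pll() is easiest to sanity-check on a host. The sketch below evaluates it the same way, including the 10-bit 2's-complement mfn handling of the non-i.MX1/i.MX21 branch; the register value in main() is a made-up example rather than a value from any real board, and plain 64-bit division stands in for the kernel's do_div().

/* Host-side sketch of the PLL rate formula:
 *                      mfi + mfn / (mfd + 1)
 *   f = 2 * f_ref * -------------------------
 *                           pd + 1
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t decode_pll(uint32_t reg_val, uint32_t freq)
{
	uint32_t mfi = (reg_val >> 10) & 0xf;
	uint32_t mfn = reg_val & 0x3ff;
	uint32_t mfd = (reg_val >> 16) & 0x3ff;
	uint32_t pd  = (reg_val >> 26) & 0xf;
	int64_t ll;
	int mfn_abs = mfn;

	mfi = mfi <= 5 ? 5 : mfi;	/* hardware clamps mfi to >= 5 */
	if (mfn >= 0x200)		/* mfn is a 10-bit 2's complement value */
		mfn_abs = 0x400 - mfn;

	freq *= 2;
	freq /= pd + 1;

	ll = (int64_t)freq * mfn_abs / (mfd + 1);
	if (mfn >= 0x200)
		ll = -ll;
	return (uint32_t)((int64_t)freq * mfi + ll);
}

int main(void)
{
	/* Hypothetical register: pd=0, mfd=11, mfi=8, mfn=5; f_ref=26 MHz:
	 * f = 2 * 26e6 * (8 + 5/12) ~ 437.67 MHz */
	uint32_t reg = (0u << 26) | (11u << 16) | (8u << 10) | 5u;

	printf("pll rate = %u Hz\n", decode_pll(reg, 26000000));
	return 0;
}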
aelarabawy/NetworkManager
src/devices/wimax/nm-device-wimax.c
4
41310
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: t; c-basic-offset: 4 -*- */ /* NetworkManager -- Network link manager * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * Copyright (C) 2010 - 2011 Red Hat, Inc. * Copyright (C) 2009 Novell, Inc. */ #include "config.h" #include <stdlib.h> #include <string.h> #include <unistd.h> #include <sys/ioctl.h> #include <sys/socket.h> #include <glib/gi18n.h> #include "nm-wimax-util.h" #include <WiMaxAPI.h> #include <WiMaxAPIEx.h> #include "nm-device-wimax.h" #include "nm-wimax-util.h" #include "nm-logging.h" #include "nm-device-private.h" #include "NetworkManagerUtils.h" #include "nm-active-connection.h" #include "nm-dbus-manager.h" #include "nm-connection.h" #include "nm-platform.h" #include "nm-setting-connection.h" #include "nm-setting-wimax.h" #include "nm-utils.h" #include "nm-rfkill-manager.h" #include "iwmxsdk.h" #include "nm-enum-types.h" #include "nm-dbus-glib-types.h" static gboolean impl_device_get_nsp_list (NMDeviceWimax *device, GPtrArray **list, GError **error); #include "nm-device-wimax-glue.h" G_DEFINE_TYPE (NMDeviceWimax, nm_device_wimax, NM_TYPE_DEVICE) enum { PROP_0, PROP_NSPS, PROP_ACTIVE_NSP, PROP_CENTER_FREQ, PROP_RSSI, PROP_CINR, PROP_TX_POWER, PROP_BSID, LAST_PROP }; enum { NSP_ADDED, NSP_REMOVED, LAST_SIGNAL }; static guint signals[LAST_SIGNAL] = { 0 }; #define NM_DEVICE_WIMAX_GET_PRIVATE(o) (G_TYPE_INSTANCE_GET_PRIVATE ((o), \ NM_TYPE_DEVICE_WIMAX, \ NMDeviceWimaxPrivate)) typedef struct { gboolean disposed; struct wmxsdk *sdk; WIMAX_API_DEVICE_STATUS status; gboolean connect_failed; gboolean enabled; gboolean wimaxd_enabled; guint activation_timeout_id; /* Track whether stage1 (Prepare) is completed yet or not */ gboolean prepare_done; guint sdk_action_defer_id; guint link_timeout_id; guint poll_id; GSList *nsp_list; NMWimaxNsp *current_nsp; /* interesting stuff when connected */ guint center_freq; gint rssi; gint cinr; gint tx_power; char *bsid; } NMDeviceWimaxPrivate; /***********************************************************/ static gboolean impl_device_get_nsp_list (NMDeviceWimax *self, GPtrArray **nsps, GError **error) { NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (self); GSList *iter; *nsps = g_ptr_array_sized_new (4); for (iter = priv->nsp_list; iter; iter = iter->next) g_ptr_array_add (*nsps, g_strdup (nm_wimax_nsp_get_dbus_path (NM_WIMAX_NSP (iter->data)))); return TRUE; } static void set_current_nsp (NMDeviceWimax *self, NMWimaxNsp *new_nsp) { NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (self); NMWimaxNsp *old_nsp; gboolean path_changed = FALSE; old_nsp = priv->current_nsp; priv->current_nsp = NULL; if (new_nsp) priv->current_nsp = g_object_ref (new_nsp); if (old_nsp && new_nsp) { path_changed = (g_strcmp0 (nm_wimax_nsp_get_dbus_path (old_nsp), nm_wimax_nsp_get_dbus_path (new_nsp)) != 0); } /* Only notify if it's really changed */ if (old_nsp != 
new_nsp || path_changed) g_object_notify (G_OBJECT (self), NM_DEVICE_WIMAX_ACTIVE_NSP); if (old_nsp) g_object_unref (old_nsp); } static gboolean activation_timed_out (gpointer data) { NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (data); priv->activation_timeout_id = 0; nm_device_state_changed (NM_DEVICE (data), NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_CONFIG_FAILED); return FALSE; } static void emit_nsp_added_removed (NMDeviceWimax *self, guint signum, NMWimaxNsp *nsp, gboolean recheck_available_connections) { g_signal_emit (self, signals[signum], 0, nsp); g_object_notify (G_OBJECT (self), NM_DEVICE_WIMAX_NSPS); nm_device_emit_recheck_auto_activate (NM_DEVICE (self)); if (recheck_available_connections) nm_device_recheck_available_connections (NM_DEVICE (self)); } static void remove_all_nsps (NMDeviceWimax *self) { NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (self); set_current_nsp (self, NULL); while (priv->nsp_list) { NMWimaxNsp *nsp = NM_WIMAX_NSP (priv->nsp_list->data); priv->nsp_list = g_slist_remove (priv->nsp_list, nsp); emit_nsp_added_removed (self, NSP_REMOVED, nsp, FALSE); g_object_unref (nsp); } nm_device_recheck_available_connections (NM_DEVICE (self)); } static NMWimaxNsp * get_nsp_by_name (NMDeviceWimax *self, const char *name) { NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (self); GSList *iter; g_return_val_if_fail (name, NULL); for (iter = priv->nsp_list; iter; iter = iter->next) { NMWimaxNsp *nsp = NM_WIMAX_NSP (iter->data); if (!g_strcmp0 (nm_wimax_nsp_get_name (nsp), name)) return nsp; } return NULL; } static NMWimaxNsp * get_nsp_by_path (NMDeviceWimax *self, const char *path) { NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (self); GSList *iter; g_return_val_if_fail (path, NULL); for (iter = priv->nsp_list; iter; iter = iter->next) { NMWimaxNsp *nsp = NM_WIMAX_NSP (iter->data); if (!strcmp (nm_wimax_nsp_get_dbus_path (nsp), path)) return nsp; } return NULL; } static gboolean update_availability (NMDeviceWimax *self, gboolean old_available) { NMDevice *device = NM_DEVICE (self); NMDeviceState state; gboolean new_available, changed = FALSE; new_available = nm_device_is_available (device); if (new_available == old_available) return FALSE; state = nm_device_get_state (device); if (state == NM_DEVICE_STATE_UNAVAILABLE) { if (new_available == TRUE) { nm_device_state_changed (device, NM_DEVICE_STATE_DISCONNECTED, NM_DEVICE_STATE_REASON_NONE); changed = TRUE; } } else if (state >= NM_DEVICE_STATE_DISCONNECTED) { if (new_available == FALSE) { nm_device_state_changed (device, NM_DEVICE_STATE_UNAVAILABLE, NM_DEVICE_STATE_REASON_NONE); changed = TRUE; } } return changed; } /* NMDeviceInterface interface */ static void set_enabled (NMDevice *device, gboolean enabled) { NMDeviceWimax *self = NM_DEVICE_WIMAX (device); NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (self); gboolean old_available; int ret; const char *iface; iface = nm_device_get_iface (NM_DEVICE (self)); nm_log_dbg (LOGD_WIMAX, "(%s): setting radio enabled %d -> %d", iface, priv->enabled, enabled); if (priv->enabled == enabled) return; old_available = nm_device_is_available (NM_DEVICE (device)); priv->enabled = enabled; nm_log_dbg (LOGD_WIMAX, "(%s): radio now %s", iface, priv->enabled ? "enabled" : "disabled"); /* Set the WiMAX device RF state to the current user-specified enabled state */ if (priv->sdk) { ret = iwmx_sdk_rf_state_set (priv->sdk, enabled ? 
WIMAX_API_RF_ON : WIMAX_API_RF_OFF); if (ret < 0 && ret != -EINPROGRESS) { nm_log_warn (LOGD_WIMAX, "(%s): failed to %s radio", iface, priv->enabled ? "enable" : "disable"); } } update_availability (self, old_available); } /* NMDevice methods */ static gboolean check_connection_compatible (NMDevice *device, NMConnection *connection) { NMSettingConnection *s_con; NMSettingWimax *s_wimax; const char *connection_type; const char *mac; if (!NM_DEVICE_CLASS (nm_device_wimax_parent_class)->check_connection_compatible (device, connection)) return FALSE; s_con = nm_connection_get_setting_connection (connection); g_assert (s_con); connection_type = nm_setting_connection_get_connection_type (s_con); if (strcmp (connection_type, NM_SETTING_WIMAX_SETTING_NAME)) return FALSE; s_wimax = nm_connection_get_setting_wimax (connection); if (!s_wimax) return FALSE; mac = nm_setting_wimax_get_mac_address (s_wimax); if (mac && !nm_utils_hwaddr_matches (mac, -1, nm_device_get_hw_address (device), -1)) return FALSE; return TRUE; } static gboolean check_connection_available (NMDevice *device, NMConnection *connection, const char *specific_object) { NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (device); const GSList *ns_iter = NULL; NMWimaxNsp *nsp; if (specific_object) { nsp = get_nsp_by_path (NM_DEVICE_WIMAX (device), specific_object); return nsp ? nm_wimax_nsp_check_compatible (nsp, connection) : FALSE; } /* Ensure the connection applies to an NSP in the scan list */ for (ns_iter = priv->nsp_list; ns_iter; ns_iter = ns_iter->next) { if (nm_wimax_nsp_check_compatible (NM_WIMAX_NSP (ns_iter->data), connection)) return TRUE; } return FALSE; } static gboolean complete_connection (NMDevice *device, NMConnection *connection, const char *specific_object, const GSList *existing_connections, GError **error) { NMDeviceWimax *self = NM_DEVICE_WIMAX (device); NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (self); NMSettingWimax *s_wimax; const char *setting_mac; const char *hw_address; const char *nsp_name = NULL; NMWimaxNsp *nsp = NULL; GSList *iter; s_wimax = nm_connection_get_setting_wimax (connection); if (!specific_object) { /* If not given a specific object, we need at minimum an NSP name */ if (!s_wimax) { g_set_error_literal (error, NM_DEVICE_ERROR, NM_DEVICE_ERROR_INVALID_CONNECTION, "A 'wimax' setting is required if no NSP path was given."); return FALSE; } nsp_name = nm_setting_wimax_get_network_name (s_wimax); if (!nsp_name || !strlen (nsp_name)) { g_set_error_literal (error, NM_DEVICE_ERROR, NM_DEVICE_ERROR_INVALID_CONNECTION, "A 'wimax' setting with a valid network name is required if no NSP path was given."); return FALSE; } /* Find a compatible NSP in the list */ nsp = get_nsp_by_name (self, nsp_name); /* If we still don't have an NSP, then the WiMAX settings needs to be * fully specified by the client. Might not be able to find the NSP * if the scan didn't find the NSP yet. 
*/ if (!nsp) { if (!nm_setting_verify (NM_SETTING (s_wimax), NULL, error)) return FALSE; } } else { /* Find a compatible NSP in the list */ for (iter = priv->nsp_list; iter; iter = g_slist_next (iter)) { if (!strcmp (specific_object, nm_wimax_nsp_get_dbus_path (NM_WIMAX_NSP (iter->data)))) { nsp = NM_WIMAX_NSP (iter->data); break; } } if (!nsp) { g_set_error (error, NM_DEVICE_ERROR, NM_DEVICE_ERROR_SPECIFIC_OBJECT_NOT_FOUND, "The NSP %s was not in the scan list.", specific_object); return FALSE; } nsp_name = nm_wimax_nsp_get_name (nsp); } /* Add a WiMAX setting if one doesn't exist */ if (!s_wimax) { s_wimax = (NMSettingWimax *) nm_setting_wimax_new (); nm_connection_add_setting (connection, NM_SETTING (s_wimax)); } g_assert (nsp_name); nm_utils_complete_generic (connection, NM_SETTING_WIMAX_SETTING_NAME, existing_connections, nsp_name, nsp_name, NULL, TRUE); g_object_set (G_OBJECT (s_wimax), NM_SETTING_WIMAX_NETWORK_NAME, nsp_name, NULL); setting_mac = nm_setting_wimax_get_mac_address (s_wimax); hw_address = nm_device_get_hw_address (device); if (setting_mac) { /* Make sure the setting MAC (if any) matches the device's permanent MAC */ if (!nm_utils_hwaddr_matches (setting_mac, -1, hw_address, -1)) { g_set_error_literal (error, NM_CONNECTION_ERROR, NM_CONNECTION_ERROR_INVALID_PROPERTY, _("connection does not match device")); g_prefix_error (error, "%s.%s: ", NM_SETTING_WIMAX_SETTING_NAME, NM_SETTING_WIMAX_MAC_ADDRESS); return FALSE; } } else { /* Lock the connection to this device by default */ if (!nm_utils_hwaddr_matches (hw_address, -1, NULL, ETH_ALEN)) g_object_set (G_OBJECT (s_wimax), NM_SETTING_WIMAX_MAC_ADDRESS, hw_address, NULL); } return TRUE; } static gboolean can_auto_connect (NMDevice *device, NMConnection *connection, char **specific_object) { NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (device); GSList *iter; if (!NM_DEVICE_CLASS (nm_device_wimax_parent_class)->can_auto_connect (device, connection, specific_object)) return FALSE; for (iter = priv->nsp_list; iter; iter = iter->next) { NMWimaxNsp *nsp = NM_WIMAX_NSP (iter->data); if (nm_wimax_nsp_check_compatible (nsp, connection)) { *specific_object = (char *) nm_wimax_nsp_get_dbus_path (nsp); return TRUE; } } return FALSE; } static gboolean is_available (NMDevice *device) { NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (device); const char *iface = nm_device_get_iface (device); if (!priv->enabled) { nm_log_dbg (LOGD_WIMAX, "(%s): not available because not enabled", iface); return FALSE; } if (!priv->wimaxd_enabled) { nm_log_dbg (LOGD_WIMAX, "(%s): not available because not enabled in wimaxd", iface); return FALSE; } if (!nm_wimax_util_sdk_is_initialized ()) { nm_log_dbg (LOGD_WIMAX, "(%s): not available because WiMAX SDK not initialized", iface); return FALSE; } if (!priv->sdk) { nm_log_dbg (LOGD_WIMAX, "(%s): not available because not known to WiMAX SDK", iface); return FALSE; } return iwmxsdk_status_get (priv->sdk) >= WIMAX_API_DEVICE_STATUS_Ready; } static void clear_activation_timeout (NMDeviceWimax *self) { NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (self); if (priv->activation_timeout_id) { g_source_remove (priv->activation_timeout_id); priv->activation_timeout_id = 0; } priv->connect_failed = FALSE; } static void clear_link_timeout (NMDeviceWimax *self) { NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (self); if (priv->link_timeout_id) { g_source_remove (priv->link_timeout_id); priv->link_timeout_id = 0; } } static void clear_connected_poll (NMDeviceWimax *self) { 
NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (self); if (priv->poll_id) { g_source_remove (priv->poll_id); priv->poll_id = 0; } } static NMActStageReturn act_stage1_prepare (NMDevice *device, NMDeviceStateReason *reason) { NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (device); NMActRequest *req; GSList *iter; const char *path; NMWimaxNsp *nsp = NULL; clear_link_timeout (NM_DEVICE_WIMAX (device)); *reason = NM_DEVICE_STATE_REASON_NONE; req = nm_device_get_act_request (device); if (!req) return NM_ACT_STAGE_RETURN_FAILURE; path = nm_active_connection_get_specific_object (NM_ACTIVE_CONNECTION (req)); if (!path) return NM_ACT_STAGE_RETURN_FAILURE; /* Find the NSP in the scan list */ for (iter = priv->nsp_list; iter; iter = iter->next) { NMWimaxNsp *candidate = NM_WIMAX_NSP (iter->data); if (!strcmp (path, nm_wimax_nsp_get_dbus_path (candidate))) { nsp = candidate; break; } } /* Couldn't find the NSP for some reason */ if (nsp == NULL) return NM_ACT_STAGE_RETURN_FAILURE; set_current_nsp (NM_DEVICE_WIMAX (device), nsp); priv->prepare_done = TRUE; /* If the device is scanning, it won't connect, so we have to wait until * it's not scanning to proceed to stage 2. */ if (priv->status == WIMAX_API_DEVICE_STATUS_Scanning) return NM_ACT_STAGE_RETURN_POSTPONE; return NM_ACT_STAGE_RETURN_SUCCESS; } static NMActStageReturn act_stage2_config (NMDevice *device, NMDeviceStateReason *reason) { NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (device); NMConnection *connection; NMSettingWimax *s_wimax; const char *nsp_name, *iface; int ret; iface = nm_device_get_iface (device); g_assert (iface); connection = nm_device_get_connection (device); g_assert (connection); s_wimax = nm_connection_get_setting_wimax (connection); g_assert (s_wimax); nsp_name = nm_setting_wimax_get_network_name (s_wimax); g_assert (nsp_name); nm_log_info (LOGD_WIMAX, "(%s): connecting to NSP '%s'", iface, nsp_name); priv->connect_failed = FALSE; ret = iwmx_sdk_connect (priv->sdk, nsp_name); if (ret < 0 && ret != -EINPROGRESS) { nm_log_err (LOGD_WIMAX, "(%s): failed to connect to NSP '%s'", iface, nsp_name); *reason = NM_DEVICE_STATE_REASON_CONFIG_FAILED; return NM_ACT_STAGE_RETURN_FAILURE; } /* FIXME: Is 40 seconds good estimation? I have no idea */ priv->activation_timeout_id = g_timeout_add_seconds (40, activation_timed_out, device); return NM_ACT_STAGE_RETURN_POSTPONE; } static void force_disconnect (NMDeviceWimax *self, struct wmxsdk *sdk) { WIMAX_API_DEVICE_STATUS status; int ret; const char *iface; g_return_if_fail (sdk != NULL); iface = nm_device_get_iface (NM_DEVICE (self)); status = iwmxsdk_status_get (sdk); if ((int) status < 0) { nm_log_err (LOGD_WIMAX, "(%s): failed to read WiMAX device status: %d", iface, status); return; } if ( status == WIMAX_API_DEVICE_STATUS_Connecting || status == WIMAX_API_DEVICE_STATUS_Data_Connected) { nm_log_dbg (LOGD_WIMAX, "(%s): requesting disconnect", iface); ret = iwmx_sdk_disconnect (sdk); if (ret < 0 && ret != -EINPROGRESS) { nm_log_err (LOGD_WIMAX, "(%s): failed to disconnect WiMAX device: %d", iface, ret); } } } static void deactivate (NMDevice *device) { NMDeviceWimax *self = NM_DEVICE_WIMAX (device); NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (self); clear_activation_timeout (self); clear_link_timeout (self); clear_connected_poll (self); set_current_nsp (self, NULL); if (priv->sdk) { /* Read explicit status here just to make sure we have the most * up-to-date status and to ensure we disconnect if needed. 
*/ force_disconnect (self, priv->sdk); } } /*************************************************************************/ static void wmx_state_change_cb (struct wmxsdk *wmxsdk, WIMAX_API_DEVICE_STATUS new_status, WIMAX_API_DEVICE_STATUS old_status, WIMAX_API_STATUS_REASON reason, WIMAX_API_CONNECTION_PROGRESS_INFO progress, void *user_data) { NMDeviceWimax *self = NM_DEVICE_WIMAX (user_data); NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (self); NMDeviceState state; const char *iface; gboolean old_available = FALSE; const char *nsp_name = NULL; iface = nm_device_get_iface (NM_DEVICE (self)); nm_log_info (LOGD_WIMAX, "(%s): wimax state change %s -> %s (%s (%d))", iface, iwmx_sdk_dev_status_to_str (old_status), iwmx_sdk_dev_status_to_str (new_status), iwmx_sdk_con_progress_to_str (progress), progress); if (new_status == old_status) return; state = nm_device_get_state (NM_DEVICE (self)); old_available = nm_device_is_available (NM_DEVICE (self)); priv->status = new_status; if (priv->current_nsp) nsp_name = nm_wimax_nsp_get_name (priv->current_nsp); switch (new_status) { case WIMAX_API_DEVICE_STATUS_UnInitialized: case WIMAX_API_DEVICE_STATUS_RF_OFF_HW_SW: case WIMAX_API_DEVICE_STATUS_RF_OFF_HW: case WIMAX_API_DEVICE_STATUS_RF_OFF_SW: if (priv->wimaxd_enabled) { priv->wimaxd_enabled = FALSE; if (update_availability (self, old_available)) return; } break; case WIMAX_API_DEVICE_STATUS_Connecting: case WIMAX_API_DEVICE_STATUS_Data_Connected: /* If for some reason we're initially connected, force a disconnect here */ if (state < NM_DEVICE_STATE_DISCONNECTED) force_disconnect (self, wmxsdk); /* Fall through */ case WIMAX_API_DEVICE_STATUS_Ready: case WIMAX_API_DEVICE_STATUS_Scanning: if (priv->wimaxd_enabled == FALSE) { priv->wimaxd_enabled = TRUE; if (update_availability (self, old_available)) return; } break; default: nm_log_warn (LOGD_WIMAX, "(%s): unhandled WiMAX device state %d", iface, new_status); break; } /* Handle activation success and failure */ if (nm_device_is_activating (NM_DEVICE (self))) { if (new_status == WIMAX_API_DEVICE_STATUS_Data_Connected) { /* Success */ clear_activation_timeout (self); nm_log_info (LOGD_WIMAX, "(%s): connected to '%s'", iface, nsp_name); nm_device_activate_schedule_stage3_ip_config_start (NM_DEVICE (self)); return; } if (priv->connect_failed) { /* Connection attempt failed */ nm_log_info (LOGD_WIMAX, "(%s): connection to '%s' failed: (%d) %s", iface, nsp_name, reason, iwmx_sdk_reason_to_str (reason)); nm_device_state_changed (NM_DEVICE (self), NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_CONFIG_FAILED); return; } /* If stage2 was postponed because the device was scanning or something, * then check if we need to move to stage2 now that the device might be * ready. 
*/ if (state == NM_DEVICE_STATE_PREPARE && priv->prepare_done) { if ( new_status == WIMAX_API_DEVICE_STATUS_Ready || new_status == WIMAX_API_DEVICE_STATUS_Connecting) { nm_device_activate_schedule_stage2_device_config (NM_DEVICE (self)); return; } } } /* Handle disconnection */ if (state == NM_DEVICE_STATE_ACTIVATED) { if ( old_status == WIMAX_API_DEVICE_STATUS_Data_Connected && new_status < WIMAX_API_DEVICE_STATUS_Connecting) { nm_log_info (LOGD_WIMAX, "(%s): disconnected from '%s': (%d) %s", iface, nsp_name, reason, iwmx_sdk_reason_to_str (reason)); nm_device_state_changed (NM_DEVICE (self), NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_CONFIG_FAILED); } } } static gboolean link_timeout_cb (gpointer user_data) { NMDeviceWimax *self = NM_DEVICE_WIMAX (user_data); NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (self); priv->link_timeout_id = 0; nm_log_dbg (LOGD_WIMAX, "(%s): link timed out", nm_device_get_iface (NM_DEVICE (self))); nm_device_state_changed (NM_DEVICE (self), NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_CARRIER); return FALSE; } static void wmx_media_status_cb (struct wmxsdk *wmxsdk, WIMAX_API_MEDIA_STATUS new_status, void *user_data) { NMDeviceWimax *self = NM_DEVICE_WIMAX (user_data); NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (self); NMDeviceState state; const char *iface; iface = nm_device_get_iface (NM_DEVICE (self)); state = nm_device_get_state (NM_DEVICE (self)); nm_log_dbg (LOGD_WIMAX, "(%s): media status change to %s", iface, iwmx_sdk_media_status_to_str (new_status)); /* We only care about media events while activated */ if (state != NM_DEVICE_STATE_ACTIVATED) return; clear_link_timeout (self); switch (new_status) { case WIMAX_API_MEDIA_STATUS_LINK_UP: break; case WIMAX_API_MEDIA_STATUS_LINK_DOWN: nm_log_dbg (LOGD_WIMAX, "(%s): starting link timeout", iface); priv->link_timeout_id = g_timeout_add_seconds (15, link_timeout_cb, self); break; case WIMAX_API_MEDIA_STATUS_LINK_RENEW: nm_log_dbg (LOGD_WIMAX, "(%s): renewing DHCP lease", iface); if (!nm_device_dhcp4_renew (NM_DEVICE (self), TRUE)) { nm_device_state_changed (NM_DEVICE (self), NM_DEVICE_STATE_FAILED, NM_DEVICE_STATE_REASON_DHCP_FAILED); } break; default: nm_log_err (LOGD_WIMAX, "(%s): unhandled media status %d", iface, new_status); break; } } static void wmx_connect_result_cb (struct wmxsdk *wmxsdk, WIMAX_API_NETWORK_CONNECTION_RESP result, void *user_data) { NMDeviceWimax *self = NM_DEVICE_WIMAX (user_data); NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (self); if (nm_device_is_activating (NM_DEVICE (self))) { priv->connect_failed = (result != WIMAX_API_CONNECTION_SUCCESS); /* Wait for the state change so we can get the reason code; we * cache the connect failure so we don't have to wait for the * activation timeout.
*/ } } static void remove_outdated_nsps (NMDeviceWimax *self, WIMAX_API_NSP_INFO_EX *nsp_list, guint32 list_size) { NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (self); GSList *iter; GSList *to_remove = NULL; for (iter = priv->nsp_list; iter; iter = iter->next) { NMWimaxNsp *nsp = NM_WIMAX_NSP (iter->data); gboolean found = FALSE; int i; for (i = 0; i < list_size; i++) { WIMAX_API_NSP_INFO_EX *info = &nsp_list[i]; if (!g_strcmp0 (nm_wimax_nsp_get_name (nsp), (char *) info->NSPName)) { found = TRUE; break; } } if (!found) to_remove = g_slist_prepend (to_remove, nsp); } for (iter = to_remove; iter; iter = iter->next) { NMWimaxNsp *nsp = NM_WIMAX_NSP (iter->data); emit_nsp_added_removed (self, NSP_REMOVED, nsp, FALSE); priv->nsp_list = g_slist_remove (priv->nsp_list, nsp); g_object_unref (nsp); } if (g_slist_length(to_remove) > 0) nm_device_recheck_available_connections (NM_DEVICE (self)); g_slist_free (to_remove); } static void wmx_scan_result_cb (struct wmxsdk *wmxsdk, WIMAX_API_NSP_INFO_EX *nsps, guint num_nsps, void *user_data) { NMDeviceWimax *self = NM_DEVICE_WIMAX (user_data); NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (self); const char *iface = nm_device_get_iface (NM_DEVICE (self)); int i; remove_outdated_nsps (self, nsps, num_nsps); /* Add new NSPs and update existing ones */ for (i = 0; i < num_nsps; i++) { WIMAX_API_NSP_INFO_EX *sdk_nsp = &nsps[i]; const char *nsp_name = (const char *) sdk_nsp->NSPName; NMWimaxNspNetworkType net_type; guint signalq; NMWimaxNsp *nsp; gboolean new_nsp; nsp = get_nsp_by_name (self, nsp_name); new_nsp = (nsp == NULL); if (new_nsp) { nsp = nm_wimax_nsp_new (nsp_name); nm_log_dbg (LOGD_WIMAX, "(%s): new WiMAX NSP '%s'", iface, nsp_name); } net_type = nm_wimax_util_convert_network_type (sdk_nsp->networkType); if (net_type != nm_wimax_nsp_get_network_type (nsp)) g_object_set (nsp, NM_WIMAX_NSP_NETWORK_TYPE, net_type, NULL); signalq = CLAMP (sdk_nsp->linkQuality, 0, 100); if (signalq != nm_wimax_nsp_get_signal_quality (nsp)) g_object_set (nsp, NM_WIMAX_NSP_SIGNAL_QUALITY, signalq, NULL); nm_log_dbg (LOGD_WIMAX, "(%s): WiMAX NSP '%s' quality %d%% type %d", iface, nsp_name, sdk_nsp->linkQuality, net_type); if (new_nsp) { priv->nsp_list = g_slist_append (priv->nsp_list, nsp); nm_wimax_nsp_export_to_dbus (nsp); emit_nsp_added_removed (self, NSP_ADDED, nsp, TRUE); } } } static void wmx_removed_cb (struct wmxsdk *wmxsdk, void *user_data) { NMDeviceWimax *self = NM_DEVICE_WIMAX (user_data); NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (self); if (!priv->sdk) { nm_log_dbg (LOGD_WIMAX, "(%s): removed unhandled WiMAX interface", wmxsdk->ifname); return; } nm_log_dbg (LOGD_WIMAX, "(%s): removed WiMAX interface", wmxsdk->ifname); /* Clear callbacks just in case we don't hold the last reference */ iwmx_sdk_set_callbacks (priv->sdk, NULL, NULL, NULL, NULL, NULL, NULL); wmxsdk_unref (priv->sdk); priv->sdk = NULL; priv->status = WIMAX_API_DEVICE_STATUS_UnInitialized; nm_device_state_changed (NM_DEVICE (self), NM_DEVICE_STATE_UNAVAILABLE, NM_DEVICE_STATE_REASON_NONE); } /*************************************************************************/ static inline gint sdk_rssi_to_dbm (guint raw_rssi) { /* Values range from 0x00 to 0x53, where -123dBm is encoded as 0x00 and * -40dBm encoded as 0x53 in 1dB increments. */ return raw_rssi - 123; } static inline gint sdk_cinr_to_db (guint raw_cinr) { /* Values range from 0x00 to 0x3F, where -10dB is encoded as 0x00 and * 53dB encoded as 0x3F in 1dB increments. 
*/ return raw_cinr - 10; } static inline gint sdk_tx_pow_to_dbm (guint raw_tx_pow) { /* Values range from 0x00 to 0xFF, where -84dBm is encoded as 0x00 and * 43.5dBm is encoded as 0xFF in 0.5dB increments. Normalize so that * 0 dBm == 0. */ return (int) (((double) raw_tx_pow / 2.0) - 84) * 2; } static void set_link_status (NMDeviceWimax *self, WIMAX_API_LINK_STATUS_INFO_EX *link_status) { NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (self); guint center_freq = 0; gint conv_rssi = 0, conv_cinr = 0, conv_tx_pow = 0; char *new_bsid = NULL; if (link_status) { center_freq = link_status->centerFrequency; conv_rssi = sdk_rssi_to_dbm (link_status->RSSI); conv_cinr = sdk_cinr_to_db (link_status->CINR); conv_tx_pow = sdk_tx_pow_to_dbm (link_status->txPWR); new_bsid = nm_utils_hwaddr_ntoa (link_status->bsId, 6); } if (priv->center_freq != center_freq) { priv->center_freq = center_freq; g_object_notify (G_OBJECT (self), NM_DEVICE_WIMAX_CENTER_FREQUENCY); } if (priv->rssi != conv_rssi) { priv->rssi = conv_rssi; g_object_notify (G_OBJECT (self), NM_DEVICE_WIMAX_RSSI); } if (priv->cinr != conv_cinr) { priv->cinr = conv_cinr; g_object_notify (G_OBJECT (self), NM_DEVICE_WIMAX_CINR); } if (priv->tx_power != conv_tx_pow) { priv->tx_power = conv_tx_pow; g_object_notify (G_OBJECT (self), NM_DEVICE_WIMAX_TX_POWER); } if (g_strcmp0 (priv->bsid, new_bsid) != 0) { g_free (priv->bsid); priv->bsid = new_bsid; g_object_notify (G_OBJECT (self), NM_DEVICE_WIMAX_BSID); } else g_free (new_bsid); } static gboolean connected_poll_cb (gpointer user_data) { NMDeviceWimax *self = NM_DEVICE_WIMAX (user_data); NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (self); WIMAX_API_CONNECTED_NSP_INFO_EX *sdk_nsp; WIMAX_API_LINK_STATUS_INFO_EX *link_status; g_return_val_if_fail (priv->sdk != NULL, FALSE); /* Get details of the connected NSP */ sdk_nsp = iwmx_sdk_get_connected_network (priv->sdk); if (sdk_nsp) { const char *nsp_name = (const char *) sdk_nsp->NSPName; NMWimaxNsp *nsp; nsp = get_nsp_by_name (self, nsp_name); if (nsp) { NMWimaxNspNetworkType net_type; guint signalq; net_type = nm_wimax_util_convert_network_type (sdk_nsp->networkType); if (net_type != nm_wimax_nsp_get_network_type (nsp)) g_object_set (nsp, NM_WIMAX_NSP_NETWORK_TYPE, net_type, NULL); signalq = sdk_nsp->linkQuality; if (signalq != nm_wimax_nsp_get_signal_quality (nsp)) g_object_set (nsp, NM_WIMAX_NSP_SIGNAL_QUALITY, signalq, NULL); nm_log_dbg (LOGD_WIMAX, "(%s): WiMAX NSP '%s' quality %d%% type %d", nm_device_get_iface (NM_DEVICE (self)), nsp_name, sdk_nsp->linkQuality, net_type); } free (sdk_nsp); } /* Get details of the current radio link */ link_status = iwmx_sdk_get_link_status_info (priv->sdk); if (link_status) { set_link_status (self, link_status); free (link_status); } return TRUE; /* reschedule */ } static void device_state_changed (NMDevice *device, NMDeviceState new_state, NMDeviceState old_state, NMDeviceStateReason reason) { NMDeviceWimax *self = NM_DEVICE_WIMAX (device); NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (self); /* Reset our stage1 (Prepare) done marker since it's only valid while in stage1 */ priv->prepare_done = FALSE; if (new_state < NM_DEVICE_STATE_DISCONNECTED) remove_all_nsps (self); /* Request initial NSP list when device is first started */ if ( new_state == NM_DEVICE_STATE_DISCONNECTED && old_state < NM_DEVICE_STATE_DISCONNECTED) { if (priv->sdk) iwmx_sdk_get_networks (priv->sdk); } if (new_state == NM_DEVICE_STATE_FAILED || new_state <= NM_DEVICE_STATE_DISCONNECTED) { set_current_nsp (self, 
NULL); clear_activation_timeout (self); } if (new_state == NM_DEVICE_STATE_ACTIVATED) { /* poll link quality and BSID */ clear_connected_poll (self); priv->poll_id = g_timeout_add_seconds (10, connected_poll_cb, self); connected_poll_cb (self); } else { clear_link_timeout (self); clear_connected_poll (self); set_link_status (self, NULL); } } /*************************************************************************/ static gboolean sdk_action_defer_cb (gpointer user_data) { NMDeviceWimax *self = NM_DEVICE_WIMAX (user_data); gboolean old_available = nm_device_is_available (NM_DEVICE (self)); NM_DEVICE_WIMAX_GET_PRIVATE (self)->sdk_action_defer_id = 0; update_availability (self, old_available); return FALSE; } static void wmx_new_sdk_cb (struct wmxsdk *sdk, void *user_data) { NMDeviceWimax *self = NM_DEVICE_WIMAX (user_data); NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (self); /* We only track one wmxsdk at a time because the WiMAX SDK is pretty stupid */ if (priv->sdk) { nm_log_dbg (LOGD_WIMAX, "(%s): WiMAX interface already known", sdk->ifname); return; } nm_log_dbg (LOGD_WIMAX, "(%s): new WiMAX interface (%s)", sdk->ifname, sdk->name); /* Now that we have an SDK, schedule an idle handler to start the device up */ priv->sdk = wmxsdk_ref (sdk); iwmx_sdk_set_callbacks(priv->sdk, wmx_state_change_cb, wmx_media_status_cb, wmx_connect_result_cb, wmx_scan_result_cb, wmx_removed_cb, self); iwmx_sdk_set_fast_reconnect_enabled (priv->sdk, 0); if (!priv->sdk_action_defer_id) priv->sdk_action_defer_id = g_idle_add (sdk_action_defer_cb, self); } /*************************************************************************/ NMDevice * nm_device_wimax_new (NMPlatformLink *platform_device) { NMDevice *device; g_return_val_if_fail (platform_device != NULL, NULL); device = (NMDevice *) g_object_new (NM_TYPE_DEVICE_WIMAX, NM_DEVICE_PLATFORM_DEVICE, platform_device, NM_DEVICE_TYPE_DESC, "WiMAX", NM_DEVICE_DEVICE_TYPE, NM_DEVICE_TYPE_WIMAX, NM_DEVICE_RFKILL_TYPE, RFKILL_TYPE_WIMAX, NULL); if (device) { struct wmxsdk *sdk; nm_wimax_util_sdk_ref (); /* See if the SDK already knows about this interface */ sdk = iwmx_sdk_get_wmxsdk_for_iface (platform_device->name); if (sdk) wmx_new_sdk_cb (sdk, device); /* If it doesn't, we want to be notified when it does */ iwmx_sdk_new_callback_register (wmx_new_sdk_cb, device); } return device; } static void nm_device_wimax_init (NMDeviceWimax *self) { NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (self); priv->status = WIMAX_API_DEVICE_STATUS_UnInitialized; } static void set_property (GObject *object, guint prop_id, const GValue *value, GParamSpec *pspec) { switch (prop_id) { default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); break; } } static void get_property (GObject *object, guint prop_id, GValue *value, GParamSpec *pspec) { NMDeviceWimax *self = NM_DEVICE_WIMAX (object); NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (self); GPtrArray *array; GSList *iter; switch (prop_id) { case PROP_NSPS: array = g_ptr_array_sized_new (4); for (iter = priv->nsp_list; iter; iter = g_slist_next (iter)) g_ptr_array_add (array, g_strdup (nm_wimax_nsp_get_dbus_path (NM_WIMAX_NSP (iter->data)))); g_value_take_boxed (value, array); break; case PROP_ACTIVE_NSP: if (priv->current_nsp) g_value_set_boxed (value, nm_wimax_nsp_get_dbus_path (priv->current_nsp)); else g_value_set_boxed (value, "/"); break; case PROP_CENTER_FREQ: g_value_set_uint (value, priv->center_freq); break; case PROP_RSSI: g_value_set_int (value, priv->rssi); break; case 
PROP_CINR: g_value_set_int (value, priv->cinr); break; case PROP_TX_POWER: g_value_set_int (value, priv->tx_power); break; case PROP_BSID: g_value_set_string (value, priv->bsid); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); break; } } static void dispose (GObject *object) { NMDeviceWimax *self = NM_DEVICE_WIMAX (object); NMDeviceWimaxPrivate *priv = NM_DEVICE_WIMAX_GET_PRIVATE (self); if (priv->disposed) goto done; priv->disposed = TRUE; clear_activation_timeout (self); clear_link_timeout (self); clear_connected_poll (self); if (priv->sdk_action_defer_id) g_source_remove (priv->sdk_action_defer_id); if (priv->sdk) { iwmx_sdk_set_callbacks (priv->sdk, NULL, NULL, NULL, NULL, NULL, NULL); wmxsdk_unref (priv->sdk); } g_free (priv->bsid); set_current_nsp (self, NULL); g_slist_free_full (priv->nsp_list, g_object_unref); iwmx_sdk_new_callback_unregister (wmx_new_sdk_cb, self); nm_wimax_util_sdk_unref (); done: G_OBJECT_CLASS (nm_device_wimax_parent_class)->dispose (object); } static void nm_device_wimax_class_init (NMDeviceWimaxClass *klass) { GObjectClass *object_class = G_OBJECT_CLASS (klass); NMDeviceClass *device_class = NM_DEVICE_CLASS (klass); g_type_class_add_private (object_class, sizeof (NMDeviceWimaxPrivate)); /* Virtual methods */ object_class->set_property = set_property; object_class->get_property = get_property; object_class->dispose = dispose; device_class->check_connection_compatible = check_connection_compatible; device_class->check_connection_available = check_connection_available; device_class->complete_connection = complete_connection; device_class->can_auto_connect = can_auto_connect; device_class->is_available = is_available; device_class->act_stage1_prepare = act_stage1_prepare; device_class->act_stage2_config = act_stage2_config; device_class->deactivate = deactivate; device_class->set_enabled = set_enabled; device_class->state_changed = device_state_changed; /* Properties */ g_object_class_install_property (object_class, PROP_NSPS, g_param_spec_boxed (NM_DEVICE_WIMAX_NSPS, "", "", DBUS_TYPE_G_ARRAY_OF_OBJECT_PATH, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS)); g_object_class_install_property (object_class, PROP_ACTIVE_NSP, g_param_spec_boxed (NM_DEVICE_WIMAX_ACTIVE_NSP, "", "", DBUS_TYPE_G_OBJECT_PATH, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS)); g_object_class_install_property (object_class, PROP_CENTER_FREQ, g_param_spec_uint (NM_DEVICE_WIMAX_CENTER_FREQUENCY, "", "", 0, G_MAXUINT, 0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS)); g_object_class_install_property (object_class, PROP_RSSI, g_param_spec_int (NM_DEVICE_WIMAX_RSSI, "", "", G_MININT, G_MAXINT, 0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS)); g_object_class_install_property (object_class, PROP_CINR, g_param_spec_int (NM_DEVICE_WIMAX_CINR, "", "", G_MININT, G_MAXINT, 0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS)); g_object_class_install_property (object_class, PROP_TX_POWER, g_param_spec_int (NM_DEVICE_WIMAX_TX_POWER, "", "", G_MININT, G_MAXINT, 0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS)); g_object_class_install_property (object_class, PROP_BSID, g_param_spec_string (NM_DEVICE_WIMAX_BSID, "", "", NULL, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS)); /* Signals */ signals[NSP_ADDED] = g_signal_new ("nsp-added", G_OBJECT_CLASS_TYPE (object_class), G_SIGNAL_RUN_FIRST, G_STRUCT_OFFSET (NMDeviceWimaxClass, nsp_added), NULL, NULL, g_cclosure_marshal_VOID__OBJECT, G_TYPE_NONE, 1, G_TYPE_OBJECT); signals[NSP_REMOVED] = g_signal_new ("nsp-removed", G_OBJECT_CLASS_TYPE (object_class), 
G_SIGNAL_RUN_FIRST, G_STRUCT_OFFSET (NMDeviceWimaxClass, nsp_removed), NULL, NULL, g_cclosure_marshal_VOID__OBJECT, G_TYPE_NONE, 1, G_TYPE_OBJECT); nm_dbus_manager_register_exported_type (nm_dbus_manager_get (), G_TYPE_FROM_CLASS (klass), &dbus_glib_nm_device_wimax_object_info); }
gpl-2.0
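The sdk_rssi_to_dbm and sdk_cinr_to_db helpers in the NetworkManager WiMAX file above are plain linear re-encodings of the raw SDK readings; the valid input ranges are stated only in the comments. A minimal standalone sketch of the same arithmetic, with those ranges turned into asserts (the function names here are illustrative, not part of any NM header):

/* Hypothetical standalone demo of the WiMAX signal conversions above. */
#include <assert.h>
#include <stdio.h>

/* 0x00..0x53 encodes -123..-40 dBm in 1 dB steps */
static int rssi_to_dbm(unsigned raw) { assert(raw <= 0x53); return (int) raw - 123; }

/* 0x00..0x3F encodes -10..53 dB in 1 dB steps */
static int cinr_to_db(unsigned raw) { assert(raw <= 0x3f); return (int) raw - 10; }

int main(void)
{
    printf("rssi 0x00 -> %d dBm, 0x53 -> %d dBm\n", rssi_to_dbm(0x00), rssi_to_dbm(0x53));
    printf("cinr 0x00 -> %d dB,  0x3f -> %d dB\n", cinr_to_db(0x00), cinr_to_db(0x3f));
    return 0;
}

Both endpoints print the values named in the comments (-123/-40 dBm and -10/53 dB), which is a quick way to confirm a linear re-encoding like this.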
steffengraber/nest-simulator
models/mip_generator.cpp
4
5646
/* * mip_generator.cpp * * This file is part of NEST. * * Copyright (C) 2004 The NEST Initiative * * NEST is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. * * NEST is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with NEST. If not, see <http://www.gnu.org/licenses/>. * */ #include "mip_generator.h" // Includes from libnestutil: #include "dict_util.h" // Includes from nestkernel: #include "event_delivery_manager_impl.h" #include "exceptions.h" #include "kernel_manager.h" /* ---------------------------------------------------------------- * Default constructors defining default parameter * ---------------------------------------------------------------- */ nest::mip_generator::Parameters_::Parameters_() : rate_( 0.0 ) // Hz , p_copy_( 1.0 ) { } /* ---------------------------------------------------------------- * Parameter extraction and manipulation functions * ---------------------------------------------------------------- */ void nest::mip_generator::Parameters_::get( DictionaryDatum& d ) const { ( *d )[ names::rate ] = rate_; ( *d )[ names::p_copy ] = p_copy_; } void nest::mip_generator::Parameters_::set( const DictionaryDatum& d, Node* node ) { updateValueParam< double >( d, names::rate, rate_, node ); updateValueParam< double >( d, names::p_copy, p_copy_, node ); if ( rate_ < 0 ) { throw BadProperty( "Rate must be non-negative." ); } if ( p_copy_ < 0 or p_copy_ > 1 ) { throw BadProperty( "Copy probability must be in [0, 1]." 
); } } /* ---------------------------------------------------------------- * Default and copy constructor for node * ---------------------------------------------------------------- */ nest::mip_generator::mip_generator() : StimulationDevice() , P_() { } nest::mip_generator::mip_generator( const mip_generator& n ) : StimulationDevice( n ) , P_( n.P_ ) // also causes deep copy of random number generator { } /* ---------------------------------------------------------------- * Node initialization functions * ---------------------------------------------------------------- */ void nest::mip_generator::init_state_() { StimulationDevice::init_state(); } void nest::mip_generator::init_buffers_() { StimulationDevice::init_buffers(); } void nest::mip_generator::pre_run_hook() { StimulationDevice::pre_run_hook(); // rate_ is in Hz and dt in ms, so convert dt from ms to s poisson_distribution::param_type param( Time::get_resolution().get_ms() * P_.rate_ * 1e-3 ); V_.poisson_dist_.param( param ); } /* ---------------------------------------------------------------- * Other functions * ---------------------------------------------------------------- */ void nest::mip_generator::update( Time const& T, const long from, const long to ) { assert( to >= 0 and static_cast< delay >( from ) < kernel().connection_manager.get_min_delay() ); assert( from < to ); for ( long lag = from; lag < to; ++lag ) { if ( not StimulationDevice::is_active( T ) || P_.rate_ <= 0 ) { return; // no spikes to be generated } // generate spikes of parent process for each time slice const unsigned long n_parent_spikes = V_.poisson_dist_( get_vp_synced_rng( get_thread() ) ); if ( n_parent_spikes ) { DSSpikeEvent se; se.set_multiplicity( n_parent_spikes ); kernel().event_delivery_manager.send( *this, se, lag ); } } } void nest::mip_generator::event_hook( DSSpikeEvent& e ) { /* We temporarily set the spike multiplicity here to the number of spikes selected by the copy process. After spike delivery, the multiplicity is reset to the number of parent spikes, so that this value is available for delivery to the next target. This is thread-safe because mip_generator is replicated on each thread. */ RngPtr rng = get_vp_specific_rng( get_thread() ); const unsigned long n_parent_spikes = e.get_multiplicity(); // TODO: draw n_spikes from binomial distribution unsigned long n_spikes = 0; for ( unsigned long n = 0; n < n_parent_spikes; n++ ) { if ( rng->drand() < P_.p_copy_ ) { n_spikes++; } } if ( n_spikes > 0 ) { e.set_multiplicity( n_spikes ); e.get_receiver().handle( e ); } e.set_multiplicity( n_parent_spikes ); } /* ---------------------------------------------------------------- * Other functions * ---------------------------------------------------------------- */ void nest::mip_generator::set_data_from_stimulation_backend( std::vector< double >& input_param ) { Parameters_ ptmp = P_; // temporary copy in case of errors // For the input backend if ( not input_param.empty() ) { if ( input_param.size() != 2 ) { throw BadParameterValue( "The size of the data for the mip_generator needs to be 2 [rate, p_copy]." ); } else { DictionaryDatum d = DictionaryDatum( new Dictionary ); ( *d )[ names::rate ] = DoubleDatum( input_param[ 0 ] ); ( *d )[ names::p_copy ] = DoubleDatum( input_param[ 1 ] ); ptmp.set( d, this ); } } // if we get here, temporary contains consistent set of properties P_ = ptmp; }
gpl-2.0
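The event_hook in mip_generator.cpp above thins the parent spike train per target: each of the n_parent_spikes is kept with probability p_copy, so the per-target count is Binomial(n_parent_spikes, p_copy), which is exactly what the in-code TODO about a binomial draw refers to. A toy C sketch of that loop, assuming libc rand() is an acceptable stand-in for NEST's per-thread RNGs:

/* Toy model of the mip_generator copy process: Bernoulli thinning of
 * n_parent spikes with probability p_copy. Illustrative only. */
#include <stdio.h>
#include <stdlib.h>

static unsigned long copy_spikes(unsigned long n_parent, double p_copy)
{
    unsigned long n = 0;
    for (unsigned long i = 0; i < n_parent; i++)
        if ((double) rand() / RAND_MAX < p_copy)  /* keep spike with prob p_copy */
            n++;
    return n;
}

int main(void)
{
    srand(1234);
    unsigned long total = 0, trials = 100000;
    for (unsigned long t = 0; t < trials; t++)
        total += copy_spikes(10, 0.3);
    /* mean should approach n_parent * p_copy = 3.0 */
    printf("mean copied spikes: %.3f (expected 3.0)\n", (double) total / trials);
    return 0;
}

Averaging over many slices recovers the expected value n_parent * p_copy, the same statistic a direct binomial draw would produce more cheaply.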
kraj/gcc
libgfortran/generated/_aimag_c4.F90
4
1460
! Copyright (C) 2002-2022 Free Software Foundation, Inc. ! Contributed by Paul Brook <paul@nowt.org> ! !This file is part of the GNU Fortran 95 runtime library (libgfortran). ! !GNU libgfortran is free software; you can redistribute it and/or !modify it under the terms of the GNU General Public !License as published by the Free Software Foundation; either !version 3 of the License, or (at your option) any later version. !GNU libgfortran is distributed in the hope that it will be useful, !but WITHOUT ANY WARRANTY; without even the implied warranty of !MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the !GNU General Public License for more details. ! !Under Section 7 of GPL version 3, you are granted additional !permissions described in the GCC Runtime Library Exception, version !3.1, as published by the Free Software Foundation. ! !You should have received a copy of the GNU General Public License and !a copy of the GCC Runtime Library Exception along with this program; !see the files COPYING3 and COPYING.RUNTIME respectively. If not, see !<http://www.gnu.org/licenses/>. ! !This file is machine generated. #include "config.h" #include "kinds.inc" #include "c99_protos.inc" #if defined (HAVE_GFC_COMPLEX_4) elemental function _gfortran_specific__aimag_c4 (parm) complex (kind=4), intent (in) :: parm real (kind=4) :: _gfortran_specific__aimag_c4 _gfortran_specific__aimag_c4 = aimag (parm) end function #endif
gpl-2.0
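The machine-generated Fortran file above merely forwards to the AIMAG intrinsic for COMPLEX(KIND=4) arguments. For readers more at home in C, C99's cimagf() performs the same extraction of the imaginary part of a single-precision complex value; a minimal sketch, not part of libgfortran:

/* C99 analogue of the generated specific AIMAG wrapper above. */
#include <complex.h>
#include <stdio.h>

int main(void)
{
    float complex z = 1.5f + 2.5f * I;
    printf("aimag(%g%+gi) = %g\n", crealf(z), cimagf(z), cimagf(z));
    return 0;
}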
xtreamerdev/linux-xtr
arch/ia64/sn/pci/tioca_provider.c
4
18507
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved. */ #include <linux/types.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <asm/sn/sn_sal.h> #include <asm/sn/addrs.h> #include <asm/sn/pcidev.h> #include <asm/sn/pcibus_provider_defs.h> #include <asm/sn/tioca_provider.h> uint32_t tioca_gart_found; EXPORT_SYMBOL(tioca_gart_found); /* used by agp-sgi */ LIST_HEAD(tioca_list); EXPORT_SYMBOL(tioca_list); /* used by agp-sgi */ static int tioca_gart_init(struct tioca_kernel *); /** * tioca_gart_init - Initialize SGI TIOCA GART * @tioca_common: ptr to common prom/kernel struct identifying the * * If the indicated tioca has devices present, initialize its associated * GART MMR's and kernel memory. */ static int tioca_gart_init(struct tioca_kernel *tioca_kern) { uint64_t ap_reg; uint64_t offset; struct page *tmp; struct tioca_common *tioca_common; volatile struct tioca *ca_base; tioca_common = tioca_kern->ca_common; ca_base = (struct tioca *)tioca_common->ca_common.bs_base; if (list_empty(tioca_kern->ca_devices)) return 0; ap_reg = 0; /* * Validate aperature size */ switch (CA_APERATURE_SIZE >> 20) { case 4: ap_reg |= (0x3ff << CA_GART_AP_SIZE_SHFT); /* 4MB */ break; case 8: ap_reg |= (0x3fe << CA_GART_AP_SIZE_SHFT); /* 8MB */ break; case 16: ap_reg |= (0x3fc << CA_GART_AP_SIZE_SHFT); /* 16MB */ break; case 32: ap_reg |= (0x3f8 << CA_GART_AP_SIZE_SHFT); /* 32 MB */ break; case 64: ap_reg |= (0x3f0 << CA_GART_AP_SIZE_SHFT); /* 64 MB */ break; case 128: ap_reg |= (0x3e0 << CA_GART_AP_SIZE_SHFT); /* 128 MB */ break; case 256: ap_reg |= (0x3c0 << CA_GART_AP_SIZE_SHFT); /* 256 MB */ break; case 512: ap_reg |= (0x380 << CA_GART_AP_SIZE_SHFT); /* 512 MB */ break; case 1024: ap_reg |= (0x300 << CA_GART_AP_SIZE_SHFT); /* 1GB */ break; case 2048: ap_reg |= (0x200 << CA_GART_AP_SIZE_SHFT); /* 2GB */ break; case 4096: ap_reg |= (0x000 << CA_GART_AP_SIZE_SHFT); /* 4 GB */ break; default: printk(KERN_ERR "%s: Invalid CA_APERATURE_SIZE " "0x%lx\n", __FUNCTION__, (ulong) CA_APERATURE_SIZE); return -1; } /* * Set up other aperature parameters */ if (PAGE_SIZE >= 16384) { tioca_kern->ca_ap_pagesize = 16384; ap_reg |= CA_GART_PAGE_SIZE; } else { tioca_kern->ca_ap_pagesize = 4096; } tioca_kern->ca_ap_size = CA_APERATURE_SIZE; tioca_kern->ca_ap_bus_base = CA_APERATURE_BASE; tioca_kern->ca_gart_entries = tioca_kern->ca_ap_size / tioca_kern->ca_ap_pagesize; ap_reg |= (CA_GART_AP_ENB_AGP | CA_GART_AP_ENB_PCI); ap_reg |= tioca_kern->ca_ap_bus_base; /* * Allocate and set up the GART */ tioca_kern->ca_gart_size = tioca_kern->ca_gart_entries * sizeof(u64); tmp = alloc_pages_node(tioca_kern->ca_closest_node, GFP_KERNEL | __GFP_ZERO, get_order(tioca_kern->ca_gart_size)); if (!tmp) { printk(KERN_ERR "%s: Could not allocate " "%lu bytes (order %d) for GART\n", __FUNCTION__, tioca_kern->ca_gart_size, get_order(tioca_kern->ca_gart_size)); return -ENOMEM; } tioca_kern->ca_gart = page_address(tmp); tioca_kern->ca_gart_coretalk_addr = PHYS_TO_TIODMA(virt_to_phys(tioca_kern->ca_gart)); /* * Compute PCI/AGP convenience fields */ offset = CA_PCI32_MAPPED_BASE - CA_APERATURE_BASE; tioca_kern->ca_pciap_base = CA_PCI32_MAPPED_BASE; tioca_kern->ca_pciap_size = CA_PCI32_MAPPED_SIZE; tioca_kern->ca_pcigart_start = offset / tioca_kern->ca_ap_pagesize; tioca_kern->ca_pcigart_base = tioca_kern->ca_gart_coretalk_addr + offset; 
tioca_kern->ca_pcigart = &tioca_kern->ca_gart[tioca_kern->ca_pcigart_start]; tioca_kern->ca_pcigart_entries = tioca_kern->ca_pciap_size / tioca_kern->ca_ap_pagesize; tioca_kern->ca_pcigart_pagemap = kcalloc(1, tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL); if (!tioca_kern->ca_pcigart_pagemap) { free_pages((unsigned long)tioca_kern->ca_gart, get_order(tioca_kern->ca_gart_size)); return -1; } offset = CA_AGP_MAPPED_BASE - CA_APERATURE_BASE; tioca_kern->ca_gfxap_base = CA_AGP_MAPPED_BASE; tioca_kern->ca_gfxap_size = CA_AGP_MAPPED_SIZE; tioca_kern->ca_gfxgart_start = offset / tioca_kern->ca_ap_pagesize; tioca_kern->ca_gfxgart_base = tioca_kern->ca_gart_coretalk_addr + offset; tioca_kern->ca_gfxgart = &tioca_kern->ca_gart[tioca_kern->ca_gfxgart_start]; tioca_kern->ca_gfxgart_entries = tioca_kern->ca_gfxap_size / tioca_kern->ca_ap_pagesize; /* * various control settings: * use agp op-combining * use GET semantics to fetch memory * participate in coherency domain * DISABLE GART PREFETCHING due to hw bug tracked in SGI PV930029 */ ca_base->ca_control1 |= CA_AGPDMA_OP_ENB_COMBDELAY; /* PV895469 ? */ ca_base->ca_control2 &= ~(CA_GART_MEM_PARAM); ca_base->ca_control2 |= (0x2ull << CA_GART_MEM_PARAM_SHFT); tioca_kern->ca_gart_iscoherent = 1; ca_base->ca_control2 &= ~(CA_GART_WR_PREFETCH_ENB | CA_GART_RD_PREFETCH_ENB); /* * Unmask GART fetch error interrupts. Clear residual errors first. */ ca_base->ca_int_status_alias = CA_GART_FETCH_ERR; ca_base->ca_mult_error_alias = CA_GART_FETCH_ERR; ca_base->ca_int_mask &= ~CA_GART_FETCH_ERR; /* * Program the aperature and gart registers in TIOCA */ ca_base->ca_gart_aperature = ap_reg; ca_base->ca_gart_ptr_table = tioca_kern->ca_gart_coretalk_addr | 1; return 0; } /** * tioca_fastwrite_enable - enable AGP FW for a tioca and its functions * @tioca_kernel: structure representing the CA * * Given a CA, scan all attached functions making sure they all support * FastWrite. If so, enable FastWrite for all functions and the CA itself. */ void tioca_fastwrite_enable(struct tioca_kernel *tioca_kern) { int cap_ptr; uint64_t ca_control1; uint32_t reg; struct tioca *tioca_base; struct pci_dev *pdev; struct tioca_common *common; common = tioca_kern->ca_common; /* * Scan all vga controllers on this bus making sure they all * suport FW. If not, return. */ list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) { if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8)) continue; cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); if (!cap_ptr) return; /* no AGP CAP means no FW */ pci_read_config_dword(pdev, cap_ptr + PCI_AGP_STATUS, &reg); if (!(reg & PCI_AGP_STATUS_FW)) return; /* function doesn't support FW */ } /* * Set fw for all vga fn's */ list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) { if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8)) continue; cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); pci_read_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, &reg); reg |= PCI_AGP_COMMAND_FW; pci_write_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, reg); } /* * Set ca's fw to match */ tioca_base = (struct tioca *)common->ca_common.bs_base; ca_control1 = tioca_base->ca_control1; ca_control1 |= CA_AGP_FW_ENABLE; tioca_base->ca_control1 = ca_control1; } EXPORT_SYMBOL(tioca_fastwrite_enable); /* used by agp-sgi */ /** * tioca_dma_d64 - create a DMA mapping using 64-bit direct mode * @paddr: system physical address * * Map @paddr into 64-bit CA bus space. No device context is necessary. * Bits 53:0 come from the coretalk address. 
We just need to mask in the * following optional bits of the 64-bit pci address: * * 63:60 - Coretalk Packet Type - 0x1 for Mem Get/Put (coherent) * 0x2 for PIO (non-coherent) * We will always use 0x1 * 55:55 - Swap bytes Currently unused */ static uint64_t tioca_dma_d64(unsigned long paddr) { dma_addr_t bus_addr; bus_addr = PHYS_TO_TIODMA(paddr); BUG_ON(!bus_addr); BUG_ON(bus_addr >> 54); /* Set upper nibble to Cache Coherent Memory op */ bus_addr |= (1UL << 60); return bus_addr; } /** * tioca_dma_d48 - create a DMA mapping using 48-bit direct mode * @pdev: linux pci_dev representing the function * @paddr: system physical address * * Map @paddr into 64-bit bus space of the CA associated with @pcidev_info. * * The CA agp 48 bit direct address falls out as follows: * * When direct mapping AGP addresses, the 48 bit AGP address is * constructed as follows: * * [47:40] - Low 8 bits of the page Node ID extracted from coretalk * address [47:40]. The upper 8 node bits are fixed * and come from the xxx register bits [5:0] * [39:38] - Chiplet ID extracted from coretalk address [39:38] * [37:00] - node offset extracted from coretalk address [37:00] * * Since the node id in general will be non-zero, and the chiplet id * will always be non-zero, it follows that the device must support * a dma mask of at least 0xffffffffff (40 bits) to target node 0 * and in general should be 0xffffffffffff (48 bits) to target nodes * up to 255. Nodes above 255 need the support of the xxx register, * and so a given CA can only directly target nodes in the range * xxx - xxx+255. */ static uint64_t tioca_dma_d48(struct pci_dev *pdev, uint64_t paddr) { struct tioca_common *tioca_common; struct tioca *ca_base; uint64_t ct_addr; dma_addr_t bus_addr; uint32_t node_upper; uint64_t agp_dma_extn; struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev); tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info; ca_base = (struct tioca *)tioca_common->ca_common.bs_base; ct_addr = PHYS_TO_TIODMA(paddr); if (!ct_addr) return 0; bus_addr = (dma_addr_t) (ct_addr & 0xffffffffffff); node_upper = ct_addr >> 48; if (node_upper > 64) { printk(KERN_ERR "%s: coretalk addr 0x%p node id out " "of range\n", __FUNCTION__, (void *)ct_addr); return 0; } agp_dma_extn = ca_base->ca_agp_dma_addr_extn; if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) { printk(KERN_ERR "%s: coretalk upper node (%u) " "mismatch with ca_agp_dma_addr_extn (%lu)\n", __FUNCTION__, node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)); return 0; } return bus_addr; } /** * tioca_dma_mapped - create a DMA mapping using a CA GART * @pdev: linux pci_dev representing the function * @paddr: host physical address to map * @req_size: len (bytes) to map * * Map @paddr into CA address space using the GART mechanism. The mapped * dma_addr_t is guarenteed to be contiguous in CA bus space. 
*/ static dma_addr_t tioca_dma_mapped(struct pci_dev *pdev, uint64_t paddr, size_t req_size) { int i, ps, ps_shift, entry, entries, mapsize, last_entry; uint64_t xio_addr, end_xio_addr; struct tioca_common *tioca_common; struct tioca_kernel *tioca_kern; dma_addr_t bus_addr = 0; struct tioca_dmamap *ca_dmamap; void *map; unsigned long flags; struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev); tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info; tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private; xio_addr = PHYS_TO_TIODMA(paddr); if (!xio_addr) return 0; spin_lock_irqsave(&tioca_kern->ca_lock, flags); /* * allocate a map struct */ ca_dmamap = kcalloc(1, sizeof(struct tioca_dmamap), GFP_ATOMIC); if (!ca_dmamap) goto map_return; /* * Locate free entries that can hold req_size. Account for * unaligned start/length when allocating. */ ps = tioca_kern->ca_ap_pagesize; /* will be power of 2 */ ps_shift = ffs(ps) - 1; end_xio_addr = xio_addr + req_size - 1; entries = (end_xio_addr >> ps_shift) - (xio_addr >> ps_shift) + 1; map = tioca_kern->ca_pcigart_pagemap; mapsize = tioca_kern->ca_pcigart_entries; entry = find_first_zero_bit(map, mapsize); while (entry < mapsize) { last_entry = find_next_bit(map, mapsize, entry); if (last_entry - entry >= entries) break; entry = find_next_zero_bit(map, mapsize, last_entry); } if (entry >= mapsize) { kfree(ca_dmamap); goto map_return; } for (i = 0; i < entries; i++) set_bit(entry + i, map); bus_addr = tioca_kern->ca_pciap_base + (entry * ps); ca_dmamap->cad_dma_addr = bus_addr; ca_dmamap->cad_gart_size = entries; ca_dmamap->cad_gart_entry = entry; list_add(&ca_dmamap->cad_list, &tioca_kern->ca_dmamaps); if (xio_addr % ps) { tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr); bus_addr += xio_addr & (ps - 1); xio_addr &= ~(ps - 1); xio_addr += ps; entry++; } while (xio_addr < end_xio_addr) { tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr); xio_addr += ps; entry++; } tioca_tlbflush(tioca_kern); map_return: spin_unlock_irqrestore(&tioca_kern->ca_lock, flags); return bus_addr; } /** * tioca_dma_unmap - release CA mapping resources * @pdev: linux pci_dev representing the function * @bus_addr: bus address returned by an earlier tioca_dma_map * @dir: mapping direction (unused) * * Locate mapping resources associated with @bus_addr and release them. * For mappings created using the direct modes (64 or 48) there are no * resources to release. 
*/ void tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) { int i, entry; struct tioca_common *tioca_common; struct tioca_kernel *tioca_kern; struct tioca_dmamap *map; struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev); unsigned long flags; tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info; tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private; /* return straight away if this isn't be a mapped address */ if (bus_addr < tioca_kern->ca_pciap_base || bus_addr >= (tioca_kern->ca_pciap_base + tioca_kern->ca_pciap_size)) return; spin_lock_irqsave(&tioca_kern->ca_lock, flags); list_for_each_entry(map, &tioca_kern->ca_dmamaps, cad_list) if (map->cad_dma_addr == bus_addr) break; BUG_ON(map == NULL); entry = map->cad_gart_entry; for (i = 0; i < map->cad_gart_size; i++, entry++) { clear_bit(entry, tioca_kern->ca_pcigart_pagemap); tioca_kern->ca_pcigart[entry] = 0; } tioca_tlbflush(tioca_kern); list_del(&map->cad_list); spin_unlock_irqrestore(&tioca_kern->ca_lock, flags); kfree(map); } /** * tioca_dma_map - map pages for PCI DMA * @pdev: linux pci_dev representing the function * @paddr: host physical address to map * @byte_count: bytes to map * * This is the main wrapper for mapping host physical pages to CA PCI space. * The mapping mode used is based on the devices dma_mask. As a last resort * use the GART mapped mode. */ uint64_t tioca_dma_map(struct pci_dev *pdev, uint64_t paddr, size_t byte_count) { uint64_t mapaddr; /* * If card is 64 or 48 bit addresable, use a direct mapping. 32 * bit direct is so restrictive w.r.t. where the memory resides that * we don't use it even though CA has some support. */ if (pdev->dma_mask == ~0UL) mapaddr = tioca_dma_d64(paddr); else if (pdev->dma_mask == 0xffffffffffffUL) mapaddr = tioca_dma_d48(pdev, paddr); else mapaddr = 0; /* Last resort ... use PCI portion of CA GART */ if (mapaddr == 0) mapaddr = tioca_dma_mapped(pdev, paddr, byte_count); return mapaddr; } /** * tioca_error_intr_handler - SGI TIO CA error interrupt handler * @irq: unused * @arg: pointer to tioca_common struct for the given CA * @pt: unused * * Handle a CA error interrupt. Simply a wrapper around a SAL call which * defers processing to the SGI prom. */ static irqreturn_t tioca_error_intr_handler(int irq, void *arg, struct pt_regs *pt) { struct tioca_common *soft = arg; struct ia64_sal_retval ret_stuff; uint64_t segment; uint64_t busnum; ret_stuff.status = 0; ret_stuff.v0 = 0; segment = 0; busnum = soft->ca_common.bs_persist_busnum; SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_ERROR_INTERRUPT, segment, busnum, 0, 0, 0, 0, 0); return IRQ_HANDLED; } /** * tioca_bus_fixup - perform final PCI fixup for a TIO CA bus * @prom_bussoft: Common prom/kernel struct representing the bus * * Replicates the tioca_common pointed to by @prom_bussoft in kernel * space. Allocates and initializes a kernel-only area for a given CA, * and sets up an irq for handling CA error interrupts. * * On successful setup, returns the kernel version of tioca_common back to * the caller. */ void * tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft) { struct tioca_common *tioca_common; struct tioca_kernel *tioca_kern; struct pci_bus *bus; /* sanity check prom rev */ if (sn_sal_rev_major() < 4 || (sn_sal_rev_major() == 4 && sn_sal_rev_minor() < 6)) { printk (KERN_ERR "%s: SGI prom rev 4.06 or greater required " "for tioca support\n", __FUNCTION__); return NULL; } /* * Allocate kernel bus soft and copy from prom. 
*/ tioca_common = kcalloc(1, sizeof(struct tioca_common), GFP_KERNEL); if (!tioca_common) return NULL; memcpy(tioca_common, prom_bussoft, sizeof(struct tioca_common)); tioca_common->ca_common.bs_base |= __IA64_UNCACHED_OFFSET; /* init kernel-private area */ tioca_kern = kcalloc(1, sizeof(struct tioca_kernel), GFP_KERNEL); if (!tioca_kern) { kfree(tioca_common); return NULL; } tioca_kern->ca_common = tioca_common; spin_lock_init(&tioca_kern->ca_lock); INIT_LIST_HEAD(&tioca_kern->ca_dmamaps); tioca_kern->ca_closest_node = nasid_to_cnodeid(tioca_common->ca_closest_nasid); tioca_common->ca_kernel_private = (uint64_t) tioca_kern; bus = pci_find_bus(0, tioca_common->ca_common.bs_persist_busnum); BUG_ON(!bus); tioca_kern->ca_devices = &bus->devices; /* init GART */ if (tioca_gart_init(tioca_kern) < 0) { kfree(tioca_kern); kfree(tioca_common); return NULL; } tioca_gart_found++; list_add(&tioca_kern->ca_list, &tioca_list); if (request_irq(SGI_TIOCA_ERROR, tioca_error_intr_handler, SA_SHIRQ, "TIOCA error", (void *)tioca_common)) printk(KERN_WARNING "%s: Unable to get irq %d. " "Error interrupts won't be routed for TIOCA bus %d\n", __FUNCTION__, SGI_TIOCA_ERROR, (int)tioca_common->ca_common.bs_persist_busnum); return tioca_common; } static struct sn_pcibus_provider tioca_pci_interfaces = { .dma_map = tioca_dma_map, .dma_map_consistent = tioca_dma_map, .dma_unmap = tioca_dma_unmap, .bus_fixup = tioca_bus_fixup, }; /** * tioca_init_provider - init SN PCI provider ops for TIO CA */ int tioca_init_provider(void) { sn_pci_provider[PCIIO_ASIC_TYPE_TIOCA] = &tioca_pci_interfaces; return 0; }
gpl-2.0
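tioca_dma_mapped() above sizes its GART allocation with shift arithmetic: for a power-of-two page size ps, an unaligned byte range [addr, addr + len) touches ((addr + len - 1) >> shift) - (addr >> shift) + 1 entries, where shift = ffs(ps) - 1. A standalone sketch of just that computation (POSIX ffs(); the helper name is made up for illustration):

/* Sketch of the GART page-span arithmetic used by tioca_dma_mapped(). */
#include <stdio.h>
#include <stdint.h>
#include <strings.h>   /* ffs() */

static unsigned pages_spanned(uint64_t addr, uint64_t len, unsigned ps)
{
    unsigned shift = ffs(ps) - 1;                 /* ps must be a power of two */
    uint64_t end = addr + len - 1;                /* last byte of the range */
    return (unsigned) ((end >> shift) - (addr >> shift) + 1);
}

int main(void)
{
    printf("%u\n", pages_spanned(4095, 2, 4096));   /* straddles a boundary: 2 */
    printf("%u\n", pages_spanned(0, 4096, 4096));   /* exactly one page: 1 */
    printf("%u\n", pages_spanned(100, 8192, 4096)); /* unaligned 2 pages' worth: 3 */
    return 0;
}

The third case shows why the "+ 1 for unaligned start/length" accounting in the kernel comment matters: 8192 bytes starting mid-page occupy three pages, not two.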
linuxium/ubuntu-xenial
drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c
4
7274
/****************************************************************************** * * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * ******************************************************************************/ /****************************************************************************** * * * Module: rtl8192c_rf6052.c (Source C File) * * Note: Provide RF 6052 series relative API. * * Function: * * Export: * * Abbrev: * * History: * Data Who Remark * * 09/25/2008 MHC Create initial version. * 11/05/2008 MHC Add API for tw power setting. * * ******************************************************************************/ #include <rtl8723b_hal.h> /*---------------------------Define Local Constant---------------------------*/ /*---------------------------Define Local Constant---------------------------*/ /*------------------------Define global variable-----------------------------*/ /*------------------------Define global variable-----------------------------*/ /*------------------------Define local variable------------------------------*/ /* 2008/11/20 MH For Debug only, RF */ /*------------------------Define local variable------------------------------*/ /*----------------------------------------------------------------------------- * Function: PHY_RF6052SetBandwidth() * * Overview: This function is called by SetBWModeCallback8190Pci() only * * Input: struct adapter * Adapter * WIRELESS_BANDWIDTH_E Bandwidth 20M or 40M * * Output: NONE * * Return: NONE * * Note: For RF type 0222D *---------------------------------------------------------------------------*/ void PHY_RF6052SetBandwidth8723B( struct adapter * Adapter, enum CHANNEL_WIDTH Bandwidth) /* 20M or 40M */ { struct hal_com_data *pHalData = GET_HAL_DATA(Adapter); switch (Bandwidth) { case CHANNEL_WIDTH_20: pHalData->RfRegChnlVal[0] = ((pHalData->RfRegChnlVal[0] & 0xfffff3ff) | BIT10 | BIT11); PHY_SetRFReg(Adapter, ODM_RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal[0]); PHY_SetRFReg(Adapter, ODM_RF_PATH_B, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal[0]); break; case CHANNEL_WIDTH_40: pHalData->RfRegChnlVal[0] = ((pHalData->RfRegChnlVal[0] & 0xfffff3ff) | BIT10); PHY_SetRFReg(Adapter, ODM_RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal[0]); PHY_SetRFReg(Adapter, ODM_RF_PATH_B, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal[0]); break; default: /* RT_TRACE(COMP_DBG, DBG_LOUD, ("PHY_SetRF8225Bandwidth(): unknown Bandwidth: %#X\n", Bandwidth)); */ break; } } static int phy_RF6052_Config_ParaFile( struct adapter * Adapter ) { u32 u4RegValue = 0; u8 eRFPath; struct bb_register_def *pPhyReg; int rtStatus = _SUCCESS; struct hal_com_data *pHalData = GET_HAL_DATA(Adapter); static char sz8723RadioAFile[] = RTL8723B_PHY_RADIO_A; static char sz8723RadioBFile[] = RTL8723B_PHY_RADIO_B; static s8 sz8723BTxPwrTrackFile[] = RTL8723B_TXPWR_TRACK; char *pszRadioAFile, *pszRadioBFile, *pszTxPwrTrackFile; pszRadioAFile = sz8723RadioAFile; pszRadioBFile = sz8723RadioBFile; pszTxPwrTrackFile = sz8723BTxPwrTrackFile; /* 
3----------------------------------------------------------------- */ /* 3 <2> Initialize RF */ /* 3----------------------------------------------------------------- */ /* for (eRFPath = RF_PATH_A; eRFPath <pHalData->NumTotalRFPath; eRFPath++) */ for (eRFPath = 0; eRFPath <pHalData->NumTotalRFPath; eRFPath++) { pPhyReg = &pHalData->PHYRegDef[eRFPath]; /*----Store original RFENV control type----*/ switch (eRFPath) { case RF_PATH_A: case RF_PATH_C: u4RegValue = PHY_QueryBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV); break; case RF_PATH_B : case RF_PATH_D: u4RegValue = PHY_QueryBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV<<16); break; } /*----Set RF_ENV enable----*/ PHY_SetBBReg(Adapter, pPhyReg->rfintfe, bRFSI_RFENV<<16, 0x1); udelay(1);/* PlatformStallExecution(1); */ /*----Set RF_ENV output high----*/ PHY_SetBBReg(Adapter, pPhyReg->rfintfo, bRFSI_RFENV, 0x1); udelay(1);/* PlatformStallExecution(1); */ /* Set bit number of Address and Data for RF register */ PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireAddressLength, 0x0); /* Set 1 to 4 bits for 8255 */ udelay(1);/* PlatformStallExecution(1); */ PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireDataLength, 0x0); /* Set 0 to 12 bits for 8255 */ udelay(1);/* PlatformStallExecution(1); */ /*----Initialize RF from configuration file----*/ switch (eRFPath) { case RF_PATH_A: #ifdef CONFIG_LOAD_PHY_PARA_FROM_FILE if (PHY_ConfigRFWithParaFile(Adapter, pszRadioAFile, eRFPath) == _FAIL) #endif { if (HAL_STATUS_FAILURE ==ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, CONFIG_RF_RADIO, (ODM_RF_RADIO_PATH_E)eRFPath)) rtStatus = _FAIL; } break; case RF_PATH_B: #ifdef CONFIG_LOAD_PHY_PARA_FROM_FILE if (PHY_ConfigRFWithParaFile(Adapter, pszRadioBFile, eRFPath) == _FAIL) #endif { if (HAL_STATUS_FAILURE ==ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, CONFIG_RF_RADIO, (ODM_RF_RADIO_PATH_E)eRFPath)) rtStatus = _FAIL; } break; case RF_PATH_C: break; case RF_PATH_D: break; } /*----Restore RFENV control type----*/ switch (eRFPath) { case RF_PATH_A: case RF_PATH_C: PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV, u4RegValue); break; case RF_PATH_B : case RF_PATH_D: PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV<<16, u4RegValue); break; } if (rtStatus != _SUCCESS) { /* RT_TRACE(COMP_FPGA, DBG_LOUD, ("phy_RF6052_Config_ParaFile():Radio[%d] Fail!!", eRFPath)); */ goto phy_RF6052_Config_ParaFile_Fail; } } /* 3 ----------------------------------------------------------------- */ /* 3 Configuration of Tx Power Tracking */ /* 3 ----------------------------------------------------------------- */ #ifdef CONFIG_LOAD_PHY_PARA_FROM_FILE if (PHY_ConfigRFWithTxPwrTrackParaFile(Adapter, pszTxPwrTrackFile) == _FAIL) #endif { ODM_ConfigRFWithTxPwrTrackHeaderFile(&pHalData->odmpriv); } /* RT_TRACE(COMP_INIT, DBG_LOUD, ("<---phy_RF6052_Config_ParaFile()\n")); */ return rtStatus; phy_RF6052_Config_ParaFile_Fail: return rtStatus; } int PHY_RF6052_Config8723B( struct adapter * Adapter) { struct hal_com_data *pHalData = GET_HAL_DATA(Adapter); int rtStatus = _SUCCESS; /* */ /* Initialize general global value */ /* */ /* TODO: Extend RF_PATH_C and RF_PATH_D in the future */ if (pHalData->rf_type == RF_1T1R) pHalData->NumTotalRFPath = 1; else pHalData->NumTotalRFPath = 2; /* */ /* Config BB and RF */ /* */ rtStatus = phy_RF6052_Config_ParaFile(Adapter); return rtStatus; } /* End of HalRf6052.c */
gpl-2.0
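PHY_RF6052SetBandwidth8723B() above is a cached read-modify-write: bits 10..11 of RfRegChnlVal[0] are cleared with the 0xfffff3ff mask and then set to the bandwidth code (BIT10 | BIT11 for 20 MHz, BIT10 alone for 40 MHz) before the value is written to both RF paths. A minimal sketch of just the bit manipulation (the helper names are illustrative, not driver API):

/* Sketch of the channel/bandwidth bitfield update done above. */
#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

static uint32_t set_bw_20mhz(uint32_t reg) { return (reg & 0xfffff3ff) | BIT(10) | BIT(11); }
static uint32_t set_bw_40mhz(uint32_t reg) { return (reg & 0xfffff3ff) | BIT(10); }

int main(void)
{
    uint32_t reg = 0xdeadbeef;                  /* arbitrary cached register image */
    printf("20M: %#010x\n", set_bw_20mhz(reg)); /* bits 10..11 == 0b11 */
    printf("40M: %#010x\n", set_bw_40mhz(reg)); /* bits 10..11 == 0b01 */
    return 0;
}

Keeping a shadow copy of the register and masking only the bandwidth field is what lets the driver change bandwidth without disturbing the channel bits stored in the same register.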
quantsini/cog
Extdeps/ffmpeg/src/libavcodec/dpx.c
4
7332
/* * DPX (.dpx) image decoder * Copyright (c) 2009 Jimmy Christensen * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/intreadwrite.h" #include "libavutil/imgutils.h" #include "bytestream.h" #include "avcodec.h" typedef struct DPXContext { AVFrame picture; } DPXContext; static unsigned int read32(const uint8_t **ptr, int is_big) { unsigned int temp; if (is_big) { temp = AV_RB32(*ptr); } else { temp = AV_RL32(*ptr); } *ptr += 4; return temp; } static inline unsigned make_16bit(unsigned value) { // mask away invalid bits value &= 0xFFC0; // correctly expand to 16 bits return value + (value >> 10); } static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; const uint8_t *buf_end = avpkt->data + avpkt->size; int buf_size = avpkt->size; DPXContext *const s = avctx->priv_data; AVFrame *picture = data; AVFrame *const p = &s->picture; uint8_t *ptr; int magic_num, offset, endian; int x, y; int w, h, stride, bits_per_color, descriptor, elements, target_packet_size, source_packet_size; unsigned int rgbBuffer; if (avpkt->size <= 1634) { av_log(avctx, AV_LOG_ERROR, "Packet too small for DPX header\n"); return AVERROR_INVALIDDATA; } magic_num = AV_RB32(buf); buf += 4; /* Check if the files "magic number" is "SDPX" which means it uses * big-endian or XPDS which is for little-endian files */ if (magic_num == AV_RL32("SDPX")) { endian = 0; } else if (magic_num == AV_RB32("SDPX")) { endian = 1; } else { av_log(avctx, AV_LOG_ERROR, "DPX marker not found\n"); return -1; } offset = read32(&buf, endian); if (avpkt->size <= offset) { av_log(avctx, AV_LOG_ERROR, "Invalid data start offset\n"); return AVERROR_INVALIDDATA; } // Need to end in 0x304 offset from start of file buf = avpkt->data + 0x304; w = read32(&buf, endian); h = read32(&buf, endian); // Need to end in 0x320 to read the descriptor buf += 20; descriptor = buf[0]; // Need to end in 0x323 to read the bits per color buf += 3; avctx->bits_per_raw_sample = bits_per_color = buf[0]; buf += 825; avctx->sample_aspect_ratio.num = read32(&buf, endian); avctx->sample_aspect_ratio.den = read32(&buf, endian); switch (descriptor) { case 51: // RGBA elements = 4; break; case 50: // RGB elements = 3; break; default: av_log(avctx, AV_LOG_ERROR, "Unsupported descriptor %d\n", descriptor); return -1; } switch (bits_per_color) { case 8: if (elements == 4) { avctx->pix_fmt = PIX_FMT_RGBA; } else { avctx->pix_fmt = PIX_FMT_RGB24; } source_packet_size = elements; target_packet_size = elements; break; case 10: avctx->pix_fmt = PIX_FMT_RGB48; target_packet_size = 6; source_packet_size = 4; break; case 12: case 16: if (endian) { avctx->pix_fmt = PIX_FMT_RGB48BE; } else { avctx->pix_fmt = PIX_FMT_RGB48LE; } target_packet_size = 6; source_packet_size = elements * 2; break; default: av_log(avctx, 
AV_LOG_ERROR, "Unsupported color depth : %d\n", bits_per_color); return -1; } if (s->picture.data[0]) avctx->release_buffer(avctx, &s->picture); if (av_image_check_size(w, h, 0, avctx)) return -1; if (w != avctx->width || h != avctx->height) avcodec_set_dimensions(avctx, w, h); if (avctx->get_buffer(avctx, p) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } // Move pointer to offset from start of file buf = avpkt->data + offset; ptr = p->data[0]; stride = p->linesize[0]; if (source_packet_size*avctx->width*avctx->height > buf_end - buf) { av_log(avctx, AV_LOG_ERROR, "Overread buffer. Invalid header?\n"); return -1; } switch (bits_per_color) { case 10: for (x = 0; x < avctx->height; x++) { uint16_t *dst = (uint16_t*)ptr; for (y = 0; y < avctx->width; y++) { rgbBuffer = read32(&buf, endian); // Read out the 10-bit colors and convert to 16-bit *dst++ = make_16bit(rgbBuffer >> 16); *dst++ = make_16bit(rgbBuffer >> 6); *dst++ = make_16bit(rgbBuffer << 4); } ptr += stride; } break; case 8: case 12: // Treat 12-bit as 16-bit case 16: if (source_packet_size == target_packet_size) { for (x = 0; x < avctx->height; x++) { memcpy(ptr, buf, target_packet_size*avctx->width); ptr += stride; buf += source_packet_size*avctx->width; } } else { for (x = 0; x < avctx->height; x++) { uint8_t *dst = ptr; for (y = 0; y < avctx->width; y++) { memcpy(dst, buf, target_packet_size); dst += target_packet_size; buf += source_packet_size; } ptr += stride; } } break; } *picture = s->picture; *data_size = sizeof(AVPicture); return buf_size; } static av_cold int decode_init(AVCodecContext *avctx) { DPXContext *s = avctx->priv_data; avcodec_get_frame_defaults(&s->picture); avctx->coded_frame = &s->picture; return 0; } static av_cold int decode_end(AVCodecContext *avctx) { DPXContext *s = avctx->priv_data; if (s->picture.data[0]) avctx->release_buffer(avctx, &s->picture); return 0; } AVCodec ff_dpx_decoder = { .name = "dpx", .type = AVMEDIA_TYPE_VIDEO, .id = CODEC_ID_DPX, .priv_data_size = sizeof(DPXContext), .init = decode_init, .close = decode_end, .decode = decode_frame, .long_name = NULL_IF_CONFIG_SMALL("DPX image"), };
gpl-2.0
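make_16bit() in the DPX decoder above expands a 10-bit component sitting in the top of a 16-bit lane to full 16-bit range: it masks down to the 10 significant bits, then replicates the top bits into the low 6 so that 0x3FF maps to 0xFFFF. A standalone sketch that also shows the shifts the decoder applies to pull R, G and B out of one 32-bit packed word (bits 31..22, 21..12 and 11..2):

/* Sketch of the DPX 10-bit -> 16-bit expansion and component extraction. */
#include <stdio.h>
#include <stdint.h>

static unsigned make_16bit(unsigned value)
{
    value &= 0xFFC0;              /* keep the 10 significant bits */
    return value + (value >> 10); /* replicate top bits into the low 6 */
}

int main(void)
{
    /* pack R=1023 (full scale), G=512 (mid), B=0 into one 32-bit word */
    uint32_t word = (1023u << 22) | (512u << 12) | (0u << 2);
    printf("R=%#06x G=%#06x B=%#06x\n",
           make_16bit(word >> 16),  /* bits 31..22 land in lane bits 15..6 */
           make_16bit(word >> 6),   /* bits 21..12 */
           make_16bit(word << 4));  /* bits 11..2; mask discards overflow */
    return 0;
}

Full scale prints 0xffff and mid scale 0x8020, i.e. the replication approximates value * 65535 / 1023 without a multiply or divide.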
lynxluna/linux-ginger
arch/arm/kernel/signal.c
4
19646
/* * linux/arch/arm/kernel/signal.c * * Copyright (C) 1995-2002 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/errno.h> #include <linux/signal.h> #include <linux/personality.h> #include <linux/freezer.h> #include <linux/uaccess.h> #include <linux/tracehook.h> #include <asm/elf.h> #include <asm/cacheflush.h> #include <asm/ucontext.h> #include <asm/unistd.h> #include "ptrace.h" #include "signal.h" #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) /* * For ARM syscalls, we encode the syscall number into the instruction. */ #define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)) #define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)) /* * With EABI, the syscall number has to be loaded into r7. */ #define MOV_R7_NR_SIGRETURN (0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE)) #define MOV_R7_NR_RT_SIGRETURN (0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE)) /* * For Thumb syscalls, we pass the syscall number via r7. We therefore * need two 16-bit instructions. */ #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE)) #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE)) const unsigned long sigreturn_codes[7] = { MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN, MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN, }; /* * atomically swap in the new signal mask, and wait for a signal. */ asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask) { mask &= _BLOCKABLE; spin_lock_irq(&current->sighand->siglock); current->saved_sigmask = current->blocked; siginitset(&current->blocked, mask); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); current->state = TASK_INTERRUPTIBLE; schedule(); set_restore_sigmask(); return -ERESTARTNOHAND; } asmlinkage int sys_sigaction(int sig, const struct old_sigaction __user *act, struct old_sigaction __user *oact) { struct k_sigaction new_ka, old_ka; int ret; if (act) { old_sigset_t mask; if (!access_ok(VERIFY_READ, act, sizeof(*act)) || __get_user(new_ka.sa.sa_handler, &act->sa_handler) || __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) return -EFAULT; __get_user(new_ka.sa.sa_flags, &act->sa_flags); __get_user(mask, &act->sa_mask); siginitset(&new_ka.sa.sa_mask, mask); } ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
&old_ka : NULL); if (!ret && oact) { if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) return -EFAULT; __put_user(old_ka.sa.sa_flags, &oact->sa_flags); __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); } return ret; } #ifdef CONFIG_CRUNCH static int preserve_crunch_context(struct crunch_sigframe __user *frame) { char kbuf[sizeof(*frame) + 8]; struct crunch_sigframe *kframe; /* the crunch context must be 64 bit aligned */ kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7); kframe->magic = CRUNCH_MAGIC; kframe->size = CRUNCH_STORAGE_SIZE; crunch_task_copy(current_thread_info(), &kframe->storage); return __copy_to_user(frame, kframe, sizeof(*frame)); } static int restore_crunch_context(struct crunch_sigframe __user *frame) { char kbuf[sizeof(*frame) + 8]; struct crunch_sigframe *kframe; /* the crunch context must be 64 bit aligned */ kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7); if (__copy_from_user(kframe, frame, sizeof(*frame))) return -1; if (kframe->magic != CRUNCH_MAGIC || kframe->size != CRUNCH_STORAGE_SIZE) return -1; crunch_task_restore(current_thread_info(), &kframe->storage); return 0; } #endif #ifdef CONFIG_IWMMXT static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame) { char kbuf[sizeof(*frame) + 8]; struct iwmmxt_sigframe *kframe; /* the iWMMXt context must be 64 bit aligned */ kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7); kframe->magic = IWMMXT_MAGIC; kframe->size = IWMMXT_STORAGE_SIZE; iwmmxt_task_copy(current_thread_info(), &kframe->storage); return __copy_to_user(frame, kframe, sizeof(*frame)); } static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame) { char kbuf[sizeof(*frame) + 8]; struct iwmmxt_sigframe *kframe; /* the iWMMXt context must be 64 bit aligned */ kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7); if (__copy_from_user(kframe, frame, sizeof(*frame))) return -1; if (kframe->magic != IWMMXT_MAGIC || kframe->size != IWMMXT_STORAGE_SIZE) return -1; iwmmxt_task_restore(current_thread_info(), &kframe->storage); return 0; } #endif /* * Do a signal return; undo the signal stack. These are aligned to 64-bit. 
*/ struct sigframe { struct ucontext uc; unsigned long retcode[2]; }; struct rt_sigframe { struct siginfo info; struct sigframe sig; }; static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf) { struct aux_sigframe __user *aux; sigset_t set; int err; err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set)); if (err == 0) { sigdelsetmask(&set, ~_BLOCKABLE); spin_lock_irq(&current->sighand->siglock); current->blocked = set; recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); } __get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err); __get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err); __get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err); __get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err); __get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err); __get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err); __get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err); __get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err); __get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err); __get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err); __get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err); __get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err); __get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err); __get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err); __get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err); __get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err); __get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err); err |= !valid_user_regs(regs); aux = (struct aux_sigframe __user *) sf->uc.uc_regspace; #ifdef CONFIG_CRUNCH if (err == 0) err |= restore_crunch_context(&aux->crunch); #endif #ifdef CONFIG_IWMMXT if (err == 0 && test_thread_flag(TIF_USING_IWMMXT)) err |= restore_iwmmxt_context(&aux->iwmmxt); #endif #ifdef CONFIG_VFP // if (err == 0) // err |= vfp_restore_state(&sf->aux.vfp); #endif return err; } asmlinkage int sys_sigreturn(struct pt_regs *regs) { struct sigframe __user *frame; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; /* * Since we stacked the signal on a 64-bit boundary, * then 'sp' should be word aligned here. If it's * not, then the user is trying to mess with us. */ if (regs->ARM_sp & 7) goto badframe; frame = (struct sigframe __user *)regs->ARM_sp; if (!access_ok(VERIFY_READ, frame, sizeof (*frame))) goto badframe; if (restore_sigframe(regs, frame)) goto badframe; single_step_trap(current); return regs->ARM_r0; badframe: force_sig(SIGSEGV, current); return 0; } asmlinkage int sys_rt_sigreturn(struct pt_regs *regs) { struct rt_sigframe __user *frame; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; /* * Since we stacked the signal on a 64-bit boundary, * then 'sp' should be word aligned here. If it's * not, then the user is trying to mess with us. 
*/ if (regs->ARM_sp & 7) goto badframe; frame = (struct rt_sigframe __user *)regs->ARM_sp; if (!access_ok(VERIFY_READ, frame, sizeof (*frame))) goto badframe; if (restore_sigframe(regs, &frame->sig)) goto badframe; if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT) goto badframe; single_step_trap(current); return regs->ARM_r0; badframe: force_sig(SIGSEGV, current); return 0; } static int setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set) { struct aux_sigframe __user *aux; int err = 0; __put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err); __put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err); __put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err); __put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err); __put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err); __put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err); __put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err); __put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err); __put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err); __put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err); __put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err); __put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err); __put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err); __put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err); __put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err); __put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err); __put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err); __put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err); __put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err); __put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err); __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err); err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set)); aux = (struct aux_sigframe __user *) sf->uc.uc_regspace; #ifdef CONFIG_CRUNCH if (err == 0) err |= preserve_crunch_context(&aux->crunch); #endif #ifdef CONFIG_IWMMXT if (err == 0 && test_thread_flag(TIF_USING_IWMMXT)) err |= preserve_iwmmxt_context(&aux->iwmmxt); #endif #ifdef CONFIG_VFP // if (err == 0) // err |= vfp_save_state(&sf->aux.vfp); #endif __put_user_error(0, &aux->end_magic, err); return err; } static inline void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, int framesize) { unsigned long sp = regs->ARM_sp; void __user *frame; /* * This is the X/Open sanctioned signal stack switching. */ if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) sp = current->sas_ss_sp + current->sas_ss_size; /* * ATPCS B01 mandates 8-byte alignment */ frame = (void __user *)((sp - framesize) & ~7); /* * Check that we can actually write to the signal frame. */ if (!access_ok(VERIFY_WRITE, frame, framesize)) frame = NULL; return frame; } static int setup_return(struct pt_regs *regs, struct k_sigaction *ka, unsigned long __user *rc, void __user *frame, int usig) { unsigned long handler = (unsigned long)ka->sa.sa_handler; unsigned long retcode; int thumb = 0; unsigned long cpsr = regs->ARM_cpsr & ~PSR_f; /* * Maybe we need to deliver a 32-bit signal to a 26-bit task. 
*/ if (ka->sa.sa_flags & SA_THIRTYTWO) cpsr = (cpsr & ~MODE_MASK) | USR_MODE; #ifdef CONFIG_ARM_THUMB if (elf_hwcap & HWCAP_THUMB) { /* * The LSB of the handler determines if we're going to * be using THUMB or ARM mode for this signal handler. */ thumb = handler & 1; if (thumb) { cpsr |= PSR_T_BIT; #if __LINUX_ARM_ARCH__ >= 7 /* clear the If-Then Thumb-2 execution state */ cpsr &= ~PSR_IT_MASK; #endif } else cpsr &= ~PSR_T_BIT; } #endif if (ka->sa.sa_flags & SA_RESTORER) { retcode = (unsigned long)ka->sa.sa_restorer; } else { unsigned int idx = thumb << 1; if (ka->sa.sa_flags & SA_SIGINFO) idx += 3; if (__put_user(sigreturn_codes[idx], rc) || __put_user(sigreturn_codes[idx+1], rc+1)) return 1; if (cpsr & MODE32_BIT) { /* * 32-bit code can use the new high-page * signal return code support. */ retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb; } else { /* * Ensure that the instruction cache sees * the return code written onto the stack. */ flush_icache_range((unsigned long)rc, (unsigned long)(rc + 2)); retcode = ((unsigned long)rc) + thumb; } } regs->ARM_r0 = usig; regs->ARM_sp = (unsigned long)frame; regs->ARM_lr = retcode; regs->ARM_pc = handler; regs->ARM_cpsr = cpsr; return 0; } static int setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs) { struct sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame)); int err = 0; if (!frame) return 1; /* * Set uc.uc_flags to a value which sc.trap_no would never have. */ __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err); err |= setup_sigframe(frame, regs, set); if (err == 0) err = setup_return(regs, ka, frame->retcode, frame, usig); return err; } static int setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { struct rt_sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame)); stack_t stack; int err = 0; if (!frame) return 1; err |= copy_siginfo_to_user(&frame->info, info); __put_user_error(0, &frame->sig.uc.uc_flags, err); __put_user_error(NULL, &frame->sig.uc.uc_link, err); memset(&stack, 0, sizeof(stack)); stack.ss_sp = (void __user *)current->sas_ss_sp; stack.ss_flags = sas_ss_flags(regs->ARM_sp); stack.ss_size = current->sas_ss_size; err |= __copy_to_user(&frame->sig.uc.uc_stack, &stack, sizeof(stack)); err |= setup_sigframe(&frame->sig, regs, set); if (err == 0) err = setup_return(regs, ka, frame->sig.retcode, frame, usig); if (err == 0) { /* * For realtime signals we must also set the second and third * arguments for the signal handler. * -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06 */ regs->ARM_r1 = (unsigned long)&frame->info; regs->ARM_r2 = (unsigned long)&frame->sig.uc; } return err; } static inline void setup_syscall_restart(struct pt_regs *regs) { regs->ARM_r0 = regs->ARM_ORIG_r0; regs->ARM_pc -= thumb_mode(regs) ? 2 : 4; } /* * OK, we're invoking a handler */ static int handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs * regs, int syscall) { struct thread_info *thread = current_thread_info(); struct task_struct *tsk = current; int usig = sig; int ret; /* * If we were from a system call, check for system call restarting... 
*/ if (syscall) { switch (regs->ARM_r0) { case -ERESTART_RESTARTBLOCK: case -ERESTARTNOHAND: regs->ARM_r0 = -EINTR; break; case -ERESTARTSYS: if (!(ka->sa.sa_flags & SA_RESTART)) { regs->ARM_r0 = -EINTR; break; } /* fallthrough */ case -ERESTARTNOINTR: setup_syscall_restart(regs); } } /* * translate the signal */ if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap) usig = thread->exec_domain->signal_invmap[usig]; /* * Set up the stack frame */ if (ka->sa.sa_flags & SA_SIGINFO) ret = setup_rt_frame(usig, ka, info, oldset, regs); else ret = setup_frame(usig, ka, oldset, regs); /* * Check that the resulting registers are actually sane. */ ret |= !valid_user_regs(regs); if (ret != 0) { force_sigsegv(sig, tsk); return ret; } /* * Block the signal if we were successful. */ spin_lock_irq(&tsk->sighand->siglock); sigorsets(&tsk->blocked, &tsk->blocked, &ka->sa.sa_mask); if (!(ka->sa.sa_flags & SA_NODEFER)) sigaddset(&tsk->blocked, sig); recalc_sigpending(); spin_unlock_irq(&tsk->sighand->siglock); return 0; } /* * Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. * * Note that we go through the signals twice: once to check the signals that * the kernel can handle, and then we build all the user-level signal handling * stack-frames in one go after that. */ static void do_signal(struct pt_regs *regs, int syscall) { struct k_sigaction ka; siginfo_t info; int signr; /* * We want the common case to go fast, which * is why we may in certain cases get here from * kernel mode. Just return without doing anything * if so. */ if (!user_mode(regs)) return; if (try_to_freeze()) goto no_signal; single_step_clear(current); signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { sigset_t *oldset; if (test_thread_flag(TIF_RESTORE_SIGMASK)) oldset = &current->saved_sigmask; else oldset = &current->blocked; if (handle_signal(signr, &ka, &info, oldset, regs, syscall) == 0) { /* * A signal was successfully delivered; the saved * sigmask will have been stored in the signal frame, * and will be restored by sigreturn, so we can simply * clear the TIF_RESTORE_SIGMASK flag. */ if (test_thread_flag(TIF_RESTORE_SIGMASK)) clear_thread_flag(TIF_RESTORE_SIGMASK); } single_step_set(current); return; } no_signal: /* * No signal to deliver to the process - restart the syscall. */ if (syscall) { if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) { if (thumb_mode(regs)) { regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE; regs->ARM_pc -= 2; } else { #if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT) regs->ARM_r7 = __NR_restart_syscall; regs->ARM_pc -= 4; #else u32 __user *usp; u32 swival = __NR_restart_syscall; regs->ARM_sp -= 12; usp = (u32 __user *)regs->ARM_sp; /* * Either we supports OABI only, or we have * EABI with the OABI compat layer enabled. * In the later case we don't know if user * space is EABI or not, and if not we must * not clobber r7. Always using the OABI * syscall solves that issue and works for * all those cases. 
*/ swival = swival - __NR_SYSCALL_BASE + __NR_OABI_SYSCALL_BASE; put_user(regs->ARM_pc, &usp[0]); /* swi __NR_restart_syscall */ put_user(0xef000000 | swival, &usp[1]); /* ldr pc, [sp], #12 */ put_user(0xe49df00c, &usp[2]); flush_icache_range((unsigned long)usp, (unsigned long)(usp + 3)); regs->ARM_pc = regs->ARM_sp + 4; #endif } } if (regs->ARM_r0 == -ERESTARTNOHAND || regs->ARM_r0 == -ERESTARTSYS || regs->ARM_r0 == -ERESTARTNOINTR) { setup_syscall_restart(regs); } /* If there's no signal to deliver, we just put the saved sigmask * back. */ if (test_thread_flag(TIF_RESTORE_SIGMASK)) { clear_thread_flag(TIF_RESTORE_SIGMASK); sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); } } single_step_set(current); } asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall) { if (thread_flags & _TIF_SIGPENDING) do_signal(regs, syscall); if (thread_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); if (current->replacement_session_keyring) key_replace_session_keyring(); } }
gpl-2.0
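A note on the frame placement in the signal code above: get_sigframe() reserves the frame below the chosen stack pointer and rounds the result down to an 8-byte boundary, as ATPCS B01 requires. The user-space sketch below replays just that arithmetic; the sample SP and frame size are made up.

#include <assert.h>
#include <stdio.h>

/* Mirror of the "(sp - framesize) & ~7" step in get_sigframe():
 * reserve the frame below sp, then round down to 8-byte alignment. */
static unsigned long align_frame(unsigned long sp, unsigned long framesize)
{
        return (sp - framesize) & ~7UL;
}

int main(void)
{
        unsigned long sp = 0xbefff3f5UL;        /* hypothetical, misaligned */
        unsigned long frame = align_frame(sp, 0x2a0);

        assert((frame & 7) == 0);               /* ATPCS: 8-byte aligned */
        printf("frame at %#lx\n", frame);
        return 0;
}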
christianurich/DynaMind-ToolBox
DynaMind-GDALModules/3rdparty/swmm/keywords.c
4
9190
//----------------------------------------------------------------------------- // keywords.c // // Project: EPA SWMM5 // Version: 5.0 // Date: 6/19/07 (Build 5.0.010) // 2/4/08 (Build 5.0.012) // 3/11/08 (Build 5.0.013) // 1/21/09 (Build 5.0.014) // 4/10/09 (Build 5.0.015) // 07/30/10 (Build 5.0.019) // Author: L. Rossman // // Exportable keyword dictionary // // NOTE: the keywords in each list must appear in same order used // by its complementary enumerated variable in enums.h and // must be terminated by NULL. The actual text of each keyword // is defined in text.h. //----------------------------------------------------------------------------- #define _CRT_SECURE_NO_DEPRECATE #include <stdlib.h> // need this to define NULL #include "text.h" char* FileTypeWords[] = { w_RAINFALL, w_RUNOFF, w_HOTSTART, w_RDII, w_INFLOWS, w_OUTFLOWS, NULL}; char* FileModeWords[] = { w_NO, w_SCRATCH, w_USE, w_SAVE, NULL}; char* BuildupTypeWords[] = { w_NONE, w_POW, w_EXP, w_SAT, w_EXT, NULL}; //(5.0.019 - LR) char* NormalizerWords[] = { w_PER_AREA, w_PER_CURB, NULL}; char* WashoffTypeWords[] = { w_NONE, w_EXP, w_RC, w_EMC, NULL}; char* TreatTypeWords[] = { w_REMOVAL, w_CONCEN, NULL}; char* ProcessVarWords[] = { w_HRT, w_DT, w_FLOW, w_DEPTH, w_AREA, NULL}; char* PatternTypeWords[] = { w_MONTHLY, w_DAILY, w_HOURLY, w_WEEKEND, NULL}; char* CurveTypeWords[] = { w_STORAGE, w_DIVERSION, w_TIDAL, w_RATING, w_CONTROLS, w_SHAPE, //(5.0.010 - LR) w_PUMP1, w_PUMP2, w_PUMP3, w_PUMP4, NULL}; char* RuleKeyWords[] = { w_RULE, w_IF, w_AND, w_OR, w_THEN, w_ELSE, w_PRIORITY, NULL}; char* ReportWords[] = { w_INPUT, w_CONTINUITY, w_FLOWSTATS, w_CONTROLS, w_SUBCATCH, w_NODE, w_LINK, w_NODESTATS, NULL}; char* NoYesWords[] = { w_NO, w_YES, NULL}; char* NoneAllWords[] = { w_NONE, w_ALL, NULL}; char* QualUnitsWords[] = { w_MGperL, w_UGperL, w_COUNTperL, NULL}; char* TempKeyWords[] = { w_TIMESERIES, w_FILE, w_WINDSPEED, w_SNOWMELT, w_ADC, NULL}; char* EvapTypeWords[] = { w_CONSTANT, w_MONTHLY, w_TIMESERIES, //(5.0.016 - LR) w_TEMPERATURE, w_FILE, w_RECOVERY, //(5.0.016 - LR) w_DRYONLY, NULL}; //(5.0.019 - LR) char* SnowmeltWords[] = { w_PLOWABLE, w_IMPERV, w_PERV, w_REMOVAL, NULL}; char* GageDataWords[] = { w_TIMESERIES, w_FILE, NULL}; char* RainTypeWords[] = { w_INTENSITY, w_VOLUME, w_CUMULATIVE, NULL}; char* RainUnitsWords[] = { w_INCHES, w_MMETER, NULL}; char* OffOnWords[] = { w_OFF, w_ON, NULL}; char* DividerTypeWords[] = { w_CUTOFF, w_TABULAR, w_WEIR, w_OVERFLOW, NULL}; char* OutfallTypeWords[] = { w_FREE, w_NORMAL, w_FIXED, w_TIDAL, w_TIMESERIES, NULL}; char* OrificeTypeWords[] = { w_SIDE, w_BOTTOM, NULL}; char* WeirTypeWords[] = { w_TRANSVERSE, w_SIDEFLOW, w_VNOTCH, w_TRAPEZOIDAL, NULL}; char* NormalFlowWords[] = { w_SLOPE, w_FROUDE, w_BOTH, NULL}; //(5.0.010 - LR) char* RelationWords[] = { w_TABULAR, w_FUNCTIONAL, NULL}; char* OptionWords[] = { w_FLOW_UNITS, w_INFIL_MODEL, w_ROUTE_MODEL, w_START_DATE, w_START_TIME, w_END_DATE, w_END_TIME, w_REPORT_START_DATE, w_REPORT_START_TIME, w_SWEEP_START, w_SWEEP_END, w_START_DRY_DAYS, w_WET_STEP, w_DRY_STEP, w_ROUTE_STEP, w_REPORT_STEP, w_ALLOW_PONDING, w_INERT_DAMPING, w_SLOPE_WEIGHTING, w_VARIABLE_STEP, w_NORMAL_FLOW_LTD, w_LENGTHENING_STEP, w_MIN_SURFAREA, w_COMPATIBILITY, w_SKIP_STEADY_STATE, w_TEMPDIR, w_IGNORE_RAINFALL, w_FORCE_MAIN_EQN, //(5.0.010 - LR) w_LINK_OFFSETS, w_MIN_SLOPE, //(5.0.014 - LR) w_IGNORE_SNOWMELT, w_IGNORE_GWATER, //(5.0.014 - LR) w_IGNORE_ROUTING, w_IGNORE_QUALITY, //(5.0.014 - LR) NULL}; //(5.0.014 - LR) char* FlowUnitWords[] = { w_CFS, w_GPM, w_MGD, 
w_CMS, w_LPS, w_MLD, NULL}; char* ForceMainEqnWords[] = { w_H_W, w_D_W, NULL}; //(5.0.010 - LR) char* LinkOffsetWords[] = { w_DEPTH, w_ELEVATION, NULL}; //(5.0.012 - LR) char* OldRouteModelWords[] = { w_NONE, w_NF, w_KW, w_EKW, w_DW, NULL}; //(5.0.010 - LR) char* RouteModelWords[] = { w_NONE, w_STEADY, w_KINWAVE, w_XKINWAVE, //(5.0.010 - LR) w_DYNWAVE, NULL}; char* InfilModelWords[] = { w_HORTON, w_GREEN_AMPT, w_CURVE_NUMEBR, NULL}; char* InertDampingWords[] = { w_NONE, w_PARTIAL, w_FULL, NULL}; //(5.0.013 - LR) char* TransectKeyWords[] = { w_NC, w_X1, w_GR, NULL}; char* XsectTypeWords[] = { w_DUMMY, w_CIRCULAR, w_FILLED_CIRCULAR, w_RECT_CLOSED, w_RECT_OPEN, w_TRAPEZOIDAL, w_TRIANGULAR, w_PARABOLIC, w_POWERFUNC, w_RECT_TRIANG, w_RECT_ROUND, w_MOD_BASKET, w_HORIZELLIPSE, w_VERTELLIPSE, w_ARCH, w_EGGSHAPED, w_HORSESHOE, w_GOTHIC, w_CATENARY, w_SEMIELLIPTICAL, w_BASKETHANDLE, w_SEMICIRCULAR, w_IRREGULAR, w_CUSTOM, //(5.0.010 - LR) w_FORCE_MAIN, NULL}; //(5.0.010 - LR) char* SectWords[] = { ws_TITLE, ws_OPTION, ws_FILE, ws_RAINGAGE, ws_TEMP, ws_EVAP, ws_SUBCATCH, ws_SUBAREA, ws_INFIL, ws_AQUIFER, ws_GROUNDWATER, ws_SNOWMELT, ws_JUNCTION, ws_OUTFALL, ws_STORAGE, ws_DIVIDER, ws_CONDUIT, ws_PUMP, ws_ORIFICE, ws_WEIR, ws_OUTLET, ws_XSECTION, ws_TRANSECT, ws_LOSS, ws_CONTROL, ws_POLLUTANT, ws_LANDUSE, ws_BUILDUP, ws_WASHOFF, ws_COVERAGE, ws_INFLOW, ws_DWF, ws_PATTERN, ws_RDII, ws_UNITHYD, ws_LOADING, ws_TREATMENT, ws_CURVE, ws_TIMESERIES, ws_REPORT, ws_COORDINATE, ws_VERTICES, ws_POLYGON, ws_LABEL, ws_SYMBOL, ws_BACKDROP, ws_TAG, ws_PROFILE, ws_MAP, ws_LID_CONTROL, //(5.0.019 - LR) ws_LID_USAGE, NULL}; //(5.0.019 - LR) char* LoadUnitsWords[] = { w_LBS, w_KG, w_LOGN }; //(5.0.012 - LR) char* NodeTypeWords[] = { w_JUNCTION, w_OUTFALL, //(5.0.012 - LR) w_STORAGE, w_DIVIDER }; //(5.0.012 - LR) char* LinkTypeWords[] = { w_CONDUIT, w_PUMP, w_ORIFICE, //(5.0.012 - LR) w_WEIR, w_OUTLET }; //(5.0.012 - LR) char* PumpTypeWords[] = { w_TYPE1, w_TYPE2, w_TYPE3, w_TYPE4, w_IDEAL }; //(5.0.012 - LR) char* VolUnitsWords[] = { w_MGAL, w_MLTRS }; //(5.0.012 - LR) char* PondingUnitsWords[] = { w_PONDED_FEET, w_PONDED_METERS }; //(5.0.019 - LR) char* UHTypeWords[] = { w_SHORT, w_MEDIUM, w_LONG, NULL}; //(5.0.015 - LR)
gpl-2.0
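Since every dictionary above is a NULL-terminated char* array that parallels an enum in enums.h, decoding an input token reduces to a linear scan whose index is the enum value. SWMM's real matcher lives in another translation unit; the standalone sketch below, with a hypothetical find_keyword() helper, only illustrates why the NULL terminator matters.

#include <stddef.h>
#include <strings.h>    /* strcasecmp (POSIX) */

/* Scan a NULL-terminated keyword list; the returned index doubles as
 * the value of the complementary enum from enums.h. */
static int find_keyword(char *list[], const char *tok)
{
        int i;

        for (i = 0; list[i] != NULL; i++)
                if (strcasecmp(list[i], tok) == 0)
                        return i;
        return -1;      /* not a keyword in this list */
}

/* Assuming text.h defines w_SAVE as "SAVE", find_keyword(FileModeWords,
 * "SAVE") would return 3 (w_NO, w_SCRATCH, w_USE, w_SAVE). */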
liamlacey/Shuttertone
JuceLibraryCode/modules/juce_gui_basics/buttons/juce_ArrowButton.cpp
4
1819
/*
  ==============================================================================

   This file is part of the JUCE library.
   Copyright (c) 2017 - ROLI Ltd.

   JUCE is an open source library subject to commercial or open-source
   licensing.

   By using JUCE, you agree to the terms of both the JUCE 5 End-User License
   Agreement and JUCE 5 Privacy Policy (both updated and effective as of the
   27th April 2017).

   End User License Agreement: www.juce.com/juce-5-licence
   Privacy Policy: www.juce.com/juce-5-privacy-policy

   Or: You may also use this code under the terms of the GPL v3 (see
   www.gnu.org/licenses).

   JUCE IS PROVIDED "AS IS" WITHOUT ANY WARRANTY, AND ALL WARRANTIES, WHETHER
   EXPRESSED OR IMPLIED, INCLUDING MERCHANTABILITY AND FITNESS FOR PURPOSE, ARE
   DISCLAIMED.

  ==============================================================================
*/

namespace juce
{

ArrowButton::ArrowButton (const String& name, float arrowDirectionInRadians, Colour arrowColour)
   : Button (name), colour (arrowColour)
{
    path.addTriangle (0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.5f);
    path.applyTransform (AffineTransform::rotation (MathConstants<float>::twoPi * arrowDirectionInRadians, 0.5f, 0.5f));
}

ArrowButton::~ArrowButton() {}

void ArrowButton::paintButton (Graphics& g, bool /*shouldDrawButtonAsHighlighted*/, bool shouldDrawButtonAsDown)
{
    Path p (path);

    const float offset = shouldDrawButtonAsDown ? 1.0f : 0.0f;
    p.applyTransform (path.getTransformToScaleToFit (offset, offset, getWidth() - 3.0f, getHeight() - 3.0f, false));

    DropShadow (Colours::black.withAlpha (0.3f), shouldDrawButtonAsDown ? 2 : 4, Point<int>()).drawForPath (g, p);

    g.setColour (colour);
    g.fillPath (p);
}

} // namespace juce
gpl-2.0
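One detail in the ArrowButton constructor above deserves a note: the triangle initially points along +x and is then rotated by MathConstants<float>::twoPi * arrowDirectionInRadians, so the second argument effectively behaves as a fraction of a full turn rather than a raw radian value. A tiny C sketch of that scaling (values only for illustration):

#include <math.h>
#include <stdio.h>

/* The constructor multiplies its "radians" argument by 2*pi, so 0.0
 * keeps the arrow pointing right and 0.25/0.5/0.75 step it a
 * quarter-turn at a time. */
int main(void)
{
        const double two_pi = 2.0 * M_PI;
        double dir;

        for (dir = 0.0; dir < 1.0; dir += 0.25)
                printf("arg %.2f -> rotation %.3f rad\n", dir, two_pi * dir);
        return 0;
}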
svn2github/reactos
lib/fslib/vfatlib/fat32.c
4
18378
/* * COPYRIGHT: See COPYING in the top level directory * PROJECT: ReactOS VFAT filesystem library * FILE: fat32.c * PURPOSE: Fat32 support * PROGRAMMERS: Casper S. Hornstrup (chorns@users.sourceforge.net) * Eric Kohl * REVISIONS: * EK 05/04-2003 Created */ #include "vfatlib.h" #define NDEBUG #include <debug.h> static ULONG GetShiftCount(IN ULONG Value) { ULONG i = 1; while (Value > 0) { i++; Value /= 2; } return i - 2; } static ULONG CalcVolumeSerialNumber(VOID) { LARGE_INTEGER SystemTime; TIME_FIELDS TimeFields; ULONG Serial; PUCHAR Buffer; NtQuerySystemTime (&SystemTime); RtlTimeToTimeFields (&SystemTime, &TimeFields); Buffer = (PUCHAR)&Serial; Buffer[0] = (UCHAR)(TimeFields.Year & 0xFF) + (UCHAR)(TimeFields.Hour & 0xFF); Buffer[1] = (UCHAR)(TimeFields.Year >> 8) + (UCHAR)(TimeFields.Minute & 0xFF); Buffer[2] = (UCHAR)(TimeFields.Month & 0xFF) + (UCHAR)(TimeFields.Second & 0xFF); Buffer[3] = (UCHAR)(TimeFields.Day & 0xFF) + (UCHAR)(TimeFields.Milliseconds & 0xFF); return Serial; } static NTSTATUS Fat32WriteBootSector(IN HANDLE FileHandle, IN PFAT32_BOOT_SECTOR BootSector, IN OUT PFORMAT_CONTEXT Context) { IO_STATUS_BLOCK IoStatusBlock; NTSTATUS Status; PFAT32_BOOT_SECTOR NewBootSector; LARGE_INTEGER FileOffset; /* Allocate buffer for new bootsector */ NewBootSector = (PFAT32_BOOT_SECTOR)RtlAllocateHeap(RtlGetProcessHeap(), 0, BootSector->BytesPerSector); if (NewBootSector == NULL) return STATUS_INSUFFICIENT_RESOURCES; /* Zero the new bootsector */ memset(NewBootSector, 0, BootSector->BytesPerSector); /* Copy FAT32 BPB to new bootsector */ memcpy(&NewBootSector->OEMName[0], &BootSector->OEMName[0], FIELD_OFFSET(FAT32_BOOT_SECTOR, Res2) - FIELD_OFFSET(FAT32_BOOT_SECTOR, OEMName)); /* FAT32 BPB length (up to (not including) Res2) */ /* Write the boot sector signature */ NewBootSector->Signature1 = 0xAA550000; /* Write sector 0 */ FileOffset.QuadPart = 0ULL; Status = NtWriteFile(FileHandle, NULL, NULL, NULL, &IoStatusBlock, NewBootSector, BootSector->BytesPerSector, &FileOffset, NULL); if (!NT_SUCCESS(Status)) { DPRINT("NtWriteFile() failed (Status %lx)\n", Status); RtlFreeHeap(RtlGetProcessHeap(), 0, NewBootSector); return Status; } UpdateProgress(Context, 1); /* Write backup boot sector */ if (BootSector->BootBackup != 0x0000) { FileOffset.QuadPart = (ULONGLONG)((ULONG)BootSector->BootBackup * BootSector->BytesPerSector); Status = NtWriteFile(FileHandle, NULL, NULL, NULL, &IoStatusBlock, NewBootSector, BootSector->BytesPerSector, &FileOffset, NULL); if (!NT_SUCCESS(Status)) { DPRINT("NtWriteFile() failed (Status %lx)\n", Status); RtlFreeHeap(RtlGetProcessHeap(), 0, NewBootSector); return Status; } UpdateProgress(Context, 1); } /* Free the new boot sector */ RtlFreeHeap(RtlGetProcessHeap(), 0, NewBootSector); return Status; } static NTSTATUS Fat32WriteFsInfo(IN HANDLE FileHandle, IN PFAT32_BOOT_SECTOR BootSector, IN OUT PFORMAT_CONTEXT Context) { IO_STATUS_BLOCK IoStatusBlock; NTSTATUS Status; PFAT32_FSINFO FsInfo; LARGE_INTEGER FileOffset; /* Allocate buffer for new sector */ FsInfo = (PFAT32_FSINFO)RtlAllocateHeap(RtlGetProcessHeap(), 0, BootSector->BytesPerSector); if (FsInfo == NULL) return STATUS_INSUFFICIENT_RESOURCES; /* Zero the new sector */ memset(FsInfo, 0, BootSector->BytesPerSector); FsInfo->LeadSig = 0x41615252; FsInfo->StrucSig = 0x61417272; FsInfo->FreeCount = 0xffffffff; FsInfo->NextFree = 0xffffffff; FsInfo->TrailSig = 0xaa550000; /* Write sector */ FileOffset.QuadPart = BootSector->FSInfoSector * BootSector->BytesPerSector; Status = NtWriteFile(FileHandle, NULL, NULL, 
NULL, &IoStatusBlock, FsInfo, BootSector->BytesPerSector, &FileOffset, NULL); if (!NT_SUCCESS(Status)) { DPRINT("NtWriteFile() failed (Status %lx)\n", Status); RtlFreeHeap(RtlGetProcessHeap(), 0, FsInfo); return Status; } UpdateProgress(Context, 1); /* Free the new sector buffer */ RtlFreeHeap(RtlGetProcessHeap(), 0, FsInfo); return Status; } static NTSTATUS Fat32WriteFAT(IN HANDLE FileHandle, IN ULONG SectorOffset, IN PFAT32_BOOT_SECTOR BootSector, IN OUT PFORMAT_CONTEXT Context) { IO_STATUS_BLOCK IoStatusBlock; NTSTATUS Status; PUCHAR Buffer; LARGE_INTEGER FileOffset; ULONG i; ULONG Sectors; /* Allocate buffer */ Buffer = (PUCHAR)RtlAllocateHeap(RtlGetProcessHeap(), 0, 64 * 1024); if (Buffer == NULL) return STATUS_INSUFFICIENT_RESOURCES; /* Zero the buffer */ memset(Buffer, 0, 64 * 1024); /* FAT cluster 0 */ Buffer[0] = 0xf8; /* Media type */ Buffer[1] = 0xff; Buffer[2] = 0xff; Buffer[3] = 0x0f; /* FAT cluster 1 */ Buffer[4] = 0xff; /* Clean shutdown, no disk read/write errors, end-of-cluster (EOC) mark */ Buffer[5] = 0xff; Buffer[6] = 0xff; Buffer[7] = 0x0f; /* FAT cluster 2 */ Buffer[8] = 0xff; /* End of root directory */ Buffer[9] = 0xff; Buffer[10] = 0xff; Buffer[11] = 0x0f; /* Write first sector of the FAT */ FileOffset.QuadPart = (SectorOffset + BootSector->ReservedSectors) * BootSector->BytesPerSector; Status = NtWriteFile(FileHandle, NULL, NULL, NULL, &IoStatusBlock, Buffer, BootSector->BytesPerSector, &FileOffset, NULL); if (!NT_SUCCESS(Status)) { DPRINT("NtWriteFile() failed (Status %lx)\n", Status); RtlFreeHeap(RtlGetProcessHeap(), 0, Buffer); return Status; } UpdateProgress(Context, 1); /* Zero the begin of the buffer */ memset(Buffer, 0, 12); /* Zero the rest of the FAT */ Sectors = 64 * 1024 / BootSector->BytesPerSector; for (i = 1; i < BootSector->FATSectors32; i += Sectors) { /* Zero some sectors of the FAT */ FileOffset.QuadPart = (SectorOffset + BootSector->ReservedSectors + i) * BootSector->BytesPerSector; if ((BootSector->FATSectors32 - i) <= Sectors) { Sectors = BootSector->FATSectors32 - i; } Status = NtWriteFile(FileHandle, NULL, NULL, NULL, &IoStatusBlock, Buffer, Sectors * BootSector->BytesPerSector, &FileOffset, NULL); if (!NT_SUCCESS(Status)) { DPRINT("NtWriteFile() failed (Status %lx)\n", Status); RtlFreeHeap(RtlGetProcessHeap(), 0, Buffer); return Status; } UpdateProgress(Context, Sectors); } /* Free the buffer */ RtlFreeHeap(RtlGetProcessHeap(), 0, Buffer); return Status; } static NTSTATUS Fat32WriteRootDirectory(IN HANDLE FileHandle, IN PFAT32_BOOT_SECTOR BootSector, IN OUT PFORMAT_CONTEXT Context) { IO_STATUS_BLOCK IoStatusBlock; NTSTATUS Status; PUCHAR Buffer; LARGE_INTEGER FileOffset; ULONGLONG FirstDataSector; ULONGLONG FirstRootDirSector; /* Allocate buffer for the cluster */ Buffer = (PUCHAR)RtlAllocateHeap(RtlGetProcessHeap(), 0, BootSector->SectorsPerCluster * BootSector->BytesPerSector); if (Buffer == NULL) return STATUS_INSUFFICIENT_RESOURCES; /* Zero the buffer */ memset(Buffer, 0, BootSector->SectorsPerCluster * BootSector->BytesPerSector); DPRINT("BootSector->ReservedSectors = %lu\n", BootSector->ReservedSectors); DPRINT("BootSector->FATSectors32 = %lu\n", BootSector->FATSectors32); DPRINT("BootSector->RootCluster = %lu\n", BootSector->RootCluster); DPRINT("BootSector->SectorsPerCluster = %lu\n", BootSector->SectorsPerCluster); /* Write cluster */ FirstDataSector = BootSector->ReservedSectors + (BootSector->FATCount * BootSector->FATSectors32) + 0 /* RootDirSectors */; DPRINT("FirstDataSector = %lu\n", FirstDataSector); FirstRootDirSector = 
((BootSector->RootCluster - 2) * BootSector->SectorsPerCluster) + FirstDataSector; FileOffset.QuadPart = FirstRootDirSector * BootSector->BytesPerSector; DPRINT("FirstRootDirSector = %lu\n", FirstRootDirSector); DPRINT("FileOffset = %lu\n", FileOffset.QuadPart); Status = NtWriteFile(FileHandle, NULL, NULL, NULL, &IoStatusBlock, Buffer, BootSector->SectorsPerCluster * BootSector->BytesPerSector, &FileOffset, NULL); if (!NT_SUCCESS(Status)) { DPRINT("NtWriteFile() failed (Status %lx)\n", Status); RtlFreeHeap(RtlGetProcessHeap(), 0, Buffer); return Status; } UpdateProgress(Context, (ULONG)BootSector->SectorsPerCluster); /* Free the buffer */ RtlFreeHeap(RtlGetProcessHeap(), 0, Buffer); return Status; } static NTSTATUS Fat32WipeSectors( IN HANDLE FileHandle, IN PFAT32_BOOT_SECTOR BootSector, IN OUT PFORMAT_CONTEXT Context) { IO_STATUS_BLOCK IoStatusBlock; PUCHAR Buffer; LARGE_INTEGER FileOffset; ULONGLONG Sector; ULONG Length; NTSTATUS Status; /* Allocate buffer for the cluster */ Buffer = (PUCHAR)RtlAllocateHeap(RtlGetProcessHeap(), HEAP_ZERO_MEMORY, BootSector->SectorsPerCluster * BootSector->BytesPerSector); if (Buffer == NULL) return STATUS_INSUFFICIENT_RESOURCES; Sector = 0; Length = BootSector->SectorsPerCluster * BootSector->BytesPerSector; while (Sector + BootSector->SectorsPerCluster < BootSector->SectorsHuge) { FileOffset.QuadPart = Sector * BootSector->BytesPerSector; Status = NtWriteFile(FileHandle, NULL, NULL, NULL, &IoStatusBlock, Buffer, Length, &FileOffset, NULL); if (!NT_SUCCESS(Status)) { DPRINT("NtWriteFile() failed (Status %lx)\n", Status); goto done; } UpdateProgress(Context, (ULONG)BootSector->SectorsPerCluster); Sector += BootSector->SectorsPerCluster; } if (Sector + BootSector->SectorsPerCluster > BootSector->SectorsHuge) { DPRINT("Remaining sectors %lu\n", BootSector->SectorsHuge - Sector); FileOffset.QuadPart = Sector * BootSector->BytesPerSector; Length = (BootSector->SectorsHuge - Sector) * BootSector->BytesPerSector; Status = NtWriteFile(FileHandle, NULL, NULL, NULL, &IoStatusBlock, Buffer, Length, &FileOffset, NULL); if (!NT_SUCCESS(Status)) { DPRINT("NtWriteFile() failed (Status %lx)\n", Status); goto done; } UpdateProgress(Context, BootSector->SectorsHuge - Sector); } done: /* Free the buffer */ RtlFreeHeap(RtlGetProcessHeap(), 0, Buffer); return Status; } NTSTATUS Fat32Format(IN HANDLE FileHandle, IN PPARTITION_INFORMATION PartitionInfo, IN PDISK_GEOMETRY DiskGeometry, IN PUNICODE_STRING Label, IN BOOLEAN QuickFormat, IN ULONG ClusterSize, IN OUT PFORMAT_CONTEXT Context) { FAT32_BOOT_SECTOR BootSector; OEM_STRING VolumeLabel; ULONG TmpVal1; ULONG TmpVal2; NTSTATUS Status; /* Calculate cluster size */ if (ClusterSize == 0) { if (PartitionInfo->PartitionLength.QuadPart < 8LL * 1024LL * 1024LL * 1024LL) { /* Partition < 8GB ==> 4KB Cluster */ ClusterSize = 4096; } else if (PartitionInfo->PartitionLength.QuadPart < 16LL * 1024LL * 1024LL * 1024LL) { /* Partition 8GB - 16GB ==> 8KB Cluster */ ClusterSize = 8192; } else if (PartitionInfo->PartitionLength.QuadPart < 32LL * 1024LL * 1024LL * 1024LL) { /* Partition 16GB - 32GB ==> 16KB Cluster */ ClusterSize = 16384; } else { /* Partition >= 32GB ==> 32KB Cluster */ ClusterSize = 32768; } } memset(&BootSector, 0, sizeof(FAT32_BOOT_SECTOR)); memcpy(&BootSector.OEMName[0], "MSWIN4.1", 8); BootSector.BytesPerSector = DiskGeometry->BytesPerSector; BootSector.SectorsPerCluster = ClusterSize / BootSector.BytesPerSector; BootSector.ReservedSectors = 32; BootSector.FATCount = 2; BootSector.RootEntries = 0; BootSector.Sectors = 
0; BootSector.Media = 0xf8; BootSector.FATSectors = 0; BootSector.SectorsPerTrack = DiskGeometry->SectorsPerTrack; BootSector.Heads = DiskGeometry->TracksPerCylinder; BootSector.HiddenSectors = PartitionInfo->HiddenSectors; BootSector.SectorsHuge = PartitionInfo->PartitionLength.QuadPart >> GetShiftCount(BootSector.BytesPerSector); /* Use shifting to avoid 64-bit division */ BootSector.FATSectors32 = 0; /* Set later */ BootSector.ExtFlag = 0; /* Mirror all FATs */ BootSector.FSVersion = 0x0000; /* 0:0 */ BootSector.RootCluster = 2; BootSector.FSInfoSector = 1; BootSector.BootBackup = 6; BootSector.Drive = (DiskGeometry->MediaType == FixedMedia) ? 0x80 : 0x00; BootSector.ExtBootSignature = 0x29; BootSector.VolumeID = CalcVolumeSerialNumber (); if ((Label == NULL) || (Label->Buffer == NULL)) { memcpy(&BootSector.VolumeLabel[0], "NO NAME ", 11); } else { RtlUnicodeStringToOemString(&VolumeLabel, Label, TRUE); memset(&BootSector.VolumeLabel[0], ' ', 11); memcpy(&BootSector.VolumeLabel[0], VolumeLabel.Buffer, VolumeLabel.Length < 11 ? VolumeLabel.Length : 11); RtlFreeOemString(&VolumeLabel); } memcpy(&BootSector.SysType[0], "FAT32 ", 8); /* Calculate number of FAT sectors */ /* (BytesPerSector / 4) FAT entries (32bit) fit into one sector */ TmpVal1 = BootSector.SectorsHuge - BootSector.ReservedSectors; TmpVal2 = ((BootSector.BytesPerSector / 4) * BootSector.SectorsPerCluster) + BootSector.FATCount; BootSector.FATSectors32 = (TmpVal1 + (TmpVal2 - 1)) / TmpVal2; DPRINT("FATSectors32 = %lu\n", BootSector.FATSectors32); /* Init context data */ Context->TotalSectorCount = 2 + (BootSector.FATSectors32 * BootSector.FATCount) + BootSector.SectorsPerCluster; if (!QuickFormat) { Context->TotalSectorCount += BootSector.SectorsHuge; Status = Fat32WipeSectors(FileHandle, &BootSector, Context); if (!NT_SUCCESS(Status)) { DPRINT("Fat32WipeSectors() failed with status 0x%.08x\n", Status); return Status; } } Status = Fat32WriteBootSector(FileHandle, &BootSector, Context); if (!NT_SUCCESS(Status)) { DPRINT("Fat32WriteBootSector() failed with status 0x%.08x\n", Status); return Status; } Status = Fat32WriteFsInfo(FileHandle, &BootSector, Context); if (!NT_SUCCESS(Status)) { DPRINT("Fat32WriteFsInfo() failed with status 0x%.08x\n", Status); return Status; } /* Write first FAT copy */ Status = Fat32WriteFAT(FileHandle, 0, &BootSector, Context); if (!NT_SUCCESS(Status)) { DPRINT("Fat32WriteFAT() failed with status 0x%.08x\n", Status); return Status; } /* Write second FAT copy */ Status = Fat32WriteFAT(FileHandle, BootSector.FATSectors32, &BootSector, Context); if (!NT_SUCCESS(Status)) { DPRINT("Fat32WriteFAT() failed with status 0x%.08x.\n", Status); return Status; } Status = Fat32WriteRootDirectory(FileHandle, &BootSector, Context); if (!NT_SUCCESS(Status)) { DPRINT("Fat32WriteRootDirectory() failed with status 0x%.08x\n", Status); } return Status; } /* EOF */
gpl-2.0
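The FAT sizing in Fat32Format() above is worth spelling out: each FAT sector holds BytesPerSector/4 32-bit entries, each entry maps one cluster of SectorsPerCluster sectors, and the FATCount term keeps the rounded-up result conservative. A standalone restatement of that arithmetic, fed a made-up 1 GiB volume:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as Fat32Format(): TmpVal1 data sectors divided by
 * TmpVal2 sectors-covered-per-FAT-sector, rounded up. */
static uint32_t fat32_fat_sectors(uint32_t total_sectors, uint32_t reserved,
                                  uint32_t bytes_per_sector,
                                  uint32_t sectors_per_cluster,
                                  uint32_t fat_count)
{
        uint32_t data = total_sectors - reserved;
        uint32_t per = (bytes_per_sector / 4) * sectors_per_cluster + fat_count;

        return (data + per - 1) / per;
}

int main(void)
{
        /* Hypothetical 1 GiB volume: 512-byte sectors, 4 KiB clusters. */
        printf("FATSectors32 = %u\n",
               fat32_fat_sectors(2097152, 32, 512, 8, 2));
        return 0;
}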
Serranove/android_kernel_samsung_serranovelte
drivers/usb/gadget/u_ncm.c
4
6198
/*
 * File Name : u_ncm.c
 *
 * ncm utilities for composite USB gadgets.
 * This utility supports connecting to a head unit for MirrorLink.
 *
 * Copyright (C) 2011 Samsung Electronics
 * Author: SoonYong, Cho <soonyong.cho@samsung.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include "f_ncm.c"
#include <linux/module.h>

/* Support dynamic tethering mode.
 * If ncm_connect is true, the device has received a vendor-specific
 * request from the head unit.
 */
static bool ncm_connect;

/* terminal version using vendor specific request */
u16 terminal_mode_version;
u16 terminal_mode_vendor_id;

struct ncm_function_config {
        u8 ethaddr[ETH_ALEN];
        struct eth_dev *dev;
};

static int ncm_function_init(struct android_usb_function *f,
                             struct usb_composite_dev *cdev)
{
        f->config = kzalloc(sizeof(struct ncm_function_config), GFP_KERNEL);
        return 0;
}

static void ncm_function_cleanup(struct android_usb_function *f)
{
        kfree(f->config);
        f->config = NULL;
}

static int ncm_function_bind_config(struct android_usb_function *f,
                                    struct usb_configuration *c)
{
        int ret;
        int i;
        char *src;
        struct ncm_function_config *ncm = f->config;
#ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE
        struct eth_dev *e_dev;
#endif

        if (!ncm) {
                pr_err("%s: ncm_pdata\n", __func__);
                return -1;
        }

        ncm = f->config;
        if (!f->config)
                return -ENOMEM;

        for (i = 0; i < ETH_ALEN; i++)
                ncm->ethaddr[i] = 0;

        /* create a fake MAC address from our serial number.
         * first byte is 0x02 to signify locally administered.
         */
        ncm->ethaddr[0] = 0x02;
        src = serial_string;
        for (i = 0; (i < 256) && *src; i++) {
                /* XOR the USB serial across the remaining bytes */
                ncm->ethaddr[i % (ETH_ALEN - 1) + 1] ^= *src++;
        }

        printk(KERN_DEBUG "usb: %s before MAC:%02X:%02X:%02X:%02X:%02X:%02X\n",
               __func__, ncm->ethaddr[0], ncm->ethaddr[1], ncm->ethaddr[2],
               ncm->ethaddr[3], ncm->ethaddr[4], ncm->ethaddr[5]);

        e_dev = gether_setup_name(c->cdev->gadget, ncm->ethaddr, "ncm");
        if (IS_ERR(e_dev)) {
                ret = PTR_ERR(e_dev);
                pr_err("%s: gether_setup failed\n", __func__);
                return ret;
        }
        ncm->dev = e_dev;

        printk(KERN_DEBUG "usb: %s after MAC:%02X:%02X:%02X:%02X:%02X:%02X\n",
               __func__, ncm->ethaddr[0], ncm->ethaddr[1], ncm->ethaddr[2],
               ncm->ethaddr[3], ncm->ethaddr[4], ncm->ethaddr[5]);

        return ncm_bind_config(c, ncm->ethaddr, ncm->dev);
}

static void ncm_function_unbind_config(struct android_usb_function *f,
                                       struct usb_configuration *c)
{
        struct ncm_function_config *ncm = f->config;

        gether_cleanup(ncm->dev);
}

static struct android_usb_function ncm_function = {
        .name = "ncm",
        .init = ncm_function_init,
        .cleanup = ncm_function_cleanup,
        .bind_config = ncm_function_bind_config,
        .unbind_config = ncm_function_unbind_config,
};

bool is_ncm_ready(char *name)
{
        /* Enable ncm function */
        if (!strcmp(name, "rndis") || !strcmp(name, "ncm")) {
                if (ncm_connect) {
                        printk(KERN_DEBUG "usb: %s ncm ready (%s)\n",
                               __func__, name);
                        return true;
                }
        }
        return false;
}

void set_ncm_device_descriptor(struct usb_device_descriptor *desc)
{
        desc->idProduct = 0x685d;
        desc->bDeviceClass = USB_CLASS_COMM;
        printk(KERN_DEBUG "usb: %s idProduct=0x%x, DeviceClass=0x%x\n",
               __func__, desc->idProduct, desc->bDeviceClass);
}

void set_ncm_ready(bool ready)
{
        if (ready != ncm_connect)
                printk(KERN_DEBUG "usb: %s old status=%d, new status=%d\n",
                       __func__, ncm_connect, ready);
        ncm_connect = ready;

        if (ready == false) {
                terminal_mode_version = 0;
                terminal_mode_vendor_id = 0;
                /* Log for set_ncm_ready */
                pr_info("usb:: %s ready==false, terminal_mode_version = %d\n",
                        __func__, terminal_mode_version);
        }
}
EXPORT_SYMBOL(set_ncm_ready);

static ssize_t terminal_version_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        int ret;

        ret = sprintf(buf, "major %x minor %x vendor %x\n",
                      terminal_mode_version & 0xff,
                      (terminal_mode_version >> 8 & 0xff),
                      terminal_mode_vendor_id);
        if (terminal_mode_version)
                printk(KERN_DEBUG "usb: %s terminal_mode %s\n",
                       __func__, buf);
        return ret;
}

static ssize_t terminal_version_store(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf, size_t size)
{
        int value;

        sscanf(buf, "%x", &value);
        terminal_mode_version = (u16)value;
        printk(KERN_DEBUG "usb: %s buf=%s\n", __func__, buf);

        /* only set ncm ready when terminal version value is not zero */
        if (value)
                set_ncm_ready(true);
        return size;
}

static DEVICE_ATTR(terminal_version, S_IRUGO | S_IWUSR,
                   terminal_version_show, terminal_version_store);

static int create_terminal_attribute(struct device **pdev)
{
        int err;

        if (IS_ERR(*pdev)) {
                printk(KERN_DEBUG "usb: %s error pdev(%p)\n",
                       __func__, *pdev);
                return PTR_ERR(*pdev);
        }

        err = device_create_file(*pdev, &dev_attr_terminal_version);
        if (err) {
                printk(KERN_DEBUG "usb: %s failed to create attr\n",
                       __func__);
                return err;
        }
        return 0;
}

static int terminal_ctrl_request(struct usb_composite_dev *cdev,
                                 const struct usb_ctrlrequest *ctrl)
{
        int value = -EOPNOTSUPP;
        u16 w_index = le16_to_cpu(ctrl->wIndex);
        u16 w_value = le16_to_cpu(ctrl->wValue);

        if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
                /* Handle Terminal mode request */
                if (ctrl->bRequest == 0xf0) {
                        terminal_mode_version = w_value;
                        terminal_mode_vendor_id = w_index;
                        set_ncm_ready(true);
                        printk(KERN_DEBUG "usb: %s ver=0x%x vendor_id=0x%x\n",
                               __func__, terminal_mode_version,
                               terminal_mode_vendor_id);
                        value = 0;
                }
        }

        /* respond ZLP */
        if (value >= 0) {
                int rc;

                cdev->req->zero = 0;
                cdev->req->length = value;
                rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
                if (rc < 0)
                        printk(KERN_DEBUG "usb: %s failed usb_ep_queue\n",
                               __func__);
        }
        return value;
}
gpl-2.0
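The fake-MAC derivation in ncm_function_bind_config() above is compact enough to restate in user space: byte 0 is fixed at 0x02 (locally administered), and the USB serial string is XOR-folded over the remaining five bytes. The serial below is invented.

#include <stdio.h>

#define ETH_ALEN 6

/* Same folding as ncm_function_bind_config(): serial byte i lands in
 * mac[i % 5 + 1], XORed with whatever is already there. */
static void serial_to_mac(const char *serial, unsigned char mac[ETH_ALEN])
{
        int i;

        for (i = 0; i < ETH_ALEN; i++)
                mac[i] = 0;
        mac[0] = 0x02;
        for (i = 0; i < 256 && serial[i]; i++)
                mac[i % (ETH_ALEN - 1) + 1] ^= (unsigned char)serial[i];
}

int main(void)
{
        unsigned char mac[ETH_ALEN];
        int i;

        serial_to_mac("0123456789ABCDEF", mac);         /* made-up serial */
        for (i = 0; i < ETH_ALEN; i++)
                printf("%02X%s", mac[i], i < ETH_ALEN - 1 ? ":" : "\n");
        return 0;
}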
empeg/empeg-hijack
fs/hfs/file_cap.c
4
8053
/* * linux/fs/hfs/file_cap.c * * Copyright (C) 1995-1997 Paul H. Hargrove * This file may be distributed under the terms of the GNU Public License. * * This file contains the file_ops and inode_ops for the metadata * files under the CAP representation. * * The source code distribution of the Columbia AppleTalk Package for * UNIX, version 6.0, (CAP) was used as a specification of the * location and format of files used by CAP's Aufs. No code from CAP * appears in hfs_fs. hfs_fs is not a work ``derived'' from CAP in * the sense of intellectual property law. * * "XXX" in a comment is a note to myself to consider changing something. * * In function preconditions the term "valid" applied to a pointer to * a structure means that the pointer is non-NULL and the structure it * points to has all fields initialized to consistent values. */ #include "hfs.h" #include <linux/hfs_fs_sb.h> #include <linux/hfs_fs_i.h> #include <linux/hfs_fs.h> /*================ Forward declarations ================*/ static hfs_rwret_t cap_info_read(struct file *, char *, hfs_rwarg_t, loff_t *); static hfs_rwret_t cap_info_write(struct file *, const char *, hfs_rwarg_t, loff_t *); static void cap_info_truncate(struct inode *); /*================ Function-like macros ================*/ /* * OVERLAPS() * * Determines if a given range overlaps the specified structure member */ #define OVERLAPS(START, END, TYPE, MEMB) \ ((END > offsetof(TYPE, MEMB)) && \ (START < offsetof(TYPE, MEMB) + sizeof(((TYPE *)0)->MEMB))) /*================ Global variables ================*/ static struct file_operations hfs_cap_info_operations = { NULL, /* lseek - default */ cap_info_read, /* read */ cap_info_write, /* write */ NULL, /* readdir - bad */ NULL, /* select - default */ NULL, /* ioctl - default */ NULL, /* mmap - not yet */ NULL, /* no special open code */ NULL, /* flush */ NULL, /* no special release code */ file_fsync, /* fsync - default */ NULL, /* fasync - default */ NULL, /* check_media_change - none */ NULL, /* revalidate - none */ NULL /* lock - none */ }; struct inode_operations hfs_cap_info_inode_operations = { &hfs_cap_info_operations, /* default file operations */ NULL, /* create */ NULL, /* lookup */ NULL, /* link */ NULL, /* unlink */ NULL, /* symlink */ NULL, /* mkdir */ NULL, /* rmdir */ NULL, /* mknod */ NULL, /* rename */ NULL, /* readlink */ NULL, /* follow_link */ NULL, /* readpage */ NULL, /* writepage */ NULL, /* bmap - none */ cap_info_truncate, /* truncate */ NULL, /* permission */ NULL, /* smap */ NULL, /* updatepage */ NULL /* revalidata */ }; /*================ File-local functions ================*/ /* * cap_build_meta() * * Build the metadata structure. */ static void cap_build_meta(struct hfs_cap_info *meta, struct hfs_cat_entry *entry) { memset(meta, 0, sizeof(*meta)); memcpy(meta->fi_fndr, &entry->info, 32); if ((entry->type == HFS_CDR_FIL) && (entry->u.file.flags & HFS_FIL_LOCK)) { /* Couple the locked bit of the file to the AFP {write,rename,delete} inhibit bits. 
*/ hfs_put_hs(HFS_AFP_RDONLY, meta->fi_attr); } meta->fi_magic1 = HFS_CAP_MAGIC1; meta->fi_version = HFS_CAP_VERSION; meta->fi_magic = HFS_CAP_MAGIC; meta->fi_bitmap = HFS_CAP_LONGNAME; memcpy(meta->fi_macfilename, entry->key.CName.Name, entry->key.CName.Len); meta->fi_datemagic = HFS_CAP_DMAGIC; meta->fi_datevalid = HFS_CAP_MDATE | HFS_CAP_CDATE; hfs_put_nl(hfs_m_to_htime(entry->create_date), meta->fi_ctime); hfs_put_nl(hfs_m_to_htime(entry->modify_date), meta->fi_mtime); hfs_put_nl(CURRENT_TIME, meta->fi_utime); } /* * cap_info_read() * * This is the read() entry in the file_operations structure for CAP * metadata files. The purpose is to transfer up to 'count' bytes * from the file corresponding to 'inode' beginning at offset * 'file->f_pos' to user-space at the address 'buf'. The return value * is the number of bytes actually transferred. */ static hfs_rwret_t cap_info_read(struct file *filp, char *buf, hfs_rwarg_t count, loff_t *ppos) { struct inode *inode = filp->f_dentry->d_inode; struct hfs_cat_entry *entry = HFS_I(inode)->entry; hfs_s32 left, size, read = 0; hfs_u32 pos; if (!S_ISREG(inode->i_mode)) { hfs_warn("hfs_cap_info_read: mode = %07o\n", inode->i_mode); return -EINVAL; } pos = *ppos; if (pos > HFS_FORK_MAX) { return 0; } size = inode->i_size; if (pos > size) { left = 0; } else { left = size - pos; } if (left > count) { left = count; } if (left <= 0) { return 0; } if (pos < sizeof(struct hfs_cap_info)) { int memcount = sizeof(struct hfs_cap_info) - pos; struct hfs_cap_info meta; if (memcount > left) { memcount = left; } cap_build_meta(&meta, entry); memcount -= copy_to_user(buf, ((char *)&meta) + pos, memcount); left -= memcount; read += memcount; pos += memcount; buf += memcount; } if (left > 0) { clear_user(buf, left); pos += left; } if (read) { inode->i_atime = CURRENT_TIME; *ppos = pos; mark_inode_dirty(inode); } return read; } /* * cap_info_write() * * This is the write() entry in the file_operations structure for CAP * metadata files. The purpose is to transfer up to 'count' bytes * to the file corresponding to 'inode' beginning at offset * '*ppos' from user-space at the address 'buf'. * The return value is the number of bytes actually transferred. */ static hfs_rwret_t cap_info_write(struct file *filp, const char *buf, hfs_rwarg_t count, loff_t *ppos) { struct inode *inode = filp->f_dentry->d_inode; hfs_u32 pos; if (!S_ISREG(inode->i_mode)) { hfs_warn("hfs_file_write: mode = %07o\n", inode->i_mode); return -EINVAL; } if (count <= 0) { return 0; } pos = (filp->f_flags & O_APPEND) ? 
inode->i_size : *ppos; if (pos > HFS_FORK_MAX) { return 0; } *ppos += count; if (*ppos > HFS_FORK_MAX) { *ppos = HFS_FORK_MAX; count = HFS_FORK_MAX - pos; } if (*ppos > inode->i_size) inode->i_size = *ppos; /* Only deal with the part we store in memory */ if (pos < sizeof(struct hfs_cap_info)) { int end, mem_count; struct hfs_cat_entry *entry = HFS_I(inode)->entry; struct hfs_cap_info meta; mem_count = sizeof(struct hfs_cap_info) - pos; if (mem_count > count) { mem_count = count; } end = pos + mem_count; cap_build_meta(&meta, entry); mem_count -= copy_from_user(((char *)&meta) + pos, buf, mem_count); /* Update finder attributes if changed */ if (OVERLAPS(pos, end, struct hfs_cap_info, fi_fndr)) { memcpy(&entry->info, meta.fi_fndr, 32); hfs_cat_mark_dirty(entry); } /* Update file flags if changed */ if (OVERLAPS(pos, end, struct hfs_cap_info, fi_attr) && (entry->type == HFS_CDR_FIL)) { int locked = hfs_get_ns(&meta.fi_attr) & htons(HFS_AFP_WRI); hfs_u8 new_flags; if (locked) { new_flags = entry->u.file.flags | HFS_FIL_LOCK; } else { new_flags = entry->u.file.flags & ~HFS_FIL_LOCK; } if (new_flags != entry->u.file.flags) { entry->u.file.flags = new_flags; hfs_cat_mark_dirty(entry); hfs_file_fix_mode(entry); } } /* Update CrDat if changed */ if (OVERLAPS(pos, end, struct hfs_cap_info, fi_ctime)) { entry->create_date = hfs_h_to_mtime(hfs_get_nl(meta.fi_ctime)); hfs_cat_mark_dirty(entry); } /* Update MdDat if changed */ if (OVERLAPS(pos, end, struct hfs_cap_info, fi_mtime)) { entry->modify_date = hfs_h_to_mtime(hfs_get_nl(meta.fi_mtime)); hfs_cat_mark_dirty(entry); } } inode->i_mtime = inode->i_ctime = CURRENT_TIME; mark_inode_dirty(inode); return count; } /* * cap_info_truncate() * * This is the truncate field in the inode_operations structure for * CAP metadata files. */ static void cap_info_truncate(struct inode *inode) { if (inode->i_size > HFS_FORK_MAX) { inode->i_size = HFS_FORK_MAX; mark_inode_dirty(inode); } }
gpl-2.0
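The OVERLAPS() macro above is the whole trick behind cap_info_write()'s selective updates: a write covering bytes [START, END) is applied to a member only if it intersects that member's [offset, offset + size) range. A self-contained check with a toy struct:

#include <stddef.h>
#include <stdio.h>

/* Copied shape of file_cap.c's range-vs-member test. */
#define OVERLAPS(START, END, TYPE, MEMB) \
        ((END > offsetof(TYPE, MEMB)) && \
         (START < offsetof(TYPE, MEMB) + sizeof(((TYPE *)0)->MEMB)))

struct demo {
        char a[8];      /* bytes [0, 8)  */
        char b[4];      /* bytes [8, 12) */
};

int main(void)
{
        /* A write covering bytes [6, 10) clips both members... */
        printf("touches a: %d\n", (int)OVERLAPS(6, 10, struct demo, a));
        printf("touches b: %d\n", (int)OVERLAPS(6, 10, struct demo, b));
        /* ...while a write of [0, 6) stays inside a. */
        printf("touches b: %d\n", (int)OVERLAPS(0, 6, struct demo, b));
        return 0;
}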
kipill-nn/Kernel-for-Mega
drivers/media/video/msm/mt9p012_reg.c
4
10466
/* * Copyright (C) 2009 QUALCOMM Incorporated. */ #include "mt9p012.h" /*Micron settings from Applications for lower power consumption.*/ struct reg_struct mt9p012_reg_pat[2] = { { /* Preview */ /* vt_pix_clk_div REG=0x0300 */ 6, /* 5 */ /* vt_sys_clk_div REG=0x0302 */ 1, /* pre_pll_clk_div REG=0x0304 */ 2, /* pll_multiplier REG=0x0306 */ 60, /* op_pix_clk_div REG=0x0308 */ 8, /* 10 */ /* op_sys_clk_div REG=0x030A */ 1, /* scale_m REG=0x0404 */ 16, /* row_speed REG=0x3016 */ 0x0111, /* x_addr_start REG=0x3004 */ 8, /* x_addr_end REG=0x3008 */ 2597, /* y_addr_start REG=0x3002 */ 8, /* y_addr_end REG=0x3006 */ 1949, /* read_mode REG=0x3040 * Preview 2x2 skipping */ 0x00C3, /* x_output_size REG=0x034C */ 1296, /* y_output_size REG=0x034E */ 972, /* line_length_pck REG=0x300C */ 3784, /* frame_length_lines REG=0x300A */ 1057, /* coarse_integration_time REG=0x3012 */ 16, /* fine_integration_time REG=0x3014 */ 1764 }, { /*Snapshot*/ /* vt_pix_clk_div REG=0x0300 */ 6, /* vt_sys_clk_div REG=0x0302 */ 1, /* pre_pll_clk_div REG=0x0304 */ 2, /* pll_multiplier REG=0x0306 * 60 for 10fps snapshot */ 60, /* op_pix_clk_div REG=0x0308 */ 8, /* op_sys_clk_div REG=0x030A */ 1, /* scale_m REG=0x0404 */ 16, /* row_speed REG=0x3016 */ 0x0111, /* x_addr_start REG=0x3004 */ 8, /* x_addr_end REG=0x3008 */ 2615, /* y_addr_start REG=0x3002 */ 8, /* y_addr_end REG=0x3006 */ 1967, /* read_mode REG=0x3040 */ 0x0041, /* x_output_size REG=0x034C */ 2608, /* y_output_size REG=0x034E */ 1960, /* line_length_pck REG=0x300C */ 3911, /* frame_length_lines REG=0x300A //10 fps snapshot */ 2045, /* coarse_integration_time REG=0x3012 */ 16, /* fine_integration_time REG=0x3014 */ 882 } }; struct mt9p012_i2c_reg_conf mt9p012_test_tbl[] = { {0x3044, 0x0544 & 0xFBFF}, {0x30CA, 0x0004 | 0x0001}, {0x30D4, 0x9020 & 0x7FFF}, {0x31E0, 0x0003 & 0xFFFE}, {0x3180, 0x91FF & 0x7FFF}, {0x301A, (0x10CC | 0x8000) & 0xFFF7}, {0x301E, 0x0000}, {0x3780, 0x0000}, }; struct mt9p012_i2c_reg_conf mt9p012_lc_tbl[] = { /* [Lens shading 85 Percent TL84] */ /* P_RD_P0Q0 */ {0x360A, 0x7FEF}, /* P_RD_P0Q1 */ {0x360C, 0x232C}, /* P_RD_P0Q2 */ {0x360E, 0x7050}, /* P_RD_P0Q3 */ {0x3610, 0xF3CC}, /* P_RD_P0Q4 */ {0x3612, 0x89D1}, /* P_RD_P1Q0 */ {0x364A, 0xBE0D}, /* P_RD_P1Q1 */ {0x364C, 0x9ACB}, /* P_RD_P1Q2 */ {0x364E, 0x2150}, /* P_RD_P1Q3 */ {0x3650, 0xB26B}, /* P_RD_P1Q4 */ {0x3652, 0x9511}, /* P_RD_P2Q0 */ {0x368A, 0x2151}, /* P_RD_P2Q1 */ {0x368C, 0x00AD}, /* P_RD_P2Q2 */ {0x368E, 0x8334}, /* P_RD_P2Q3 */ {0x3690, 0x478E}, /* P_RD_P2Q4 */ {0x3692, 0x0515}, /* P_RD_P3Q0 */ {0x36CA, 0x0710}, /* P_RD_P3Q1 */ {0x36CC, 0x452D}, /* P_RD_P3Q2 */ {0x36CE, 0xF352}, /* P_RD_P3Q3 */ {0x36D0, 0x190F}, /* P_RD_P3Q4 */ {0x36D2, 0x4413}, /* P_RD_P4Q0 */ {0x370A, 0xD112}, /* P_RD_P4Q1 */ {0x370C, 0xF50F}, /* P_RD_P4Q2 */ {0x370C, 0xF50F}, /* P_RD_P4Q3 */ {0x3710, 0xDC11}, /* P_RD_P4Q4 */ {0x3712, 0xD776}, /* P_GR_P0Q0 */ {0x3600, 0x1750}, /* P_GR_P0Q1 */ {0x3602, 0xF0AC}, /* P_GR_P0Q2 */ {0x3604, 0x4711}, /* P_GR_P0Q3 */ {0x3606, 0x07CE}, /* P_GR_P0Q4 */ {0x3608, 0x96B2}, /* P_GR_P1Q0 */ {0x3640, 0xA9AE}, /* P_GR_P1Q1 */ {0x3642, 0xF9AC}, /* P_GR_P1Q2 */ {0x3644, 0x39F1}, /* P_GR_P1Q3 */ {0x3646, 0x016F}, /* P_GR_P1Q4 */ {0x3648, 0x8AB2}, /* P_GR_P2Q0 */ {0x3680, 0x1752}, /* P_GR_P2Q1 */ {0x3682, 0x70F0}, /* P_GR_P2Q2 */ {0x3684, 0x83F5}, /* P_GR_P2Q3 */ {0x3686, 0x8392}, /* P_GR_P2Q4 */ {0x3688, 0x1FD6}, /* P_GR_P3Q0 */ {0x36C0, 0x1131}, /* P_GR_P3Q1 */ {0x36C2, 0x3DAF}, /* P_GR_P3Q2 */ {0x36C4, 0x89B4}, /* P_GR_P3Q3 */ {0x36C6, 0xA391}, /* P_GR_P3Q4 */ {0x36C8, 0x1334}, 
/* P_GR_P4Q0 */ {0x3700, 0xDC13}, /* P_GR_P4Q1 */ {0x3702, 0xD052}, /* P_GR_P4Q2 */ {0x3704, 0x5156}, /* P_GR_P4Q3 */ {0x3706, 0x1F13}, /* P_GR_P4Q4 */ {0x3708, 0x8C38}, /* P_BL_P0Q0 */ {0x3614, 0x0050}, /* P_BL_P0Q1 */ {0x3616, 0xBD4C}, /* P_BL_P0Q2 */ {0x3618, 0x41B0}, /* P_BL_P0Q3 */ {0x361A, 0x660D}, /* P_BL_P0Q4 */ {0x361C, 0xC590}, /* P_BL_P1Q0 */ {0x3654, 0x87EC}, /* P_BL_P1Q1 */ {0x3656, 0xE44C}, /* P_BL_P1Q2 */ {0x3658, 0x302E}, /* P_BL_P1Q3 */ {0x365A, 0x106E}, /* P_BL_P1Q4 */ {0x365C, 0xB58E}, /* P_BL_P2Q0 */ {0x3694, 0x0DD1}, /* P_BL_P2Q1 */ {0x3696, 0x2A50}, /* P_BL_P2Q2 */ {0x3698, 0xC793}, /* P_BL_P2Q3 */ {0x369A, 0xE8F1}, /* P_BL_P2Q4 */ {0x369C, 0x4174}, /* P_BL_P3Q0 */ {0x36D4, 0x01EF}, /* P_BL_P3Q1 */ {0x36D6, 0x06CF}, /* P_BL_P3Q2 */ {0x36D8, 0x8D91}, /* P_BL_P3Q3 */ {0x36DA, 0x91F0}, /* P_BL_P3Q4 */ {0x36DC, 0x52EF}, /* P_BL_P4Q0 */ {0x3714, 0xA6D2}, /* P_BL_P4Q1 */ {0x3716, 0xA312}, /* P_BL_P4Q2 */ {0x3718, 0x2695}, /* P_BL_P4Q3 */ {0x371A, 0x3953}, /* P_BL_P4Q4 */ {0x371C, 0x9356}, /* P_GB_P0Q0 */ {0x361E, 0x7EAF}, /* P_GB_P0Q1 */ {0x3620, 0x2A4C}, /* P_GB_P0Q2 */ {0x3622, 0x49F0}, {0x3624, 0xF1EC}, /* P_GB_P0Q4 */ {0x3626, 0xC670}, /* P_GB_P1Q0 */ {0x365E, 0x8E0C}, /* P_GB_P1Q1 */ {0x3660, 0xC2A9}, /* P_GB_P1Q2 */ {0x3662, 0x274F}, /* P_GB_P1Q3 */ {0x3664, 0xADAB}, /* P_GB_P1Q4 */ {0x3666, 0x8EF0}, /* P_GB_P2Q0 */ {0x369E, 0x09B1}, /* P_GB_P2Q1 */ {0x36A0, 0xAA2E}, /* P_GB_P2Q2 */ {0x36A2, 0xC3D3}, /* P_GB_P2Q3 */ {0x36A4, 0x7FAF}, /* P_GB_P2Q4 */ {0x36A6, 0x3F34}, /* P_GB_P3Q0 */ {0x36DE, 0x4C8F}, /* P_GB_P3Q1 */ {0x36E0, 0x886E}, /* P_GB_P3Q2 */ {0x36E2, 0xE831}, /* P_GB_P3Q3 */ {0x36E4, 0x1FD0}, /* P_GB_P3Q4 */ {0x36E6, 0x1192}, /* P_GB_P4Q0 */ {0x371E, 0xB952}, /* P_GB_P4Q1 */ {0x3720, 0x6DCF}, /* P_GB_P4Q2 */ {0x3722, 0x1B55}, /* P_GB_P4Q3 */ {0x3724, 0xA112}, /* P_GB_P4Q4 */ {0x3726, 0x82F6}, /* POLY_ORIGIN_C */ {0x3782, 0x0510}, /* POLY_ORIGIN_R */ {0x3784, 0x0390}, /* POLY_SC_ENABLE */ {0x3780, 0x8000}, }; /* rolloff table for illuminant A */ struct mt9p012_i2c_reg_conf mt9p012_rolloff_tbl[] = { /* P_RD_P0Q0 */ {0x360A, 0x7FEF}, /* P_RD_P0Q1 */ {0x360C, 0x232C}, /* P_RD_P0Q2 */ {0x360E, 0x7050}, /* P_RD_P0Q3 */ {0x3610, 0xF3CC}, /* P_RD_P0Q4 */ {0x3612, 0x89D1}, /* P_RD_P1Q0 */ {0x364A, 0xBE0D}, /* P_RD_P1Q1 */ {0x364C, 0x9ACB}, /* P_RD_P1Q2 */ {0x364E, 0x2150}, /* P_RD_P1Q3 */ {0x3650, 0xB26B}, /* P_RD_P1Q4 */ {0x3652, 0x9511}, /* P_RD_P2Q0 */ {0x368A, 0x2151}, /* P_RD_P2Q1 */ {0x368C, 0x00AD}, /* P_RD_P2Q2 */ {0x368E, 0x8334}, /* P_RD_P2Q3 */ {0x3690, 0x478E}, /* P_RD_P2Q4 */ {0x3692, 0x0515}, /* P_RD_P3Q0 */ {0x36CA, 0x0710}, /* P_RD_P3Q1 */ {0x36CC, 0x452D}, /* P_RD_P3Q2 */ {0x36CE, 0xF352}, /* P_RD_P3Q3 */ {0x36D0, 0x190F}, /* P_RD_P3Q4 */ {0x36D2, 0x4413}, /* P_RD_P4Q0 */ {0x370A, 0xD112}, /* P_RD_P4Q1 */ {0x370C, 0xF50F}, /* P_RD_P4Q2 */ {0x370E, 0x6375}, /* P_RD_P4Q3 */ {0x3710, 0xDC11}, /* P_RD_P4Q4 */ {0x3712, 0xD776}, /* P_GR_P0Q0 */ {0x3600, 0x1750}, /* P_GR_P0Q1 */ {0x3602, 0xF0AC}, /* P_GR_P0Q2 */ {0x3604, 0x4711}, /* P_GR_P0Q3 */ {0x3606, 0x07CE}, /* P_GR_P0Q4 */ {0x3608, 0x96B2}, /* P_GR_P1Q0 */ {0x3640, 0xA9AE}, /* P_GR_P1Q1 */ {0x3642, 0xF9AC}, /* P_GR_P1Q2 */ {0x3644, 0x39F1}, /* P_GR_P1Q3 */ {0x3646, 0x016F}, /* P_GR_P1Q4 */ {0x3648, 0x8AB2}, /* P_GR_P2Q0 */ {0x3680, 0x1752}, /* P_GR_P2Q1 */ {0x3682, 0x70F0}, /* P_GR_P2Q2 */ {0x3684, 0x83F5}, /* P_GR_P2Q3 */ {0x3686, 0x8392}, /* P_GR_P2Q4 */ {0x3688, 0x1FD6}, /* P_GR_P3Q0 */ {0x36C0, 0x1131}, /* P_GR_P3Q1 */ {0x36C2, 0x3DAF}, /* P_GR_P3Q2 */ {0x36C4, 0x89B4}, /* P_GR_P3Q3 */ {0x36C6, 
0xA391}, /* P_GR_P3Q4 */ {0x36C8, 0x1334}, /* P_GR_P4Q0 */ {0x3700, 0xDC13}, /* P_GR_P4Q1 */ {0x3702, 0xD052}, /* P_GR_P4Q2 */ {0x3704, 0x5156}, /* P_GR_P4Q3 */ {0x3706, 0x1F13}, /* P_GR_P4Q4 */ {0x3708, 0x8C38}, /* P_BL_P0Q0 */ {0x3614, 0x0050}, /* P_BL_P0Q1 */ {0x3616, 0xBD4C}, /* P_BL_P0Q2 */ {0x3618, 0x41B0}, /* P_BL_P0Q3 */ {0x361A, 0x660D}, /* P_BL_P0Q4 */ {0x361C, 0xC590}, /* P_BL_P1Q0 */ {0x3654, 0x87EC}, /* P_BL_P1Q1 */ {0x3656, 0xE44C}, /* P_BL_P1Q2 */ {0x3658, 0x302E}, /* P_BL_P1Q3 */ {0x365A, 0x106E}, /* P_BL_P1Q4 */ {0x365C, 0xB58E}, /* P_BL_P2Q0 */ {0x3694, 0x0DD1}, /* P_BL_P2Q1 */ {0x3696, 0x2A50}, /* P_BL_P2Q2 */ {0x3698, 0xC793}, /* P_BL_P2Q3 */ {0x369A, 0xE8F1}, /* P_BL_P2Q4 */ {0x369C, 0x4174}, /* P_BL_P3Q0 */ {0x36D4, 0x01EF}, /* P_BL_P3Q1 */ {0x36D6, 0x06CF}, /* P_BL_P3Q2 */ {0x36D8, 0x8D91}, /* P_BL_P3Q3 */ {0x36DA, 0x91F0}, /* P_BL_P3Q4 */ {0x36DC, 0x52EF}, /* P_BL_P4Q0 */ {0x3714, 0xA6D2}, /* P_BL_P4Q1 */ {0x3716, 0xA312}, /* P_BL_P4Q2 */ {0x3718, 0x2695}, /* P_BL_P4Q3 */ {0x371A, 0x3953}, /* P_BL_P4Q4 */ {0x371C, 0x9356}, /* P_GB_P0Q0 */ {0x361E, 0x7EAF}, /* P_GB_P0Q1 */ {0x3620, 0x2A4C}, /* P_GB_P0Q2 */ {0x3622, 0x49F0}, {0x3624, 0xF1EC}, /* P_GB_P0Q4 */ {0x3626, 0xC670}, /* P_GB_P1Q0 */ {0x365E, 0x8E0C}, /* P_GB_P1Q1 */ {0x3660, 0xC2A9}, /* P_GB_P1Q2 */ {0x3662, 0x274F}, /* P_GB_P1Q3 */ {0x3664, 0xADAB}, /* P_GB_P1Q4 */ {0x3666, 0x8EF0}, /* P_GB_P2Q0 */ {0x369E, 0x09B1}, /* P_GB_P2Q1 */ {0x36A0, 0xAA2E}, /* P_GB_P2Q2 */ {0x36A2, 0xC3D3}, /* P_GB_P2Q3 */ {0x36A4, 0x7FAF}, /* P_GB_P2Q4 */ {0x36A6, 0x3F34}, /* P_GB_P3Q0 */ {0x36DE, 0x4C8F}, /* P_GB_P3Q1 */ {0x36E0, 0x886E}, /* P_GB_P3Q2 */ {0x36E2, 0xE831}, /* P_GB_P3Q3 */ {0x36E4, 0x1FD0}, /* P_GB_P3Q4 */ {0x36E6, 0x1192}, /* P_GB_P4Q0 */ {0x371E, 0xB952}, /* P_GB_P4Q1 */ {0x3720, 0x6DCF}, /* P_GB_P4Q2 */ {0x3722, 0x1B55}, /* P_GB_P4Q3 */ {0x3724, 0xA112}, /* P_GB_P4Q4 */ {0x3726, 0x82F6}, /* POLY_ORIGIN_C */ {0x3782, 0x0510}, /* POLY_ORIGIN_R */ {0x3784, 0x0390}, /* POLY_SC_ENABLE */ {0x3780, 0x8000}, }; struct mt9p012_reg_t mt9p012_regs = { .reg_pat = &mt9p012_reg_pat[0], .reg_pat_size = ARRAY_SIZE(mt9p012_reg_pat), .ttbl = &mt9p012_test_tbl[0], .ttbl_size = ARRAY_SIZE(mt9p012_test_tbl), .lctbl = &mt9p012_lc_tbl[0], .lctbl_size = ARRAY_SIZE(mt9p012_lc_tbl), .rftbl = &mt9p012_rolloff_tbl[0], .rftbl_size = ARRAY_SIZE(mt9p012_rolloff_tbl) };
gpl-2.0
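Tables like mt9p012_lc_tbl above are consumed by walking the {address, value} pairs and issuing one 16-bit register write per entry. The sketch below uses a placeholder write_reg() rather than the driver's real I2C helper, whose name this file does not show; the struct simply mirrors mt9p012_i2c_reg_conf's pair layout.

#include <stdint.h>
#include <stdio.h>

/* Placeholder pair type mirroring struct mt9p012_i2c_reg_conf. */
struct reg_conf {
        uint16_t waddr;
        uint16_t wdata;
};

/* Stand-in for the sensor's I2C write routine. */
static int write_reg(uint16_t addr, uint16_t data)
{
        printf("i2c: reg 0x%04X <- 0x%04X\n", addr, data);
        return 0;
}

static int write_table(const struct reg_conf *tbl, int size)
{
        int i;

        for (i = 0; i < size; i++)
                if (write_reg(tbl[i].waddr, tbl[i].wdata) < 0)
                        return -1;
        return 0;
}

int main(void)
{
        /* Two entries lifted from the lens-shading table above. */
        static const struct reg_conf demo[] = {
                { 0x360A, 0x7FEF },     /* P_RD_P0Q0 */
                { 0x3780, 0x8000 },     /* POLY_SC_ENABLE */
        };

        return write_table(demo, 2);
}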
nimon/m8_kernel
net/core/sockev_nlmcast.c
772
3524
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 *
 * Default SOCKEV client implementation
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/netlink.h>
#include <linux/sockev.h>
#include <net/sock.h>

static int registration_status;
static struct sock *socknlmsgsk;

static void sockev_skmsg_recv(struct sk_buff *skb)
{
        pr_debug("%s(): Got unsolicited request\n", __func__);
}

static void _sockev_event(unsigned long event, __u8 *evstr, int buflen)
{
        switch (event) {
        case SOCKEV_SOCKET:
                strlcpy(evstr, "SOCKEV_SOCKET", buflen);
                break;
        case SOCKEV_BIND:
                strlcpy(evstr, "SOCKEV_BIND", buflen);
                break;
        case SOCKEV_LISTEN:
                strlcpy(evstr, "SOCKEV_LISTEN", buflen);
                break;
        case SOCKEV_ACCEPT:
                strlcpy(evstr, "SOCKEV_ACCEPT", buflen);
                break;
        case SOCKEV_CONNECT:
                strlcpy(evstr, "SOCKEV_CONNECT", buflen);
                break;
        case SOCKEV_SHUTDOWN:
                strlcpy(evstr, "SOCKEV_SHUTDOWN", buflen);
                break;
        default:
                strlcpy(evstr, "UNKNOWN", buflen);
        }
}

static int sockev_client_cb(struct notifier_block *nb,
                            unsigned long event, void *data)
{
        struct sk_buff *skb;
        struct nlmsghdr *nlh;
        struct sknlsockevmsg *smsg;
        struct socket *sock;

        sock = (struct socket *)data;
        if (socknlmsgsk == 0)
                goto done;
        if ((socknlmsgsk == NULL) || (sock == NULL) || (sock->sk == NULL))
                goto done;

        skb = nlmsg_new(sizeof(struct sknlsockevmsg), GFP_KERNEL);
        if (skb == NULL)
                goto done;

        nlh = nlmsg_put(skb, 0, 0, event, sizeof(struct sknlsockevmsg), 0);
        if (nlh == NULL) {
                kfree_skb(skb);
                goto done;
        }

        NETLINK_CB(skb).dst_group = SKNLGRP_SOCKEV;

        smsg = nlmsg_data(nlh);
        smsg->pid = current->pid;
        _sockev_event(event, smsg->event, sizeof(smsg->event));
        smsg->skfamily = sock->sk->sk_family;
        smsg->skstate = sock->sk->sk_state;
        smsg->skprotocol = sock->sk->sk_protocol;
        smsg->sktype = sock->sk->sk_type;
        smsg->skflags = sock->sk->sk_flags;

        nlmsg_notify(socknlmsgsk, skb, 0, SKNLGRP_SOCKEV, 0, GFP_KERNEL);
done:
        return 0;
}

static struct notifier_block sockev_notifier_client = {
        .notifier_call = sockev_client_cb,
        .next = 0,
        .priority = 0
};

/* ***************** Startup/Shutdown *************************************** */

static int __init sockev_client_init(void)
{
        int rc;

        registration_status = 1;
        rc = sockev_register_notify(&sockev_notifier_client);
        if (rc != 0) {
                registration_status = 0;
                pr_err("%s(): Failed to register cb (%d)\n", __func__, rc);
        }

        socknlmsgsk = netlink_kernel_create(&init_net, NETLINK_SOCKEV, 0,
                                            sockev_skmsg_recv, NULL,
                                            THIS_MODULE);
        if (!socknlmsgsk) {
                pr_err("%s(): Failed to initialize netlink socket\n",
                       __func__);
                if (registration_status)
                        sockev_unregister_notify(&sockev_notifier_client);
                registration_status = 0;
        }

        return rc;
}

static void __exit sockev_client_exit(void)
{
        if (registration_status)
                sockev_unregister_notify(&sockev_notifier_client);
}

module_init(sockev_client_init)
module_exit(sockev_client_exit)
MODULE_LICENSE("GPL v2");
gpl-2.0
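On the user-space side, the multicasts emitted by sockev_client_cb() above would be consumed through an AF_NETLINK socket bound to the SOCKEV group. The protocol and group numbers below are assumptions standing in for this tree's <linux/sockev.h> definitions; only the socket/bind/recv pattern is the point.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>

/* Assumed stand-ins for the real values in <linux/sockev.h>. */
#define NETLINK_SOCKEV  23
#define SKNLGRP_SOCKEV  1

int main(void)
{
        struct sockaddr_nl addr;
        char buf[4096];
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCKEV);

        if (fd < 0)
                return 1;

        memset(&addr, 0, sizeof(addr));
        addr.nl_family = AF_NETLINK;
        addr.nl_groups = 1 << (SKNLGRP_SOCKEV - 1);     /* group -> bitmask */
        if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                return 1;

        for (;;) {
                ssize_t n = recv(fd, buf, sizeof(buf), 0);
                if (n > 0)
                        printf("sockev notification, %zd bytes\n", n);
        }
}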
CMRemix/trltetmo-dok2
fs/f2fs/namei.c
1540
12004
/* * fs/f2fs/namei.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/fs.h> #include <linux/f2fs_fs.h> #include <linux/pagemap.h> #include <linux/sched.h> #include <linux/ctype.h> #include "f2fs.h" #include "node.h" #include "xattr.h" #include "acl.h" #include <trace/events/f2fs.h> static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode) { struct super_block *sb = dir->i_sb; struct f2fs_sb_info *sbi = F2FS_SB(sb); nid_t ino; struct inode *inode; bool nid_free = false; int err, ilock; inode = new_inode(sb); if (!inode) return ERR_PTR(-ENOMEM); ilock = mutex_lock_op(sbi); if (!alloc_nid(sbi, &ino)) { mutex_unlock_op(sbi, ilock); err = -ENOSPC; goto fail; } mutex_unlock_op(sbi, ilock); inode->i_uid = current_fsuid(); if (dir->i_mode & S_ISGID) { inode->i_gid = dir->i_gid; if (S_ISDIR(mode)) mode |= S_ISGID; } else { inode->i_gid = current_fsgid(); } inode->i_ino = ino; inode->i_mode = mode; inode->i_blocks = 0; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; inode->i_generation = sbi->s_next_generation++; err = insert_inode_locked(inode); if (err) { err = -EINVAL; nid_free = true; goto out; } trace_f2fs_new_inode(inode, 0); mark_inode_dirty(inode); return inode; out: clear_nlink(inode); unlock_new_inode(inode); fail: trace_f2fs_new_inode(inode, err); make_bad_inode(inode); iput(inode); if (nid_free) alloc_nid_failed(sbi, ino); return ERR_PTR(err); } static int is_multimedia_file(const unsigned char *s, const char *sub) { size_t slen = strlen(s); size_t sublen = strlen(sub); int ret; if (sublen > slen) return 0; ret = memcmp(s + slen - sublen, sub, sublen); if (ret) { /* compare upper case */ int i; char upper_sub[8]; for (i = 0; i < sublen && i < sizeof(upper_sub); i++) upper_sub[i] = toupper(sub[i]); return !memcmp(s + slen - sublen, upper_sub, sublen); } return !ret; } /* * Set multimedia files as cold files for hot/cold data separation */ static inline void set_cold_files(struct f2fs_sb_info *sbi, struct inode *inode, const unsigned char *name) { int i; __u8 (*extlist)[8] = sbi->raw_super->extension_list; int count = le32_to_cpu(sbi->raw_super->extension_count); for (i = 0; i < count; i++) { if (is_multimedia_file(name, extlist[i])) { set_cold_file(inode); break; } } } static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { struct super_block *sb = dir->i_sb; struct f2fs_sb_info *sbi = F2FS_SB(sb); struct inode *inode; nid_t ino = 0; int err, ilock; f2fs_balance_fs(sbi); inode = f2fs_new_inode(dir, mode); if (IS_ERR(inode)) return PTR_ERR(inode); if (!test_opt(sbi, DISABLE_EXT_IDENTIFY)) set_cold_files(sbi, inode, dentry->d_name.name); inode->i_op = &f2fs_file_inode_operations; inode->i_fop = &f2fs_file_operations; inode->i_mapping->a_ops = &f2fs_dblock_aops; ino = inode->i_ino; ilock = mutex_lock_op(sbi); err = f2fs_add_link(dentry, inode); mutex_unlock_op(sbi, ilock); if (err) goto out; alloc_nid_done(sbi, ino); if (!sbi->por_doing) d_instantiate(dentry, inode); unlock_new_inode(inode); return 0; out: clear_nlink(inode); unlock_new_inode(inode); make_bad_inode(inode); iput(inode); alloc_nid_failed(sbi, ino); return err; } static int f2fs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct inode *inode = old_dentry->d_inode; struct super_block *sb = dir->i_sb; 
struct f2fs_sb_info *sbi = F2FS_SB(sb); int err, ilock; f2fs_balance_fs(sbi); inode->i_ctime = CURRENT_TIME; atomic_inc(&inode->i_count); set_inode_flag(F2FS_I(inode), FI_INC_LINK); ilock = mutex_lock_op(sbi); err = f2fs_add_link(dentry, inode); mutex_unlock_op(sbi, ilock); if (err) goto out; /* * This file should be checkpointed during fsync. * We lost i_pino from now on. */ set_cp_file(inode); d_instantiate(dentry, inode); return 0; out: clear_inode_flag(F2FS_I(inode), FI_INC_LINK); make_bad_inode(inode); iput(inode); return err; } struct dentry *f2fs_get_parent(struct dentry *child) { struct qstr dotdot = QSTR_INIT("..", 2); unsigned long ino = f2fs_inode_by_name(child->d_inode, &dotdot); if (!ino) return ERR_PTR(-ENOENT); return d_obtain_alias(f2fs_iget(child->d_inode->i_sb, ino)); } static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct inode *inode = NULL; struct f2fs_dir_entry *de; struct page *page; if (dentry->d_name.len > F2FS_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); de = f2fs_find_entry(dir, &dentry->d_name, &page); if (de) { nid_t ino = le32_to_cpu(de->ino); kunmap(page); f2fs_put_page(page, 0); inode = f2fs_iget(dir->i_sb, ino); if (IS_ERR(inode)) return ERR_CAST(inode); } return d_splice_alias(inode, dentry); } static int f2fs_unlink(struct inode *dir, struct dentry *dentry) { struct super_block *sb = dir->i_sb; struct f2fs_sb_info *sbi = F2FS_SB(sb); struct inode *inode = dentry->d_inode; struct f2fs_dir_entry *de; struct page *page; int err = -ENOENT; int ilock; trace_f2fs_unlink_enter(dir, dentry); f2fs_balance_fs(sbi); de = f2fs_find_entry(dir, &dentry->d_name, &page); if (!de) goto fail; err = check_orphan_space(sbi); if (err) { kunmap(page); f2fs_put_page(page, 0); goto fail; } ilock = mutex_lock_op(sbi); f2fs_delete_entry(de, page, inode); mutex_unlock_op(sbi, ilock); /* In order to evict this inode, we set it dirty */ mark_inode_dirty(inode); fail: trace_f2fs_unlink_exit(inode, err); return err; } static int f2fs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) { struct super_block *sb = dir->i_sb; struct f2fs_sb_info *sbi = F2FS_SB(sb); struct inode *inode; size_t symlen = strlen(symname) + 1; int err, ilock; f2fs_balance_fs(sbi); inode = f2fs_new_inode(dir, S_IFLNK | S_IRWXUGO); if (IS_ERR(inode)) return PTR_ERR(inode); inode->i_op = &f2fs_symlink_inode_operations; inode->i_mapping->a_ops = &f2fs_dblock_aops; ilock = mutex_lock_op(sbi); err = f2fs_add_link(dentry, inode); mutex_unlock_op(sbi, ilock); if (err) goto out; err = page_symlink(inode, symname, symlen); alloc_nid_done(sbi, inode->i_ino); d_instantiate(dentry, inode); unlock_new_inode(inode); return err; out: clear_nlink(inode); unlock_new_inode(inode); make_bad_inode(inode); iput(inode); alloc_nid_failed(sbi, inode->i_ino); return err; } static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb); struct inode *inode; int err, ilock; f2fs_balance_fs(sbi); inode = f2fs_new_inode(dir, S_IFDIR | mode); if (IS_ERR(inode)) return PTR_ERR(inode); inode->i_op = &f2fs_dir_inode_operations; inode->i_fop = &f2fs_dir_operations; inode->i_mapping->a_ops = &f2fs_dblock_aops; mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO); set_inode_flag(F2FS_I(inode), FI_INC_LINK); ilock = mutex_lock_op(sbi); err = f2fs_add_link(dentry, inode); mutex_unlock_op(sbi, ilock); if (err) goto out_fail; alloc_nid_done(sbi, inode->i_ino); d_instantiate(dentry, inode); unlock_new_inode(inode); return 0; 
out_fail: clear_inode_flag(F2FS_I(inode), FI_INC_LINK); clear_nlink(inode); unlock_new_inode(inode); make_bad_inode(inode); iput(inode); alloc_nid_failed(sbi, inode->i_ino); return err; } static int f2fs_rmdir(struct inode *dir, struct dentry *dentry) { struct inode *inode = dentry->d_inode; if (f2fs_empty_dir(inode)) return f2fs_unlink(dir, dentry); return -ENOTEMPTY; } static int f2fs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { struct super_block *sb = dir->i_sb; struct f2fs_sb_info *sbi = F2FS_SB(sb); struct inode *inode; int err = 0; int ilock; if (!new_valid_dev(rdev)) return -EINVAL; f2fs_balance_fs(sbi); inode = f2fs_new_inode(dir, mode); if (IS_ERR(inode)) return PTR_ERR(inode); init_special_inode(inode, inode->i_mode, rdev); inode->i_op = &f2fs_special_inode_operations; ilock = mutex_lock_op(sbi); err = f2fs_add_link(dentry, inode); mutex_unlock_op(sbi, ilock); if (err) goto out; alloc_nid_done(sbi, inode->i_ino); d_instantiate(dentry, inode); unlock_new_inode(inode); return 0; out: clear_nlink(inode); unlock_new_inode(inode); make_bad_inode(inode); iput(inode); alloc_nid_failed(sbi, inode->i_ino); return err; } static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct super_block *sb = old_dir->i_sb; struct f2fs_sb_info *sbi = F2FS_SB(sb); struct inode *old_inode = old_dentry->d_inode; struct inode *new_inode = new_dentry->d_inode; struct page *old_dir_page; struct page *old_page; struct f2fs_dir_entry *old_dir_entry = NULL; struct f2fs_dir_entry *old_entry; struct f2fs_dir_entry *new_entry; int err = -ENOENT, ilock = -1; f2fs_balance_fs(sbi); old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page); if (!old_entry) goto out; if (S_ISDIR(old_inode->i_mode)) { err = -EIO; old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page); if (!old_dir_entry) goto out_old; } ilock = mutex_lock_op(sbi); if (new_inode) { struct page *new_page; err = -ENOTEMPTY; if (old_dir_entry && !f2fs_empty_dir(new_inode)) goto out_dir; err = -ENOENT; new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_page); if (!new_entry) goto out_dir; f2fs_set_link(new_dir, new_entry, new_page, old_inode); new_inode->i_ctime = CURRENT_TIME; if (old_dir_entry) drop_nlink(new_inode); drop_nlink(new_inode); if (!new_inode->i_nlink) add_orphan_inode(sbi, new_inode->i_ino); update_inode_page(new_inode); } else { err = f2fs_add_link(new_dentry, old_inode); if (err) goto out_dir; if (old_dir_entry) { inc_nlink(new_dir); update_inode_page(new_dir); } } old_inode->i_ctime = CURRENT_TIME; mark_inode_dirty(old_inode); f2fs_delete_entry(old_entry, old_page, NULL); if (old_dir_entry) { if (old_dir != new_dir) { f2fs_set_link(old_inode, old_dir_entry, old_dir_page, new_dir); } else { kunmap(old_dir_page); f2fs_put_page(old_dir_page, 0); } drop_nlink(old_dir); update_inode_page(old_dir); } mutex_unlock_op(sbi, ilock); return 0; out_dir: if (old_dir_entry) { kunmap(old_dir_page); f2fs_put_page(old_dir_page, 0); } mutex_unlock_op(sbi, ilock); out_old: kunmap(old_page); f2fs_put_page(old_page, 0); out: return err; } const struct inode_operations f2fs_dir_inode_operations = { .create = f2fs_create, .lookup = f2fs_lookup, .link = f2fs_link, .unlink = f2fs_unlink, .symlink = f2fs_symlink, .mkdir = f2fs_mkdir, .rmdir = f2fs_rmdir, .mknod = f2fs_mknod, .rename = f2fs_rename, .setattr = f2fs_setattr, .get_acl = f2fs_get_acl, #ifdef CONFIG_F2FS_FS_XATTR .setxattr = generic_setxattr, .getxattr = generic_getxattr, 
.listxattr = f2fs_listxattr, .removexattr = generic_removexattr, #endif }; const struct inode_operations f2fs_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = page_follow_link_light, .put_link = page_put_link, .setattr = f2fs_setattr, #ifdef CONFIG_F2FS_FS_XATTR .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = f2fs_listxattr, .removexattr = generic_removexattr, #endif }; const struct inode_operations f2fs_special_inode_operations = { .setattr = f2fs_setattr, .get_acl = f2fs_get_acl, #ifdef CONFIG_F2FS_FS_XATTR .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = f2fs_listxattr, .removexattr = generic_removexattr, #endif };
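The mkdir, mknod and symlink paths above all repeat the same commit-or-unwind sequence around f2fs_add_link(). A minimal sketch of that shared pattern, factored into one hypothetical helper (f2fs_commit_new_inode() is not a real f2fs function; every call inside it is taken from the functions above):

/*
 * Hypothetical helper, for illustration only: commit a freshly created
 * inode into the namespace, or unwind so eviction can reclaim it.
 */
static int f2fs_commit_new_inode(struct f2fs_sb_info *sbi,
				struct dentry *dentry, struct inode *inode)
{
	nid_t ino = inode->i_ino;	/* cached: iput() below may free inode */
	int err, ilock;

	/* serialize the directory update against checkpointing */
	ilock = mutex_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	mutex_unlock_op(sbi, ilock);
	if (err)
		goto fail;

	alloc_nid_done(sbi, ino);	/* the preallocated nid is now in use */
	d_instantiate(dentry, inode);
	unlock_new_inode(inode);
	return 0;
fail:
	clear_nlink(inode);
	unlock_new_inode(inode);
	make_bad_inode(inode);
	iput(inode);
	alloc_nid_failed(sbi, ino);	/* return the nid to the free list */
	return err;
}

With such a helper, f2fs_mkdir() and f2fs_mknod() would differ only in their inode setup and in the FI_INC_LINK handling around the call.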
gpl-2.0
knone1/pinaslang-zenfone5-kernel
fs/ufs/inode.c
2308
25864
/* * linux/fs/ufs/inode.c * * Copyright (C) 1998 * Daniel Pirkl <daniel.pirkl@email.cz> * Charles University, Faculty of Mathematics and Physics * * from * * linux/fs/ext2/inode.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/inode.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993 * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 */ #include <asm/uaccess.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/time.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/buffer_head.h> #include <linux/writeback.h> #include "ufs_fs.h" #include "ufs.h" #include "swab.h" #include "util.h" static u64 ufs_frag_map(struct inode *inode, sector_t frag, bool needs_lock); static int ufs_block_to_path(struct inode *inode, sector_t i_block, sector_t offsets[4]) { struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi; int ptrs = uspi->s_apb; int ptrs_bits = uspi->s_apbshift; const long direct_blocks = UFS_NDADDR, indirect_blocks = ptrs, double_blocks = (1 << (ptrs_bits * 2)); int n = 0; UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks); if (i_block < direct_blocks) { offsets[n++] = i_block; } else if ((i_block -= direct_blocks) < indirect_blocks) { offsets[n++] = UFS_IND_BLOCK; offsets[n++] = i_block; } else if ((i_block -= indirect_blocks) < double_blocks) { offsets[n++] = UFS_DIND_BLOCK; offsets[n++] = i_block >> ptrs_bits; offsets[n++] = i_block & (ptrs - 1); } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) { offsets[n++] = UFS_TIND_BLOCK; offsets[n++] = i_block >> (ptrs_bits * 2); offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1); offsets[n++] = i_block & (ptrs - 1); } else { ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big"); } return n; } /* * Returns the location of the fragment from * the beginning of the filesystem. 
*/ static u64 ufs_frag_map(struct inode *inode, sector_t frag, bool needs_lock) { struct ufs_inode_info *ufsi = UFS_I(inode); struct super_block *sb = inode->i_sb; struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift; int shift = uspi->s_apbshift-uspi->s_fpbshift; sector_t offsets[4], *p; int depth = ufs_block_to_path(inode, frag >> uspi->s_fpbshift, offsets); u64 ret = 0L; __fs32 block; __fs64 u2_block = 0L; unsigned flags = UFS_SB(sb)->s_flags; u64 temp = 0L; UFSD(": frag = %llu depth = %d\n", (unsigned long long)frag, depth); UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n", uspi->s_fpbshift, uspi->s_apbmask, (unsigned long long)mask); if (depth == 0) return 0; p = offsets; if (needs_lock) lock_ufs(sb); if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) goto ufs2; block = ufsi->i_u1.i_data[*p++]; if (!block) goto out; while (--depth) { struct buffer_head *bh; sector_t n = *p++; bh = sb_bread(sb, uspi->s_sbbase + fs32_to_cpu(sb, block)+(n>>shift)); if (!bh) goto out; block = ((__fs32 *) bh->b_data)[n & mask]; brelse (bh); if (!block) goto out; } ret = (u64) (uspi->s_sbbase + fs32_to_cpu(sb, block) + (frag & uspi->s_fpbmask)); goto out; ufs2: u2_block = ufsi->i_u1.u2_i_data[*p++]; if (!u2_block) goto out; while (--depth) { struct buffer_head *bh; sector_t n = *p++; temp = (u64)(uspi->s_sbbase) + fs64_to_cpu(sb, u2_block); bh = sb_bread(sb, temp +(u64) (n>>shift)); if (!bh) goto out; u2_block = ((__fs64 *)bh->b_data)[n & mask]; brelse(bh); if (!u2_block) goto out; } temp = (u64)uspi->s_sbbase + fs64_to_cpu(sb, u2_block); ret = temp + (u64) (frag & uspi->s_fpbmask); out: if (needs_lock) unlock_ufs(sb); return ret; } /** * ufs_inode_getfrag() - allocate new fragment(s) * @inode - pointer to inode * @fragment - number of `fragment' which hold pointer * to new allocated fragment(s) * @new_fragment - number of new allocated fragment(s) * @required - how many fragment(s) we require * @err - we set it if something wrong * @phys - pointer to where we save physical number of new allocated fragments, * NULL if we allocate not data(indirect blocks for example). 
* @new - we set it if we allocate new block * @locked_page - for ufs_new_fragments() */ static struct buffer_head * ufs_inode_getfrag(struct inode *inode, u64 fragment, sector_t new_fragment, unsigned int required, int *err, long *phys, int *new, struct page *locked_page) { struct ufs_inode_info *ufsi = UFS_I(inode); struct super_block *sb = inode->i_sb; struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; struct buffer_head * result; unsigned blockoff, lastblockoff; u64 tmp, goal, lastfrag, block, lastblock; void *p, *p2; UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, required %u, " "metadata %d\n", inode->i_ino, (unsigned long long)fragment, (unsigned long long)new_fragment, required, !phys); /* TODO : to be done for write support if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) goto ufs2; */ block = ufs_fragstoblks (fragment); blockoff = ufs_fragnum (fragment); p = ufs_get_direct_data_ptr(uspi, ufsi, block); goal = 0; repeat: tmp = ufs_data_ptr_to_cpu(sb, p); lastfrag = ufsi->i_lastfrag; if (tmp && fragment < lastfrag) { if (!phys) { result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff); if (tmp == ufs_data_ptr_to_cpu(sb, p)) { UFSD("EXIT, result %llu\n", (unsigned long long)tmp + blockoff); return result; } brelse (result); goto repeat; } else { *phys = uspi->s_sbbase + tmp + blockoff; return NULL; } } lastblock = ufs_fragstoblks (lastfrag); lastblockoff = ufs_fragnum (lastfrag); /* * We will extend file into new block beyond last allocated block */ if (lastblock < block) { /* * We must reallocate last allocated block */ if (lastblockoff) { p2 = ufs_get_direct_data_ptr(uspi, ufsi, lastblock); tmp = ufs_new_fragments(inode, p2, lastfrag, ufs_data_ptr_to_cpu(sb, p2), uspi->s_fpb - lastblockoff, err, locked_page); if (!tmp) { if (lastfrag != ufsi->i_lastfrag) goto repeat; else return NULL; } lastfrag = ufsi->i_lastfrag; } tmp = ufs_data_ptr_to_cpu(sb, ufs_get_direct_data_ptr(uspi, ufsi, lastblock)); if (tmp) goal = tmp + uspi->s_fpb; tmp = ufs_new_fragments (inode, p, fragment - blockoff, goal, required + blockoff, err, phys != NULL ? locked_page : NULL); } else if (lastblock == block) { /* * We will extend last allocated block */ tmp = ufs_new_fragments(inode, p, fragment - (blockoff - lastblockoff), ufs_data_ptr_to_cpu(sb, p), required + (blockoff - lastblockoff), err, phys != NULL ? locked_page : NULL); } else /* (lastblock > block) */ { /* * We will allocate new block before last allocated block */ if (block) { tmp = ufs_data_ptr_to_cpu(sb, ufs_get_direct_data_ptr(uspi, ufsi, block - 1)); if (tmp) goal = tmp + uspi->s_fpb; } tmp = ufs_new_fragments(inode, p, fragment - blockoff, goal, uspi->s_fpb, err, phys != NULL ? locked_page : NULL); } if (!tmp) { if ((!blockoff && ufs_data_ptr_to_cpu(sb, p)) || (blockoff && lastfrag != ufsi->i_lastfrag)) goto repeat; *err = -ENOSPC; return NULL; } if (!phys) { result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff); } else { *phys = uspi->s_sbbase + tmp + blockoff; result = NULL; *err = 0; *new = 1; } inode->i_ctime = CURRENT_TIME_SEC; if (IS_SYNC(inode)) ufs_sync_inode (inode); mark_inode_dirty(inode); UFSD("EXIT, result %llu\n", (unsigned long long)tmp + blockoff); return result; /* This part : To be implemented .... Required only for writing, not required for READ-ONLY. 
ufs2: u2_block = ufs_fragstoblks(fragment); u2_blockoff = ufs_fragnum(fragment); p = ufsi->i_u1.u2_i_data + block; goal = 0; repeat2: tmp = fs32_to_cpu(sb, *p); lastfrag = ufsi->i_lastfrag; */ } /** * ufs_inode_getblock() - allocate new block * @inode - pointer to inode * @bh - pointer to block which hold "pointer" to new allocated block * @fragment - number of `fragment' which hold pointer * to new allocated block * @new_fragment - number of new allocated fragment * (block will hold this fragment and also uspi->s_fpb-1) * @err - see ufs_inode_getfrag() * @phys - see ufs_inode_getfrag() * @new - see ufs_inode_getfrag() * @locked_page - see ufs_inode_getfrag() */ static struct buffer_head * ufs_inode_getblock(struct inode *inode, struct buffer_head *bh, u64 fragment, sector_t new_fragment, int *err, long *phys, int *new, struct page *locked_page) { struct super_block *sb = inode->i_sb; struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; struct buffer_head * result; unsigned blockoff; u64 tmp, goal, block; void *p; block = ufs_fragstoblks (fragment); blockoff = ufs_fragnum (fragment); UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, metadata %d\n", inode->i_ino, (unsigned long long)fragment, (unsigned long long)new_fragment, !phys); result = NULL; if (!bh) goto out; if (!buffer_uptodate(bh)) { ll_rw_block (READ, 1, &bh); wait_on_buffer (bh); if (!buffer_uptodate(bh)) goto out; } if (uspi->fs_magic == UFS2_MAGIC) p = (__fs64 *)bh->b_data + block; else p = (__fs32 *)bh->b_data + block; repeat: tmp = ufs_data_ptr_to_cpu(sb, p); if (tmp) { if (!phys) { result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff); if (tmp == ufs_data_ptr_to_cpu(sb, p)) goto out; brelse (result); goto repeat; } else { *phys = uspi->s_sbbase + tmp + blockoff; goto out; } } if (block && (uspi->fs_magic == UFS2_MAGIC ? (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[block-1])) : (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[block-1])))) goal = tmp + uspi->s_fpb; else goal = bh->b_blocknr + uspi->s_fpb; tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal, uspi->s_fpb, err, locked_page); if (!tmp) { if (ufs_data_ptr_to_cpu(sb, p)) goto repeat; goto out; } if (!phys) { result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff); } else { *phys = uspi->s_sbbase + tmp + blockoff; *new = 1; } mark_buffer_dirty(bh); if (IS_SYNC(inode)) sync_dirty_buffer(bh); inode->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(inode); UFSD("result %llu\n", (unsigned long long)tmp + blockoff); out: brelse (bh); UFSD("EXIT\n"); return result; } /** * ufs_getfrag_block() - `get_block_t' function, interface between UFS and * readpage, writepage and so on */ int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create) { struct super_block * sb = inode->i_sb; struct ufs_sb_info * sbi = UFS_SB(sb); struct ufs_sb_private_info * uspi = sbi->s_uspi; struct buffer_head * bh; int ret, err, new; unsigned long ptr,phys; u64 phys64 = 0; bool needs_lock = (sbi->mutex_owner != current); if (!create) { phys64 = ufs_frag_map(inode, fragment, needs_lock); UFSD("phys64 = %llu\n", (unsigned long long)phys64); if (phys64) map_bh(bh_result, sb, phys64); return 0; } /* This code entered only while writing ....? 
*/ err = -EIO; new = 0; ret = 0; bh = NULL; if (needs_lock) lock_ufs(sb); UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment); if (fragment > ((UFS_NDADDR + uspi->s_apb + uspi->s_2apb + uspi->s_3apb) << uspi->s_fpbshift)) goto abort_too_big; err = 0; ptr = fragment; /* * ok, these macros clean the logic up a bit and make * it much more readable: */ #define GET_INODE_DATABLOCK(x) \ ufs_inode_getfrag(inode, x, fragment, 1, &err, &phys, &new,\ bh_result->b_page) #define GET_INODE_PTR(x) \ ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, NULL, NULL,\ bh_result->b_page) #define GET_INDIRECT_DATABLOCK(x) \ ufs_inode_getblock(inode, bh, x, fragment, \ &err, &phys, &new, bh_result->b_page) #define GET_INDIRECT_PTR(x) \ ufs_inode_getblock(inode, bh, x, fragment, \ &err, NULL, NULL, NULL) if (ptr < UFS_NDIR_FRAGMENT) { bh = GET_INODE_DATABLOCK(ptr); goto out; } ptr -= UFS_NDIR_FRAGMENT; if (ptr < (1 << (uspi->s_apbshift + uspi->s_fpbshift))) { bh = GET_INODE_PTR(UFS_IND_FRAGMENT + (ptr >> uspi->s_apbshift)); goto get_indirect; } ptr -= 1 << (uspi->s_apbshift + uspi->s_fpbshift); if (ptr < (1 << (uspi->s_2apbshift + uspi->s_fpbshift))) { bh = GET_INODE_PTR(UFS_DIND_FRAGMENT + (ptr >> uspi->s_2apbshift)); goto get_double; } ptr -= 1 << (uspi->s_2apbshift + uspi->s_fpbshift); bh = GET_INODE_PTR(UFS_TIND_FRAGMENT + (ptr >> uspi->s_3apbshift)); bh = GET_INDIRECT_PTR((ptr >> uspi->s_2apbshift) & uspi->s_apbmask); get_double: bh = GET_INDIRECT_PTR((ptr >> uspi->s_apbshift) & uspi->s_apbmask); get_indirect: bh = GET_INDIRECT_DATABLOCK(ptr & uspi->s_apbmask); #undef GET_INODE_DATABLOCK #undef GET_INODE_PTR #undef GET_INDIRECT_DATABLOCK #undef GET_INDIRECT_PTR out: if (err) goto abort; if (new) set_buffer_new(bh_result); map_bh(bh_result, sb, phys); abort: if (needs_lock) unlock_ufs(sb); return err; abort_too_big: ufs_warning(sb, "ufs_get_block", "block > big"); goto abort; } static int ufs_writepage(struct page *page, struct writeback_control *wbc) { return block_write_full_page(page,ufs_getfrag_block,wbc); } static int ufs_readpage(struct file *file, struct page *page) { return block_read_full_page(page,ufs_getfrag_block); } int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len) { return __block_write_begin(page, pos, len, ufs_getfrag_block); } static void ufs_write_failed(struct address_space *mapping, loff_t to) { struct inode *inode = mapping->host; if (to > inode->i_size) truncate_pagecache(inode, to, inode->i_size); } static int ufs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { int ret; ret = block_write_begin(mapping, pos, len, flags, pagep, ufs_getfrag_block); if (unlikely(ret)) ufs_write_failed(mapping, pos + len); return ret; } static sector_t ufs_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping,block,ufs_getfrag_block); } const struct address_space_operations ufs_aops = { .readpage = ufs_readpage, .writepage = ufs_writepage, .write_begin = ufs_write_begin, .write_end = generic_write_end, .bmap = ufs_bmap }; static void ufs_set_inode_ops(struct inode *inode) { if (S_ISREG(inode->i_mode)) { inode->i_op = &ufs_file_inode_operations; inode->i_fop = &ufs_file_operations; inode->i_mapping->a_ops = &ufs_aops; } else if (S_ISDIR(inode->i_mode)) { inode->i_op = &ufs_dir_inode_operations; inode->i_fop = &ufs_dir_operations; inode->i_mapping->a_ops = &ufs_aops; } else if (S_ISLNK(inode->i_mode)) { if (!inode->i_blocks) 
inode->i_op = &ufs_fast_symlink_inode_operations; else { inode->i_op = &ufs_symlink_inode_operations; inode->i_mapping->a_ops = &ufs_aops; } } else init_special_inode(inode, inode->i_mode, ufs_get_inode_dev(inode->i_sb, UFS_I(inode))); } static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode) { struct ufs_inode_info *ufsi = UFS_I(inode); struct super_block *sb = inode->i_sb; umode_t mode; /* * Copy data to the in-core inode. */ inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode); set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink)); if (inode->i_nlink == 0) { ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); return -1; } /* * Linux now has 32-bit uid and gid, so we can support EFT. */ i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode)); i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode)); inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size); inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec); inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec); inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec); inode->i_mtime.tv_nsec = 0; inode->i_atime.tv_nsec = 0; inode->i_ctime.tv_nsec = 0; inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks); inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen); ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags); ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow); ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag); if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr, sizeof(ufs_inode->ui_u2.ui_addr)); } else { memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink, sizeof(ufs_inode->ui_u2.ui_symlink) - 1); ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0; } return 0; } static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode) { struct ufs_inode_info *ufsi = UFS_I(inode); struct super_block *sb = inode->i_sb; umode_t mode; UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino); /* * Copy data to the in-core inode. */ inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode); set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink)); if (inode->i_nlink == 0) { ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); return -1; } /* * Linux now has 32-bit uid and gid, so we can support EFT. 
*/ i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid)); i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid)); inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size); inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime); inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime); inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime); inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec); inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec); inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec); inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks); inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen); ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags); /* ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow); ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag); */ if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr, sizeof(ufs2_inode->ui_u2.ui_addr)); } else { memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink, sizeof(ufs2_inode->ui_u2.ui_symlink) - 1); ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0; } return 0; } struct inode *ufs_iget(struct super_block *sb, unsigned long ino) { struct ufs_inode_info *ufsi; struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; struct buffer_head * bh; struct inode *inode; int err; UFSD("ENTER, ino %lu\n", ino); if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) { ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n", ino); return ERR_PTR(-EIO); } inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; ufsi = UFS_I(inode); bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino)); if (!bh) { ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino); goto bad_inode; } if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) { struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data; err = ufs2_read_inode(inode, ufs2_inode + ufs_inotofsbo(inode->i_ino)); } else { struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data; err = ufs1_read_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino)); } if (err) goto bad_inode; inode->i_version++; ufsi->i_lastfrag = (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift; ufsi->i_dir_start_lookup = 0; ufsi->i_osync = 0; ufs_set_inode_ops(inode); brelse(bh); UFSD("EXIT\n"); unlock_new_inode(inode); return inode; bad_inode: iget_failed(inode); return ERR_PTR(-EIO); } static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode) { struct super_block *sb = inode->i_sb; struct ufs_inode_info *ufsi = UFS_I(inode); ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode); ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink); ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode)); ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode)); ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size); ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec); ufs_inode->ui_atime.tv_usec = 0; ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec); ufs_inode->ui_ctime.tv_usec = 0; ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec); ufs_inode->ui_mtime.tv_usec = 0; ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks); ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags); ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation); if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) { 
ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow); ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag); } if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */ ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0]; } else if (inode->i_blocks) { memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data, sizeof(ufs_inode->ui_u2.ui_addr)); } else { memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink, sizeof(ufs_inode->ui_u2.ui_symlink)); } if (!inode->i_nlink) memset (ufs_inode, 0, sizeof(struct ufs_inode)); } static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode) { struct super_block *sb = inode->i_sb; struct ufs_inode_info *ufsi = UFS_I(inode); UFSD("ENTER\n"); ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode); ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink); ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode)); ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode)); ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size); ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec); ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec); ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec); ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec); ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec); ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec); ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks); ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags); ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation); if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */ ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0]; } else if (inode->i_blocks) { memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data, sizeof(ufs_inode->ui_u2.ui_addr)); } else { memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink, sizeof(ufs_inode->ui_u2.ui_symlink)); } if (!inode->i_nlink) memset (ufs_inode, 0, sizeof(struct ufs2_inode)); UFSD("EXIT\n"); } static int ufs_update_inode(struct inode * inode, int do_sync) { struct super_block *sb = inode->i_sb; struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; struct buffer_head * bh; UFSD("ENTER, ino %lu\n", inode->i_ino); if (inode->i_ino < UFS_ROOTINO || inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) { ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino); return -1; } bh = sb_bread(sb, ufs_inotofsba(inode->i_ino)); if (!bh) { ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino); return -1; } if (uspi->fs_magic == UFS2_MAGIC) { struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data; ufs2_update_inode(inode, ufs2_inode + ufs_inotofsbo(inode->i_ino)); } else { struct ufs_inode *ufs_inode = (struct ufs_inode *) bh->b_data; ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino)); } mark_buffer_dirty(bh); if (do_sync) sync_dirty_buffer(bh); brelse (bh); UFSD("EXIT\n"); return 0; } int ufs_write_inode(struct inode *inode, struct writeback_control *wbc) { int ret; lock_ufs(inode->i_sb); ret = ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL); unlock_ufs(inode->i_sb); return ret; } int ufs_sync_inode (struct inode *inode) { return ufs_update_inode (inode, 1); } void ufs_evict_inode(struct inode * inode) { int want_delete = 0; if (!inode->i_nlink && !is_bad_inode(inode)) want_delete = 1; 
truncate_inode_pages(&inode->i_data, 0); if (want_delete) { loff_t old_i_size; /*UFS_I(inode)->i_dtime = CURRENT_TIME;*/ lock_ufs(inode->i_sb); mark_inode_dirty(inode); ufs_update_inode(inode, IS_SYNC(inode)); old_i_size = inode->i_size; inode->i_size = 0; if (inode->i_blocks && ufs_truncate(inode, old_i_size)) ufs_warning(inode->i_sb, __func__, "ufs_truncate failed\n"); unlock_ufs(inode->i_sb); } invalidate_inode_buffers(inode); clear_inode(inode); if (want_delete) { lock_ufs(inode->i_sb); ufs_free_inode (inode); unlock_ufs(inode->i_sb); } }
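ufs_block_to_path() above classifies a logical block as direct, single-, double- or triple-indirect by subtracting each zone's capacity in turn. A self-contained userspace sketch of the same arithmetic (the UFS_* constants mirror ufs_fs.h; ptrs_bits = 10, i.e. 1024 pointers per indirect block, is just an example value):

#include <stdio.h>

#define UFS_NDADDR	12			/* direct pointers in the inode */
#define UFS_IND_BLOCK	(UFS_NDADDR)
#define UFS_DIND_BLOCK	(UFS_NDADDR + 1)
#define UFS_TIND_BLOCK	(UFS_NDADDR + 2)

/* Same classification as ufs_block_to_path(): returns depth, fills offsets. */
static int block_to_path(long i_block, int ptrs_bits, long offsets[4])
{
	const long ptrs = 1L << ptrs_bits;	/* pointers per indirect block */
	const long dbl = 1L << (ptrs_bits * 2);	/* blocks reachable via DIND */
	int n = 0;

	if (i_block < UFS_NDADDR) {
		offsets[n++] = i_block;
	} else if ((i_block -= UFS_NDADDR) < ptrs) {
		offsets[n++] = UFS_IND_BLOCK;
		offsets[n++] = i_block;
	} else if ((i_block -= ptrs) < dbl) {
		offsets[n++] = UFS_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
	} else if (((i_block -= dbl) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = UFS_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
	}
	return n;	/* 0 means the block number is too big */
}

int main(void)
{
	long offsets[4];
	int i, depth = block_to_path(5000, 10, offsets);

	/* block 5000 -> depth 3: { UFS_DIND_BLOCK, 3, 892 } */
	printf("depth %d:", depth);
	for (i = 0; i < depth; i++)
		printf(" %ld", offsets[i]);
	printf("\n");
	return 0;
}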
gpl-2.0
kgp700/neok-op3d-cuberoid-ics
drivers/staging/iio/dds/ad9951.c
2308
4833
/* * Driver for ADI Direct Digital Synthesis ad9951 * * Copyright (c) 2010 Analog Devices Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/types.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <linux/sysfs.h> #include "../iio.h" #include "../sysfs.h" #define DRV_NAME "ad9951" #define CFR1 0x0 #define CFR2 0x1 #define AUTO_OSK (1) #define OSKEN (1 << 1) #define LOAD_ARR (1 << 2) #define AUTO_SYNC (1 << 7) #define LSB_FST (1) #define SDIO_IPT (1 << 1) #define CLR_PHA (1 << 2) #define SINE_OPT (1 << 4) #define ACLR_PHA (1 << 5) #define VCO_RANGE (1 << 2) #define CRS_OPT (1 << 1) #define HMANU_SYNC (1 << 2) #define HSPD_SYNC (1 << 3) /* Register format: 1 byte addr + value */ struct ad9951_config { u8 asf[3]; u8 arr[2]; u8 ftw0[5]; u8 ftw1[3]; }; struct ad9951_state { struct mutex lock; struct iio_dev *idev; struct spi_device *sdev; }; static ssize_t ad9951_set_parameter(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct spi_message msg; struct spi_transfer xfer; int ret; struct ad9951_config *config = (struct ad9951_config *)buf; struct iio_dev *idev = dev_get_drvdata(dev); struct ad9951_state *st = idev->dev_data; xfer.len = 3; xfer.tx_buf = &config->asf[0]; mutex_lock(&st->lock); spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); ret = spi_sync(st->sdev, &msg); if (ret) goto error_ret; xfer.len = 2; xfer.tx_buf = &config->arr[0]; spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); ret = spi_sync(st->sdev, &msg); if (ret) goto error_ret; xfer.len = 5; xfer.tx_buf = &config->ftw0[0]; spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); ret = spi_sync(st->sdev, &msg); if (ret) goto error_ret; xfer.len = 3; xfer.tx_buf = &config->ftw1[0]; spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); ret = spi_sync(st->sdev, &msg); if (ret) goto error_ret; error_ret: mutex_unlock(&st->lock); return ret ? 
ret : len; } static IIO_DEVICE_ATTR(dds, S_IWUSR, NULL, ad9951_set_parameter, 0); static void ad9951_init(struct ad9951_state *st) { struct spi_message msg; struct spi_transfer xfer; int ret; u8 cfr[5]; cfr[0] = CFR1; cfr[1] = 0; cfr[2] = LSB_FST | CLR_PHA | SINE_OPT | ACLR_PHA; cfr[3] = AUTO_OSK | OSKEN | LOAD_ARR; cfr[4] = 0; mutex_lock(&st->lock); xfer.len = 5; xfer.tx_buf = &cfr; spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); ret = spi_sync(st->sdev, &msg); if (ret) goto error_ret; cfr[0] = CFR2; cfr[1] = VCO_RANGE; cfr[2] = HSPD_SYNC; cfr[3] = 0; xfer.len = 4; xfer.tx_buf = &cfr; spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); ret = spi_sync(st->sdev, &msg); if (ret) goto error_ret; error_ret: mutex_unlock(&st->lock); } static struct attribute *ad9951_attributes[] = { &iio_dev_attr_dds.dev_attr.attr, NULL, }; static const struct attribute_group ad9951_attribute_group = { .name = DRV_NAME, .attrs = ad9951_attributes, }; static const struct iio_info ad9951_info = { .attrs = &ad9951_attribute_group, .driver_module = THIS_MODULE, }; static int __devinit ad9951_probe(struct spi_device *spi) { struct ad9951_state *st; int ret = 0; st = kzalloc(sizeof(*st), GFP_KERNEL); if (st == NULL) { ret = -ENOMEM; goto error_ret; } spi_set_drvdata(spi, st); mutex_init(&st->lock); st->sdev = spi; st->idev = iio_allocate_device(0); if (st->idev == NULL) { ret = -ENOMEM; goto error_free_st; } st->idev->dev.parent = &spi->dev; st->idev->info = &ad9951_info; st->idev->dev_data = (void *)(st); st->idev->modes = INDIO_DIRECT_MODE; ret = iio_device_register(st->idev); if (ret) goto error_free_dev; spi->max_speed_hz = 2000000; spi->mode = SPI_MODE_3; spi->bits_per_word = 8; spi_setup(spi); ad9951_init(st); return 0; error_free_dev: iio_free_device(st->idev); error_free_st: kfree(st); error_ret: return ret; } static int __devexit ad9951_remove(struct spi_device *spi) { struct ad9951_state *st = spi_get_drvdata(spi); iio_device_unregister(st->idev); kfree(st); return 0; } static struct spi_driver ad9951_driver = { .driver = { .name = DRV_NAME, .owner = THIS_MODULE, }, .probe = ad9951_probe, .remove = __devexit_p(ad9951_remove), }; static __init int ad9951_spi_init(void) { return spi_register_driver(&ad9951_driver); } module_init(ad9951_spi_init); static __exit void ad9951_spi_exit(void) { spi_unregister_driver(&ad9951_driver); } module_exit(ad9951_spi_exit); MODULE_AUTHOR("Cliff Cai"); MODULE_DESCRIPTION("Analog Devices ad9951 driver"); MODULE_LICENSE("GPL v2");
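Every register write in ad9951_set_parameter() and ad9951_init() above repeats the same spi_message_init()/spi_message_add_tail()/spi_sync() sequence, and the on-stack struct spi_transfer is only partially initialized before use. A minimal sketch of a helper that collapses the pattern (ad9951_write_reg() is a hypothetical name; the kernel's generic spi_write() provides much the same service):

/*
 * Hypothetical helper: one synchronous register write.  The designated
 * initializer also zeroes the spi_transfer fields that the code above
 * leaves uninitialized on the stack.
 */
static int ad9951_write_reg(struct spi_device *spi, const u8 *buf, size_t len)
{
	struct spi_transfer xfer = {
		.tx_buf = buf,
		.len = len,
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	return spi_sync(spi, &msg);
}

Each block in ad9951_set_parameter() would then reduce to ret = ad9951_write_reg(st->sdev, config->asf, 3); and so on, still under st->lock.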
gpl-2.0
CaptainThrowback/kernel_htc_shooter-ics
net/netfilter/ipset/ip_set_hash_ip.c
2564
11187
/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* Kernel module implementing an IP set type: the hash:ip type */ #include <linux/jhash.h> #include <linux/module.h> #include <linux/ip.h> #include <linux/skbuff.h> #include <linux/errno.h> #include <linux/random.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/netlink.h> #include <net/tcp.h> #include <linux/netfilter.h> #include <linux/netfilter/ipset/pfxlen.h> #include <linux/netfilter/ipset/ip_set.h> #include <linux/netfilter/ipset/ip_set_timeout.h> #include <linux/netfilter/ipset/ip_set_hash.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); MODULE_DESCRIPTION("hash:ip type of IP sets"); MODULE_ALIAS("ip_set_hash:ip"); /* Type specific function prefix */ #define TYPE hash_ip static bool hash_ip_same_set(const struct ip_set *a, const struct ip_set *b); #define hash_ip4_same_set hash_ip_same_set #define hash_ip6_same_set hash_ip_same_set /* The type variant functions: IPv4 */ /* Member elements without timeout */ struct hash_ip4_elem { __be32 ip; }; /* Member elements with timeout support */ struct hash_ip4_telem { __be32 ip; unsigned long timeout; }; static inline bool hash_ip4_data_equal(const struct hash_ip4_elem *ip1, const struct hash_ip4_elem *ip2) { return ip1->ip == ip2->ip; } static inline bool hash_ip4_data_isnull(const struct hash_ip4_elem *elem) { return elem->ip == 0; } static inline void hash_ip4_data_copy(struct hash_ip4_elem *dst, const struct hash_ip4_elem *src) { dst->ip = src->ip; } /* Zero valued IP addresses cannot be stored */ static inline void hash_ip4_data_zero_out(struct hash_ip4_elem *elem) { elem->ip = 0; } static inline bool hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *data) { NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); return 0; nla_put_failure: return 1; } static bool hash_ip4_data_tlist(struct sk_buff *skb, const struct hash_ip4_elem *data) { const struct hash_ip4_telem *tdata = (const struct hash_ip4_telem *)data; NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(ip_set_timeout_get(tdata->timeout))); return 0; nla_put_failure: return 1; } #define IP_SET_HASH_WITH_NETMASK #define PF 4 #define HOST_MASK 32 #include <linux/netfilter/ipset/ip_set_ahash.h> static int hash_ip4_kadt(struct ip_set *set, const struct sk_buff *skb, enum ipset_adt adt, u8 pf, u8 dim, u8 flags) { const struct ip_set_hash *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; __be32 ip; ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &ip); ip &= ip_set_netmask(h->netmask); if (ip == 0) return -EINVAL; return adtfn(set, &ip, h->timeout); } static int hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags) { const struct ip_set_hash *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; u32 ip, ip_to, hosts, timeout = h->timeout; __be32 nip; int ret = 0; if (unlikely(!tb[IPSET_ATTR_IP] || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_LINENO]) *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); if (ret) return ret; ip &= ip_set_hostmask(h->netmask); if (tb[IPSET_ATTR_TIMEOUT]) { if (!with_timeout(h->timeout)) return -IPSET_ERR_TIMEOUT; timeout = 
ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); } if (adt == IPSET_TEST) { nip = htonl(ip); if (nip == 0) return -IPSET_ERR_HASH_ELEM; return adtfn(set, &nip, timeout); } if (tb[IPSET_ATTR_IP_TO]) { ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); if (ret) return ret; if (ip > ip_to) swap(ip, ip_to); } else if (tb[IPSET_ATTR_CIDR]) { u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); if (cidr > 32) return -IPSET_ERR_INVALID_CIDR; ip &= ip_set_hostmask(cidr); ip_to = ip | ~ip_set_hostmask(cidr); } else ip_to = ip; hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1); for (; !before(ip_to, ip); ip += hosts) { nip = htonl(ip); if (nip == 0) return -IPSET_ERR_HASH_ELEM; ret = adtfn(set, &nip, timeout); if (ret && !ip_set_eexist(ret, flags)) return ret; else ret = 0; } return ret; } static bool hash_ip_same_set(const struct ip_set *a, const struct ip_set *b) { const struct ip_set_hash *x = a->data; const struct ip_set_hash *y = b->data; /* Resizing changes htable_bits, so we ignore it */ return x->maxelem == y->maxelem && x->timeout == y->timeout && x->netmask == y->netmask; } /* The type variant functions: IPv6 */ struct hash_ip6_elem { union nf_inet_addr ip; }; struct hash_ip6_telem { union nf_inet_addr ip; unsigned long timeout; }; static inline bool hash_ip6_data_equal(const struct hash_ip6_elem *ip1, const struct hash_ip6_elem *ip2) { return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0; } static inline bool hash_ip6_data_isnull(const struct hash_ip6_elem *elem) { return ipv6_addr_any(&elem->ip.in6); } static inline void hash_ip6_data_copy(struct hash_ip6_elem *dst, const struct hash_ip6_elem *src) { ipv6_addr_copy(&dst->ip.in6, &src->ip.in6); } static inline void hash_ip6_data_zero_out(struct hash_ip6_elem *elem) { ipv6_addr_set(&elem->ip.in6, 0, 0, 0, 0); } static inline void ip6_netmask(union nf_inet_addr *ip, u8 prefix) { ip->ip6[0] &= ip_set_netmask6(prefix)[0]; ip->ip6[1] &= ip_set_netmask6(prefix)[1]; ip->ip6[2] &= ip_set_netmask6(prefix)[2]; ip->ip6[3] &= ip_set_netmask6(prefix)[3]; } static bool hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *data) { NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); return 0; nla_put_failure: return 1; } static bool hash_ip6_data_tlist(struct sk_buff *skb, const struct hash_ip6_elem *data) { const struct hash_ip6_telem *e = (const struct hash_ip6_telem *)data; NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(ip_set_timeout_get(e->timeout))); return 0; nla_put_failure: return 1; } #undef PF #undef HOST_MASK #define PF 6 #define HOST_MASK 128 #include <linux/netfilter/ipset/ip_set_ahash.h> static int hash_ip6_kadt(struct ip_set *set, const struct sk_buff *skb, enum ipset_adt adt, u8 pf, u8 dim, u8 flags) { const struct ip_set_hash *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; union nf_inet_addr ip; ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &ip.in6); ip6_netmask(&ip, h->netmask); if (ipv6_addr_any(&ip.in6)) return -EINVAL; return adtfn(set, &ip, h->timeout); } static const struct nla_policy hash_ip6_adt_policy[IPSET_ATTR_ADT_MAX + 1] = { [IPSET_ATTR_IP] = { .type = NLA_NESTED }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, }; static int hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags) { const struct ip_set_hash *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; union nf_inet_addr ip; u32 timeout = h->timeout; int ret; if (unlikely(!tb[IPSET_ATTR_IP] || 
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR])) return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_LINENO]) *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &ip); if (ret) return ret; ip6_netmask(&ip, h->netmask); if (ipv6_addr_any(&ip.in6)) return -IPSET_ERR_HASH_ELEM; if (tb[IPSET_ATTR_TIMEOUT]) { if (!with_timeout(h->timeout)) return -IPSET_ERR_TIMEOUT; timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); } ret = adtfn(set, &ip, timeout); return ip_set_eexist(ret, flags) ? 0 : ret; } /* Create hash:ip type of sets */ static int hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags) { u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; u8 netmask, hbits; struct ip_set_hash *h; if (!(set->family == AF_INET || set->family == AF_INET6)) return -IPSET_ERR_INVALID_FAMILY; netmask = set->family == AF_INET ? 32 : 128; pr_debug("Create set %s with family %s\n", set->name, set->family == AF_INET ? "inet" : "inet6"); if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_HASHSIZE]) { hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]); if (hashsize < IPSET_MIMINAL_HASHSIZE) hashsize = IPSET_MIMINAL_HASHSIZE; } if (tb[IPSET_ATTR_MAXELEM]) maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]); if (tb[IPSET_ATTR_NETMASK]) { netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]); if ((set->family == AF_INET && netmask > 32) || (set->family == AF_INET6 && netmask > 128) || netmask == 0) return -IPSET_ERR_INVALID_NETMASK; } h = kzalloc(sizeof(*h), GFP_KERNEL); if (!h) return -ENOMEM; h->maxelem = maxelem; h->netmask = netmask; get_random_bytes(&h->initval, sizeof(h->initval)); h->timeout = IPSET_NO_TIMEOUT; hbits = htable_bits(hashsize); h->table = ip_set_alloc( sizeof(struct htable) + jhash_size(hbits) * sizeof(struct hbucket)); if (!h->table) { kfree(h); return -ENOMEM; } h->table->htable_bits = hbits; set->data = h; if (tb[IPSET_ATTR_TIMEOUT]) { h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); set->variant = set->family == AF_INET ? &hash_ip4_tvariant : &hash_ip6_tvariant; if (set->family == AF_INET) hash_ip4_gc_init(set); else hash_ip6_gc_init(set); } else { set->variant = set->family == AF_INET ? 
&hash_ip4_variant : &hash_ip6_variant; } pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n", set->name, jhash_size(h->table->htable_bits), h->table->htable_bits, h->maxelem, set->data, h->table); return 0; } static struct ip_set_type hash_ip_type __read_mostly = { .name = "hash:ip", .protocol = IPSET_PROTOCOL, .features = IPSET_TYPE_IP, .dimension = IPSET_DIM_ONE, .family = AF_UNSPEC, .revision = 0, .create = hash_ip_create, .create_policy = { [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 }, [IPSET_ATTR_PROBES] = { .type = NLA_U8 }, [IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_NETMASK] = { .type = NLA_U8 }, }, .adt_policy = { [IPSET_ATTR_IP] = { .type = NLA_NESTED }, [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, }, .me = THIS_MODULE, }; static int __init hash_ip_init(void) { return ip_set_type_register(&hash_ip_type); } static void __exit hash_ip_fini(void) { ip_set_type_unregister(&hash_ip_type); } module_init(hash_ip_init); module_exit(hash_ip_fini);
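hash_ip4_uadt() above expands an IP_TO range or a CIDR by stepping ip in units of hosts = 2^(32 - netmask), so one element is stored per netmask-sized block of addresses. A self-contained sketch of that arithmetic (userspace, host byte order; the kernel's loop condition uses before() instead of <= to stay correct across u32 wraparound):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int netmask = 24;		/* set-level netmask */
	/* same value as "2 << (32 - h->netmask - 1)" in hash_ip4_uadt() */
	uint32_t hosts = (netmask == 32) ? 1 : 2u << (32 - netmask - 1);
	uint32_t hostbits = (netmask == 32) ? 0 : 0xffffffffu >> netmask;
	uint32_t ip = 0x0a000001u & ~hostbits;	/* 10.0.0.1 masked -> 10.0.0.0 */
	uint32_t ip_to = 0x0a000301u;		/* 10.0.3.1 */
	unsigned int n = 0;

	for (; ip <= ip_to; ip += hosts)
		n++;				/* one stored element per step */
	printf("hosts=%u elements=%u\n", hosts, n);	/* hosts=256 elements=4 */
	return 0;
}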
gpl-2.0
CyanogenMod/android_kernel_sony_flamingo
arch/arm/mach-omap2/opp2430_data.c
4868
4942
/* * opp2430_data.c - old-style "OPP" table for OMAP2430 * * Copyright (C) 2005-2009 Texas Instruments, Inc. * Copyright (C) 2004-2009 Nokia Corporation * * Richard Woodruff <r-woodruff2@ti.com> * * The OMAP2 processor can be run at several discrete 'PRCM configurations'. * These configurations are characterized by voltage and speed for clocks. * The device is only validated for certain combinations. One way to express * these combinations is via the 'ratios' which the clocks operate with * respect to each other. These ratio sets are for a given voltage/DPLL * setting. All configurations can be described by a DPLL setting and a ratio. * * 2430 differs from 2420 in that there are no more phase synchronizers used. * They both have a slightly different clock domain setup. 2420(iva1,dsp) vs * 2430 (iva2.1, NOdsp, mdm) * * XXX Missing voltage data. * XXX Missing 19.2MHz sys_clk rate sets. * * The format described in this file is deprecated. Once a reasonable * OPP API exists, the data in this file should be converted to use it. * * This is technically part of the OMAP2xxx clock code. */ #include <plat/hardware.h> #include "opp2xxx.h" #include "sdrc.h" #include "clock.h" /* * Key dividers which make up a PRCM set. Ratios for a PRCM are mandated. * xtal_speed, dpll_speed, mpu_speed, CM_CLKSEL_MPU, * CM_CLKSEL_DSP, CM_CLKSEL_GFX, CM_CLKSEL1_CORE, CM_CLKSEL1_PLL, * CM_CLKSEL2_PLL, CM_CLKSEL_MDM * * Filling in table based on 2430-SDPs variants available. There are * quite a few more rate combinations which could be defined. * * When multiple values are defined the start up will try and choose * the fastest one. If a 'fast' value is defined, then automatically, * the /2 one should be included as it can be used. Generally having * more than one fast set does not make sense, as static timings need * to be changed to change the set. The exception is the bypass * setting which is available for low power bypass. * * Note: This table needs to be sorted, fastest to slowest. 
*/ const struct prcm_config omap2430_rate_table[] = { /* PRCM #4 - ratio2 (ES2.1) - FAST */ {S13M, S798M, S399M, R2_CM_CLKSEL_MPU_VAL, /* 399MHz ARM */ R2_CM_CLKSEL_DSP_VAL, R2_CM_CLKSEL_GFX_VAL, R2_CM_CLKSEL1_CORE_VAL, M4_CM_CLKSEL1_PLL_13_VAL, MX_CLKSEL2_PLL_2x_VAL, R2_CM_CLKSEL_MDM_VAL, SDRC_RFR_CTRL_133MHz, RATE_IN_243X}, /* PRCM #2 - ratio1 (ES2) - FAST */ {S13M, S658M, S329M, R1_CM_CLKSEL_MPU_VAL, /* 330MHz ARM */ R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL, R1_CM_CLKSEL1_CORE_VAL, M2_CM_CLKSEL1_PLL_13_VAL, MX_CLKSEL2_PLL_2x_VAL, R1_CM_CLKSEL_MDM_VAL, SDRC_RFR_CTRL_165MHz, RATE_IN_243X}, /* PRCM #5a - ratio1 - FAST */ {S13M, S532M, S266M, R1_CM_CLKSEL_MPU_VAL, /* 266MHz ARM */ R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL, R1_CM_CLKSEL1_CORE_VAL, M5A_CM_CLKSEL1_PLL_13_VAL, MX_CLKSEL2_PLL_2x_VAL, R1_CM_CLKSEL_MDM_VAL, SDRC_RFR_CTRL_133MHz, RATE_IN_243X}, /* PRCM #5b - ratio1 - FAST */ {S13M, S400M, S200M, R1_CM_CLKSEL_MPU_VAL, /* 200MHz ARM */ R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL, R1_CM_CLKSEL1_CORE_VAL, M5B_CM_CLKSEL1_PLL_13_VAL, MX_CLKSEL2_PLL_2x_VAL, R1_CM_CLKSEL_MDM_VAL, SDRC_RFR_CTRL_100MHz, RATE_IN_243X}, /* PRCM #4 - ratio1 (ES2.1) - SLOW */ {S13M, S399M, S199M, R2_CM_CLKSEL_MPU_VAL, /* 200MHz ARM */ R2_CM_CLKSEL_DSP_VAL, R2_CM_CLKSEL_GFX_VAL, R2_CM_CLKSEL1_CORE_VAL, M4_CM_CLKSEL1_PLL_13_VAL, MX_CLKSEL2_PLL_1x_VAL, R2_CM_CLKSEL_MDM_VAL, SDRC_RFR_CTRL_133MHz, RATE_IN_243X}, /* PRCM #2 - ratio1 (ES2) - SLOW */ {S13M, S329M, S164M, R1_CM_CLKSEL_MPU_VAL, /* 165MHz ARM */ R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL, R1_CM_CLKSEL1_CORE_VAL, M2_CM_CLKSEL1_PLL_13_VAL, MX_CLKSEL2_PLL_1x_VAL, R1_CM_CLKSEL_MDM_VAL, SDRC_RFR_CTRL_165MHz, RATE_IN_243X}, /* PRCM #5a - ratio1 - SLOW */ {S13M, S266M, S133M, R1_CM_CLKSEL_MPU_VAL, /* 133MHz ARM */ R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL, R1_CM_CLKSEL1_CORE_VAL, M5A_CM_CLKSEL1_PLL_13_VAL, MX_CLKSEL2_PLL_1x_VAL, R1_CM_CLKSEL_MDM_VAL, SDRC_RFR_CTRL_133MHz, RATE_IN_243X}, /* PRCM #5b - ratio1 - SLOW*/ {S13M, S200M, S100M, R1_CM_CLKSEL_MPU_VAL, /* 100MHz ARM */ R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL, R1_CM_CLKSEL1_CORE_VAL, M5B_CM_CLKSEL1_PLL_13_VAL, MX_CLKSEL2_PLL_1x_VAL, R1_CM_CLKSEL_MDM_VAL, SDRC_RFR_CTRL_100MHz, RATE_IN_243X}, /* PRCM-boot/bypass */ {S13M, S13M, S13M, RB_CM_CLKSEL_MPU_VAL, /* 13Mhz */ RB_CM_CLKSEL_DSP_VAL, RB_CM_CLKSEL_GFX_VAL, RB_CM_CLKSEL1_CORE_VAL, MB_CM_CLKSEL1_PLL_13_VAL, MX_CLKSEL2_PLL_2x_VAL, RB_CM_CLKSEL_MDM_VAL, SDRC_RFR_CTRL_BYPASS, RATE_IN_243X}, /* PRCM-boot/bypass */ {S12M, S12M, S12M, RB_CM_CLKSEL_MPU_VAL, /* 12Mhz */ RB_CM_CLKSEL_DSP_VAL, RB_CM_CLKSEL_GFX_VAL, RB_CM_CLKSEL1_CORE_VAL, MB_CM_CLKSEL1_PLL_12_VAL, MX_CLKSEL2_PLL_2x_VAL, RB_CM_CLKSEL_MDM_VAL, SDRC_RFR_CTRL_BYPASS, RATE_IN_243X}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, };
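Per the comment above, the table is sorted fastest to slowest and terminated by an all-zero entry, so rate selection can be a linear scan that stops at the first, i.e. fastest, acceptable row. A hedged sketch of such a walk (the field names follow the initializers above; the policy is an illustration of the "choose the fastest" rule described in the comment, not the actual mach-omap2 selection code):

/*
 * Illustrative only: pick the fastest table entry that is valid for
 * this chip, runs from the given sys_clk rate, and does not exceed
 * the requested MPU rate.
 */
static const struct prcm_config *
select_prcm_set(const struct prcm_config *table, unsigned long sys_ck_rate,
		unsigned long target_mpu_rate, unsigned char chip_flags)
{
	const struct prcm_config *p;

	for (p = table; p->mpu_speed; p++) {	/* all-zero row terminates */
		if (!(p->flags & chip_flags))	/* e.g. RATE_IN_243X */
			continue;
		if (p->xtal_speed != sys_ck_rate)
			continue;
		if (p->mpu_speed <= target_mpu_rate)
			return p;		/* first match is the fastest */
	}
	return NULL;
}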
gpl-2.0
jbott/android_kernel_rpi_rpi
arch/powerpc/platforms/pasemi/dma_lib.c
7172
16307
/* * Copyright (C) 2006-2007 PA Semi, Inc * * Common functions for DMA access on PA Semi PWRficient * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/export.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/sched.h> #include <asm/pasemi_dma.h> #define MAX_TXCH 64 #define MAX_RXCH 64 #define MAX_FLAGS 64 #define MAX_FUN 8 static struct pasdma_status *dma_status; static void __iomem *iob_regs; static void __iomem *mac_regs[6]; static void __iomem *dma_regs; static int base_hw_irq; static int num_txch, num_rxch; static struct pci_dev *dma_pdev; /* Bitmaps to handle allocation of channels */ static DECLARE_BITMAP(txch_free, MAX_TXCH); static DECLARE_BITMAP(rxch_free, MAX_RXCH); static DECLARE_BITMAP(flags_free, MAX_FLAGS); static DECLARE_BITMAP(fun_free, MAX_FUN); /* pasemi_read_iob_reg - read IOB register * @reg: Register to read (offset into PCI CFG space) */ unsigned int pasemi_read_iob_reg(unsigned int reg) { return in_le32(iob_regs+reg); } EXPORT_SYMBOL(pasemi_read_iob_reg); /* pasemi_write_iob_reg - write IOB register * @reg: Register to write to (offset into PCI CFG space) * @val: Value to write */ void pasemi_write_iob_reg(unsigned int reg, unsigned int val) { out_le32(iob_regs+reg, val); } EXPORT_SYMBOL(pasemi_write_iob_reg); /* pasemi_read_mac_reg - read MAC register * @intf: MAC interface * @reg: Register to read (offset into PCI CFG space) */ unsigned int pasemi_read_mac_reg(int intf, unsigned int reg) { return in_le32(mac_regs[intf]+reg); } EXPORT_SYMBOL(pasemi_read_mac_reg); /* pasemi_write_mac_reg - write MAC register * @intf: MAC interface * @reg: Register to write to (offset into PCI CFG space) * @val: Value to write */ void pasemi_write_mac_reg(int intf, unsigned int reg, unsigned int val) { out_le32(mac_regs[intf]+reg, val); } EXPORT_SYMBOL(pasemi_write_mac_reg); /* pasemi_read_dma_reg - read DMA register * @reg: Register to read (offset into PCI CFG space) */ unsigned int pasemi_read_dma_reg(unsigned int reg) { return in_le32(dma_regs+reg); } EXPORT_SYMBOL(pasemi_read_dma_reg); /* pasemi_write_dma_reg - write DMA register * @reg: Register to write to (offset into PCI CFG space) * @val: Value to write */ void pasemi_write_dma_reg(unsigned int reg, unsigned int val) { out_le32(dma_regs+reg, val); } EXPORT_SYMBOL(pasemi_write_dma_reg); static int pasemi_alloc_tx_chan(enum pasemi_dmachan_type type) { int bit; int start, limit; switch (type & (TXCHAN_EVT0|TXCHAN_EVT1)) { case TXCHAN_EVT0: start = 0; limit = 10; break; case TXCHAN_EVT1: start = 10; limit = MAX_TXCH; break; default: start = 0; limit = MAX_TXCH; break; } retry: bit = find_next_bit(txch_free, MAX_TXCH, start); if (bit >= limit) return -ENOSPC; if (!test_and_clear_bit(bit, txch_free)) goto retry; return bit; } static void pasemi_free_tx_chan(int chan) { BUG_ON(test_bit(chan, txch_free)); set_bit(chan, txch_free); } static int 
pasemi_alloc_rx_chan(void) { int bit; retry: bit = find_first_bit(rxch_free, MAX_RXCH); if (bit >= MAX_TXCH) return -ENOSPC; if (!test_and_clear_bit(bit, rxch_free)) goto retry; return bit; } static void pasemi_free_rx_chan(int chan) { BUG_ON(test_bit(chan, rxch_free)); set_bit(chan, rxch_free); } /* pasemi_dma_alloc_chan - Allocate a DMA channel * @type: Type of channel to allocate * @total_size: Total size of structure to allocate (to allow for more * room behind the structure to be used by the client) * @offset: Offset in bytes from start of the total structure to the beginning * of struct pasemi_dmachan. Needed when struct pasemi_dmachan is * not the first member of the client structure. * * pasemi_dma_alloc_chan allocates a DMA channel for use by a client. The * type argument specifies whether it's a RX or TX channel, and in the case * of TX channels which group it needs to belong to (if any). * * Returns a pointer to the total structure allocated on success, NULL * on failure. */ void *pasemi_dma_alloc_chan(enum pasemi_dmachan_type type, int total_size, int offset) { void *buf; struct pasemi_dmachan *chan; int chno; BUG_ON(total_size < sizeof(struct pasemi_dmachan)); buf = kzalloc(total_size, GFP_KERNEL); if (!buf) return NULL; chan = buf + offset; chan->priv = buf; switch (type & (TXCHAN|RXCHAN)) { case RXCHAN: chno = pasemi_alloc_rx_chan(); chan->chno = chno; chan->irq = irq_create_mapping(NULL, base_hw_irq + num_txch + chno); chan->status = &dma_status->rx_sta[chno]; break; case TXCHAN: chno = pasemi_alloc_tx_chan(type); chan->chno = chno; chan->irq = irq_create_mapping(NULL, base_hw_irq + chno); chan->status = &dma_status->tx_sta[chno]; break; } chan->chan_type = type; return chan; } EXPORT_SYMBOL(pasemi_dma_alloc_chan); /* pasemi_dma_free_chan - Free a previously allocated channel * @chan: Channel to free * * Frees a previously allocated channel. It will also deallocate any * descriptor ring associated with the channel, if allocated. */ void pasemi_dma_free_chan(struct pasemi_dmachan *chan) { if (chan->ring_virt) pasemi_dma_free_ring(chan); switch (chan->chan_type & (RXCHAN|TXCHAN)) { case RXCHAN: pasemi_free_rx_chan(chan->chno); break; case TXCHAN: pasemi_free_tx_chan(chan->chno); break; } kfree(chan->priv); } EXPORT_SYMBOL(pasemi_dma_free_chan); /* pasemi_dma_alloc_ring - Allocate descriptor ring for a channel * @chan: Channel for which to allocate * @ring_size: Ring size in 64-bit (8-byte) words * * Allocate a descriptor ring for a channel. Returns 0 on success, errno * on failure. The passed in struct pasemi_dmachan is updated with the * virtual and DMA addresses of the ring. */ int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size) { BUG_ON(chan->ring_virt); chan->ring_size = ring_size; chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev, ring_size * sizeof(u64), &chan->ring_dma, GFP_KERNEL); if (!chan->ring_virt) return -ENOMEM; memset(chan->ring_virt, 0, ring_size * sizeof(u64)); return 0; } EXPORT_SYMBOL(pasemi_dma_alloc_ring); /* pasemi_dma_free_ring - Free an allocated descriptor ring for a channel * @chan: Channel for which to free the descriptor ring * * Frees a previously allocated descriptor ring for a channel. 
*/ void pasemi_dma_free_ring(struct pasemi_dmachan *chan) { BUG_ON(!chan->ring_virt); dma_free_coherent(&dma_pdev->dev, chan->ring_size * sizeof(u64), chan->ring_virt, chan->ring_dma); chan->ring_virt = NULL; chan->ring_size = 0; chan->ring_dma = 0; } EXPORT_SYMBOL(pasemi_dma_free_ring); /* pasemi_dma_start_chan - Start a DMA channel * @chan: Channel to start * @cmdsta: Additional CCMDSTA/TCMDSTA bits to write * * Enables (starts) a DMA channel with optional additional arguments. */ void pasemi_dma_start_chan(const struct pasemi_dmachan *chan, const u32 cmdsta) { if (chan->chan_type == RXCHAN) pasemi_write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno), cmdsta | PAS_DMA_RXCHAN_CCMDSTA_EN); else pasemi_write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno), cmdsta | PAS_DMA_TXCHAN_TCMDSTA_EN); } EXPORT_SYMBOL(pasemi_dma_start_chan); /* pasemi_dma_stop_chan - Stop a DMA channel * @chan: Channel to stop * * Stops (disables) a DMA channel. This is done by setting the ST bit in the * CMDSTA register and waiting on the ACT (active) bit to clear, then * finally disabling the whole channel. * * This function will only try for a short while for the channel to stop, if * it doesn't it will return failure. * * Returns 1 on success, 0 on failure. */ #define MAX_RETRIES 5000 int pasemi_dma_stop_chan(const struct pasemi_dmachan *chan) { int reg, retries; u32 sta; if (chan->chan_type == RXCHAN) { reg = PAS_DMA_RXCHAN_CCMDSTA(chan->chno); pasemi_write_dma_reg(reg, PAS_DMA_RXCHAN_CCMDSTA_ST); for (retries = 0; retries < MAX_RETRIES; retries++) { sta = pasemi_read_dma_reg(reg); if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) { pasemi_write_dma_reg(reg, 0); return 1; } cond_resched(); } } else { reg = PAS_DMA_TXCHAN_TCMDSTA(chan->chno); pasemi_write_dma_reg(reg, PAS_DMA_TXCHAN_TCMDSTA_ST); for (retries = 0; retries < MAX_RETRIES; retries++) { sta = pasemi_read_dma_reg(reg); if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) { pasemi_write_dma_reg(reg, 0); return 1; } cond_resched(); } } return 0; } EXPORT_SYMBOL(pasemi_dma_stop_chan); /* pasemi_dma_alloc_buf - Allocate a buffer to use for DMA * @chan: Channel to allocate for * @size: Size of buffer in bytes * @handle: DMA handle * * Allocate a buffer to be used by the DMA engine for read/write, * similar to dma_alloc_coherent(). * * Returns the virtual address of the buffer, or NULL in case of failure. */ void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size, dma_addr_t *handle) { return dma_alloc_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL); } EXPORT_SYMBOL(pasemi_dma_alloc_buf); /* pasemi_dma_free_buf - Free a buffer used for DMA * @chan: Channel the buffer was allocated for * @size: Size of buffer in bytes * @handle: DMA handle * * Frees a previously allocated buffer. */ void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size, dma_addr_t *handle) { dma_free_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL); } EXPORT_SYMBOL(pasemi_dma_free_buf); /* pasemi_dma_alloc_flag - Allocate a flag (event) for channel synchronization * * Allocates a flag for use with channel synchronization (event descriptors). * Returns allocated flag (0-63), < 0 on error. */ int pasemi_dma_alloc_flag(void) { int bit; retry: bit = find_next_bit(flags_free, MAX_FLAGS, 0); if (bit >= MAX_FLAGS) return -ENOSPC; if (!test_and_clear_bit(bit, flags_free)) goto retry; return bit; } EXPORT_SYMBOL(pasemi_dma_alloc_flag); /* pasemi_dma_free_flag - Deallocates a flag (event) * @flag: Flag number to deallocate * * Frees up a flag so it can be reused for other purposes. 
*/ void pasemi_dma_free_flag(int flag) { BUG_ON(test_bit(flag, flags_free)); BUG_ON(flag >= MAX_FLAGS); set_bit(flag, flags_free); } EXPORT_SYMBOL(pasemi_dma_free_flag); /* pasemi_dma_set_flag - Sets a flag (event) to 1 * @flag: Flag number to set active * * Sets the flag provided to 1. */ void pasemi_dma_set_flag(int flag) { BUG_ON(flag >= MAX_FLAGS); if (flag < 32) pasemi_write_dma_reg(PAS_DMA_TXF_SFLG0, 1 << flag); else pasemi_write_dma_reg(PAS_DMA_TXF_SFLG1, 1 << (flag - 32)); } EXPORT_SYMBOL(pasemi_dma_set_flag); /* pasemi_dma_clear_flag - Sets a flag (event) to 0 * @flag: Flag number to set inactive * * Sets the flag provided to 0. */ void pasemi_dma_clear_flag(int flag) { BUG_ON(flag >= MAX_FLAGS); if (flag < 32) pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 1 << flag); else pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 1 << (flag - 32)); } EXPORT_SYMBOL(pasemi_dma_clear_flag); /* pasemi_dma_alloc_fun - Allocate a function engine * * Allocates a function engine to use for crypto/checksum offload * Returns allocated engine (0-8), < 0 on error. */ int pasemi_dma_alloc_fun(void) { int bit; retry: bit = find_next_bit(fun_free, MAX_FUN, 0); if (bit >= MAX_FUN) return -ENOSPC; if (!test_and_clear_bit(bit, fun_free)) goto retry; return bit; } EXPORT_SYMBOL(pasemi_dma_alloc_fun); /* pasemi_dma_free_fun - Deallocates a function engine * @fun: Engine number to deallocate * * Frees up a function engine so it can be used for other purposes. */ void pasemi_dma_free_fun(int fun) { BUG_ON(test_bit(fun, fun_free)); BUG_ON(fun >= MAX_FUN); set_bit(fun, fun_free); } EXPORT_SYMBOL(pasemi_dma_free_fun); static void *map_onedev(struct pci_dev *p, int index) { struct device_node *dn; void __iomem *ret; dn = pci_device_to_OF_node(p); if (!dn) goto fallback; ret = of_iomap(dn, index); if (!ret) goto fallback; return ret; fallback: /* This is hardcoded and ugly, but we have some firmware versions * that don't provide the register space in the device tree. Luckily * they are at well-known locations so we can just do the math here. */ return ioremap(0xe0000000 + (p->devfn << 12), 0x2000); } /* pasemi_dma_init - Initialize the PA Semi DMA library * * This function initializes the DMA library. It must be called before * any other function in the library. * * Returns 0 on success, errno on failure.
*/ int pasemi_dma_init(void) { static DEFINE_SPINLOCK(init_lock); struct pci_dev *iob_pdev; struct pci_dev *pdev; struct resource res; struct device_node *dn; int i, intf, err = 0; unsigned long timeout; u32 tmp; if (!machine_is(pasemi)) return -ENODEV; spin_lock(&init_lock); /* Make sure we haven't already initialized */ if (dma_pdev) goto out; iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL); if (!iob_pdev) { printk(KERN_WARNING "Can't find I/O Bridge\n"); err = -ENODEV; goto out; } iob_regs = map_onedev(iob_pdev, 0); dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL); if (!dma_pdev) { printk(KERN_WARNING "Can't find DMA controller\n"); err = -ENODEV; goto out; } dma_regs = map_onedev(dma_pdev, 0); base_hw_irq = virq_to_hw(dma_pdev->irq); pci_read_config_dword(dma_pdev, PAS_DMA_CAP_TXCH, &tmp); num_txch = (tmp & PAS_DMA_CAP_TXCH_TCHN_M) >> PAS_DMA_CAP_TXCH_TCHN_S; pci_read_config_dword(dma_pdev, PAS_DMA_CAP_RXCH, &tmp); num_rxch = (tmp & PAS_DMA_CAP_RXCH_RCHN_M) >> PAS_DMA_CAP_RXCH_RCHN_S; intf = 0; for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, NULL); pdev; pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, pdev)) mac_regs[intf++] = map_onedev(pdev, 0); pci_dev_put(pdev); for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, NULL); pdev; pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, pdev)) mac_regs[intf++] = map_onedev(pdev, 0); pci_dev_put(pdev); dn = pci_device_to_OF_node(iob_pdev); if (dn) err = of_address_to_resource(dn, 1, &res); if (!dn || err) { /* Fallback for old firmware */ res.start = 0xfd800000; res.end = res.start + 0x1000; } dma_status = __ioremap(res.start, resource_size(&res), 0); pci_dev_put(iob_pdev); for (i = 0; i < MAX_TXCH; i++) __set_bit(i, txch_free); for (i = 0; i < MAX_RXCH; i++) __set_bit(i, rxch_free); timeout = jiffies + HZ; pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, 0); while (pasemi_read_dma_reg(PAS_DMA_COM_RXSTA) & 1) { if (time_after(jiffies, timeout)) { pr_warning("Warning: Could not disable RX section\n"); break; } } timeout = jiffies + HZ; pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, 0); while (pasemi_read_dma_reg(PAS_DMA_COM_TXSTA) & 1) { if (time_after(jiffies, timeout)) { pr_warning("Warning: Could not disable TX section\n"); break; } } /* setup resource allocations for the different DMA sections */ tmp = pasemi_read_dma_reg(PAS_DMA_COM_CFG); pasemi_write_dma_reg(PAS_DMA_COM_CFG, tmp | 0x18000000); /* enable tx section */ pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN); /* enable rx section */ pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN); for (i = 0; i < MAX_FLAGS; i++) __set_bit(i, flags_free); for (i = 0; i < MAX_FUN; i++) __set_bit(i, fun_free); /* clear all status flags */ pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 0xffffffff); pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 0xffffffff); printk(KERN_INFO "PA Semi PWRficient DMA library initialized " "(%d tx, %d rx channels)\n", num_txch, num_rxch); out: spin_unlock(&init_lock); return err; } EXPORT_SYMBOL(pasemi_dma_init);
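/* Illustrative usage sketch, not part of the original file: a minimal
 * client of the API documented above. It embeds struct pasemi_dmachan
 * inside a private structure and passes the member's offset to
 * pasemi_dma_alloc_chan(), as the @total_size/@offset description
 * requires. The client type, ring size and error handling are invented
 * for the example, so it is kept out of the build with #if 0.
 */
#if 0
struct example_client {
	int			dummy_state;	/* client data before the chan */
	struct pasemi_dmachan	chan;		/* not the first member */
};

static struct example_client *example_open_tx(void)
{
	struct example_client *cl;

	/* the returned pointer is the whole client structure */
	cl = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct example_client),
				   offsetof(struct example_client, chan));
	if (!cl)
		return NULL;

	/* ring size is in 64-bit words, per pasemi_dma_alloc_ring() */
	if (pasemi_dma_alloc_ring(&cl->chan, 64)) {
		pasemi_dma_free_chan(&cl->chan);
		return NULL;
	}

	pasemi_dma_start_chan(&cl->chan, 0);
	return cl;
}
#endif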
gpl-2.0
markolino631/lge_kernel_msm7x27
drivers/media/video/sn9c102/sn9c102_ov7630.c
12804
19528
/*************************************************************************** * Plug-in for OV7630 image sensor connected to the SN9C1xx PC Camera * * Controllers * * * * Copyright (C) 2006-2007 by Luca Risolia <luca.risolia@studio.unibo.it> * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the Free Software * * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ***************************************************************************/ #include "sn9c102_sensor.h" #include "sn9c102_devtable.h" static int ov7630_init(struct sn9c102_device* cam) { int err = 0; switch (sn9c102_get_bridge(cam)) { case BRIDGE_SN9C101: case BRIDGE_SN9C102: err = sn9c102_write_const_regs(cam, {0x00, 0x14}, {0x60, 0x17}, {0x0f, 0x18}, {0x50, 0x19}); err += sn9c102_i2c_write(cam, 0x12, 0x8d); err += sn9c102_i2c_write(cam, 0x12, 0x0d); err += sn9c102_i2c_write(cam, 0x11, 0x00); err += sn9c102_i2c_write(cam, 0x15, 0x35); err += sn9c102_i2c_write(cam, 0x16, 0x03); err += sn9c102_i2c_write(cam, 0x17, 0x1c); err += sn9c102_i2c_write(cam, 0x18, 0xbd); err += sn9c102_i2c_write(cam, 0x19, 0x06); err += sn9c102_i2c_write(cam, 0x1a, 0xf6); err += sn9c102_i2c_write(cam, 0x1b, 0x04); err += sn9c102_i2c_write(cam, 0x20, 0x44); err += sn9c102_i2c_write(cam, 0x23, 0xee); err += sn9c102_i2c_write(cam, 0x26, 0xa0); err += sn9c102_i2c_write(cam, 0x27, 0x9a); err += sn9c102_i2c_write(cam, 0x28, 0x20); err += sn9c102_i2c_write(cam, 0x29, 0x30); err += sn9c102_i2c_write(cam, 0x2f, 0x3d); err += sn9c102_i2c_write(cam, 0x30, 0x24); err += sn9c102_i2c_write(cam, 0x32, 0x86); err += sn9c102_i2c_write(cam, 0x60, 0xa9); err += sn9c102_i2c_write(cam, 0x61, 0x42); err += sn9c102_i2c_write(cam, 0x65, 0x00); err += sn9c102_i2c_write(cam, 0x69, 0x38); err += sn9c102_i2c_write(cam, 0x6f, 0x88); err += sn9c102_i2c_write(cam, 0x70, 0x0b); err += sn9c102_i2c_write(cam, 0x71, 0x00); err += sn9c102_i2c_write(cam, 0x74, 0x21); err += sn9c102_i2c_write(cam, 0x7d, 0xf7); break; case BRIDGE_SN9C103: err = sn9c102_write_const_regs(cam, {0x00, 0x02}, {0x00, 0x03}, {0x1a, 0x04}, {0x20, 0x05}, {0x20, 0x06}, {0x20, 0x07}, {0x03, 0x10}, {0x0a, 0x14}, {0x60, 0x17}, {0x0f, 0x18}, {0x50, 0x19}, {0x1d, 0x1a}, {0x10, 0x1b}, {0x02, 0x1c}, {0x03, 0x1d}, {0x0f, 0x1e}, {0x0c, 0x1f}, {0x00, 0x20}, {0x10, 0x21}, {0x20, 0x22}, {0x30, 0x23}, {0x40, 0x24}, {0x50, 0x25}, {0x60, 0x26}, {0x70, 0x27}, {0x80, 0x28}, {0x90, 0x29}, {0xa0, 0x2a}, {0xb0, 0x2b}, {0xc0, 0x2c}, {0xd0, 0x2d}, {0xe0, 0x2e}, {0xf0, 0x2f}, {0xff, 0x30}); err += sn9c102_i2c_write(cam, 0x12, 0x8d); err += sn9c102_i2c_write(cam, 0x12, 0x0d); err += sn9c102_i2c_write(cam, 0x15, 0x34); err += sn9c102_i2c_write(cam, 0x11, 0x01); err += sn9c102_i2c_write(cam, 0x1b, 0x04); err += sn9c102_i2c_write(cam, 0x20, 0x44); err += sn9c102_i2c_write(cam, 0x23, 0xee); err += sn9c102_i2c_write(cam, 0x26, 0xa0); err += sn9c102_i2c_write(cam, 0x27, 0x9a); err += sn9c102_i2c_write(cam, 0x28, 0x20); err += sn9c102_i2c_write(cam, 0x29, 0x30); err += 
sn9c102_i2c_write(cam, 0x2f, 0x3d); err += sn9c102_i2c_write(cam, 0x30, 0x24); err += sn9c102_i2c_write(cam, 0x32, 0x86); err += sn9c102_i2c_write(cam, 0x60, 0xa9); err += sn9c102_i2c_write(cam, 0x61, 0x42); err += sn9c102_i2c_write(cam, 0x65, 0x00); err += sn9c102_i2c_write(cam, 0x69, 0x38); err += sn9c102_i2c_write(cam, 0x6f, 0x88); err += sn9c102_i2c_write(cam, 0x70, 0x0b); err += sn9c102_i2c_write(cam, 0x71, 0x00); err += sn9c102_i2c_write(cam, 0x74, 0x21); err += sn9c102_i2c_write(cam, 0x7d, 0xf7); break; case BRIDGE_SN9C105: case BRIDGE_SN9C120: err = sn9c102_write_const_regs(cam, {0x40, 0x02}, {0x00, 0x03}, {0x1a, 0x04}, {0x03, 0x10}, {0x0a, 0x14}, {0xe2, 0x17}, {0x0b, 0x18}, {0x00, 0x19}, {0x1d, 0x1a}, {0x10, 0x1b}, {0x02, 0x1c}, {0x03, 0x1d}, {0x0f, 0x1e}, {0x0c, 0x1f}, {0x00, 0x20}, {0x24, 0x21}, {0x3b, 0x22}, {0x47, 0x23}, {0x60, 0x24}, {0x71, 0x25}, {0x80, 0x26}, {0x8f, 0x27}, {0x9d, 0x28}, {0xaa, 0x29}, {0xb8, 0x2a}, {0xc4, 0x2b}, {0xd1, 0x2c}, {0xdd, 0x2d}, {0xe8, 0x2e}, {0xf4, 0x2f}, {0xff, 0x30}, {0x00, 0x3f}, {0xc7, 0x40}, {0x01, 0x41}, {0x44, 0x42}, {0x00, 0x43}, {0x44, 0x44}, {0x00, 0x45}, {0x44, 0x46}, {0x00, 0x47}, {0xc7, 0x48}, {0x01, 0x49}, {0xc7, 0x4a}, {0x01, 0x4b}, {0xc7, 0x4c}, {0x01, 0x4d}, {0x44, 0x4e}, {0x00, 0x4f}, {0x44, 0x50}, {0x00, 0x51}, {0x44, 0x52}, {0x00, 0x53}, {0xc7, 0x54}, {0x01, 0x55}, {0xc7, 0x56}, {0x01, 0x57}, {0xc7, 0x58}, {0x01, 0x59}, {0x44, 0x5a}, {0x00, 0x5b}, {0x44, 0x5c}, {0x00, 0x5d}, {0x44, 0x5e}, {0x00, 0x5f}, {0xc7, 0x60}, {0x01, 0x61}, {0xc7, 0x62}, {0x01, 0x63}, {0xc7, 0x64}, {0x01, 0x65}, {0x44, 0x66}, {0x00, 0x67}, {0x44, 0x68}, {0x00, 0x69}, {0x44, 0x6a}, {0x00, 0x6b}, {0xc7, 0x6c}, {0x01, 0x6d}, {0xc7, 0x6e}, {0x01, 0x6f}, {0xc7, 0x70}, {0x01, 0x71}, {0x44, 0x72}, {0x00, 0x73}, {0x44, 0x74}, {0x00, 0x75}, {0x44, 0x76}, {0x00, 0x77}, {0xc7, 0x78}, {0x01, 0x79}, {0xc7, 0x7a}, {0x01, 0x7b}, {0xc7, 0x7c}, {0x01, 0x7d}, {0x44, 0x7e}, {0x00, 0x7f}, {0x17, 0x84}, {0x00, 0x85}, {0x2e, 0x86}, {0x00, 0x87}, {0x09, 0x88}, {0x00, 0x89}, {0xe8, 0x8a}, {0x0f, 0x8b}, {0xda, 0x8c}, {0x0f, 0x8d}, {0x40, 0x8e}, {0x00, 0x8f}, {0x37, 0x90}, {0x00, 0x91}, {0xcf, 0x92}, {0x0f, 0x93}, {0xfa, 0x94}, {0x0f, 0x95}, {0x00, 0x96}, {0x00, 0x97}, {0x00, 0x98}, {0x66, 0x99}, {0x00, 0x9a}, {0x40, 0x9b}, {0x20, 0x9c}, {0x00, 0x9d}, {0x00, 0x9e}, {0x00, 0x9f}, {0x2d, 0xc0}, {0x2d, 0xc1}, {0x3a, 0xc2}, {0x00, 0xc3}, {0x04, 0xc4}, {0x3f, 0xc5}, {0x00, 0xc6}, {0x00, 0xc7}, {0x50, 0xc8}, {0x3c, 0xc9}, {0x28, 0xca}, {0xd8, 0xcb}, {0x14, 0xcc}, {0xec, 0xcd}, {0x32, 0xce}, {0xdd, 0xcf}, {0x32, 0xd0}, {0xdd, 0xd1}, {0x6a, 0xd2}, {0x50, 0xd3}, {0x60, 0xd4}, {0x00, 0xd5}, {0x00, 0xd6}); err += sn9c102_i2c_write(cam, 0x12, 0x80); err += sn9c102_i2c_write(cam, 0x12, 0x48); err += sn9c102_i2c_write(cam, 0x01, 0x80); err += sn9c102_i2c_write(cam, 0x02, 0x80); err += sn9c102_i2c_write(cam, 0x03, 0x80); err += sn9c102_i2c_write(cam, 0x04, 0x10); err += sn9c102_i2c_write(cam, 0x05, 0x20); err += sn9c102_i2c_write(cam, 0x06, 0x80); err += sn9c102_i2c_write(cam, 0x11, 0x00); err += sn9c102_i2c_write(cam, 0x0c, 0x20); err += sn9c102_i2c_write(cam, 0x0d, 0x20); err += sn9c102_i2c_write(cam, 0x15, 0x80); err += sn9c102_i2c_write(cam, 0x16, 0x03); err += sn9c102_i2c_write(cam, 0x17, 0x1b); err += sn9c102_i2c_write(cam, 0x18, 0xbd); err += sn9c102_i2c_write(cam, 0x19, 0x05); err += sn9c102_i2c_write(cam, 0x1a, 0xf6); err += sn9c102_i2c_write(cam, 0x1b, 0x04); err += sn9c102_i2c_write(cam, 0x21, 0x1b); err += sn9c102_i2c_write(cam, 0x22, 0x00); err += sn9c102_i2c_write(cam, 0x23, 
0xde); err += sn9c102_i2c_write(cam, 0x24, 0x10); err += sn9c102_i2c_write(cam, 0x25, 0x8a); err += sn9c102_i2c_write(cam, 0x26, 0xa0); err += sn9c102_i2c_write(cam, 0x27, 0xca); err += sn9c102_i2c_write(cam, 0x28, 0xa2); err += sn9c102_i2c_write(cam, 0x29, 0x74); err += sn9c102_i2c_write(cam, 0x2a, 0x88); err += sn9c102_i2c_write(cam, 0x2b, 0x34); err += sn9c102_i2c_write(cam, 0x2c, 0x88); err += sn9c102_i2c_write(cam, 0x2e, 0x00); err += sn9c102_i2c_write(cam, 0x2f, 0x00); err += sn9c102_i2c_write(cam, 0x30, 0x00); err += sn9c102_i2c_write(cam, 0x32, 0xc2); err += sn9c102_i2c_write(cam, 0x33, 0x08); err += sn9c102_i2c_write(cam, 0x4c, 0x40); err += sn9c102_i2c_write(cam, 0x4d, 0xf3); err += sn9c102_i2c_write(cam, 0x60, 0x05); err += sn9c102_i2c_write(cam, 0x61, 0x40); err += sn9c102_i2c_write(cam, 0x62, 0x12); err += sn9c102_i2c_write(cam, 0x63, 0x57); err += sn9c102_i2c_write(cam, 0x64, 0x73); err += sn9c102_i2c_write(cam, 0x65, 0x00); err += sn9c102_i2c_write(cam, 0x66, 0x55); err += sn9c102_i2c_write(cam, 0x67, 0x01); err += sn9c102_i2c_write(cam, 0x68, 0xac); err += sn9c102_i2c_write(cam, 0x69, 0x38); err += sn9c102_i2c_write(cam, 0x6f, 0x1f); err += sn9c102_i2c_write(cam, 0x70, 0x01); err += sn9c102_i2c_write(cam, 0x71, 0x00); err += sn9c102_i2c_write(cam, 0x72, 0x10); err += sn9c102_i2c_write(cam, 0x73, 0x50); err += sn9c102_i2c_write(cam, 0x74, 0x20); err += sn9c102_i2c_write(cam, 0x76, 0x01); err += sn9c102_i2c_write(cam, 0x77, 0xf3); err += sn9c102_i2c_write(cam, 0x78, 0x90); err += sn9c102_i2c_write(cam, 0x79, 0x98); err += sn9c102_i2c_write(cam, 0x7a, 0x98); err += sn9c102_i2c_write(cam, 0x7b, 0x00); err += sn9c102_i2c_write(cam, 0x7c, 0x38); err += sn9c102_i2c_write(cam, 0x7d, 0xff); break; default: break; } return err; } static int ov7630_get_ctrl(struct sn9c102_device* cam, struct v4l2_control* ctrl) { enum sn9c102_bridge bridge = sn9c102_get_bridge(cam); int err = 0; switch (ctrl->id) { case V4L2_CID_EXPOSURE: if ((ctrl->value = sn9c102_i2c_read(cam, 0x10)) < 0) return -EIO; break; case V4L2_CID_RED_BALANCE: if (bridge == BRIDGE_SN9C105 || bridge == BRIDGE_SN9C120) ctrl->value = sn9c102_pread_reg(cam, 0x05); else ctrl->value = sn9c102_pread_reg(cam, 0x07); break; case V4L2_CID_BLUE_BALANCE: ctrl->value = sn9c102_pread_reg(cam, 0x06); break; case SN9C102_V4L2_CID_GREEN_BALANCE: if (bridge == BRIDGE_SN9C105 || bridge == BRIDGE_SN9C120) ctrl->value = sn9c102_pread_reg(cam, 0x07); else ctrl->value = sn9c102_pread_reg(cam, 0x05); break; case V4L2_CID_GAIN: if ((ctrl->value = sn9c102_i2c_read(cam, 0x00)) < 0) return -EIO; ctrl->value &= 0x3f; break; case V4L2_CID_DO_WHITE_BALANCE: if ((ctrl->value = sn9c102_i2c_read(cam, 0x0c)) < 0) return -EIO; ctrl->value &= 0x3f; break; case V4L2_CID_WHITENESS: if ((ctrl->value = sn9c102_i2c_read(cam, 0x0d)) < 0) return -EIO; ctrl->value &= 0x3f; break; case V4L2_CID_AUTOGAIN: if ((ctrl->value = sn9c102_i2c_read(cam, 0x13)) < 0) return -EIO; ctrl->value &= 0x01; break; case V4L2_CID_VFLIP: if ((ctrl->value = sn9c102_i2c_read(cam, 0x75)) < 0) return -EIO; ctrl->value = (ctrl->value & 0x80) ? 1 : 0; break; case SN9C102_V4L2_CID_GAMMA: if ((ctrl->value = sn9c102_i2c_read(cam, 0x14)) < 0) return -EIO; ctrl->value = (ctrl->value & 0x02) ? 1 : 0; break; case SN9C102_V4L2_CID_BAND_FILTER: if ((ctrl->value = sn9c102_i2c_read(cam, 0x2d)) < 0) return -EIO; ctrl->value = (ctrl->value & 0x02) ? 1 : 0; break; default: return -EINVAL; } return err ?
-EIO : 0; } static int ov7630_set_ctrl(struct sn9c102_device* cam, const struct v4l2_control* ctrl) { enum sn9c102_bridge bridge = sn9c102_get_bridge(cam); int err = 0; switch (ctrl->id) { case V4L2_CID_EXPOSURE: err += sn9c102_i2c_write(cam, 0x10, ctrl->value); break; case V4L2_CID_RED_BALANCE: if (bridge == BRIDGE_SN9C105 || bridge == BRIDGE_SN9C120) err += sn9c102_write_reg(cam, ctrl->value, 0x05); else err += sn9c102_write_reg(cam, ctrl->value, 0x07); break; case V4L2_CID_BLUE_BALANCE: err += sn9c102_write_reg(cam, ctrl->value, 0x06); break; case SN9C102_V4L2_CID_GREEN_BALANCE: if (bridge == BRIDGE_SN9C105 || bridge == BRIDGE_SN9C120) err += sn9c102_write_reg(cam, ctrl->value, 0x07); else err += sn9c102_write_reg(cam, ctrl->value, 0x05); break; case V4L2_CID_GAIN: err += sn9c102_i2c_write(cam, 0x00, ctrl->value); break; case V4L2_CID_DO_WHITE_BALANCE: err += sn9c102_i2c_write(cam, 0x0c, ctrl->value); break; case V4L2_CID_WHITENESS: err += sn9c102_i2c_write(cam, 0x0d, ctrl->value); break; case V4L2_CID_AUTOGAIN: err += sn9c102_i2c_write(cam, 0x13, ctrl->value | (ctrl->value << 1)); break; case V4L2_CID_VFLIP: err += sn9c102_i2c_write(cam, 0x75, 0x0e | (ctrl->value << 7)); break; case SN9C102_V4L2_CID_GAMMA: err += sn9c102_i2c_write(cam, 0x14, ctrl->value << 2); break; case SN9C102_V4L2_CID_BAND_FILTER: err += sn9c102_i2c_write(cam, 0x2d, ctrl->value << 2); break; default: return -EINVAL; } return err ? -EIO : 0; } static int ov7630_set_crop(struct sn9c102_device* cam, const struct v4l2_rect* rect) { struct sn9c102_sensor* s = sn9c102_get_sensor(cam); int err = 0; u8 h_start = 0, v_start = (u8)(rect->top - s->cropcap.bounds.top) + 1; switch (sn9c102_get_bridge(cam)) { case BRIDGE_SN9C101: case BRIDGE_SN9C102: case BRIDGE_SN9C103: h_start = (u8)(rect->left - s->cropcap.bounds.left) + 1; break; case BRIDGE_SN9C105: case BRIDGE_SN9C120: h_start = (u8)(rect->left - s->cropcap.bounds.left) + 4; break; default: break; } err += sn9c102_write_reg(cam, h_start, 0x12); err += sn9c102_write_reg(cam, v_start, 0x13); return err; } static int ov7630_set_pix_format(struct sn9c102_device* cam, const struct v4l2_pix_format* pix) { int err = 0; switch (sn9c102_get_bridge(cam)) { case BRIDGE_SN9C101: case BRIDGE_SN9C102: case BRIDGE_SN9C103: if (pix->pixelformat == V4L2_PIX_FMT_SBGGR8) err += sn9c102_write_reg(cam, 0x50, 0x19); else err += sn9c102_write_reg(cam, 0x20, 0x19); break; case BRIDGE_SN9C105: case BRIDGE_SN9C120: if (pix->pixelformat == V4L2_PIX_FMT_SBGGR8) { err += sn9c102_write_reg(cam, 0xe5, 0x17); err += sn9c102_i2c_write(cam, 0x11, 0x04); } else { err += sn9c102_write_reg(cam, 0xe2, 0x17); err += sn9c102_i2c_write(cam, 0x11, 0x02); } break; default: break; } return err; } static const struct sn9c102_sensor ov7630 = { .name = "OV7630", .maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>", .supported_bridge = BRIDGE_SN9C101 | BRIDGE_SN9C102 | BRIDGE_SN9C103 | BRIDGE_SN9C105 | BRIDGE_SN9C120, .sysfs_ops = SN9C102_I2C_READ | SN9C102_I2C_WRITE, .frequency = SN9C102_I2C_100KHZ, .interface = SN9C102_I2C_2WIRES, .i2c_slave_id = 0x21, .init = &ov7630_init, .qctrl = { { .id = V4L2_CID_GAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "global gain", .minimum = 0x00, .maximum = 0x3f, .step = 0x01, .default_value = 0x14, .flags = 0, }, { .id = V4L2_CID_EXPOSURE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "exposure", .minimum = 0x00, .maximum = 0xff, .step = 0x01, .default_value = 0x60, .flags = 0, }, { .id = V4L2_CID_WHITENESS, .type = V4L2_CTRL_TYPE_INTEGER, .name = "white balance background: red", 
.minimum = 0x00, .maximum = 0x3f, .step = 0x01, .default_value = 0x20, .flags = 0, }, { .id = V4L2_CID_DO_WHITE_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "white balance background: blue", .minimum = 0x00, .maximum = 0x3f, .step = 0x01, .default_value = 0x20, .flags = 0, }, { .id = V4L2_CID_RED_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "red balance", .minimum = 0x00, .maximum = 0x7f, .step = 0x01, .default_value = 0x20, .flags = 0, }, { .id = V4L2_CID_BLUE_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "blue balance", .minimum = 0x00, .maximum = 0x7f, .step = 0x01, .default_value = 0x20, .flags = 0, }, { .id = V4L2_CID_AUTOGAIN, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "auto adjust", .minimum = 0x00, .maximum = 0x01, .step = 0x01, .default_value = 0x00, .flags = 0, }, { .id = V4L2_CID_VFLIP, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "vertical flip", .minimum = 0x00, .maximum = 0x01, .step = 0x01, .default_value = 0x01, .flags = 0, }, { .id = SN9C102_V4L2_CID_GREEN_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "green balance", .minimum = 0x00, .maximum = 0x7f, .step = 0x01, .default_value = 0x20, .flags = 0, }, { .id = SN9C102_V4L2_CID_BAND_FILTER, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "band filter", .minimum = 0x00, .maximum = 0x01, .step = 0x01, .default_value = 0x00, .flags = 0, }, { .id = SN9C102_V4L2_CID_GAMMA, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "rgb gamma", .minimum = 0x00, .maximum = 0x01, .step = 0x01, .default_value = 0x00, .flags = 0, }, }, .get_ctrl = &ov7630_get_ctrl, .set_ctrl = &ov7630_set_ctrl, .cropcap = { .bounds = { .left = 0, .top = 0, .width = 640, .height = 480, }, .defrect = { .left = 0, .top = 0, .width = 640, .height = 480, }, }, .set_crop = &ov7630_set_crop, .pix_format = { .width = 640, .height = 480, .pixelformat = V4L2_PIX_FMT_SN9C10X, .priv = 8, }, .set_pix_format = &ov7630_set_pix_format }; int sn9c102_probe_ov7630(struct sn9c102_device* cam) { int pid, ver, err = 0; switch (sn9c102_get_bridge(cam)) { case BRIDGE_SN9C101: case BRIDGE_SN9C102: err = sn9c102_write_const_regs(cam, {0x01, 0x01}, {0x00, 0x01}, {0x28, 0x17}); break; case BRIDGE_SN9C103: /* do _not_ change anything! */ err = sn9c102_write_const_regs(cam, {0x09, 0x01}, {0x42, 0x01}, {0x28, 0x17}, {0x44, 0x02}); pid = sn9c102_i2c_try_read(cam, &ov7630, 0x0a); if (err || pid < 0) /* try a different initialization */ err += sn9c102_write_const_regs(cam, {0x01, 0x01}, {0x00, 0x01}); break; case BRIDGE_SN9C105: case BRIDGE_SN9C120: err = sn9c102_write_const_regs(cam, {0x01, 0xf1}, {0x00, 0xf1}, {0x29, 0x01}, {0x74, 0x02}, {0x0e, 0x01}, {0x44, 0x01}); break; default: break; } pid = sn9c102_i2c_try_read(cam, &ov7630, 0x0a); ver = sn9c102_i2c_try_read(cam, &ov7630, 0x0b); if (err || pid < 0 || ver < 0) return -EIO; if (pid != 0x76 || ver != 0x31) return -ENODEV; sn9c102_attach_sensor(cam, &ov7630); return 0; }
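/* Illustrative note, not part of the original file: each brace pair given
 * to sn9c102_write_const_regs() above is {value, register index}, so the
 * probe's {0x28, 0x17} writes 0x28 to bridge register 0x17. A hypothetical
 * unrolled equivalent using the single-register helper (the same helper
 * the ctrl callbacks above use) would look like this; it is illustration
 * only, hence #if 0.
 */
#if 0
static int example_probe_writes(struct sn9c102_device* cam)
{
	int err = 0;
	err += sn9c102_write_reg(cam, 0x01, 0x01);  /* value 0x01 -> reg 0x01 */
	err += sn9c102_write_reg(cam, 0x00, 0x01);  /* value 0x00 -> reg 0x01 */
	err += sn9c102_write_reg(cam, 0x28, 0x17);  /* value 0x28 -> reg 0x17 */
	return err ? -EIO : 0;
}
#endif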
gpl-2.0
coreboot-gs45/coreboot
src/drivers/ti/tps65913/tps65913rtc.c
5
4348
/* * This file is part of the coreboot project. * * Copyright 2014 Google Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <bcd.h> #include <console/console.h> #include <device/i2c.h> #include <rtc.h> #include <stdint.h> enum TPS65913_RTC_REG { TPS65913_SECONDS_REG = 0x00, TPS65913_MINUTES_REG = 0x01, TPS65913_HOURS_REG = 0x02, TPS65913_DAYS_REG = 0x03, TPS65913_MONTHS_REG = 0x04, TPS65913_YEARS_REG = 0x05, TPS65913_WEEKS_REG = 0x06, TPS65913_RTC_CTRL_REG = 0x10, TPS65913_RTC_STATUS_REG = 0x11, TPS65913_RTC_INTERRUPS_REG = 0x12, }; enum { TPS65913_RTC_CTRL_STOP = (1 << 0), TPS65913_RTC_CTRL_GET_TIME = (1 << 6), TPS65913_RTC_STATUS_RUN = (1 << 1), TPS65913_RTC_RUNNING = (1 << 1), TPS65913_RTC_FROZEN = (0 << 1), }; static inline uint8_t tps65913_read(enum TPS65913_RTC_REG reg) { uint8_t val; i2c_readb(CONFIG_DRIVERS_TI_TPS65913_RTC_BUS, CONFIG_DRIVERS_TI_TPS65913_RTC_ADDR, reg, &val); return val; } static inline void tps65913_write(enum TPS65913_RTC_REG reg, uint8_t val) { i2c_writeb(CONFIG_DRIVERS_TI_TPS65913_RTC_BUS, CONFIG_DRIVERS_TI_TPS65913_RTC_ADDR, reg, val); } static void tps65913_rtc_ctrl_clear(uint8_t bit) { uint8_t control = tps65913_read(TPS65913_RTC_CTRL_REG); control &= ~bit; tps65913_write(TPS65913_RTC_CTRL_REG, control); } static void tps65913_rtc_ctrl_set(uint8_t bit) { uint8_t control = tps65913_read(TPS65913_RTC_CTRL_REG); control |= bit; tps65913_write(TPS65913_RTC_CTRL_REG, control); } static int tps65913_is_rtc_running(void) { uint8_t status = tps65913_read(TPS65913_RTC_STATUS_REG); return ((status & TPS65913_RTC_STATUS_RUN) == TPS65913_RTC_RUNNING); } /* * This function ensures that current time is copied to shadow registers. Then a * normal read on TC registers reads from the shadow instead of current TC * registers. This helps prevent the accidental change in counters while * reading. In order to ensure that the current TC registers are copied into * shadow registers, GET_TIME bit needs to be set to 0 and then to 1.
*/ static void tps65913_rtc_shadow(void) { tps65913_rtc_ctrl_clear(TPS65913_RTC_CTRL_GET_TIME); tps65913_rtc_ctrl_set(TPS65913_RTC_CTRL_GET_TIME); } static int tps65913_rtc_stop(void) { /* Clearing stop bit freezes RTC */ tps65913_rtc_ctrl_clear(TPS65913_RTC_CTRL_STOP); if (tps65913_is_rtc_running()) { printk(BIOS_ERR, "Could not stop RTC\n"); return 1; } return 0; } static int tps65913_rtc_start(void) { /* Setting stop bit starts RTC */ tps65913_rtc_ctrl_set(TPS65913_RTC_CTRL_STOP); if (!tps65913_is_rtc_running()) { printk(BIOS_ERR, "Could not start RTC\n"); return 1; } return 0; } int rtc_set(const struct rtc_time *time) { /* Before setting the time, ensure that rtc is stopped */ if (tps65913_rtc_stop()) return 1; tps65913_write(TPS65913_SECONDS_REG, bin2bcd(time->sec)); tps65913_write(TPS65913_MINUTES_REG, bin2bcd(time->min)); tps65913_write(TPS65913_HOURS_REG, bin2bcd(time->hour)); tps65913_write(TPS65913_DAYS_REG, bin2bcd(time->mday)); tps65913_write(TPS65913_MONTHS_REG, bin2bcd(time->mon)); tps65913_write(TPS65913_YEARS_REG, bin2bcd(time->year)); /* Re-start rtc */ if (tps65913_rtc_start()) return 1; return 0; } int rtc_get(struct rtc_time *time) { tps65913_rtc_shadow(); time->sec = bcd2bin(tps65913_read(TPS65913_SECONDS_REG) & 0x7f); time->min = bcd2bin(tps65913_read(TPS65913_MINUTES_REG) & 0x7f); time->hour = bcd2bin(tps65913_read(TPS65913_HOURS_REG) & 0x3f); time->mday = bcd2bin(tps65913_read(TPS65913_DAYS_REG) & 0x3f); time->mon = bcd2bin(tps65913_read(TPS65913_MONTHS_REG) & 0x1f); time->year = bcd2bin(tps65913_read(TPS65913_YEARS_REG) & 0xff); return 0; }
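/* Illustrative usage sketch, not part of the original file: reading the
 * time through the rtc_get() entry point above. Because of the shadow
 * copy performed by tps65913_rtc_shadow(), the returned fields form a
 * consistent snapshot even if a second boundary passes mid-read. The
 * function below is invented for the example, hence #if 0.
 */
#if 0
static void example_print_time(void)
{
	struct rtc_time now;

	if (rtc_get(&now) == 0)
		printk(BIOS_INFO, "RTC: %02d:%02d:%02d\n",
		       now.hour, now.min, now.sec);
}
#endif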
gpl-2.0
Chilledheart/glibc
sysdeps/aarch64/fpu/e_sqrtf.c
5
1025
/* Single-precision floating point square root. Copyright (C) 2015-2016 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ #include <math_private.h> float __ieee754_sqrtf (float s) { float res; asm ("fsqrt %s0, %s1" : "=w" (res) : "w" (s)); return res; } strong_alias (__ieee754_sqrtf, __sqrtf_finite)
gpl-2.0
ironhead123/DeathCore_3.3.5
src/server/scripts/Outland/BlackTemple/illidari_council.cpp
5
30976
/* * Copyright (C) 2013-2015 DeathCore <http://www.noffearrdeathproject.net/> * Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ /* ScriptData SDName: Illidari_Council SD%Complete: 95 SDComment: Circle of Healing not working properly. SDCategory: Black Temple EndScriptData */ #include "ScriptMgr.h" #include "ScriptedCreature.h" #include "SpellScript.h" #include "SpellAuraEffects.h" #include "black_temple.h" enum IllidariCouncil { //Speech'n'Sounds SAY_GATH_SPECIAL1 = 2, SAY_GATH_SPECIAL2 = 3, SAY_GATH_SLAY = 4, SAY_GATH_COMNT = 5, SAY_GATH_DEATH = 6, SAY_MALA_SPECIAL1 = 2, SAY_MALA_SPECIAL2 = 3, SAY_MALA_SLAY = 4, SAY_MALA_COMNT = 5, SAY_MALA_DEATH = 6, SAY_ZERE_SPECIAL1 = 2, SAY_ZERE_SPECIAL2 = 3, SAY_ZERE_SLAY = 4, SAY_ZERE_COMNT = 5, SAY_ZERE_DEATH = 6, SAY_VERA_SPECIAL1 = 2, SAY_VERA_SPECIAL2 = 3, SAY_VERA_SLAY = 4, SAY_VERA_COMNT = 5, SAY_VERA_DEATH = 6, AKAMAID = 23089, // High Nethermancer Zerevor's spells SPELL_FLAMESTRIKE = 41481, SPELL_BLIZZARD = 41482, SPELL_ARCANE_BOLT = 41483, SPELL_ARCANE_EXPLOSION = 41524, SPELL_DAMPEN_MAGIC = 41478, // Lady Malande's spells SPELL_EMPOWERED_SMITE = 41471, SPELL_CIRCLE_OF_HEALING = 41455, SPELL_REFLECTIVE_SHIELD = 41475, SPELL_REFLECTIVE_SHIELD_T = 33619, SPELL_DIVINE_WRATH = 41472, SPELL_HEAL_VISUAL = 24171, // Gathios the Shatterer's spells SPELL_BLESS_PROTECTION = 41450, SPELL_BLESS_SPELLWARD = 41451, SPELL_CONSECRATION = 41541, SPELL_HAMMER_OF_JUSTICE = 41468, SPELL_SEAL_OF_COMMAND = 41469, SPELL_SEAL_OF_BLOOD = 41459, SPELL_CHROMATIC_AURA = 41453, SPELL_DEVOTION_AURA = 41452, // Veras Darkshadow's spells SPELL_DEADLY_POISON = 41485, SPELL_ENVENOM = 41487, SPELL_VANISH = 41479, SPELL_BERSERK = 45078 }; #define ERROR_INST_DATA "SD2 ERROR: Instance Data for Black Temple not set properly; Illidari Council event will not function properly." 
struct CouncilYells { int32 entry; uint32 timer; }; static CouncilYells CouncilAggro[]= { {0, 5000}, // Gathios {0, 5500}, // Veras {0, 5000}, // Malande {0, 0}, // Zerevor }; // Need to get proper timers for this later static CouncilYells CouncilEnrage[]= { {1, 2000}, // Gathios {1, 6000}, // Veras {1, 5000}, // Malande {1, 0}, // Zerevor }; class npc_blood_elf_council_voice_trigger : public CreatureScript { public: npc_blood_elf_council_voice_trigger() : CreatureScript("npc_blood_elf_council_voice_trigger") { } CreatureAI* GetAI(Creature* c) const override { return new npc_blood_elf_council_voice_triggerAI(c); } struct npc_blood_elf_council_voice_triggerAI : public ScriptedAI { npc_blood_elf_council_voice_triggerAI(Creature* creature) : ScriptedAI(creature) { Initialize(); } void Initialize() { EnrageTimer = 900000; // 15 minutes AggroYellTimer = 500; YellCounter = 0; EventStarted = false; } ObjectGuid Council[4]; uint32 EnrageTimer; uint32 AggroYellTimer; uint8 YellCounter; // Serves as the counter for both the aggro and enrage yells bool EventStarted; void Reset() override { Initialize(); } // finds and stores the GUIDs for each Council member using instance data system. void LoadCouncilGUIDs() { if (InstanceScript* instance = me->GetInstanceScript()) { Council[0] = instance->GetGuidData(DATA_GATHIOS_THE_SHATTERER); Council[1] = instance->GetGuidData(DATA_VERAS_DARKSHADOW); Council[2] = instance->GetGuidData(DATA_LADY_MALANDE); Council[3] = instance->GetGuidData(DATA_HIGH_NETHERMANCER_ZEREVOR); } else TC_LOG_ERROR("scripts", ERROR_INST_DATA); } void EnterCombat(Unit* /*who*/) override { } void AttackStart(Unit* /*who*/) override { } void MoveInLineOfSight(Unit* /*who*/) override { } void UpdateAI(uint32 diff) override { if (!EventStarted) return; if (YellCounter > 3) return; if (AggroYellTimer) { if (AggroYellTimer <= diff) { if (Creature* pMember = ObjectAccessor::GetCreature(*me, Council[YellCounter])) { pMember->AI()->Talk(CouncilAggro[YellCounter].entry); AggroYellTimer = CouncilAggro[YellCounter].timer; } ++YellCounter; if (YellCounter > 3) YellCounter = 0; // Reuse for Enrage Yells } else AggroYellTimer -= diff; } if (EnrageTimer) { if (EnrageTimer <= diff) { if (Creature* pMember = ObjectAccessor::GetCreature(*me, Council[YellCounter])) { pMember->CastSpell(pMember, SPELL_BERSERK, true); pMember->AI()->Talk(CouncilEnrage[YellCounter].entry); EnrageTimer = CouncilEnrage[YellCounter].timer; } ++YellCounter; } else EnrageTimer -= diff; } } }; }; class npc_illidari_council : public CreatureScript { public: npc_illidari_council() : CreatureScript("npc_illidari_council") { } CreatureAI* GetAI(Creature* creature) const override { return GetInstanceAI<npc_illidari_councilAI>(creature); } struct npc_illidari_councilAI : public ScriptedAI { npc_illidari_councilAI(Creature* creature) : ScriptedAI(creature) { Initialize(); instance = creature->GetInstanceScript(); } void Initialize() { CheckTimer = 2000; EndEventTimer = 0; DeathCount = 0; EventBegun = false; } InstanceScript* instance; ObjectGuid Council[4]; uint32 CheckTimer; uint32 EndEventTimer; uint8 DeathCount; bool EventBegun; void Reset() override { Initialize(); Creature* pMember = NULL; for (uint8 i = 0; i < 4; ++i) { pMember = ObjectAccessor::GetCreature((*me), Council[i]); if (!pMember) continue; if (!pMember->IsAlive()) { pMember->RemoveCorpse(); pMember->Respawn(); } pMember->AI()->EnterEvadeMode(); } instance->SetBossState(DATA_ILLIDARI_COUNCIL, NOT_STARTED); if (Creature* VoiceTrigger = ObjectAccessor::GetCreature(*me, 
instance->GetGuidData(DATA_BLOOD_ELF_COUNCIL_VOICE))) VoiceTrigger->AI()->EnterEvadeMode(); me->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NOT_SELECTABLE); me->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NOT_SELECTABLE); me->SetDisplayId(11686); } void EnterCombat(Unit* /*who*/) override { } void AttackStart(Unit* /*who*/) override { } void MoveInLineOfSight(Unit* /*who*/) override { } void StartEvent(Unit* target) { if (target && target->IsAlive()) { Council[0] = instance->GetGuidData(DATA_GATHIOS_THE_SHATTERER); Council[1] = instance->GetGuidData(DATA_HIGH_NETHERMANCER_ZEREVOR); Council[2] = instance->GetGuidData(DATA_LADY_MALANDE); Council[3] = instance->GetGuidData(DATA_VERAS_DARKSHADOW); // Start the event for the Voice Trigger if (Creature* VoiceTrigger = ObjectAccessor::GetCreature(*me, instance->GetGuidData(DATA_BLOOD_ELF_COUNCIL_VOICE))) { ENSURE_AI(npc_blood_elf_council_voice_trigger::npc_blood_elf_council_voice_triggerAI, VoiceTrigger->AI())->LoadCouncilGUIDs(); ENSURE_AI(npc_blood_elf_council_voice_trigger::npc_blood_elf_council_voice_triggerAI, VoiceTrigger->AI())->EventStarted = true; } for (uint8 i = 0; i < 4; ++i) { if (Council[i]) { if (Creature* member = ObjectAccessor::GetCreature(*me, Council[i])) if (member->IsAlive()) member->AI()->AttackStart(target); } } instance->SetBossState(DATA_ILLIDARI_COUNCIL, IN_PROGRESS); EventBegun = true; } } void UpdateAI(uint32 diff) override { if (!EventBegun) return; if (EndEventTimer) { if (EndEventTimer <= diff) { if (DeathCount > 3) { if (Creature* VoiceTrigger = ObjectAccessor::GetCreature(*me, instance->GetGuidData(DATA_BLOOD_ELF_COUNCIL_VOICE))) VoiceTrigger->DealDamage(VoiceTrigger, VoiceTrigger->GetHealth(), NULL, DIRECT_DAMAGE, SPELL_SCHOOL_MASK_NORMAL, NULL, false); instance->SetBossState(DATA_ILLIDARI_COUNCIL, DONE); //me->SummonCreature(AKAMAID, 746.466980f, 304.394989f, 311.90208f, 6.272870f, TEMPSUMMON_DEAD_DESPAWN, 0); me->DealDamage(me, me->GetHealth(), NULL, DIRECT_DAMAGE, SPELL_SCHOOL_MASK_NORMAL, NULL, false); return; } Creature* pMember = (ObjectAccessor::GetCreature(*me, Council[DeathCount])); if (pMember && pMember->IsAlive()) pMember->DealDamage(pMember, pMember->GetHealth(), NULL, DIRECT_DAMAGE, SPELL_SCHOOL_MASK_NORMAL, NULL, false); ++DeathCount; EndEventTimer = 1500; } else EndEventTimer -= diff; } if (CheckTimer) { if (CheckTimer <= diff) { uint8 EvadeCheck = 0; for (uint8 i = 0; i < 4; ++i) { if (Council[i]) { if (Creature* Member = (ObjectAccessor::GetCreature((*me), Council[i]))) { // This is the evade/death check. if (Member->IsAlive() && !Member->GetVictim()) ++EvadeCheck; //If all members evade, we reset so that players can properly reset the event else if (!Member->IsAlive()) // If even one member dies, kill the rest, set instance data, and kill self. 
{ EndEventTimer = 1000; CheckTimer = 0; return; } } } } if (EvadeCheck > 3) Reset(); CheckTimer = 2000; } else CheckTimer -= diff; } } }; }; struct boss_illidari_councilAI : public ScriptedAI { boss_illidari_councilAI(Creature* creature) : ScriptedAI(creature) { instance = creature->GetInstanceScript(); LoadedGUIDs = false; } ObjectGuid Council[4]; InstanceScript* instance; bool LoadedGUIDs; void EnterCombat(Unit* who) override { if (Creature* controller = ObjectAccessor::GetCreature(*me, instance->GetGuidData(DATA_ILLIDARI_COUNCIL))) ENSURE_AI(npc_illidari_council::npc_illidari_councilAI, controller->AI())->StartEvent(who); DoZoneInCombat(); // Load GUIDs on first aggro because the Creature guids are only set as the creatures are created in world- // this means that for each creature, it will attempt to LoadGUIDs even though some of the other creatures are // not in world, and thus have no GUID set in the instance data system. Putting it in aggro ensures that all the creatures // have been loaded and have their GUIDs set in the instance data system. if (!LoadedGUIDs) LoadGUIDs(); } void EnterEvadeMode() override { for (uint8 i = 0; i < 4; ++i) { if (Unit* unit = ObjectAccessor::GetUnit(*me, Council[i])) if (unit != me && unit->GetVictim()) { AttackStart(unit->GetVictim()); return; } } ScriptedAI::EnterEvadeMode(); } void DamageTaken(Unit* done_by, uint32 &damage) override { if (done_by == me) return; damage /= 4; for (uint8 i = 0; i < 4; ++i) { if (Creature* unit = ObjectAccessor::GetCreature(*me, Council[i])) if (unit != me && damage < unit->GetHealth()) { unit->ModifyHealth(-int32(damage)); unit->LowerPlayerDamageReq(damage); } } } void LoadGUIDs() { Council[0] = instance->GetGuidData(DATA_LADY_MALANDE); Council[1] = instance->GetGuidData(DATA_HIGH_NETHERMANCER_ZEREVOR); Council[2] = instance->GetGuidData(DATA_GATHIOS_THE_SHATTERER); Council[3] = instance->GetGuidData(DATA_VERAS_DARKSHADOW); LoadedGUIDs = true; } }; class boss_gathios_the_shatterer : public CreatureScript { public: boss_gathios_the_shatterer() : CreatureScript("boss_gathios_the_shatterer") { } CreatureAI* GetAI(Creature* creature) const override { return GetInstanceAI<boss_gathios_the_shattererAI>(creature); } struct boss_gathios_the_shattererAI : public boss_illidari_councilAI { boss_gathios_the_shattererAI(Creature* creature) : boss_illidari_councilAI(creature) { Initialize(); } void Initialize() { ConsecrationTimer = 40000; HammerOfJusticeTimer = 10000; SealTimer = 40000; AuraTimer = 90000; BlessingTimer = 60000; } uint32 ConsecrationTimer; uint32 HammerOfJusticeTimer; uint32 SealTimer; uint32 AuraTimer; uint32 BlessingTimer; void Reset() override { Initialize(); } void KilledUnit(Unit* /*victim*/) override { Talk(SAY_GATH_SLAY); } void JustDied(Unit* /*killer*/) override { Talk(SAY_GATH_DEATH); } Unit* SelectCouncilMember() { Unit* unit = me; uint32 member = 0; // He chooses Lady Malande most often if (rand32() % 10 == 0) // But there is a chance he picks someone else. 
member = urand(1, 3); if (member != 2) // No need to create another pointer to us using Unit::GetUnit unit = ObjectAccessor::GetUnit(*me, Council[member]); return unit; } void CastAuraOnCouncil() { uint32 spellid = 0; switch (urand(0, 1)) { case 0: spellid = SPELL_DEVOTION_AURA; break; case 1: spellid = SPELL_CHROMATIC_AURA; break; } for (uint8 i = 0; i < 4; ++i) { Unit* unit = ObjectAccessor::GetUnit(*me, Council[i]); if (unit) unit->CastSpell(unit, spellid, true, 0, 0, me->GetGUID()); } } void UpdateAI(uint32 diff) override { if (!UpdateVictim()) return; if (BlessingTimer <= diff) { if (Unit* unit = SelectCouncilMember()) { switch (urand(0, 1)) { case 0: DoCast(unit, SPELL_BLESS_SPELLWARD); break; case 1: DoCast(unit, SPELL_BLESS_PROTECTION); break; } } BlessingTimer = 60000; } else BlessingTimer -= diff; if (ConsecrationTimer <= diff) { DoCast(me, SPELL_CONSECRATION); ConsecrationTimer = 40000; } else ConsecrationTimer -= diff; if (HammerOfJusticeTimer <= diff) { if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0)) { // is in ~10-40 yd range if (me->IsInRange(target, 10.0f, 40.0f, false)) { DoCast(target, SPELL_HAMMER_OF_JUSTICE); HammerOfJusticeTimer = 20000; } } } else HammerOfJusticeTimer -= diff; if (SealTimer <= diff) { switch (urand(0, 1)) { case 0: DoCast(me, SPELL_SEAL_OF_COMMAND); break; case 1: DoCast(me, SPELL_SEAL_OF_BLOOD); break; } SealTimer = 40000; } else SealTimer -= diff; if (AuraTimer <= diff) { CastAuraOnCouncil(); AuraTimer = 90000; } else AuraTimer -= diff; DoMeleeAttackIfReady(); } }; }; class boss_high_nethermancer_zerevor : public CreatureScript { public: boss_high_nethermancer_zerevor() : CreatureScript("boss_high_nethermancer_zerevor") { } CreatureAI* GetAI(Creature* creature) const override { return GetInstanceAI<boss_high_nethermancer_zerevorAI>(creature); } struct boss_high_nethermancer_zerevorAI : public boss_illidari_councilAI { boss_high_nethermancer_zerevorAI(Creature* creature) : boss_illidari_councilAI(creature) { Initialize(); } void Initialize() { BlizzardTimer = urand(30, 91) * 1000; FlamestrikeTimer = urand(30, 91) * 1000; ArcaneBoltTimer = 10000; DampenMagicTimer = 2000; ArcaneExplosionTimer = 14000; Cooldown = 0; } uint32 BlizzardTimer; uint32 FlamestrikeTimer; uint32 ArcaneBoltTimer; uint32 DampenMagicTimer; uint32 Cooldown; uint32 ArcaneExplosionTimer; void Reset() override { Initialize(); } void KilledUnit(Unit* /*victim*/) override { Talk(SAY_ZERE_SLAY); } void JustDied(Unit* /*killer*/) override { Talk(SAY_ZERE_DEATH); } void UpdateAI(uint32 diff) override { if (!UpdateVictim()) return; if (Cooldown) { if (Cooldown <= diff) Cooldown = 0; else { Cooldown -= diff; return; // Don't cast any other spells if global cooldown is still ticking } } if (DampenMagicTimer <= diff) { DoCast(me, SPELL_DAMPEN_MAGIC); Cooldown = 1000; DampenMagicTimer = 67200; // almost 1, 12 minutes ArcaneBoltTimer += 1000; // Give the Mage some time to spellsteal Dampen. 
} else DampenMagicTimer -= diff; if (ArcaneExplosionTimer <= diff) { DoCastVictim(SPELL_ARCANE_EXPLOSION); Cooldown = 1000; ArcaneExplosionTimer = 14000; } else ArcaneExplosionTimer -= diff; if (ArcaneBoltTimer <= diff) { DoCastVictim(SPELL_ARCANE_BOLT); ArcaneBoltTimer = 3000; Cooldown = 2000; } else ArcaneBoltTimer -= diff; if (BlizzardTimer <= diff) { if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0)) { DoCast(target, SPELL_BLIZZARD); BlizzardTimer = urand(45, 91) * 1000; FlamestrikeTimer += 10000; Cooldown = 1000; } } else BlizzardTimer -= diff; if (FlamestrikeTimer <= diff) { if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0)) { DoCast(target, SPELL_FLAMESTRIKE); FlamestrikeTimer = urand(55, 101) * 1000; BlizzardTimer += 10000; Cooldown = 2000; } } else FlamestrikeTimer -= diff; } }; }; class boss_lady_malande : public CreatureScript { public: boss_lady_malande() : CreatureScript("boss_lady_malande") { } CreatureAI* GetAI(Creature* creature) const override { return GetInstanceAI<boss_lady_malandeAI>(creature); } struct boss_lady_malandeAI : public boss_illidari_councilAI { boss_lady_malandeAI(Creature* creature) : boss_illidari_councilAI(creature) { Initialize(); } void Initialize() { EmpoweredSmiteTimer = 38000; CircleOfHealingTimer = 20000; DivineWrathTimer = 40000; ReflectiveShieldTimer = 0; } uint32 EmpoweredSmiteTimer; uint32 CircleOfHealingTimer; uint32 DivineWrathTimer; uint32 ReflectiveShieldTimer; void Reset() override { Initialize(); } void KilledUnit(Unit* /*victim*/) override { Talk(SAY_MALA_SLAY); } void JustDied(Unit* /*killer*/) override { Talk(SAY_MALA_DEATH); } void UpdateAI(uint32 diff) override { if (!UpdateVictim()) return; if (EmpoweredSmiteTimer <= diff) { if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0)) { DoCast(target, SPELL_EMPOWERED_SMITE); EmpoweredSmiteTimer = 38000; } } else EmpoweredSmiteTimer -= diff; if (CircleOfHealingTimer <= diff) { DoCast(me, SPELL_CIRCLE_OF_HEALING); CircleOfHealingTimer = 60000; } else CircleOfHealingTimer -= diff; if (DivineWrathTimer <= diff) { if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0)) { DoCast(target, SPELL_DIVINE_WRATH); DivineWrathTimer = urand(40, 81) * 1000; } } else DivineWrathTimer -= diff; if (ReflectiveShieldTimer <= diff) { DoCast(me, SPELL_REFLECTIVE_SHIELD); ReflectiveShieldTimer = 65000; } else ReflectiveShieldTimer -= diff; DoMeleeAttackIfReady(); } }; }; class boss_veras_darkshadow : public CreatureScript { public: boss_veras_darkshadow() : CreatureScript("boss_veras_darkshadow") { } CreatureAI* GetAI(Creature* creature) const override { return GetInstanceAI<boss_veras_darkshadowAI>(creature); } struct boss_veras_darkshadowAI : public boss_illidari_councilAI { boss_veras_darkshadowAI(Creature* creature) : boss_illidari_councilAI(creature) { Initialize(); } void Initialize() { DeadlyPoisonTimer = 20000; VanishTimer = urand(60, 121) * 1000; AppearEnvenomTimer = 150000; HasVanished = false; } uint32 DeadlyPoisonTimer; uint32 VanishTimer; uint32 AppearEnvenomTimer; bool HasVanished; void Reset() override { Initialize(); me->SetVisible(true); me->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NOT_SELECTABLE); } void KilledUnit(Unit* /*victim*/) override { Talk(SAY_VERA_SLAY); } void JustDied(Unit* /*killer*/) override { Talk(SAY_VERA_DEATH); } void UpdateAI(uint32 diff) override { if (!UpdateVictim()) return; if (!HasVanished) { if (DeadlyPoisonTimer <= diff) { DoCastVictim(SPELL_DEADLY_POISON); DeadlyPoisonTimer = urand(15, 46) * 1000; } else DeadlyPoisonTimer -= diff; if 
(AppearEnvenomTimer <= diff) // Cast Envenom. This is cast 4 seconds after Vanish is over { DoCastVictim(SPELL_ENVENOM); AppearEnvenomTimer = 90000; } else AppearEnvenomTimer -= diff; if (VanishTimer <= diff) // Disappear and stop attacking, but follow a random unit { if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0)) { VanishTimer = 30000; AppearEnvenomTimer= 28000; HasVanished = true; me->SetVisible(false); me->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NOT_SELECTABLE); DoResetThreat(); // Chase a unit. Check before DoMeleeAttackIfReady prevents from attacking me->AddThreat(target, 500000.0f); me->GetMotionMaster()->MoveChase(target); } } else VanishTimer -= diff; DoMeleeAttackIfReady(); } else { if (VanishTimer <= diff) // Become attackable and poison current target { Unit* target = me->GetVictim(); DoCast(target, SPELL_DEADLY_POISON); me->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NOT_SELECTABLE); DoResetThreat(); me->AddThreat(target, 3000.0f); // Make Veras attack his target for a while, he will cast Envenom 4 seconds after. DeadlyPoisonTimer += 6000; VanishTimer = 90000; AppearEnvenomTimer = 4000; HasVanished = false; } else VanishTimer -= diff; if (AppearEnvenomTimer <= diff) // Appear 2 seconds before becoming attackable (Shifting out of vanish) { me->GetMotionMaster()->Clear(); me->GetMotionMaster()->MoveChase(me->GetVictim()); me->SetVisible(true); AppearEnvenomTimer = 6000; } else AppearEnvenomTimer -= diff; } } }; }; // SPELL_REFLECTIVE_SHIELD class spell_boss_lady_malande_shield : public SpellScriptLoader { public: spell_boss_lady_malande_shield() : SpellScriptLoader("spell_boss_lady_malande_shield") { } class spell_boss_lady_malande_shield_AuraScript : public AuraScript { PrepareAuraScript(spell_boss_lady_malande_shield_AuraScript); bool Validate(SpellInfo const* /*spellInfo*/) override { return sSpellMgr->GetSpellInfo(SPELL_REFLECTIVE_SHIELD_T) != nullptr; } void Trigger(AuraEffect* aurEff, DamageInfo & dmgInfo, uint32 & absorbAmount) { Unit* target = GetTarget(); if (dmgInfo.GetAttacker() == target) return; int32 bp = absorbAmount / 2; target->CastCustomSpell(dmgInfo.GetAttacker(), SPELL_REFLECTIVE_SHIELD_T, &bp, NULL, NULL, true, NULL, aurEff); } void Register() override { AfterEffectAbsorb += AuraEffectAbsorbFn(spell_boss_lady_malande_shield_AuraScript::Trigger, EFFECT_0); } }; AuraScript* GetAuraScript() const override { return new spell_boss_lady_malande_shield_AuraScript(); } }; void AddSC_boss_illidari_council() { new npc_illidari_council(); new npc_blood_elf_council_voice_trigger(); new boss_gathios_the_shatterer(); new boss_lady_malande(); new boss_veras_darkshadow(); new boss_high_nethermancer_zerevor(); new spell_boss_lady_malande_shield(); }
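// Illustrative note, not part of the original script: the DamageTaken()
// override in boss_illidari_councilAI above implements the council's shared
// health pool. Every hit is quartered (damage /= 4); the victim then takes
// that quarter through the normal damage path, while each of the other three
// members is lowered by the same quarter via ModifyHealth(). A standalone
// sketch of the arithmetic (names invented for the example, hence #if 0):
#if 0
static void ExampleSharedDamage(uint32 hit, uint32 (&health)[4])
{
    uint32 share = hit / 4;                 // same split as DamageTaken()
    for (uint8 i = 0; i < 4; ++i)           // victim included: all four lose it
        health[i] = health[i] > share ? health[i] - share : 0;
}
#endif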
gpl-2.0
iSerge/rc-controller
server/lighttpd-1.4.35/src/mod_cgi.c
5
35360
#include "server.h" #include "stat_cache.h" #include "keyvalue.h" #include "log.h" #include "connections.h" #include "joblist.h" #include "http_chunk.h" #include "plugin.h" #include <sys/types.h> #ifdef __WIN32 # include <winsock2.h> #else # include <sys/socket.h> # include <sys/wait.h> # include <sys/mman.h> # include <netinet/in.h> # include <arpa/inet.h> #endif #include <unistd.h> #include <errno.h> #include <stdlib.h> #include <string.h> #include <fdevent.h> #include <signal.h> #include <ctype.h> #include <assert.h> #include <stdio.h> #include <fcntl.h> #ifdef HAVE_SYS_FILIO_H # include <sys/filio.h> #endif #include "version.h" enum {EOL_UNSET, EOL_N, EOL_RN}; typedef struct { char **ptr; size_t size; size_t used; } char_array; typedef struct { pid_t *ptr; size_t used; size_t size; } buffer_pid_t; typedef struct { array *cgi; unsigned short execute_x_only; } plugin_config; typedef struct { PLUGIN_DATA; buffer_pid_t cgi_pid; buffer *tmp_buf; buffer *parse_response; plugin_config **config_storage; plugin_config conf; } plugin_data; typedef struct { pid_t pid; int fd; int fde_ndx; /* index into the fd-event buffer */ connection *remote_conn; /* dumb pointer */ plugin_data *plugin_data; /* dumb pointer */ buffer *response; buffer *response_header; } handler_ctx; static handler_ctx * cgi_handler_ctx_init(void) { handler_ctx *hctx = calloc(1, sizeof(*hctx)); force_assert(hctx); hctx->response = buffer_init(); hctx->response_header = buffer_init(); return hctx; } static void cgi_handler_ctx_free(handler_ctx *hctx) { buffer_free(hctx->response); buffer_free(hctx->response_header); free(hctx); } enum {FDEVENT_HANDLED_UNSET, FDEVENT_HANDLED_FINISHED, FDEVENT_HANDLED_NOT_FINISHED, FDEVENT_HANDLED_ERROR}; INIT_FUNC(mod_cgi_init) { plugin_data *p; p = calloc(1, sizeof(*p)); force_assert(p); p->tmp_buf = buffer_init(); p->parse_response = buffer_init(); return p; } FREE_FUNC(mod_cgi_free) { plugin_data *p = p_d; buffer_pid_t *r = &(p->cgi_pid); UNUSED(srv); if (p->config_storage) { size_t i; for (i = 0; i < srv->config_context->used; i++) { plugin_config *s = p->config_storage[i]; array_free(s->cgi); free(s); } free(p->config_storage); } if (r->ptr) free(r->ptr); buffer_free(p->tmp_buf); buffer_free(p->parse_response); free(p); return HANDLER_GO_ON; } SETDEFAULTS_FUNC(mod_fastcgi_set_defaults) { plugin_data *p = p_d; size_t i = 0; config_values_t cv[] = { { "cgi.assign", NULL, T_CONFIG_ARRAY, T_CONFIG_SCOPE_CONNECTION }, /* 0 */ { "cgi.execute-x-only", NULL, T_CONFIG_BOOLEAN, T_CONFIG_SCOPE_CONNECTION }, /* 1 */ { NULL, NULL, T_CONFIG_UNSET, T_CONFIG_SCOPE_UNSET} }; if (!p) return HANDLER_ERROR; p->config_storage = calloc(1, srv->config_context->used * sizeof(plugin_config *)); for (i = 0; i < srv->config_context->used; i++) { plugin_config *s; s = calloc(1, sizeof(plugin_config)); force_assert(s); s->cgi = array_init(); s->execute_x_only = 0; cv[0].destination = s->cgi; cv[1].destination = &(s->execute_x_only); p->config_storage[i] = s; if (0 != config_insert_values_global(srv, ((data_config *)srv->config_context->data[i])->value, cv)) { return HANDLER_ERROR; } } return HANDLER_GO_ON; } static int cgi_pid_add(server *srv, plugin_data *p, pid_t pid) { int m = -1; size_t i; buffer_pid_t *r = &(p->cgi_pid); UNUSED(srv); for (i = 0; i < r->used; i++) { if (r->ptr[i] > m) m = r->ptr[i]; } if (r->size == 0) { r->size = 16; r->ptr = malloc(sizeof(*r->ptr) * r->size); } else if (r->used == r->size) { r->size += 16; r->ptr = realloc(r->ptr, sizeof(*r->ptr) * r->size); } r->ptr[r->used++] = pid; return m; } 
static int cgi_pid_del(server *srv, plugin_data *p, pid_t pid) { size_t i; buffer_pid_t *r = &(p->cgi_pid); UNUSED(srv); for (i = 0; i < r->used; i++) { if (r->ptr[i] == pid) break; } if (i != r->used) { /* found */ if (i != r->used - 1) { r->ptr[i] = r->ptr[r->used - 1]; } r->used--; } return 0; } static int cgi_response_parse(server *srv, connection *con, plugin_data *p, buffer *in) { char *ns; const char *s; int line = 0; UNUSED(srv); buffer_copy_string_buffer(p->parse_response, in); for (s = p->parse_response->ptr; NULL != (ns = strchr(s, '\n')); s = ns + 1, line++) { const char *key, *value; int key_len; data_string *ds; /* strip the \n */ ns[0] = '\0'; if (ns > s && ns[-1] == '\r') ns[-1] = '\0'; if (line == 0 && 0 == strncmp(s, "HTTP/1.", 7)) { /* non-parsed header ... we parse them anyway */ if ((s[7] == '1' || s[7] == '0') && s[8] == ' ') { int status; /* after the space should be a status code for us */ status = strtol(s+9, NULL, 10); if (status >= 100 && status < 1000) { /* we got the 3-digit status code we expected */ con->parsed_response |= HTTP_STATUS; con->http_status = status; } } } else { /* parse the headers */ key = s; if (NULL == (value = strchr(s, ':'))) { /* we expect: "<key>: <value>\r\n" */ continue; } key_len = value - key; value += 1; /* skip LWS */ while (*value == ' ' || *value == '\t') value++; if (NULL == (ds = (data_string *)array_get_unused_element(con->response.headers, TYPE_STRING))) { ds = data_response_init(); } buffer_copy_string_len(ds->key, key, key_len); buffer_copy_string(ds->value, value); array_insert_unique(con->response.headers, (data_unset *)ds); switch(key_len) { case 4: if (0 == strncasecmp(key, "Date", key_len)) { con->parsed_response |= HTTP_DATE; } break; case 6: if (0 == strncasecmp(key, "Status", key_len)) { con->http_status = strtol(value, NULL, 10); con->parsed_response |= HTTP_STATUS; } break; case 8: if (0 == strncasecmp(key, "Location", key_len)) { con->parsed_response |= HTTP_LOCATION; } break; case 10: if (0 == strncasecmp(key, "Connection", key_len)) { con->response.keep_alive = (0 == strcasecmp(value, "Keep-Alive")) ?
1 : 0; con->parsed_response |= HTTP_CONNECTION; } break; case 14: if (0 == strncasecmp(key, "Content-Length", key_len)) { con->response.content_length = strtol(value, NULL, 10); con->parsed_response |= HTTP_CONTENT_LENGTH; } break; default: break; } } } /* CGI/1.1 rev 03 - 7.2.1.2 */ if ((con->parsed_response & HTTP_LOCATION) && !(con->parsed_response & HTTP_STATUS)) { con->http_status = 302; } return 0; } static int cgi_demux_response(server *srv, handler_ctx *hctx) { plugin_data *p = hctx->plugin_data; connection *con = hctx->remote_conn; while(1) { int n; int toread; #if defined(__WIN32) buffer_prepare_copy(hctx->response, 4 * 1024); #else if (ioctl(hctx->fd, FIONREAD, &toread) || toread == 0 || toread <= 4*1024) { buffer_prepare_copy(hctx->response, 4 * 1024); } else { if (toread > MAX_READ_LIMIT) toread = MAX_READ_LIMIT; buffer_prepare_copy(hctx->response, toread + 1); } #endif if (-1 == (n = read(hctx->fd, hctx->response->ptr, hctx->response->size - 1))) { if (errno == EAGAIN || errno == EINTR) { /* would block, wait for signal */ return FDEVENT_HANDLED_NOT_FINISHED; } /* error */ log_error_write(srv, __FILE__, __LINE__, "sdd", strerror(errno), con->fd, hctx->fd); return FDEVENT_HANDLED_ERROR; } if (n == 0) { /* read finished */ con->file_finished = 1; /* send final chunk */ http_chunk_append_mem(srv, con, NULL, 0); joblist_append(srv, con); return FDEVENT_HANDLED_FINISHED; } hctx->response->ptr[n] = '\0'; hctx->response->used = n+1; /* split header from body */ if (con->file_started == 0) { int is_header = 0; int is_header_end = 0; size_t last_eol = 0; size_t i; buffer_append_string_buffer(hctx->response_header, hctx->response); /** * we have to handle a few cases: * * nph: * * HTTP/1.0 200 Ok\n * Header: Value\n * \n * * CGI: * Header: Value\n * Status: 200\n * \n * * and different mixes of \n and \r\n combinations * * Some users also forget about CGI and just send a response and hope * we handle it.
No headers, no header-content separator * */ /* nph (non-parsed headers) */ if (0 == strncmp(hctx->response_header->ptr, "HTTP/1.", 7)) is_header = 1; for (i = 0; !is_header_end && i < hctx->response_header->used - 1; i++) { char c = hctx->response_header->ptr[i]; switch (c) { case ':': /* we found a colon * * looks like we have a normal header */ is_header = 1; break; case '\n': /* EOL */ if (is_header == 0) { /* we got an EOL but we don't seem to have a HTTP header */ is_header_end = 1; break; } /** * check if we saw a \n(\r)?\n sequence */ if (last_eol > 0 && ((i - last_eol == 1) || (i - last_eol == 2 && hctx->response_header->ptr[i - 1] == '\r'))) { is_header_end = 1; break; } last_eol = i; break; } } if (is_header_end) { if (!is_header) { /* no header, but a body */ if (con->request.http_version == HTTP_VERSION_1_1) { con->response.transfer_encoding = HTTP_TRANSFER_ENCODING_CHUNKED; } http_chunk_append_mem(srv, con, hctx->response_header->ptr, hctx->response_header->used); joblist_append(srv, con); } else { const char *bstart; size_t blen; /** * i still points to the char after the terminating EOL EOL * * put it on the last \n again */ i--; /* the body starts after the EOL */ bstart = hctx->response_header->ptr + (i + 1); blen = (hctx->response_header->used - 1) - (i + 1); /* strip the last \r?\n */ if (i > 0 && (hctx->response_header->ptr[i - 1] == '\r')) { i--; } hctx->response_header->ptr[i] = '\0'; hctx->response_header->used = i + 1; /* the string + \0 */ /* parse the response header */ cgi_response_parse(srv, con, p, hctx->response_header); /* enable chunked-transfer-encoding */ if (con->request.http_version == HTTP_VERSION_1_1 && !(con->parsed_response & HTTP_CONTENT_LENGTH)) { con->response.transfer_encoding = HTTP_TRANSFER_ENCODING_CHUNKED; } if (blen > 0) { http_chunk_append_mem(srv, con, bstart, blen + 1); joblist_append(srv, con); } } con->file_started = 1; } } else { http_chunk_append_mem(srv, con, hctx->response->ptr, hctx->response->used); joblist_append(srv, con); } #if 0 log_error_write(srv, __FILE__, __LINE__, "ddss", con->fd, hctx->fd, connection_get_state(con->state), b->ptr); #endif } return FDEVENT_HANDLED_NOT_FINISHED; } static handler_t cgi_connection_close(server *srv, handler_ctx *hctx) { int status; pid_t pid; plugin_data *p; connection *con; if (NULL == hctx) return HANDLER_GO_ON; p = hctx->plugin_data; con = hctx->remote_conn; if (con->mode != p->id) return HANDLER_GO_ON; #ifndef __WIN32 /* the connection to the browser went away, but we still have a connection * to the CGI script * * close cgi-connection */ if (hctx->fd != -1) { /* close connection to the cgi-script */ fdevent_event_del(srv->ev, &(hctx->fde_ndx), hctx->fd); fdevent_unregister(srv->ev, hctx->fd); if (close(hctx->fd)) { log_error_write(srv, __FILE__, __LINE__, "sds", "cgi close failed ", hctx->fd, strerror(errno)); } hctx->fd = -1; hctx->fde_ndx = -1; } pid = hctx->pid; con->plugin_ctx[p->id] = NULL; /* is this a good idea ?
*/ cgi_handler_ctx_free(hctx); /* if waitpid hasn't been called by response.c yet, do it here */ if (pid) { /* check if the CGI-script is already gone */ switch(waitpid(pid, &status, WNOHANG)) { case 0: /* not finished yet */ #if 0 log_error_write(srv, __FILE__, __LINE__, "sd", "(debug) child isn't done yet, pid:", pid); #endif break; case -1: /* */ if (errno == EINTR) break; /* * errno == ECHILD happens if _subrequest catches the process-status before * we have read the response of the cgi process * * -> catch status * -> WAIT_FOR_EVENT * -> read response * -> we get here with waitpid == ECHILD * */ if (errno == ECHILD) return HANDLER_GO_ON; log_error_write(srv, __FILE__, __LINE__, "ss", "waitpid failed: ", strerror(errno)); return HANDLER_ERROR; default: /* Send an error if we haven't sent any data yet */ if (0 == con->file_started) { connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST); con->http_status = 500; con->mode = DIRECT; } else { con->file_finished = 1; } if (WIFEXITED(status)) { #if 0 log_error_write(srv, __FILE__, __LINE__, "sd", "(debug) cgi exited fine, pid:", pid); #endif return HANDLER_GO_ON; } else { log_error_write(srv, __FILE__, __LINE__, "sd", "cgi died, pid:", pid); return HANDLER_GO_ON; } } kill(pid, SIGTERM); /* cgi-script is still alive, queue the PID for removal */ cgi_pid_add(srv, p, pid); } #endif return HANDLER_GO_ON; } static handler_t cgi_connection_close_callback(server *srv, connection *con, void *p_d) { plugin_data *p = p_d; return cgi_connection_close(srv, con->plugin_ctx[p->id]); } static handler_t cgi_handle_fdevent(server *srv, void *ctx, int revents) { handler_ctx *hctx = ctx; connection *con = hctx->remote_conn; joblist_append(srv, con); if (hctx->fd == -1) { log_error_write(srv, __FILE__, __LINE__, "ddss", con->fd, hctx->fd, connection_get_state(con->state), "invalid cgi-fd"); return HANDLER_ERROR; } if (revents & FDEVENT_IN) { switch (cgi_demux_response(srv, hctx)) { case FDEVENT_HANDLED_NOT_FINISHED: break; case FDEVENT_HANDLED_FINISHED: /* we are done */ #if 0 log_error_write(srv, __FILE__, __LINE__, "ddss", con->fd, hctx->fd, connection_get_state(con->state), "finished"); #endif cgi_connection_close(srv, hctx); /* if we get an IN|HUP and have read everything don't exec the close twice */ return HANDLER_FINISHED; case FDEVENT_HANDLED_ERROR: /* Send an error if we haven't sent any data yet */ if (0 == con->file_started) { connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST); con->http_status = 500; con->mode = DIRECT; } else { con->file_finished = 1; } log_error_write(srv, __FILE__, __LINE__, "s", "demuxer failed: "); break; } } if (revents & FDEVENT_OUT) { /* nothing to do */ } /* perhaps this issue is already handled */ if (revents & FDEVENT_HUP) { /* check if we still have an unfinished header package which is really a body */ if (con->file_started == 0 && hctx->response_header->used) { con->file_started = 1; http_chunk_append_mem(srv, con, hctx->response_header->ptr, hctx->response_header->used); joblist_append(srv, con); } if (con->file_finished == 0) { http_chunk_append_mem(srv, con, NULL, 0); joblist_append(srv, con); } con->file_finished = 1; if (chunkqueue_is_empty(con->write_queue)) { /* there is nothing left to write */ connection_set_state(srv, con, CON_STATE_RESPONSE_END); } else { /* use the write-handler to finish the request on demand */ } # if 0 log_error_write(srv, __FILE__, __LINE__, "sddd", "got HUP from cgi", con->fd, hctx->fd, revents); # endif /* rtsigs didn't like the close */ cgi_connection_close(srv, hctx); }
else if (revents & FDEVENT_ERR) { con->file_finished = 1; /* kill all connections to the cgi process */ cgi_connection_close(srv, hctx); #if 1 log_error_write(srv, __FILE__, __LINE__, "s", "cgi-FDEVENT_ERR"); #endif return HANDLER_ERROR; } return HANDLER_FINISHED; } static int cgi_env_add(char_array *env, const char *key, size_t key_len, const char *val, size_t val_len) { char *dst; if (!key || !val) return -1; dst = malloc(key_len + val_len + 2); memcpy(dst, key, key_len); dst[key_len] = '='; memcpy(dst + key_len + 1, val, val_len); dst[key_len + 1 + val_len] = '\0'; if (env->size == 0) { env->size = 16; env->ptr = malloc(env->size * sizeof(*env->ptr)); } else if (env->size == env->used) { env->size += 16; env->ptr = realloc(env->ptr, env->size * sizeof(*env->ptr)); } env->ptr[env->used++] = dst; return 0; } static int cgi_create_env(server *srv, connection *con, plugin_data *p, buffer *cgi_handler) { pid_t pid; #ifdef HAVE_IPV6 char b2[INET6_ADDRSTRLEN + 1]; #endif int to_cgi_fds[2]; int from_cgi_fds[2]; struct stat st; #ifndef __WIN32 if (cgi_handler->used > 1) { /* stat the exec file */ if (-1 == (stat(cgi_handler->ptr, &st))) { log_error_write(srv, __FILE__, __LINE__, "sbss", "stat for cgi-handler", cgi_handler, "failed:", strerror(errno)); return -1; } } if (pipe(to_cgi_fds)) { log_error_write(srv, __FILE__, __LINE__, "ss", "pipe failed:", strerror(errno)); return -1; } if (pipe(from_cgi_fds)) { close(to_cgi_fds[0]); close(to_cgi_fds[1]); log_error_write(srv, __FILE__, __LINE__, "ss", "pipe failed:", strerror(errno)); return -1; } /* fork, execve */ switch (pid = fork()) { case 0: { /* child */ char **args; int argc; int i = 0; char buf[32]; size_t n; char_array env; char *c; const char *s; server_socket *srv_sock = con->srv_socket; /* move stdout to from_cgi_fd[1] */ close(STDOUT_FILENO); dup2(from_cgi_fds[1], STDOUT_FILENO); close(from_cgi_fds[1]); /* not needed */ close(from_cgi_fds[0]); /* move the stdin to to_cgi_fd[0] */ close(STDIN_FILENO); dup2(to_cgi_fds[0], STDIN_FILENO); close(to_cgi_fds[0]); /* not needed */ close(to_cgi_fds[1]); /* create environment */ env.ptr = NULL; env.size = 0; env.used = 0; if (buffer_is_empty(con->conf.server_tag)) { cgi_env_add(&env, CONST_STR_LEN("SERVER_SOFTWARE"), CONST_STR_LEN(PACKAGE_DESC)); } else { cgi_env_add(&env, CONST_STR_LEN("SERVER_SOFTWARE"), CONST_BUF_LEN(con->conf.server_tag)); } if (!buffer_is_empty(con->server_name)) { size_t len = con->server_name->used - 1; if (con->server_name->ptr[0] == '[') { const char *colon = strstr(con->server_name->ptr, "]:"); if (colon) len = (colon + 1) - con->server_name->ptr; } else { const char *colon = strchr(con->server_name->ptr, ':'); if (colon) len = colon - con->server_name->ptr; } cgi_env_add(&env, CONST_STR_LEN("SERVER_NAME"), con->server_name->ptr, len); } else { #ifdef HAVE_IPV6 s = inet_ntop(srv_sock->addr.plain.sa_family, srv_sock->addr.plain.sa_family == AF_INET6 ? (const void *) &(srv_sock->addr.ipv6.sin6_addr) : (const void *) &(srv_sock->addr.ipv4.sin_addr), b2, sizeof(b2)-1); #else s = inet_ntoa(srv_sock->addr.ipv4.sin_addr); #endif cgi_env_add(&env, CONST_STR_LEN("SERVER_NAME"), s, strlen(s)); } cgi_env_add(&env, CONST_STR_LEN("GATEWAY_INTERFACE"), CONST_STR_LEN("CGI/1.1")); s = get_http_version_name(con->request.http_version); cgi_env_add(&env, CONST_STR_LEN("SERVER_PROTOCOL"), s, strlen(s)); LI_ltostr(buf, #ifdef HAVE_IPV6 ntohs(srv_sock->addr.plain.sa_family == AF_INET6 ? 
srv_sock->addr.ipv6.sin6_port : srv_sock->addr.ipv4.sin_port) #else ntohs(srv_sock->addr.ipv4.sin_port) #endif ); cgi_env_add(&env, CONST_STR_LEN("SERVER_PORT"), buf, strlen(buf)); switch (srv_sock->addr.plain.sa_family) { #ifdef HAVE_IPV6 case AF_INET6: s = inet_ntop(srv_sock->addr.plain.sa_family, (const void *) &(srv_sock->addr.ipv6.sin6_addr), b2, sizeof(b2)-1); break; case AF_INET: s = inet_ntop(srv_sock->addr.plain.sa_family, (const void *) &(srv_sock->addr.ipv4.sin_addr), b2, sizeof(b2)-1); break; #else case AF_INET: s = inet_ntoa(srv_sock->addr.ipv4.sin_addr); break; #endif default: s = ""; break; } cgi_env_add(&env, CONST_STR_LEN("SERVER_ADDR"), s, strlen(s)); s = get_http_method_name(con->request.http_method); cgi_env_add(&env, CONST_STR_LEN("REQUEST_METHOD"), s, strlen(s)); if (!buffer_is_empty(con->request.pathinfo)) { cgi_env_add(&env, CONST_STR_LEN("PATH_INFO"), CONST_BUF_LEN(con->request.pathinfo)); } cgi_env_add(&env, CONST_STR_LEN("REDIRECT_STATUS"), CONST_STR_LEN("200")); if (!buffer_is_empty(con->uri.query)) { cgi_env_add(&env, CONST_STR_LEN("QUERY_STRING"), CONST_BUF_LEN(con->uri.query)); } if (!buffer_is_empty(con->request.orig_uri)) { cgi_env_add(&env, CONST_STR_LEN("REQUEST_URI"), CONST_BUF_LEN(con->request.orig_uri)); } switch (con->dst_addr.plain.sa_family) { #ifdef HAVE_IPV6 case AF_INET6: s = inet_ntop(con->dst_addr.plain.sa_family, (const void *) &(con->dst_addr.ipv6.sin6_addr), b2, sizeof(b2)-1); break; case AF_INET: s = inet_ntop(con->dst_addr.plain.sa_family, (const void *) &(con->dst_addr.ipv4.sin_addr), b2, sizeof(b2)-1); break; #else case AF_INET: s = inet_ntoa(con->dst_addr.ipv4.sin_addr); break; #endif default: s = ""; break; } cgi_env_add(&env, CONST_STR_LEN("REMOTE_ADDR"), s, strlen(s)); LI_ltostr(buf, #ifdef HAVE_IPV6 ntohs(con->dst_addr.plain.sa_family == AF_INET6 ? 
con->dst_addr.ipv6.sin6_port : con->dst_addr.ipv4.sin_port) #else ntohs(con->dst_addr.ipv4.sin_port) #endif ); cgi_env_add(&env, CONST_STR_LEN("REMOTE_PORT"), buf, strlen(buf)); if (buffer_is_equal_caseless_string(con->uri.scheme, CONST_STR_LEN("https"))) { cgi_env_add(&env, CONST_STR_LEN("HTTPS"), CONST_STR_LEN("on")); } /* request.content_length < SSIZE_MAX, see request.c */ LI_ltostr(buf, con->request.content_length); cgi_env_add(&env, CONST_STR_LEN("CONTENT_LENGTH"), buf, strlen(buf)); cgi_env_add(&env, CONST_STR_LEN("SCRIPT_FILENAME"), CONST_BUF_LEN(con->physical.path)); cgi_env_add(&env, CONST_STR_LEN("SCRIPT_NAME"), CONST_BUF_LEN(con->uri.path)); cgi_env_add(&env, CONST_STR_LEN("DOCUMENT_ROOT"), CONST_BUF_LEN(con->physical.basedir)); /* for valgrind */ if (NULL != (s = getenv("LD_PRELOAD"))) { cgi_env_add(&env, CONST_STR_LEN("LD_PRELOAD"), s, strlen(s)); } if (NULL != (s = getenv("LD_LIBRARY_PATH"))) { cgi_env_add(&env, CONST_STR_LEN("LD_LIBRARY_PATH"), s, strlen(s)); } #ifdef __CYGWIN__ /* CYGWIN needs SYSTEMROOT */ if (NULL != (s = getenv("SYSTEMROOT"))) { cgi_env_add(&env, CONST_STR_LEN("SYSTEMROOT"), s, strlen(s)); } #endif for (n = 0; n < con->request.headers->used; n++) { data_string *ds; ds = (data_string *)con->request.headers->data[n]; if (ds->value->used && ds->key->used) { size_t j; buffer_reset(p->tmp_buf); if (0 != strcasecmp(ds->key->ptr, "CONTENT-TYPE")) { buffer_copy_string_len(p->tmp_buf, CONST_STR_LEN("HTTP_")); p->tmp_buf->used--; /* strip \0 after HTTP_ */ } buffer_prepare_append(p->tmp_buf, ds->key->used + 2); for (j = 0; j < ds->key->used - 1; j++) { char cr = '_'; if (light_isalpha(ds->key->ptr[j])) { /* upper-case */ cr = ds->key->ptr[j] & ~32; } else if (light_isdigit(ds->key->ptr[j])) { /* copy */ cr = ds->key->ptr[j]; } p->tmp_buf->ptr[p->tmp_buf->used++] = cr; } p->tmp_buf->ptr[p->tmp_buf->used++] = '\0'; cgi_env_add(&env, CONST_BUF_LEN(p->tmp_buf), CONST_BUF_LEN(ds->value)); } } for (n = 0; n < con->environment->used; n++) { data_string *ds; ds = (data_string *)con->environment->data[n]; if (ds->value->used && ds->key->used) { size_t j; buffer_reset(p->tmp_buf); buffer_prepare_append(p->tmp_buf, ds->key->used + 2); for (j = 0; j < ds->key->used - 1; j++) { char cr = '_'; if (light_isalpha(ds->key->ptr[j])) { /* upper-case */ cr = ds->key->ptr[j] & ~32; } else if (light_isdigit(ds->key->ptr[j])) { /* copy */ cr = ds->key->ptr[j]; } p->tmp_buf->ptr[p->tmp_buf->used++] = cr; } p->tmp_buf->ptr[p->tmp_buf->used++] = '\0'; cgi_env_add(&env, CONST_BUF_LEN(p->tmp_buf), CONST_BUF_LEN(ds->value)); } } if (env.size == env.used) { env.size += 16; env.ptr = realloc(env.ptr, env.size * sizeof(*env.ptr)); } env.ptr[env.used] = NULL; /* set up args */ argc = 3; args = malloc(sizeof(*args) * argc); i = 0; if (cgi_handler->used > 1) { args[i++] = cgi_handler->ptr; } args[i++] = con->physical.path->ptr; args[i ] = NULL; /* search for the last / */ if (NULL != (c = strrchr(con->physical.path->ptr, '/'))) { *c = '\0'; /* change to the physical directory */ if (-1 == chdir(con->physical.path->ptr)) { log_error_write(srv, __FILE__, __LINE__, "ssb", "chdir failed:", strerror(errno), con->physical.path); } *c = '/'; } /* we don't need the client socket */ for (i = 3; i < 256; i++) { if (i != srv->errorlog_fd) close(i); } /* exec the cgi */ execve(args[0], args, env.ptr); /* log_error_write(srv, __FILE__, __LINE__, "sss", "CGI failed:", strerror(errno), args[0]); */ /* */ SEGFAULT(); break; } case -1: /* error */ log_error_write(srv, __FILE__, __LINE__, "ss", "fork failed:", 
strerror(errno)); close(from_cgi_fds[0]); close(from_cgi_fds[1]); close(to_cgi_fds[0]); close(to_cgi_fds[1]); return -1; break; default: { handler_ctx *hctx; /* parent */ close(from_cgi_fds[1]); close(to_cgi_fds[0]); if (con->request.content_length) { chunkqueue *cq = con->request_content_queue; chunk *c; assert(chunkqueue_length(cq) == (off_t)con->request.content_length); /* there is content to send */ for (c = cq->first; c; c = cq->first) { int r = 0; /* copy all chunks */ switch(c->type) { case FILE_CHUNK: if (c->file.mmap.start == MAP_FAILED) { if (-1 == c->file.fd && /* open the file if not already open */ -1 == (c->file.fd = open(c->file.name->ptr, O_RDONLY))) { log_error_write(srv, __FILE__, __LINE__, "ss", "open failed: ", strerror(errno)); close(from_cgi_fds[0]); close(to_cgi_fds[1]); return -1; } c->file.mmap.length = c->file.length; if (MAP_FAILED == (c->file.mmap.start = mmap(NULL, c->file.mmap.length, PROT_READ, MAP_SHARED, c->file.fd, 0))) { log_error_write(srv, __FILE__, __LINE__, "ssbd", "mmap failed: ", strerror(errno), c->file.name, c->file.fd); close(from_cgi_fds[0]); close(to_cgi_fds[1]); return -1; } close(c->file.fd); c->file.fd = -1; /* chunk_reset() or chunk_free() will cleanup for us */ } if ((r = write(to_cgi_fds[1], c->file.mmap.start + c->offset, c->file.length - c->offset)) < 0) { switch(errno) { case ENOSPC: con->http_status = 507; break; case EINTR: continue; default: con->http_status = 403; break; } } break; case MEM_CHUNK: if ((r = write(to_cgi_fds[1], c->mem->ptr + c->offset, c->mem->used - c->offset - 1)) < 0) { switch(errno) { case ENOSPC: con->http_status = 507; break; case EINTR: continue; default: con->http_status = 403; break; } } break; case UNUSED_CHUNK: break; } if (r > 0) { c->offset += r; cq->bytes_out += r; } else { log_error_write(srv, __FILE__, __LINE__, "ss", "write() failed due to: ", strerror(errno)); con->http_status = 500; break; } chunkqueue_remove_finished_chunks(cq); } } close(to_cgi_fds[1]); /* register the PID and wait for it asynchronously */ con->mode = p->id; buffer_reset(con->physical.path); hctx = cgi_handler_ctx_init(); hctx->remote_conn = con; hctx->plugin_data = p; hctx->pid = pid; hctx->fd = from_cgi_fds[0]; hctx->fde_ndx = -1; con->plugin_ctx[p->id] = hctx; fdevent_register(srv->ev, hctx->fd, cgi_handle_fdevent, hctx); fdevent_event_set(srv->ev, &(hctx->fde_ndx), hctx->fd, FDEVENT_IN); if (-1 == fdevent_fcntl_set(srv->ev, hctx->fd)) { log_error_write(srv, __FILE__, __LINE__, "ss", "fcntl failed: ", strerror(errno)); fdevent_event_del(srv->ev, &(hctx->fde_ndx), hctx->fd); fdevent_unregister(srv->ev, hctx->fd); log_error_write(srv, __FILE__, __LINE__, "sd", "cgi close:", hctx->fd); close(hctx->fd); cgi_handler_ctx_free(hctx); con->plugin_ctx[p->id] = NULL; return -1; } break; } } return 0; #else return -1; #endif } #define PATCH(x) \ p->conf.x = s->x; static int mod_cgi_patch_connection(server *srv, connection *con, plugin_data *p) { size_t i, j; plugin_config *s = p->config_storage[0]; PATCH(cgi); PATCH(execute_x_only); /* skip the first, the global context */ for (i = 1; i < srv->config_context->used; i++) { data_config *dc = (data_config *)srv->config_context->data[i]; s = p->config_storage[i]; /* condition didn't match */ if (!config_check_cond(srv, con, dc)) continue; /* merge config */ for (j = 0; j < dc->value->used; j++) { data_unset *du = dc->value->data[j]; if (buffer_is_equal_string(du->key, CONST_STR_LEN("cgi.assign"))) { PATCH(cgi); } else if (buffer_is_equal_string(du->key, CONST_STR_LEN("cgi.execute-x-only"))) {
PATCH(execute_x_only); } } } return 0; } #undef PATCH URIHANDLER_FUNC(cgi_is_handled) { size_t k, s_len; plugin_data *p = p_d; buffer *fn = con->physical.path; stat_cache_entry *sce = NULL; if (con->mode != DIRECT) return HANDLER_GO_ON; if (fn->used == 0) return HANDLER_GO_ON; mod_cgi_patch_connection(srv, con, p); if (HANDLER_ERROR == stat_cache_get_entry(srv, con, con->physical.path, &sce)) return HANDLER_GO_ON; if (!S_ISREG(sce->st.st_mode)) return HANDLER_GO_ON; if (p->conf.execute_x_only == 1 && (sce->st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) return HANDLER_GO_ON; s_len = fn->used - 1; for (k = 0; k < p->conf.cgi->used; k++) { data_string *ds = (data_string *)p->conf.cgi->data[k]; size_t ct_len = ds->key->used - 1; if (ds->key->used == 0) continue; if (s_len < ct_len) continue; if (0 == strncmp(fn->ptr + s_len - ct_len, ds->key->ptr, ct_len)) { if (cgi_create_env(srv, con, p, ds->value)) { con->mode = DIRECT; con->http_status = 500; buffer_reset(con->physical.path); return HANDLER_FINISHED; } /* one handler is enough for the request */ break; } } return HANDLER_GO_ON; } TRIGGER_FUNC(cgi_trigger) { plugin_data *p = p_d; size_t ndx; /* the trigger handler only cares about lonely PIDs which we have to wait for */ #ifndef __WIN32 for (ndx = 0; ndx < p->cgi_pid.used; ndx++) { int status; switch(waitpid(p->cgi_pid.ptr[ndx], &status, WNOHANG)) { case 0: /* not finished yet */ #if 0 log_error_write(srv, __FILE__, __LINE__, "sd", "(debug) child isn't done yet, pid:", p->cgi_pid.ptr[ndx]); #endif break; case -1: if (errno == ECHILD) { /* someone else called waitpid... remove the pid to stop looping the error each time */ log_error_write(srv, __FILE__, __LINE__, "s", "cgi child vanished, probably someone else called waitpid"); cgi_pid_del(srv, p, p->cgi_pid.ptr[ndx]); ndx--; continue; } log_error_write(srv, __FILE__, __LINE__, "ss", "waitpid failed: ", strerror(errno)); return HANDLER_ERROR; default: if (WIFEXITED(status)) { #if 0 log_error_write(srv, __FILE__, __LINE__, "sd", "(debug) cgi exited fine, pid:", p->cgi_pid.ptr[ndx]); #endif } else if (WIFSIGNALED(status)) { /* FIXME: what if we killed the CGI script with a kill(..., SIGTERM) ?
*/ if (WTERMSIG(status) != SIGTERM) { log_error_write(srv, __FILE__, __LINE__, "sd", "cleaning up CGI: process died with signal", WTERMSIG(status)); } } else { log_error_write(srv, __FILE__, __LINE__, "s", "cleaning up CGI: ended unexpectedly"); } cgi_pid_del(srv, p, p->cgi_pid.ptr[ndx]); /* del modified the buffer structure * and copies the last entry to the current one * -> recheck the current index */ ndx--; } } #endif return HANDLER_GO_ON; } /* * - HANDLER_GO_ON : not our job * - HANDLER_FINISHED: got response header * - HANDLER_WAIT_FOR_EVENT: waiting for response header */ SUBREQUEST_FUNC(mod_cgi_handle_subrequest) { int status; plugin_data *p = p_d; handler_ctx *hctx = con->plugin_ctx[p->id]; if (con->mode != p->id) return HANDLER_GO_ON; if (NULL == hctx) return HANDLER_GO_ON; #if 0 log_error_write(srv, __FILE__, __LINE__, "sdd", "subrequest, pid =", hctx, hctx->pid); #endif if (hctx->pid == 0) { /* cgi already dead */ if (!con->file_started) return HANDLER_WAIT_FOR_EVENT; return HANDLER_FINISHED; } #ifndef __WIN32 switch(waitpid(hctx->pid, &status, WNOHANG)) { case 0: /* we only wait for events here if we don't have the header yet, * otherwise the event-handler will send us the incoming data */ if (con->file_started) return HANDLER_FINISHED; return HANDLER_WAIT_FOR_EVENT; case -1: if (errno == EINTR) return HANDLER_WAIT_FOR_EVENT; if (errno == ECHILD && con->file_started == 0) { /* * second round but still no response */ return HANDLER_WAIT_FOR_EVENT; } log_error_write(srv, __FILE__, __LINE__, "ss", "waitpid failed: ", strerror(errno)); con->mode = DIRECT; con->http_status = 500; hctx->pid = 0; fdevent_event_del(srv->ev, &(hctx->fde_ndx), hctx->fd); fdevent_unregister(srv->ev, hctx->fd); if (close(hctx->fd)) { log_error_write(srv, __FILE__, __LINE__, "sds", "cgi close failed ", hctx->fd, strerror(errno)); } cgi_handler_ctx_free(hctx); con->plugin_ctx[p->id] = NULL; return HANDLER_FINISHED; default: /* cgi process exited */ hctx->pid = 0; /* we already have response headers? just continue */ if (con->file_started) return HANDLER_FINISHED; if (WIFEXITED(status)) { /* clean exit - just continue */ return HANDLER_WAIT_FOR_EVENT; } /* cgi proc died, and we didn't get any data yet - send error message and close cgi con */ log_error_write(srv, __FILE__, __LINE__, "s", "cgi died ?"); con->http_status = 500; con->mode = DIRECT; fdevent_event_del(srv->ev, &(hctx->fde_ndx), hctx->fd); fdevent_unregister(srv->ev, hctx->fd); if (close(hctx->fd)) { log_error_write(srv, __FILE__, __LINE__, "sds", "cgi close failed ", hctx->fd, strerror(errno)); } cgi_handler_ctx_free(hctx); con->plugin_ctx[p->id] = NULL; return HANDLER_FINISHED; } #else return HANDLER_ERROR; #endif } int mod_cgi_plugin_init(plugin *p); int mod_cgi_plugin_init(plugin *p) { p->version = LIGHTTPD_VERSION_ID; p->name = buffer_init_string("cgi"); p->connection_reset = cgi_connection_close_callback; p->handle_subrequest_start = cgi_is_handled; p->handle_subrequest = mod_cgi_handle_subrequest; #if 0 p->handle_fdevent = cgi_handle_fdevent; #endif p->handle_trigger = cgi_trigger; p->init = mod_cgi_init; p->cleanup = mod_cgi_free; p->set_defaults = mod_fastcgi_set_defaults; p->data = NULL; return 0; }
gpl-2.0
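The most reusable idea in the mod_cgi source above is the header-name mangling in cgi_create_env(): each request header becomes a CGI meta-variable by upper-casing letters, keeping digits, mapping everything else (typically '-') to '_', and prefixing "HTTP_" for all headers except Content-Type, per CGI/1.1. A minimal standalone sketch of that same transformation, using hypothetical names and plain C strings instead of lighttpd's buffer type:

#include <ctype.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>

/* mirror of the mangling loop in cgi_create_env(); out receives the result */
static void header_to_cgi_name(const char *key, char *out, size_t outsz) {
	size_t n = 0;
	if (0 != strcasecmp(key, "CONTENT-TYPE")) {
		n = snprintf(out, outsz, "HTTP_");
		if (n >= outsz) n = outsz - 1; /* guard against tiny buffers */
	}
	for (size_t j = 0; key[j] != '\0' && n + 1 < outsz; j++) {
		char cr = '_';                 /* '-' and other punctuation become '_' */
		if (isalpha((unsigned char)key[j])) cr = (char)toupper((unsigned char)key[j]);
		else if (isdigit((unsigned char)key[j])) cr = key[j];
		out[n++] = cr;
	}
	out[n] = '\0';
}

int main(void) {
	char buf[64];
	header_to_cgi_name("Accept-Encoding", buf, sizeof buf);
	printf("%s\n", buf); /* HTTP_ACCEPT_ENCODING */
	header_to_cgi_name("Content-Type", buf, sizeof buf);
	printf("%s\n", buf); /* CONTENT_TYPE */
	return 0;
}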
fulcrum7/mq107-kernel
drivers/video/ti81xx/tlc59108/tlc59108_drv.c
5
4351
/* * tlc59108_drv.c * * Copyright (C) 2011 Texas Instruments * Author: Senthil Natarajan * * tlc59108 Backlight Driver * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. * History: * * Senthil Natarajan<senthil.n@ti.com> July 2011 I2C driver for tlc59108 * backlight control */ #include <linux/i2c.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/backlight.h> #include <linux/fb.h> #define tlc59108_MODULE_NAME "tlc59108" #define TLC59108_MODE1 0x00 #define TLC59108_PWM2 0x04 #define TLC59108_LEDOUT0 0x0c #define TLC59108_LEDOUT1 0x0d #define TLC59108_MAX_BRIGHTNESS 0xFF struct tlc59108_bl { struct i2c_client *client; struct backlight_device *bl; }; static void tlc59108_bl_set_backlight(struct tlc59108_bl *data, int brightness) { /* Set Mode1 Register */ i2c_smbus_write_byte_data(data->client, TLC59108_MODE1, 0x00); /* Set LEDOUT0 Register */ i2c_smbus_write_byte_data(data->client, TLC59108_LEDOUT0, 0x21); /* Set Backlight Duty Cycle*/ i2c_smbus_write_byte_data(data->client, TLC59108_PWM2, brightness & 0xff); /* Set MODE3, UPDN*/ i2c_smbus_write_byte_data(data->client, TLC59108_LEDOUT1, 0x11); } static int tlc59108_bl_get_brightness(struct backlight_device *dev) { struct backlight_properties *props = &dev->props; return props->brightness; } static int tlc59108_bl_update_status(struct backlight_device *dev) { struct backlight_properties *props = &dev->props; struct tlc59108_bl *data = dev_get_drvdata(&dev->dev); int brightness = props->brightness; tlc59108_bl_set_backlight(data, brightness); return 0; } static const struct backlight_ops bl_ops = { .get_brightness = tlc59108_bl_get_brightness, .update_status = tlc59108_bl_update_status, }; static int tlc59108_probe(struct i2c_client *c, const struct i2c_device_id *id) { struct backlight_properties props; struct tlc59108_bl *data = kzalloc(sizeof(struct tlc59108_bl), GFP_KERNEL); int ret = 0; if (!data) return -ENOMEM; i2c_set_clientdata(c, data); data->client = c; memset(&props, 0, sizeof(struct backlight_properties)); props.max_brightness = TLC59108_MAX_BRIGHTNESS; data->bl = backlight_device_register("tlc59108-bl", &c->dev, data, &bl_ops, &props); if (IS_ERR(data->bl)) { ret = PTR_ERR(data->bl); goto err_reg; } data->bl->props.brightness = TLC59108_MAX_BRIGHTNESS; backlight_update_status(data->bl); return 0; err_reg: data->bl = NULL; kfree(data); return ret; } static int tlc59108_remove(struct i2c_client *c) { struct tlc59108_bl *data = i2c_get_clientdata(c); backlight_device_unregister(data->bl); data->bl = NULL; kfree(data); return 0; } static int tlc59108_bl_suspend(struct i2c_client *client, pm_message_t pm) { struct tlc59108_bl *data = i2c_get_clientdata(client); tlc59108_bl_set_backlight(data, 0); return 0; } static int tlc59108_bl_resume(struct i2c_client *client) { struct tlc59108_bl *data = i2c_get_clientdata(client); backlight_update_status(data->bl); return 0; } /* I2C Device ID table */ static const struct i2c_device_id tlc59108_id[] = { { "tlc59108", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, tlc59108_id); 
/* I2C driver data */ static struct i2c_driver tlc59108_driver = { .driver = { .owner = THIS_MODULE, .name = tlc59108_MODULE_NAME, }, .probe = tlc59108_probe, .remove = tlc59108_remove, .suspend = tlc59108_bl_suspend, .resume = tlc59108_bl_resume, .id_table = tlc59108_id, }; static int __init tlc59108_init(void) { return i2c_add_driver(&tlc59108_driver); } static void __exit tlc59108_exit(void) { i2c_del_driver(&tlc59108_driver); } module_init(tlc59108_init); module_exit(tlc59108_exit); MODULE_DESCRIPTION("LCD/Backlight control for TLC59108"); MODULE_AUTHOR("Senthil Natarajan <senthil.n@ti.com>"); MODULE_LICENSE("GPL v2");
gpl-2.0
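Because tlc59108_probe() registers a standard backlight class device named "tlc59108-bl", no custom userspace interface is needed: writes to the usual /sys/class/backlight node land in tlc59108_bl_update_status(), which forwards the 0-255 value to the PWM2 duty-cycle register over I2C. A small userspace sketch, assuming the standard backlight sysfs layout and that this driver actually probed on the target board:

#include <stdio.h>

/* write a 0..255 brightness to the TLC59108 backlight sysfs node */
static int set_backlight(int brightness) {
	FILE *f = fopen("/sys/class/backlight/tlc59108-bl/brightness", "w");
	if (f == NULL) return -1;
	fprintf(f, "%d\n", brightness);
	return fclose(f);
}

int main(void) {
	return (0 == set_backlight(128)) ? 0 : 1; /* roughly half brightness */
}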
surkovalex/xbmc
xbmc/pictures/GUIWindowPictures.cpp
5
19056
/* * Copyright (C) 2005-2013 Team XBMC * http://xbmc.org * * This Program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This Program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with XBMC; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * */ #include "threads/SystemClock.h" #include "system.h" #include "GUIWindowPictures.h" #include "URL.h" #include "Util.h" #include "Application.h" #include "GUIPassword.h" #include "GUIDialogPictureInfo.h" #include "addons/GUIDialogAddonInfo.h" #include "dialogs/GUIDialogMediaSource.h" #include "dialogs/GUIDialogProgress.h" #include "playlists/PlayListFactory.h" #include "PictureInfoLoader.h" #include "guilib/GUIWindowManager.h" #include "input/Key.h" #include "dialogs/GUIDialogOK.h" #include "view/GUIViewState.h" #include "playlists/PlayList.h" #include "settings/MediaSourceSettings.h" #include "settings/Settings.h" #include "utils/log.h" #include "utils/URIUtils.h" #include "utils/Variant.h" #include "Autorun.h" #include "interfaces/AnnouncementManager.h" #include "utils/SortUtils.h" #include "utils/StringUtils.h" #include "GUIWindowSlideShow.h" #ifdef TARGET_POSIX #include "linux/XTimeUtils.h" #endif #define CONTROL_BTNVIEWASICONS 2 #define CONTROL_BTNSORTBY 3 #define CONTROL_BTNSORTASC 4 #define CONTROL_LABELFILES 12 using namespace XFILE; using namespace PLAYLIST; #define CONTROL_BTNSLIDESHOW 6 #define CONTROL_BTNSLIDESHOW_RECURSIVE 7 #define CONTROL_SHUFFLE 9 CGUIWindowPictures::CGUIWindowPictures(void) : CGUIMediaWindow(WINDOW_PICTURES, "MyPics.xml") { m_thumbLoader.SetObserver(this); m_slideShowStarted = false; m_dlgProgress = NULL; } void CGUIWindowPictures::OnInitWindow() { CGUIMediaWindow::OnInitWindow(); if (m_slideShowStarted) { CGUIWindowSlideShow* wndw = (CGUIWindowSlideShow*)g_windowManager.GetWindow(WINDOW_SLIDESHOW); std::string path; if (wndw && wndw->GetCurrentSlide()) path = URIUtils::GetDirectory(wndw->GetCurrentSlide()->GetPath()); if (m_vecItems->IsPath(path)) { if (wndw && wndw->GetCurrentSlide()) m_viewControl.SetSelectedItem(wndw->GetCurrentSlide()->GetPath()); SaveSelectedItemInHistory(); } m_slideShowStarted = false; } } CGUIWindowPictures::~CGUIWindowPictures(void) { } bool CGUIWindowPictures::OnMessage(CGUIMessage& message) { switch ( message.GetMessage() ) { case GUI_MSG_WINDOW_DEINIT: { if (m_thumbLoader.IsLoading()) m_thumbLoader.StopThread(); } break; case GUI_MSG_WINDOW_INIT: { // is this the first time accessing this window? if (m_vecItems->GetPath() == "?" 
&& message.GetStringParam().empty()) message.SetStringParam(CMediaSourceSettings::GetInstance().GetDefaultSource("pictures")); m_dlgProgress = (CGUIDialogProgress*)g_windowManager.GetWindow(WINDOW_DIALOG_PROGRESS); if (!CGUIMediaWindow::OnMessage(message)) return false; return true; } break; case GUI_MSG_CLICKED: { int iControl = message.GetSenderId(); if (iControl == CONTROL_BTNSLIDESHOW) // Slide Show { OnSlideShow(); } else if (iControl == CONTROL_BTNSLIDESHOW_RECURSIVE) // Recursive Slide Show { OnSlideShowRecursive(); } else if (iControl == CONTROL_SHUFFLE) { CSettings::GetInstance().ToggleBool(CSettings::SETTING_SLIDESHOW_SHUFFLE); CSettings::GetInstance().Save(); } else if (m_viewControl.HasControl(iControl)) // list/thumb control { int iItem = m_viewControl.GetSelectedItem(); int iAction = message.GetParam1(); // iItem is checked for validity inside these routines if (iAction == ACTION_DELETE_ITEM) { // is delete allowed? if (CSettings::GetInstance().GetBool(CSettings::SETTING_FILELISTS_ALLOWFILEDELETION)) OnDeleteItem(iItem); else return false; } else if (iAction == ACTION_PLAYER_PLAY) { ShowPicture(iItem, true); return true; } else if (iAction == ACTION_SHOW_INFO) { OnItemInfo(iItem); return true; } } } break; } return CGUIMediaWindow::OnMessage(message); } void CGUIWindowPictures::UpdateButtons() { CGUIMediaWindow::UpdateButtons(); // Update the shuffle button SET_CONTROL_SELECTED(GetID(), CONTROL_SHUFFLE, CSettings::GetInstance().GetBool(CSettings::SETTING_SLIDESHOW_SHUFFLE)); // check we can slideshow or recursive slideshow int nFolders = m_vecItems->GetFolderCount(); if (nFolders == m_vecItems->Size() || m_vecItems->GetPath() == "addons://sources/image/") { CONTROL_DISABLE(CONTROL_BTNSLIDESHOW); } else { CONTROL_ENABLE(CONTROL_BTNSLIDESHOW); } if (m_guiState.get() && !m_guiState->HideParentDirItems()) nFolders--; if (m_vecItems->Size() == 0 || nFolders == 0 || m_vecItems->GetPath() == "addons://sources/image/") { CONTROL_DISABLE(CONTROL_BTNSLIDESHOW_RECURSIVE); } else { CONTROL_ENABLE(CONTROL_BTNSLIDESHOW_RECURSIVE); } } void CGUIWindowPictures::OnPrepareFileItems(CFileItemList& items) { CGUIMediaWindow::OnPrepareFileItems(items); for (int i=0;i<items.Size();++i ) if (StringUtils::EqualsNoCase(items[i]->GetLabel(), "folder.jpg")) items.Remove(i); if (items.GetFolderCount() == items.Size()) return; // Start the picture info loader thread CPictureInfoLoader loader; loader.SetProgressCallback(m_dlgProgress); loader.Load(items); bool bShowProgress=!g_windowManager.HasModalDialog(); bool bProgressVisible=false; unsigned int tick=XbmcThreads::SystemClockMillis(); while (loader.IsLoading() && m_dlgProgress && !m_dlgProgress->IsCanceled()) { if (bShowProgress) { // Do we have to init a progress dialog?
unsigned int elapsed=XbmcThreads::SystemClockMillis()-tick; if (!bProgressVisible && elapsed>1500 && m_dlgProgress) { // tag loading takes more than 1.5 secs, show a progress dialog CURL url(items.GetPath()); m_dlgProgress->SetHeading(CVariant{189}); m_dlgProgress->SetLine(0, CVariant{505}); m_dlgProgress->SetLine(1, CVariant{""}); m_dlgProgress->SetLine(2, CVariant{url.GetWithoutUserDetails()}); m_dlgProgress->Open(); m_dlgProgress->ShowProgressBar(true); bProgressVisible = true; } if (bProgressVisible && m_dlgProgress) { // keep GUI alive m_dlgProgress->Progress(); } } // if (bShowProgress) Sleep(1); } // while (loader.IsLoading()) if (bProgressVisible && m_dlgProgress) m_dlgProgress->Close(); } bool CGUIWindowPictures::Update(const std::string &strDirectory, bool updateFilterPath /* = true */) { if (m_thumbLoader.IsLoading()) m_thumbLoader.StopThread(); if (!CGUIMediaWindow::Update(strDirectory, updateFilterPath)) return false; m_vecItems->SetArt("thumb", ""); if (CSettings::GetInstance().GetBool(CSettings::SETTING_PICTURES_GENERATETHUMBS)) m_thumbLoader.Load(*m_vecItems); CPictureThumbLoader thumbLoader; std::string thumb = thumbLoader.GetCachedImage(*m_vecItems, "thumb"); m_vecItems->SetArt("thumb", thumb); return true; } bool CGUIWindowPictures::OnClick(int iItem, const std::string &player) { if ( iItem < 0 || iItem >= (int)m_vecItems->Size() ) return true; CFileItemPtr pItem = m_vecItems->Get(iItem); if (pItem->IsCBZ() || pItem->IsCBR()) { CURL pathToUrl; if (pItem->IsCBZ()) pathToUrl = URIUtils::CreateArchivePath("zip", pItem->GetURL(), ""); else pathToUrl = URIUtils::CreateArchivePath("rar", pItem->GetURL(), ""); OnShowPictureRecursive(pathToUrl.Get()); return true; } else if (CGUIMediaWindow::OnClick(iItem, player)) return true; return false; } bool CGUIWindowPictures::GetDirectory(const std::string &strDirectory, CFileItemList& items) { if (!CGUIMediaWindow::GetDirectory(strDirectory, items)) return false; std::string label; if (items.GetLabel().empty() && m_rootDir.IsSource(items.GetPath(), CMediaSourceSettings::GetInstance().GetSources("pictures"), &label)) items.SetLabel(label); if (items.GetContent().empty() && !items.IsVirtualDirectoryRoot() && !items.IsPlugin()) items.SetContent("images"); return true; } bool CGUIWindowPictures::OnPlayMedia(int iItem, const std::string &player) { if (m_vecItems->Get(iItem)->IsVideo()) return CGUIMediaWindow::OnPlayMedia(iItem); return ShowPicture(iItem, false); } bool CGUIWindowPictures::ShowPicture(int iItem, bool startSlideShow) { if ( iItem < 0 || iItem >= (int)m_vecItems->Size() ) return false; CFileItemPtr pItem = m_vecItems->Get(iItem); std::string strPicture = pItem->GetPath(); #ifdef HAS_DVD_DRIVE if (pItem->IsDVD()) return MEDIA_DETECT::CAutorun::PlayDiscAskResume(m_vecItems->Get(iItem)->GetPath()); #endif if (pItem->m_bIsShareOrDrive) return false; CGUIWindowSlideShow *pSlideShow = (CGUIWindowSlideShow *)g_windowManager.GetWindow(WINDOW_SLIDESHOW); if (!pSlideShow) return false; if (g_application.m_pPlayer->IsPlayingVideo()) g_application.StopPlaying(); pSlideShow->Reset(); for (int i = 0; i < (int)m_vecItems->Size();++i) { CFileItemPtr pItem = m_vecItems->Get(i); if (!pItem->m_bIsFolder && !(URIUtils::IsRAR(pItem->GetPath()) || URIUtils::IsZIP(pItem->GetPath())) && (pItem->IsPicture() || ( CSettings::GetInstance().GetBool(CSettings::SETTING_PICTURES_SHOWVIDEOS) && pItem->IsVideo()))) { pSlideShow->Add(pItem.get()); } } if (pSlideShow->NumSlides() == 0) return false; pSlideShow->Select(strPicture); if (startSlideShow)
pSlideShow->StartSlideShow(); else { CVariant param; param["player"]["speed"] = 1; param["player"]["playerid"] = PLAYLIST_PICTURE; ANNOUNCEMENT::CAnnouncementManager::GetInstance().Announce(ANNOUNCEMENT::Player, "xbmc", "OnPlay", pSlideShow->GetCurrentSlide(), param); } m_slideShowStarted = true; g_windowManager.ActivateWindow(WINDOW_SLIDESHOW); return true; } void CGUIWindowPictures::OnShowPictureRecursive(const std::string& strPath) { CGUIWindowSlideShow *pSlideShow = (CGUIWindowSlideShow *)g_windowManager.GetWindow(WINDOW_SLIDESHOW); if (pSlideShow) { // stop any video if (g_application.m_pPlayer->IsPlayingVideo()) g_application.StopPlaying(); SortDescription sorting = m_guiState->GetSortMethod(); pSlideShow->AddFromPath(strPath, true, sorting.sortBy, sorting.sortOrder, sorting.sortAttributes); if (pSlideShow->NumSlides()) { m_slideShowStarted = true; g_windowManager.ActivateWindow(WINDOW_SLIDESHOW); } } } void CGUIWindowPictures::OnSlideShowRecursive(const std::string &strPicture) { CGUIWindowSlideShow *pSlideShow = (CGUIWindowSlideShow *)g_windowManager.GetWindow(WINDOW_SLIDESHOW); if (pSlideShow) { std::string strExtensions; CFileItemList items; CGUIViewState* viewState=CGUIViewState::GetViewState(GetID(), items); if (viewState) { strExtensions = viewState->GetExtensions(); delete viewState; } m_slideShowStarted = true; SortDescription sorting = m_guiState->GetSortMethod(); pSlideShow->RunSlideShow(strPicture, true, CSettings::GetInstance().GetBool(CSettings::SETTING_SLIDESHOW_SHUFFLE),false, "", true, sorting.sortBy, sorting.sortOrder, sorting.sortAttributes, strExtensions); } } void CGUIWindowPictures::OnSlideShowRecursive() { std::string strEmpty = ""; OnSlideShowRecursive(m_vecItems->GetPath()); } void CGUIWindowPictures::OnSlideShow() { OnSlideShow(m_vecItems->GetPath()); } void CGUIWindowPictures::OnSlideShow(const std::string &strPicture) { CGUIWindowSlideShow *pSlideShow = (CGUIWindowSlideShow *)g_windowManager.GetWindow(WINDOW_SLIDESHOW); if (pSlideShow) { std::string strExtensions; CFileItemList items; CGUIViewState* viewState=CGUIViewState::GetViewState(GetID(), items); if (viewState) { strExtensions = viewState->GetExtensions(); delete viewState; } m_slideShowStarted = true; SortDescription sorting = m_guiState->GetSortMethod(); pSlideShow->RunSlideShow(strPicture, false ,false, false, "", true, sorting.sortBy, sorting.sortOrder, sorting.sortAttributes, strExtensions); } } void CGUIWindowPictures::OnRegenerateThumbs() { if (m_thumbLoader.IsLoading()) return; m_thumbLoader.SetRegenerateThumbs(true); m_thumbLoader.Load(*m_vecItems); } void CGUIWindowPictures::GetContextButtons(int itemNumber, CContextButtons &buttons) { CFileItemPtr item; if (itemNumber >= 0 && itemNumber < m_vecItems->Size()) item = m_vecItems->Get(itemNumber); if (item) { if ( m_vecItems->IsVirtualDirectoryRoot() || m_vecItems->GetPath() == "sources://pictures/" ) { CGUIDialogContextMenu::GetContextButtons("pictures", item, buttons); } else { if (item) { if (!(item->m_bIsFolder || item->IsZIP() || item->IsRAR() || item->IsCBZ() || item->IsCBR() || item->IsScript())) { buttons.Add(CONTEXT_BUTTON_INFO, 13406); // picture info buttons.Add(CONTEXT_BUTTON_VIEW_SLIDESHOW, item->m_bIsFolder ? 
13317 : 13422); // View Slideshow } if (item->m_bIsFolder) buttons.Add(CONTEXT_BUTTON_RECURSIVE_SLIDESHOW, 13318); // Recursive Slideshow if (!m_thumbLoader.IsLoading()) buttons.Add(CONTEXT_BUTTON_REFRESH_THUMBS, 13315); // Create Thumbnails if (CSettings::GetInstance().GetBool(CSettings::SETTING_FILELISTS_ALLOWFILEDELETION) && !item->IsReadOnly()) { buttons.Add(CONTEXT_BUTTON_DELETE, 117); buttons.Add(CONTEXT_BUTTON_RENAME, 118); } } if (!item->IsPlugin() && !item->IsScript() && !m_vecItems->IsPlugin()) buttons.Add(CONTEXT_BUTTON_SWITCH_MEDIA, 523); } } CGUIMediaWindow::GetContextButtons(itemNumber, buttons); } bool CGUIWindowPictures::OnContextButton(int itemNumber, CONTEXT_BUTTON button) { CFileItemPtr item = (itemNumber >= 0 && itemNumber < m_vecItems->Size()) ? m_vecItems->Get(itemNumber) : CFileItemPtr(); if (CGUIDialogContextMenu::OnContextButton("pictures", item, button)) { Update(""); return true; } switch (button) { case CONTEXT_BUTTON_VIEW_SLIDESHOW: if (item && item->m_bIsFolder) OnSlideShow(item->GetPath()); else ShowPicture(itemNumber, true); return true; case CONTEXT_BUTTON_RECURSIVE_SLIDESHOW: if (item) OnSlideShowRecursive(item->GetPath()); return true; case CONTEXT_BUTTON_INFO: OnItemInfo(itemNumber); return true; case CONTEXT_BUTTON_REFRESH_THUMBS: OnRegenerateThumbs(); return true; case CONTEXT_BUTTON_DELETE: OnDeleteItem(itemNumber); return true; case CONTEXT_BUTTON_RENAME: OnRenameItem(itemNumber); return true; case CONTEXT_BUTTON_SWITCH_MEDIA: CGUIDialogContextMenu::SwitchMedia("pictures", m_vecItems->GetPath()); return true; default: break; } return CGUIMediaWindow::OnContextButton(itemNumber, button); } bool CGUIWindowPictures::OnAddMediaSource() { return CGUIDialogMediaSource::ShowAndAddMediaSource("pictures"); } void CGUIWindowPictures::OnItemLoaded(CFileItem *pItem) { CPictureThumbLoader::ProcessFoldersAndArchives(pItem); } void CGUIWindowPictures::LoadPlayList(const std::string& strPlayList) { CLog::Log(LOGDEBUG,"CGUIWindowPictures::LoadPlayList()... converting playlist into slideshow: %s", strPlayList.c_str()); std::unique_ptr<CPlayList> pPlayList (CPlayListFactory::Create(strPlayList)); if ( NULL != pPlayList.get()) { if (!pPlayList->Load(strPlayList)) { CGUIDialogOK::ShowAndGetInput(CVariant{6}, CVariant{477}); return ; //hmmm unable to load playlist? } } CPlayList playlist = *pPlayList; if (playlist.size() > 0) { // set up slideshow CGUIWindowSlideShow *pSlideShow = (CGUIWindowSlideShow *)g_windowManager.GetWindow(WINDOW_SLIDESHOW); if (!pSlideShow) return; if (g_application.m_pPlayer->IsPlayingVideo()) g_application.StopPlaying(); // convert playlist items into slideshow items pSlideShow->Reset(); for (int i = 0; i < (int)playlist.size(); ++i) { CFileItemPtr pItem = playlist[i]; //CLog::Log(LOGDEBUG,"-- playlist item: %s", pItem->GetPath().c_str()); if (pItem->IsPicture() && !(pItem->IsZIP() || pItem->IsRAR() || pItem->IsCBZ() || pItem->IsCBR())) pSlideShow->Add(pItem.get()); } // start slideshow if there are items pSlideShow->StartSlideShow(); if (pSlideShow->NumSlides()) g_windowManager.ActivateWindow(WINDOW_SLIDESHOW); } } void CGUIWindowPictures::OnItemInfo(int itemNumber) { CFileItemPtr item = (itemNumber >= 0 && itemNumber < m_vecItems->Size()) ? 
m_vecItems->Get(itemNumber) : CFileItemPtr(); if (!item) return; if (!m_vecItems->IsPlugin() && (item->IsPlugin() || item->IsScript())) { CGUIDialogAddonInfo::ShowForItem(item); return; } if (item->m_bIsFolder || item->IsZIP() || item->IsRAR() || item->IsCBZ() || item->IsCBR() || !item->IsPicture()) return; CGUIDialogPictureInfo *pictureInfo = (CGUIDialogPictureInfo *)g_windowManager.GetWindow(WINDOW_DIALOG_PICTURE_INFO); if (pictureInfo) { pictureInfo->SetPicture(item.get()); pictureInfo->Open(); } } std::string CGUIWindowPictures::GetStartFolder(const std::string &dir) { std::string lower(dir); StringUtils::ToLower(lower); if (lower == "plugins" || lower == "addons") return "addons://sources/image/"; SetupShares(); VECSOURCES shares; m_rootDir.GetSources(shares); bool bIsSourceName = false; int iIndex = CUtil::GetMatchingSource(dir, shares, bIsSourceName); if (iIndex > -1) { if (iIndex < (int)shares.size() && shares[iIndex].m_iHasLock == 2) { CFileItem item(shares[iIndex]); if (!g_passwordManager.IsItemUnlocked(&item,"pictures")) return ""; } if (bIsSourceName) return shares[iIndex].strPath; return dir; } return CGUIMediaWindow::GetStartFolder(dir); }
gpl-2.0
samno1607/CyanPlus
drivers/cosmo/subpmic/lm3530.c
5
2625
#include <mach/lm3530.h> #define MODULE_NAME "LM3530" //#define DEBUG #ifdef DEBUG #define DBG(fmt, args...) \ printk(KERN_ERR "[%s] %s(%d): \n" \ fmt, MODULE_NAME, __func__, __LINE__, ## args); #else /* DEBUG */ #define DBG(...) #endif static int lm3530_read_byte(struct lm3530_private_data* pdata, int reg) { int ret; DBG(); mutex_lock(&pdata->update_lock); ret = i2c_smbus_read_byte_data(pdata->client, reg); mutex_unlock(&pdata->update_lock); return ret; } static int lm3530_write_byte(struct lm3530_private_data* pdata, int reg, int value) { int ret; DBG(); mutex_lock(&pdata->update_lock); ret = i2c_smbus_write_byte_data(pdata->client, reg, value); mutex_unlock(&pdata->update_lock); return ret; } static void lm3530_store(struct lm3530_private_data* pdata) { DBG(); lm3530_write_byte(pdata, LM3530_REG_GP, pdata->reg_gp); lm3530_write_byte(pdata, LM3530_REG_BRR, pdata->reg_brr); lm3530_write_byte(pdata, LM3530_REG_BRT, pdata->reg_brt); } static void lm3530_load(struct lm3530_private_data* pdata) { DBG(); pdata->reg_gp = lm3530_read_byte(pdata, LM3530_REG_GP); pdata->reg_brr = lm3530_read_byte(pdata, LM3530_REG_BRR); pdata->reg_brt = lm3530_read_byte(pdata, LM3530_REG_BRT); } int lm3530_set_hwen(struct lm3530_private_data* pdata, int gpio, int status) { DBG(); if (status == 0) { printk("[LM3530] backlight OFF!!!!! lm3530_set_hwen status = 0\n"); lm3530_load(pdata); gpio_set_value(gpio, 0); return 0; } printk("[LM3530] backlight ON!!!! lm3530_set_hwen status = 1\n"); gpio_set_value(gpio, 1); lm3530_store(pdata); return 1; } int lm3530_get_hwen(struct lm3530_private_data* pdata, int gpio) { DBG(); return gpio_get_value(gpio); } int lm3530_set_brightness_control(struct lm3530_private_data* pdata, int val) { DBG(); if ((val < 0) || (val > 255)) return -EINVAL; return lm3530_write_byte(pdata, LM3530_REG_BRT, val); } int lm3530_get_brightness_control(struct lm3530_private_data* pdata) { int val; DBG(); val = lm3530_read_byte(pdata, LM3530_REG_BRT); if (val < 0) return val; return (val & LM3530_BMASK); } int lm3530_init(struct lm3530_private_data* pdata, struct i2c_client* client) { DBG(); mutex_init(&pdata->update_lock); pdata->client = client; lm3530_load(pdata); return 0; } EXPORT_SYMBOL(lm3530_init); EXPORT_SYMBOL(lm3530_set_hwen); EXPORT_SYMBOL(lm3530_get_hwen); EXPORT_SYMBOL(lm3530_set_brightness_control); EXPORT_SYMBOL(lm3530_get_brightness_control); MODULE_AUTHOR("LG Electronics (dongjin73.kim@lge.com)"); MODULE_DESCRIPTION("Multi Display LED driver"); MODULE_LICENSE("GPL");
gpl-2.0
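The lm3530 file above is a helper library rather than a self-contained driver: it only exports lm3530_init(), lm3530_set_hwen()/lm3530_get_hwen() and the brightness accessors, and expects a display or board driver to own the struct lm3530_private_data and the HWEN GPIO. A hypothetical in-kernel consumer, sketched against the exported API (the function name, GPIO number, and i2c_client are placeholders, not values from this file):

/* hypothetical caller of the exported lm3530_* helpers */
static struct lm3530_private_data panel_bl;

static int panel_backlight_setup(struct i2c_client *client, int hwen_gpio)
{
	int ret;

	ret = lm3530_init(&panel_bl, client);	/* init mutex, cache GP/BRR/BRT regs */
	if (ret)
		return ret;

	lm3530_set_hwen(&panel_bl, hwen_gpio, 1);	/* HWEN high, registers restored */
	return lm3530_set_brightness_control(&panel_bl, 0x7f);	/* mid-range duty */
}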
longturn/freeciv-S2_5
client/gui-gtk-2.0/gotodlg.c
5
16751
/********************************************************************** Freeciv - Copyright (C) 1996 - A Kjeldberg, L Gregersen, P Unold This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. ***********************************************************************/ #ifdef HAVE_CONFIG_H #include <fc_config.h> #endif #include <stdio.h> #include <stdlib.h> #include <string.h> #include <gtk/gtk.h> #include <gdk/gdkkeysyms.h> /* utility */ #include "astring.h" #include "fcintl.h" #include "log.h" #include "support.h" /* common */ #include "game.h" #include "map.h" #include "packets.h" #include "player.h" #include "unit.h" #include "unitlist.h" /* client */ #include "client_main.h" #include "control.h" #include "goto.h" #include "options.h" #include "text.h" /* client/gui-gtk-2.0 */ #include "plrdlg.h" #include "dialogs.h" #include "gui_main.h" #include "gui_stuff.h" #include "mapview.h" #include "gotodlg.h" static GtkWidget *dshell = NULL; static GtkWidget *view; static GtkWidget *source; static GtkWidget *all_toggle; static GtkListStore *store; static GtkTreeSelection *selection; struct tile *original_tile; static void update_goto_dialog(GtkToggleButton *button); static void update_source_label(void); static void refresh_airlift_column(void); static void refresh_airlift_button(void); static void goto_selection_callback(GtkTreeSelection *selection, gpointer data); static struct city *get_selected_city(void); enum { CMD_AIRLIFT = 1, CMD_GOTO }; enum { GD_COL_CITY_ID = 0, /* Not shown if not compiled with --enable-debug.
*/ GD_COL_CITY_NAME, GD_COL_FLAG, GD_COL_NATION, GD_COL_AIRLIFT, GD_COL_NUM }; /************************************************************************** User has responded to goto dialog **************************************************************************/ static void goto_cmd_callback(GtkWidget *dlg, gint arg) { switch (arg) { case GTK_RESPONSE_CANCEL: center_tile_mapcanvas(original_tile); break; case CMD_AIRLIFT: { struct city *pdestcity = get_selected_city(); if (pdestcity) { unit_list_iterate(get_units_in_focus(), punit) { if (unit_can_airlift_to(punit, pdestcity)) { request_unit_airlift(punit, pdestcity); } } unit_list_iterate_end; } } break; case CMD_GOTO: { struct city *pdestcity = get_selected_city(); if (pdestcity) { unit_list_iterate(get_units_in_focus(), punit) { send_goto_tile(punit, pdestcity->tile); } unit_list_iterate_end; } } break; default: break; } gtk_widget_destroy(dlg); dshell = NULL; } /************************************************************************** Create goto -dialog for gotoing or airlifting unit **************************************************************************/ static void create_goto_dialog(void) { GtkWidget *sw, *label, *frame, *vbox; GtkCellRenderer *rend; GtkTreeViewColumn *col; dshell = gtk_dialog_new_with_buttons(_("Goto/Airlift Unit"), NULL, 0, GTK_STOCK_CANCEL, GTK_RESPONSE_CANCEL, _("Air_lift"), CMD_AIRLIFT, _("_Goto"), CMD_GOTO, NULL); setup_dialog(dshell, toplevel); gtk_window_set_position(GTK_WINDOW(dshell), GTK_WIN_POS_MOUSE); gtk_dialog_set_default_response(GTK_DIALOG(dshell), CMD_GOTO); g_signal_connect(dshell, "destroy", G_CALLBACK(gtk_widget_destroyed), &dshell); g_signal_connect(dshell, "response", G_CALLBACK(goto_cmd_callback), NULL); source = gtk_label_new("" /* filled in later */); gtk_label_set_line_wrap(GTK_LABEL(source), TRUE); gtk_label_set_justify(GTK_LABEL(source), GTK_JUSTIFY_CENTER); gtk_box_pack_start(GTK_BOX(GTK_DIALOG(dshell)->vbox), source, FALSE, FALSE, 0); label = g_object_new(GTK_TYPE_LABEL, "use-underline", TRUE, "label", _("Select destination ci_ty"), "xalign", 0.0, "yalign", 0.5, NULL); frame = gtk_frame_new(""); gtk_frame_set_label_widget(GTK_FRAME(frame), label); gtk_box_pack_start(GTK_BOX(GTK_DIALOG(dshell)->vbox), frame, TRUE, TRUE, 0); vbox = gtk_vbox_new(FALSE, 6); gtk_container_add(GTK_CONTAINER(frame), vbox); store = gtk_list_store_new(GD_COL_NUM, G_TYPE_INT, G_TYPE_STRING, GDK_TYPE_PIXBUF, G_TYPE_STRING, G_TYPE_STRING); gtk_tree_sortable_set_sort_column_id(GTK_TREE_SORTABLE(store), GD_COL_CITY_NAME, GTK_SORT_ASCENDING); view = gtk_tree_view_new_with_model(GTK_TREE_MODEL(store)); g_object_unref(store); selection = gtk_tree_view_get_selection(GTK_TREE_VIEW(view)); gtk_tree_view_set_headers_visible(GTK_TREE_VIEW(view), TRUE); gtk_tree_view_set_search_column(GTK_TREE_VIEW(view), GD_COL_CITY_NAME); gtk_tree_view_set_enable_search(GTK_TREE_VIEW(view), TRUE); /* Set the mnemonic in the frame label to focus the city list */ gtk_label_set_mnemonic_widget(GTK_LABEL(label), view); #ifdef DEBUG rend = gtk_cell_renderer_text_new(); col = gtk_tree_view_column_new_with_attributes(_("Id"), rend, "text", GD_COL_CITY_ID, NULL); gtk_tree_view_append_column(GTK_TREE_VIEW(view), col); gtk_tree_view_column_set_sort_column_id(col, GD_COL_CITY_ID); #endif /* DEBUG */ rend = gtk_cell_renderer_text_new(); col = gtk_tree_view_column_new_with_attributes(_("City"), rend, "text", GD_COL_CITY_NAME, NULL); gtk_tree_view_append_column(GTK_TREE_VIEW(view), col); gtk_tree_view_column_set_sort_column_id(col, 
GD_COL_CITY_NAME); rend = gtk_cell_renderer_pixbuf_new(); col = gtk_tree_view_column_new_with_attributes(NULL, rend, "pixbuf", GD_COL_FLAG, NULL); gtk_tree_view_append_column(GTK_TREE_VIEW(view), col); rend = gtk_cell_renderer_text_new(); col = gtk_tree_view_column_new_with_attributes(_("Nation"), rend, "text", GD_COL_NATION, NULL); gtk_tree_view_append_column(GTK_TREE_VIEW(view), col); gtk_tree_view_column_set_sort_column_id(col, GD_COL_NATION); rend = gtk_cell_renderer_text_new(); col = gtk_tree_view_column_new_with_attributes(_("Airlift"), rend, "text", GD_COL_AIRLIFT, NULL); gtk_tree_view_append_column(GTK_TREE_VIEW(view), col); gtk_tree_view_column_set_sort_column_id(col, GD_COL_AIRLIFT); sw = gtk_scrolled_window_new(NULL, NULL); gtk_container_add(GTK_CONTAINER(sw), view); gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(sw), GTK_POLICY_NEVER, GTK_POLICY_ALWAYS); gtk_widget_set_size_request(sw, -1, 200); gtk_box_pack_start(GTK_BOX(vbox), sw, TRUE, TRUE, 0); all_toggle = gtk_check_button_new_with_mnemonic(_("Show _All Cities")); gtk_box_pack_start(GTK_BOX(vbox), all_toggle, FALSE, FALSE, 0); g_signal_connect(all_toggle, "toggled", G_CALLBACK(update_goto_dialog), NULL); g_signal_connect(selection, "changed", G_CALLBACK(goto_selection_callback), NULL); gtk_widget_show_all(GTK_DIALOG(dshell)->vbox); gtk_widget_show_all(GTK_DIALOG(dshell)->action_area); original_tile = get_center_tile_mapcanvas(); update_source_label(); update_goto_dialog(GTK_TOGGLE_BUTTON(all_toggle)); gtk_tree_view_focus(GTK_TREE_VIEW(view)); } /**************************************************************** popup the dialog *****************************************************************/ void popup_goto_dialog(void) { if (!can_client_issue_orders() || get_num_units_in_focus() == 0) { return; } if (!dshell) { create_goto_dialog(); } gtk_window_present(GTK_WINDOW(dshell)); } /************************************************************************** Return currently selected city **************************************************************************/ static struct city *get_selected_city(void) { GtkTreeModel *model; GtkTreeIter it; int city_id; if (!gtk_tree_selection_get_selected(selection, NULL, &it)) { return NULL; } model = gtk_tree_view_get_model(GTK_TREE_VIEW(view)); gtk_tree_model_get(model, &it, GD_COL_CITY_ID, &city_id, -1); return game_city_by_number(city_id); } /************************************************************************** Appends the list of cities owned by the player to the goto dialog. **************************************************************************/ static bool list_store_append_player_cities(GtkListStore *store, const struct player *pplayer) { GtkTreeIter it; struct nation_type *pnation = nation_of_player(pplayer); const char *nation = nation_adjective_translation(pnation); GdkPixbuf *pixbuf; if (city_list_size(pplayer->cities) == 0) { return FALSE; } pixbuf = get_flag(pnation); city_list_iterate(pplayer->cities, pcity) { gtk_list_store_append(store, &it); gtk_list_store_set(store, &it, GD_COL_CITY_ID, pcity->id, GD_COL_CITY_NAME, city_name(pcity), GD_COL_FLAG, pixbuf, GD_COL_NATION, nation, /* GD_COL_AIRLIFT is populated later */ -1); } city_list_iterate_end; g_object_unref(pixbuf); return TRUE; } /************************************************************************** Refresh the label that shows where the selected unit(s) currently are (and the relevant cities' airlift capacities, where applicable).
**************************************************************************/ static void update_source_label(void) { /* Arbitrary limit to stop the label getting ridiculously long */ static const int max_cities = 10; struct { const struct city *city; struct unit_list *units; } cities[max_cities]; int ncities = 0; bool too_many = FALSE; bool no_city = FALSE; /* any units not in a city? */ struct astring strs[max_cities]; int nstrs; char *last_str; const char *descriptions[max_cities+1]; int i; /* Sanity check: if no units selected, give up */ if (unit_list_size(get_units_in_focus()) == 0) { gtk_label_set_text(GTK_LABEL(source), _("No units selected.")); return; } /* Divide selected units up into a list of unique cities */ unit_list_iterate(get_units_in_focus(), punit) { const struct city *pcity = tile_city(unit_tile(punit)); if (pcity) { /* Inefficient, but it's not a long list */ for (i = 0; i < ncities; i++) { if (cities[i].city == pcity) { unit_list_append(cities[i].units, punit); break; } } if (i == ncities) { if (ncities < max_cities) { cities[ncities].city = pcity; cities[ncities].units = unit_list_new(); unit_list_append(cities[ncities].units, punit); ncities++; } else { too_many = TRUE; break; } } } else { no_city = TRUE; } } unit_list_iterate_end; /* Describe the individual cities. */ for (i = 0; i < ncities; i++) { const char *air_text = get_airlift_text(cities[i].units, NULL); astr_init(&strs[i]); if (air_text != NULL) { astr_add(&strs[i], /* TRANS: goto/airlift dialog. "Paris (airlift: 2/4)". * A set of these appear in an "and"-separated list. */ _("%s (airlift: %s)"), city_name(cities[i].city), air_text); } else { astr_add(&strs[i], "%s", city_name(cities[i].city)); } descriptions[i] = astr_str(&strs[i]); unit_list_destroy(cities[i].units); } if (too_many) { /* TRANS: goto/airlift dialog. Too many cities to list, some omitted. * Appears at the end of an "and"-separated list. */ descriptions[ncities] = last_str = fc_strdup(Q_("?gotodlg:more")); nstrs = ncities+1; } else if (no_city) { /* TRANS: goto/airlift dialog. For units not currently in a city. * Appears at the end of an "and"-separated list. */ descriptions[ncities] = last_str = fc_strdup(Q_("?gotodlg:no city")); nstrs = ncities+1; } else { last_str = NULL; nstrs = ncities; } /* Finally, update the label. */ { struct astring label = ASTRING_INIT, list = ASTRING_INIT; astr_set(&label, /* TRANS: goto/airlift dialog. Current location of units; %s is an * "and"-separated list of cities and associated info */ _("Currently in: %s"), astr_build_and_list(&list, descriptions, nstrs)); astr_free(&list); gtk_label_set_text(GTK_LABEL(source), astr_str(&label)); astr_free(&label); } /* Clear up. */ for (i = 0; i < ncities; i++) { astr_free(&strs[i]); } free(last_str); /* might have been NULL */ } /************************************************************************** Refresh city list (in response to "all cities" checkbox changing). **************************************************************************/ static void update_goto_dialog(GtkToggleButton *button) { bool nonempty = FALSE; gtk_list_store_clear(store); if (!client_has_player()) { /* Case global observer. 
*/ return; } if (gtk_toggle_button_get_active(button)) { players_iterate(pplayer) { nonempty |= list_store_append_player_cities(store, pplayer); } players_iterate_end; } else { nonempty |= list_store_append_player_cities(store, client_player()); } refresh_airlift_column(); if (!nonempty) { /* No selection causes callbacks to fire, causing also Airlift button * to update. Do it here. */ refresh_airlift_button(); } } /************************************************************************** Refresh airlift column in city list (without tearing everything down). **************************************************************************/ static void refresh_airlift_column(void) { GtkTreeIter iter; bool valid; valid = gtk_tree_model_get_iter_first(GTK_TREE_MODEL(store), &iter); while (valid) { int city_id; const struct city *pcity; const char *air_text; gtk_tree_model_get(GTK_TREE_MODEL(store), &iter, GD_COL_CITY_ID, &city_id, -1); pcity = game_city_by_number(city_id); fc_assert_ret(pcity != NULL); air_text = get_airlift_text(get_units_in_focus(), pcity); gtk_list_store_set(GTK_LIST_STORE(store), &iter, GD_COL_AIRLIFT, air_text ? air_text : "-", -1); valid = gtk_tree_model_iter_next(GTK_TREE_MODEL(store), &iter); } } /************************************************************************** Refresh the state of the "Airlift" button for the currently selected unit(s) and city. **************************************************************************/ static void refresh_airlift_button(void) { struct city *pdestcity = get_selected_city(); if (NULL != pdestcity) { bool can_airlift = FALSE; /* Allow action if any of the selected units can airlift. */ unit_list_iterate(get_units_in_focus(), punit) { if (unit_can_airlift_to(punit, pdestcity)) { can_airlift = TRUE; break; } } unit_list_iterate_end; if (can_airlift) { gtk_dialog_set_response_sensitive(GTK_DIALOG(dshell), CMD_AIRLIFT, TRUE); return; } } gtk_dialog_set_response_sensitive(GTK_DIALOG(dshell), CMD_AIRLIFT, FALSE); } /************************************************************************** Update goto dialog. button tells if cities of all players or just client's player should be listed. **************************************************************************/ static void goto_selection_callback(GtkTreeSelection *selection, gpointer data) { struct city *pdestcity = get_selected_city(); if (NULL != pdestcity) { center_tile_mapcanvas(city_tile(pdestcity)); } refresh_airlift_button(); } /************************************************************************** Called when the set of units in focus has changed; updates airlift info **************************************************************************/ void goto_dialog_focus_units_changed(void) { /* Is the dialog currently being displayed? */ if (dshell) { /* Location of current units and ability to airlift may have changed */ update_source_label(); refresh_airlift_column(); refresh_airlift_button(); } }
gpl-2.0
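As a side note on the row-walk pattern used by refresh_airlift_column() above, here is a minimal stand-alone sketch of the same gtk_tree_model_get_iter_first()/gtk_tree_model_iter_next() loop over a GtkListStore; the single string column and the city names are invented for illustration, not taken from the freeciv code.

#include <gtk/gtk.h>

int main(int argc, char **argv)
{
  GtkListStore *store;
  GtkTreeIter iter;
  gboolean valid;

  gtk_init(&argc, &argv);

  /* One string column standing in for the goto dialog's city name. */
  store = gtk_list_store_new(1, G_TYPE_STRING);
  gtk_list_store_append(store, &iter);
  gtk_list_store_set(store, &iter, 0, "Paris", -1);
  gtk_list_store_append(store, &iter);
  gtk_list_store_set(store, &iter, 0, "Madrid", -1);

  /* Same walk as refresh_airlift_column(): visit every row in turn. */
  valid = gtk_tree_model_get_iter_first(GTK_TREE_MODEL(store), &iter);
  while (valid) {
    gchar *name;

    gtk_tree_model_get(GTK_TREE_MODEL(store), &iter, 0, &name, -1);
    g_print("%s\n", name);
    g_free(name);
    valid = gtk_tree_model_iter_next(GTK_TREE_MODEL(store), &iter);
  }

  g_object_unref(store);
  return 0;
}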
artefvck/X_Artefvck
drivers/idle/intel_idle.c
5
35017
/* * intel_idle.c - native hardware idle loop for modern Intel processors * * Copyright (c) 2010, Intel Corporation. * Len Brown <len.brown@intel.com> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ /* * intel_idle is a cpuidle driver that loads on specific Intel processors * in lieu of the legacy ACPI processor_idle driver. The intent is to * make Linux more efficient on these processors, as intel_idle knows * more than ACPI, as well as make Linux more immune to ACPI BIOS bugs. */ /* * Design Assumptions * * All CPUs have same idle states as boot CPU * * Chipset BM_STS (bus master status) bit is a NOP * for preventing entry into deep C-states */ /* * Known limitations * * The driver currently initializes for_each_online_cpu() upon modprobe. * It is unaware of subsequent processors hot-added to the system. * This means that if you boot with maxcpus=n and later online * processors above n, those processors will use C1 only. * * ACPI has a .suspend hack to turn off deep c-states during suspend * to avoid complications with the lapic timer workaround. * Have not seen issues with suspend, but may need same workaround here. * * There is currently no kernel-based automatic probing/loading mechanism * if the driver is built as a module. */ /* un-comment DEBUG to enable pr_debug() statements */ #define DEBUG #include <linux/kernel.h> #include <linux/cpuidle.h> #include <linux/clockchips.h> #include <trace/events/power.h> #include <linux/sched.h> #include <linux/notifier.h> #include <linux/cpu.h> #include <linux/module.h> #include <linux/intel_mid_pm.h> #include <linux/pm_qos.h> #include <asm/cpu_device_id.h> #include <asm/mwait.h> #include <asm/msr.h> #include <asm/io_apic.h> #include <asm/hypervisor.h> #include <asm/xen/hypercall.h> #define INTEL_IDLE_VERSION "0.4" #define PREFIX "intel_idle: " #define CLPU_CR_C6_POLICY_CONFIG 0x668 #define CLPU_MD_C6_POLICY_CONFIG 0x669 #define DISABLE_CORE_C6_DEMOTION 0x0 #define DISABLE_MODULE_C6_DEMOTION 0x0 #ifdef CONFIG_MOOREFIELD #define S0I1_DISPLAY_MODE (1 << 8) #define PUNIT_PORT 0x04 #define DSP_SS_PM 0x36 #define S0i1_LATENCY 1200 #define LOW_LATENCY_S0I1 1000 #define S0I1_STATE 0x60 #endif static struct cpuidle_driver intel_idle_driver = { .name = "intel_idle", .owner = THIS_MODULE, }; /* intel_idle.max_cstate=0 disables driver */ static int max_cstate = CPUIDLE_STATE_MAX - 1; static unsigned int mwait_substates; #define LAPIC_TIMER_ALWAYS_RELIABLE 0xFFFFFFFF /* Reliable LAPIC Timer States, bit 1 for C1 etc. */ static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */ #if defined(CONFIG_REMOVEME_INTEL_ATOM_MDFLD_POWER) || \ defined(CONFIG_REMOVEME_INTEL_ATOM_CLV_POWER) static int soc_s0ix_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index); static atomic_t nr_cpus_in_c6; #endif struct idle_cpu { struct cpuidle_state *state_table; /* * Hardware C-state auto-demotion may not always be optimal.
* Indicate which enable bits to clear here. */ unsigned long auto_demotion_disable_flags; bool disable_promotion_to_c1e; }; static const struct idle_cpu *icpu; static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; static int intel_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index); static int intel_idle_cpu_init(int cpu); static struct cpuidle_state *cpuidle_state_table; /* * Set this flag for states where the HW flushes the TLB for us * and so we don't need cross-calls to keep it consistent. * If this flag is set, SW flushes the TLB, so even if the * HW doesn't do the flushing, this flag is safe to use. */ #define CPUIDLE_FLAG_TLB_FLUSHED 0x10000 /* * MWAIT takes an 8-bit "hint" in EAX "suggesting" * the C-state (top nibble) and sub-state (bottom nibble) * 0x00 means "MWAIT(C1)", 0x10 means "MWAIT(C2)" etc. * * We store the hint at the top of our "flags" for each state. */ #define flg2MWAIT(flags) (((flags) >> 24) & 0xFF) #define MWAIT2flg(eax) ((eax & 0xFF) << 24) /* * States are indexed by the cstate number, * which is also the index into the MWAIT hint array. * Thus C0 is a dummy. */ static struct cpuidle_state nehalem_cstates[CPUIDLE_STATE_MAX] = { { .name = "C1-NHM", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID, .exit_latency = 3, .target_residency = 6, .enter = &intel_idle }, { .name = "C1E-NHM", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle }, { .name = "C3-NHM", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 20, .target_residency = 80, .enter = &intel_idle }, { .name = "C6-NHM", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 200, .target_residency = 800, .enter = &intel_idle }, { .enter = NULL } }; static struct cpuidle_state snb_cstates[CPUIDLE_STATE_MAX] = { { .name = "C1-SNB", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID, .exit_latency = 2, .target_residency = 2, .enter = &intel_idle }, { .name = "C1E-SNB", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle }, { .name = "C3-SNB", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 80, .target_residency = 211, .enter = &intel_idle }, { .name = "C6-SNB", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 104, .target_residency = 345, .enter = &intel_idle }, { .name = "C7-SNB", .desc = "MWAIT 0x30", .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 109, .target_residency = 345, .enter = &intel_idle }, { .enter = NULL } }; static struct cpuidle_state ivb_cstates[CPUIDLE_STATE_MAX] = { { .name = "C1-IVB", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID, .exit_latency = 1, .target_residency = 1, .enter = &intel_idle }, { .name = "C1E-IVB", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle }, { .name = "C3-IVB", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 59, .target_residency = 156, .enter = &intel_idle }, { .name = "C6-IVB", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | 
CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 80, .target_residency = 300, .enter = &intel_idle }, { .name = "C7-IVB", .desc = "MWAIT 0x30", .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 87, .target_residency = 300, .enter = &intel_idle }, { .enter = NULL } }; static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = { { .name = "C1-HSW", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID, .exit_latency = 2, .target_residency = 2, .enter = &intel_idle }, { .name = "C1E-HSW", .desc = "MWAIT 0x01", .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle }, { .name = "C3-HSW", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 33, .target_residency = 100, .enter = &intel_idle }, { .name = "C6-HSW", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 133, .target_residency = 400, .enter = &intel_idle }, { .name = "C7s-HSW", .desc = "MWAIT 0x32", .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 166, .target_residency = 500, .enter = &intel_idle }, { .name = "C8-HSW", .desc = "MWAIT 0x40", .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 300, .target_residency = 900, .enter = &intel_idle }, { .name = "C9-HSW", .desc = "MWAIT 0x50", .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 600, .target_residency = 1800, .enter = &intel_idle }, { .name = "C10-HSW", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 2600, .target_residency = 7700, .enter = &intel_idle }, { .enter = NULL } }; static struct cpuidle_state atom_cstates[CPUIDLE_STATE_MAX] = { { .name = "C1E-ATM", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle }, { .name = "C2-ATM", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID, .exit_latency = 20, .target_residency = 80, .enter = &intel_idle }, { .name = "C4-ATM", .desc = "MWAIT 0x30", .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 100, .target_residency = 400, .enter = &intel_idle }, { .name = "C6-ATM", .desc = "MWAIT 0x52", .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 140, .target_residency = 560, .enter = &intel_idle }, { .enter = NULL } }; static struct cpuidle_state vlv_cstates[CPUIDLE_STATE_MAX] = { { /* MWAIT C1 */ .name = "C1-ATM", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID, .exit_latency = 1, .target_residency = 4, .enter = &intel_idle }, { /* MWAIT C4 */ .name = "C4-ATM", .desc = "MWAIT 0x30", .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 100, .target_residency = 400, .enter = &intel_idle }, { /* MWAIT C6 */ .name = "C6-ATM", .desc = "MWAIT 0x52", .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 140, .target_residency = 560, .enter = &intel_idle }, { /* MWAIT C7-S0i1 */ .name = "S0i1-ATM", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 1200, .target_residency = 4000, .enter = &intel_idle }, { /* 
MWAIT C9-S0i3 */ .name = "S0i3-ATM", .desc = "MWAIT 0x64", .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 10000, .target_residency = 20000, .enter = &intel_idle }, { .enter = NULL } }; static struct cpuidle_state chv_cstates[CPUIDLE_STATE_MAX] = { { /* MWAIT C1 */ .name = "C1-ATM", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID, .exit_latency = 1, .target_residency = 4, .enter = &intel_idle }, { /* MWAIT C4 */ .name = "C4-ATM", .desc = "MWAIT 0x30", .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 100, .target_residency = 400, .enter = &intel_idle }, { /* MWAIT C6 */ .name = "C6-ATM", .desc = "MWAIT 0x52", .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 140, .target_residency = 560, .enter = &intel_idle }, { /* MWAIT C7-S0i1 */ .name = "S0i1-ATM", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 1200, .target_residency = 4000, .enter = &intel_idle }, { /* MWAIT C8-S0i2 */ .name = "S0i2-ATM", .desc = "MWAIT 0x62", .flags = MWAIT2flg(0x62) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 2000, .target_residency = 8000, .enter = &intel_idle }, { /* MWAIT C9-S0i3 */ .name = "S0i3-ATM", .desc = "MWAIT 0x64", .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 10000, .target_residency = 20000, .enter = &intel_idle }, { .enter = NULL } }; #if defined(CONFIG_REMOVEME_INTEL_ATOM_MRFLD_POWER) static struct cpuidle_state mrfld_cstates[CPUIDLE_STATE_MAX] = { { /* MWAIT C1 */ .name = "C1-ATM", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID, .exit_latency = 1, .target_residency = 4, .enter = &intel_idle }, { /* MWAIT C4 */ .name = "C4-ATM", .desc = "MWAIT 0x30", .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 100, .target_residency = 400, .enter = &intel_idle }, { /* MWAIT C6 */ .name = "C6-ATM", .desc = "MWAIT 0x52", .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 140, .target_residency = 560, .enter = &intel_idle }, { /* MWAIT C7-S0i1 */ .name = "S0i1-ATM", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 1200, .target_residency = 4000, .enter = &intel_idle }, { /* MWAIT C9-S0i3 */ .name = "S0i3-ATM", .desc = "MWAIT 0x64", .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 10000, .target_residency = 20000, .enter = &intel_idle }, { .enter = NULL } }; #else #define mrfld_cstates atom_cstates #endif static struct cpuidle_state moorfld_cstates[CPUIDLE_STATE_MAX] = { { /* MWAIT C1 */ .name = "C1-ATM", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID, .exit_latency = 1, .target_residency = 4, .enter = &intel_idle }, { /* MWAIT C6 */ .name = "C6-ATM", .desc = "MWAIT 0x52", .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 140, .target_residency = 560, .enter = &intel_idle }, { /* MWAIT C7-S0i1 */ .name = "S0i1-ATM", .desc = "MWAIT 0x60", .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 1200, .target_residency = 4000, .enter = &intel_idle }, { /* MWAIT C9-S0i3 */ .name = "S0i3-ATM", .desc = "MWAIT 0x64", .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TIME_VALID | 
CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 10000, .target_residency = 20000, .enter = &intel_idle }, { .enter = NULL } }; #if defined(CONFIG_REMOVEME_INTEL_ATOM_MDFLD_POWER) || \ defined(CONFIG_REMOVEME_INTEL_ATOM_CLV_POWER) static struct cpuidle_state mfld_cstates[CPUIDLE_STATE_MAX] = { { /* MWAIT C1 */ .name = "ATM-C1", .desc = "MWAIT 0x00", .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID, .exit_latency = CSTATE_EXIT_LATENCY_C1, .target_residency = 4, .enter = &intel_idle }, { /* MWAIT C2 */ .name = "ATM-C2", .desc = "MWAIT 0x10", .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID, .exit_latency = CSTATE_EXIT_LATENCY_C2, .target_residency = 80, .enter = &intel_idle }, { /* MWAIT C4 */ .name = "ATM-C4", .desc = "MWAIT 0x30", .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = CSTATE_EXIT_LATENCY_C4, .target_residency = 400, .enter = &intel_idle }, { /* MWAIT C6 */ .name = "ATM-C6", .desc = "MWAIT 0x52", .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = CSTATE_EXIT_LATENCY_C6, .power_usage = C6_POWER_USAGE, .target_residency = 560, .enter = &soc_s0ix_idle }, { .name = "ATM-S0i1", .desc = "MWAIT 0x52", .flags = MWAIT2flg(0x1) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = CSTATE_EXIT_LATENCY_S0i1, .power_usage = S0I1_POWER_USAGE, .enter = &soc_s0ix_idle }, { .name = "ATM-LpAudio", .desc = "MWAIT 0x52", .flags = MWAIT2flg(0x3) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = CSTATE_EXIT_LATENCY_LPMP3, .power_usage = LPMP3_POWER_USAGE, .enter = &soc_s0ix_idle }, { .name = "ATM-S0i3", .desc = "MWAIT 0x52", .flags = MWAIT2flg(0x7) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = CSTATE_EXIT_LATENCY_S0i3, .power_usage = S0I3_POWER_USAGE, .enter = &soc_s0ix_idle }, { .enter = NULL } }; static inline bool is_irq_pending(void) { int i, base = APIC_IRR; for (i = 0; i < 8; i++) if (apic_read(base + i*0x10) != 0) return true; return false; } static int enter_s0ix_state(u32 eax, int gov_req_state, int s0ix_state, struct cpuidle_device *dev, int index) { int s0ix_entered = 0; int selected_state = C6_STATE_IDX; if (atomic_add_return(1, &nr_cpus_in_c6) == num_online_cpus() && s0ix_state) { s0ix_entered = mid_s0ix_enter(s0ix_state); if (!s0ix_entered) { if (pmu_is_s0ix_in_progress()) { atomic_dec(&nr_cpus_in_c6); eax = C4_HINT; } pmu_set_s0ix_complete(); } } switch (s0ix_state) { case MID_S0I1_STATE: trace_cpu_idle(S0I1_STATE_IDX, dev->cpu); break; case MID_LPMP3_STATE: trace_cpu_idle(LPMP3_STATE_IDX, dev->cpu); break; case MID_S0I3_STATE: trace_cpu_idle(S0I3_STATE_IDX, dev->cpu); break; case MID_S3_STATE: trace_cpu_idle(S0I3_STATE_IDX, dev->cpu); break; default: trace_cpu_idle((eax >> 4) + 1, dev->cpu); } #ifdef CONFIG_XEN HYPERVISOR_monitor_op((void *)&current_thread_info()->flags, 0, 0); #else __monitor((void *)&current_thread_info()->flags, 0, 0); #endif smp_mb(); if (!need_resched()) { #ifdef CONFIG_XEN HYPERVISOR_mwait_op(eax, 1, (void *)&current_thread_info()->flags, 0); #else __mwait(eax, 1); #endif } if (!need_resched() && is_irq_pending() == 0) __get_cpu_var(update_buckets) = 0; if (likely(eax == C6_HINT)) atomic_dec(&nr_cpus_in_c6); /* During s0ix exit inform scu that OS * has exited. 
In case scu is still waiting * for ack c6 trigger, it would exit out * of the ack-c6 timeout loop */ pmu_set_s0ix_complete(); /* In case of demotion to S0i1/lpmp3 update last_state */ if (s0ix_entered) { selected_state = S0I3_STATE_IDX; if (s0ix_state == MID_S0I1_STATE) { index = S0I1_STATE_IDX; selected_state = S0I1_STATE_IDX; } else if (s0ix_state == MID_LPMP3_STATE) { index = LPMP3_STATE_IDX; selected_state = LPMP3_STATE_IDX; } } else if (eax == C4_HINT) { index = C4_STATE_IDX; selected_state = C4_STATE_IDX; } else index = C6_STATE_IDX; pmu_s0ix_demotion_stat(gov_req_state, selected_state); return index; } static int soc_s0ix_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { struct cpuidle_state *state = &drv->states[index]; unsigned long eax = flg2MWAIT(state->flags); int cpu = smp_processor_id(); int s0ix_state = 0; unsigned int cstate; int gov_req_state = (int) eax; /* Check if s0ix is already in progress, * This is required to demote C6 while S0ix * is in progress */ if (unlikely(pmu_is_s0ix_in_progress())) return intel_idle(dev, drv, C4_STATE_IDX); /* check if we need/possible to do s0ix */ if (eax != C6_HINT) s0ix_state = get_target_platform_state(&eax); /* * leave_mm() to avoid costly and often unnecessary wakeups * for flushing the user TLB's associated with the active mm. */ if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED) leave_mm(cpu); cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; if (!(lapic_timer_reliable_states & (1 << (cstate)))) clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); stop_critical_timings(); if (!need_resched()) index = enter_s0ix_state(eax, gov_req_state, s0ix_state, dev, index); start_critical_timings(); if (!(lapic_timer_reliable_states & (1 << (cstate)))) clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); return index; } #else #define mfld_cstates atom_cstates #endif #ifdef CONFIG_ATOM_SOC_POWER static unsigned int get_target_residency(unsigned int cstate) { unsigned int t_sleep = cpuidle_state_table[cstate].target_residency; unsigned int prev_idx; /* get the previous lower sleep state */ if ((cstate == 5) || (cstate == 6)) prev_idx = cstate - 2; else prev_idx = cstate - 1; /* calculate target_residency only if not defined already */ if (!t_sleep) { /* Use C0 power usage to calculate the target residency */ unsigned int p_active = C0_POWER_USAGE; unsigned int prev_state_power = cpuidle_state_table [prev_idx].power_usage; unsigned int curr_state_power = cpuidle_state_table [cstate].power_usage; unsigned int prev_state_lat = cpuidle_state_table [prev_idx].exit_latency; unsigned int curr_state_lat = cpuidle_state_table [cstate].exit_latency; if (curr_state_power && prev_state_power && p_active && prev_state_lat && curr_state_lat && (curr_state_lat > prev_state_lat) && (prev_state_power > curr_state_power)) { t_sleep = ((p_active * (curr_state_lat - prev_state_lat)) + (prev_state_lat * prev_state_power) - (curr_state_lat * curr_state_power)) / (prev_state_power - curr_state_power); /* round-up target_residency */ t_sleep++; } } WARN_ON(!t_sleep); pr_debug(PREFIX "cpuidle: target_residency[%d]= %d\n", cstate, t_sleep); return t_sleep; } #endif #ifdef CONFIG_MOOREFIELD /* MOFD: Optimize special variants of S0i1 where low residency is sufficient */ int low_latency_s0ix_state(int eax) { u32 dsp_ss_pm_val; dsp_ss_pm_val = intel_mid_msgbus_read32(PUNIT_PORT, DSP_SS_PM); if (dsp_ss_pm_val & S0I1_DISPLAY_MODE) eax = S0I1_STATE; return eax; } #endif /** * intel_idle * @dev: cpuidle_device * @drv: cpuidle driver 
* @index: index of cpuidle state * * Must be called under local_irq_disable(). */ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { unsigned long ecx = 1; /* break on interrupt flag */ struct cpuidle_state *state = &drv->states[index]; unsigned long eax = flg2MWAIT(state->flags); unsigned int cstate; int cpu = smp_processor_id(); #ifdef CONFIG_MOOREFIELD int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); #endif #if (defined(CONFIG_REMOVEME_INTEL_ATOM_MRFLD_POWER) && \ defined(CONFIG_PM_DEBUG)) { /* Get Cstate based on ignore table from PMU driver */ unsigned int ncstate; cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; ncstate = pmu_get_new_cstate(cstate, &index); eax = flg2MWAIT(drv->states[index].flags); } #endif cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; /* * leave_mm() to avoid costly and often unnecessary wakeups * for flushing the user TLB's associated with the active mm. */ if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED) leave_mm(cpu); if (!(lapic_timer_reliable_states & (1 << (cstate)))) clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); if (!current_set_polling_and_test()) { #ifdef CONFIG_XEN HYPERVISOR_mwait_op(eax, ecx, (void *)&current_thread_info()->flags, 0); #else #ifdef CONFIG_MOOREFIELD if (eax >= C6_HINT && latency_req > S0i1_LATENCY && per_cpu(predicted_time, cpu) > LOW_LATENCY_S0I1) eax = low_latency_s0ix_state(eax); #endif __monitor((void *)&current_thread_info()->flags, 0, 0); smp_mb(); if (!need_resched()) __mwait(eax, ecx); #if defined(CONFIG_REMOVEME_INTEL_ATOM_MDFLD_POWER) || \ defined(CONFIG_REMOVEME_INTEL_ATOM_CLV_POWER) if (!need_resched() && is_irq_pending() == 0) __get_cpu_var(update_buckets) = 0; #endif #endif /* CONFIG_XEN */ } if (!(lapic_timer_reliable_states & (1 << (cstate)))) clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); return index; } static void __setup_broadcast_timer(void *arg) { unsigned long reason = (unsigned long)arg; int cpu = smp_processor_id(); reason = reason ? 
CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF; clockevents_notify(reason, &cpu); } static int cpu_hotplug_notify(struct notifier_block *n, unsigned long action, void *hcpu) { int hotcpu = (unsigned long)hcpu; struct cpuidle_device *dev; switch (action & 0xf) { case CPU_ONLINE: if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) smp_call_function_single(hotcpu, __setup_broadcast_timer, (void *)true, 1); /* * Some systems can hotplug a cpu at runtime after * the kernel has booted, we have to initialize the * driver in this case */ dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu); if (!dev->registered) intel_idle_cpu_init(hotcpu); break; } return NOTIFY_OK; } static struct notifier_block cpu_hotplug_notifier = { .notifier_call = cpu_hotplug_notify, }; static void auto_demotion_disable(void *dummy) { unsigned long long msr_bits; rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); msr_bits &= ~(icpu->auto_demotion_disable_flags); wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); } static void c1e_promotion_disable(void *dummy) { unsigned long long msr_bits; rdmsrl(MSR_IA32_POWER_CTL, msr_bits); msr_bits &= ~0x2; wrmsrl(MSR_IA32_POWER_CTL, msr_bits); } static const struct idle_cpu idle_cpu_nehalem = { .state_table = nehalem_cstates, .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, .disable_promotion_to_c1e = true, }; static const struct idle_cpu idle_cpu_atom = { .state_table = atom_cstates, }; static const struct idle_cpu idle_cpu_lincroft = { .state_table = atom_cstates, .auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE, }; static const struct idle_cpu idle_cpu_snb = { .state_table = snb_cstates, .disable_promotion_to_c1e = true, }; static const struct idle_cpu idle_cpu_ivb = { .state_table = ivb_cstates, .disable_promotion_to_c1e = true, }; static const struct idle_cpu idle_cpu_hsw = { .state_table = hsw_cstates, .disable_promotion_to_c1e = true, }; static const struct idle_cpu idle_cpu_mfld = { .state_table = mfld_cstates, .auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE, }; static const struct idle_cpu idle_cpu_mrfld = { .state_table = mrfld_cstates, }; static const struct idle_cpu idle_cpu_vlv = { .state_table = vlv_cstates, }; static const struct idle_cpu idle_cpu_moorfld = { .state_table = moorfld_cstates, }; static const struct idle_cpu idle_cpu_chv = { .state_table = chv_cstates, }; #define ICPU(model, cpu) \ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu } static const struct x86_cpu_id intel_idle_ids[] = { ICPU(0x1a, idle_cpu_nehalem), ICPU(0x1e, idle_cpu_nehalem), ICPU(0x1f, idle_cpu_nehalem), ICPU(0x25, idle_cpu_nehalem), ICPU(0x2c, idle_cpu_nehalem), ICPU(0x2e, idle_cpu_nehalem), ICPU(0x1c, idle_cpu_atom), ICPU(0x26, idle_cpu_lincroft), ICPU(0x2f, idle_cpu_nehalem), ICPU(0x2a, idle_cpu_snb), ICPU(0x2d, idle_cpu_snb), ICPU(0x4c, idle_cpu_chv), ICPU(0x37, idle_cpu_vlv), ICPU(0x3a, idle_cpu_ivb), ICPU(0x3e, idle_cpu_ivb), ICPU(0x3c, idle_cpu_hsw), ICPU(0x3f, idle_cpu_hsw), ICPU(0x45, idle_cpu_hsw), ICPU(0x46, idle_cpu_hsw), ICPU(0x27, idle_cpu_mfld), ICPU(0x35, idle_cpu_mfld), ICPU(0x4a, idle_cpu_mrfld), /* Tangier SoC */ ICPU(0x5a, idle_cpu_moorfld), /* Anniedale SoC */ {} }; MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids); /* * intel_idle_probe() */ static int intel_idle_probe(void) { unsigned int eax, ebx, ecx; const struct x86_cpu_id *id; if (max_cstate == 0) { pr_debug(PREFIX "disabled\n"); return -EPERM; } id = x86_match_cpu(intel_idle_ids); if (!id) { if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && 
boot_cpu_data.x86 == 6) pr_debug(PREFIX "does not run on family %d model %d\n", boot_cpu_data.x86, boot_cpu_data.x86_model); return -ENODEV; } if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) return -ENODEV; cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates); if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || !(ecx & CPUID5_ECX_INTERRUPT_BREAK) || !mwait_substates) return -ENODEV; pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates); icpu = (const struct idle_cpu *)id->driver_data; cpuidle_state_table = icpu->state_table; if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */ lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE; else on_each_cpu(__setup_broadcast_timer, (void *)true, 1); pr_debug(PREFIX "v" INTEL_IDLE_VERSION " model 0x%X\n", boot_cpu_data.x86_model); pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n", lapic_timer_reliable_states); return 0; } /* * intel_idle_cpuidle_devices_uninit() * unregister, free cpuidle_devices */ static void intel_idle_cpuidle_devices_uninit(void) { int i; struct cpuidle_device *dev; for_each_online_cpu(i) { dev = per_cpu_ptr(intel_idle_cpuidle_devices, i); cpuidle_unregister_device(dev); } free_percpu(intel_idle_cpuidle_devices); return; } /* * intel_idle_cpuidle_driver_init() * allocate, initialize cpuidle_states */ static int intel_idle_cpuidle_driver_init(void) { int cstate; struct cpuidle_driver *drv = &intel_idle_driver; drv->state_count = 1; for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) { int num_substates = 0, mwait_hint, mwait_cstate, mwait_substate; if (cpuidle_state_table[cstate].enter == NULL) break; if (cstate + 1 > max_cstate) { printk(PREFIX "max_cstate %d reached\n", max_cstate); break; } mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags); mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint); mwait_substate = MWAIT_HINT2SUBSTATE(mwait_hint); /* does the state exist in CPUID.MWAIT? */ /* FIXME: Do not check number of substates for any states above C6 * as these are not real C states supported by the CPU, they * are emulated c states for s0ix support. 
*/ if ((mwait_cstate + 1) <= 6) { num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4)) & MWAIT_SUBSTATE_MASK; if (num_substates == 0) continue; } #if !defined(CONFIG_ATOM_SOC_POWER) if ((boot_cpu_data.x86_model != 0x37) && (boot_cpu_data.x86_model != 0x4c)) { /* if sub-state in table is not enumerated by CPUID */ if ((mwait_substate + 1) > num_substates) continue; } #endif if (((mwait_cstate + 1) > 2) && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) mark_tsc_unstable("TSC halts in idle" " states deeper than C2"); #ifdef CONFIG_ATOM_SOC_POWER /* Calculate target_residency if power_usage is given */ cpuidle_state_table[cstate].target_residency = get_target_residency(cstate); #endif drv->states[drv->state_count] = /* structure copy */ cpuidle_state_table[cstate]; drv->state_count += 1; } if (icpu->auto_demotion_disable_flags) on_each_cpu(auto_demotion_disable, NULL, 1); if (icpu->disable_promotion_to_c1e) /* each-cpu is redundant */ on_each_cpu(c1e_promotion_disable, NULL, 1); return 0; } /* * intel_idle_cpu_init() * allocate, initialize, register cpuidle_devices * @cpu: cpu/core to initialize */ static int intel_idle_cpu_init(int cpu) { int cstate; struct cpuidle_device *dev; dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu); dev->state_count = 1; for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) { int num_substates = 0, mwait_hint, mwait_cstate, mwait_substate; if (cpuidle_state_table[cstate].enter == NULL) continue; if (cstate + 1 > max_cstate) { printk(PREFIX "max_cstate %d reached\n", max_cstate); break; } mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags); mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint); mwait_substate = MWAIT_HINT2SUBSTATE(mwait_hint); /* does the state exist in CPUID.MWAIT? */ /* FIXME: Do not check number of substates for any states above C6 * as these are not real C states supported by the CPU, they * are emulated c states for s0ix support. */ if ((mwait_cstate + 1) <= 6) { num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4)) & MWAIT_SUBSTATE_MASK; if (num_substates == 0) continue; } #if !defined(CONFIG_ATOM_SOC_POWER) if ((boot_cpu_data.x86_model != 0x37) && (boot_cpu_data.x86_model != 0x4c)) { /* if sub-state in table is not enumerated by CPUID */ if ((mwait_substate + 1) > num_substates) continue; } #endif dev->state_count += 1; } dev->cpu = cpu; if (cpuidle_register_device(dev)) { pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu); intel_idle_cpuidle_devices_uninit(); return -EIO; } if (icpu->auto_demotion_disable_flags) smp_call_function_single(cpu, auto_demotion_disable, NULL, 1); per_cpu(update_buckets, cpu) = 1; return 0; } static int __init intel_idle_init(void) { int retval, i; /* Do not load intel_idle at all for now if idle= is passed */ if (boot_option_idle_override != IDLE_NO_OVERRIDE) return -ENODEV; retval = intel_idle_probe(); if (retval) return retval; intel_idle_cpuidle_driver_init(); retval = cpuidle_register_driver(&intel_idle_driver); if (retval) { struct cpuidle_driver *drv = cpuidle_get_driver(); printk(KERN_DEBUG PREFIX "intel_idle yielding to %s", drv ? 
drv->name : "none"); return retval; } intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device); if (intel_idle_cpuidle_devices == NULL) return -ENOMEM; for_each_online_cpu(i) { retval = intel_idle_cpu_init(i); if (retval) { cpuidle_unregister_driver(&intel_idle_driver); return retval; } if (platform_is(INTEL_ATOM_BYT) || platform_is(INTEL_ATOM_CHT)) { /* Disable automatic core C6 demotion by PUNIT */ if (wrmsr_on_cpu(i, CLPU_CR_C6_POLICY_CONFIG, DISABLE_CORE_C6_DEMOTION, 0x0)) pr_err("Error to disable core C6 demotion"); /* Disable automatic module C6 demotion by PUNIT */ if (wrmsr_on_cpu(i, CLPU_MD_C6_POLICY_CONFIG, DISABLE_MODULE_C6_DEMOTION, 0x0)) pr_err("Error to disable module C6 demotion"); } } register_cpu_notifier(&cpu_hotplug_notifier); return 0; } static void __exit intel_idle_exit(void) { intel_idle_cpuidle_devices_uninit(); cpuidle_unregister_driver(&intel_idle_driver); if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) on_each_cpu(__setup_broadcast_timer, (void *)false, 1); unregister_cpu_notifier(&cpu_hotplug_notifier); return; } module_init(intel_idle_init); module_exit(intel_idle_exit); module_param(max_cstate, int, 0444); MODULE_AUTHOR("Len Brown <len.brown@intel.com>"); MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION); MODULE_LICENSE("GPL");
gpl-2.0
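A minimal user-space sketch of the MWAIT hint packing used above: MWAIT2flg() stores the hint byte in the top 8 bits of each state's .flags and flg2MWAIT() recovers it, with the C-state number in the top nibble and the sub-state in the bottom nibble. The 0x52 value is just the C6-ATM example from the tables above; nothing here is kernel code.

#include <stdio.h>

#define MWAIT2flg(eax)   (((eax) & 0xFF) << 24)
#define flg2MWAIT(flags) (((flags) >> 24) & 0xFF)

int main(void)
{
    unsigned int flags = MWAIT2flg(0x52);          /* C6-ATM hint */
    unsigned int eax = flg2MWAIT(flags);
    unsigned int cstate = ((eax >> 4) & 0xF) + 1;  /* top nibble + 1 */
    unsigned int substate = eax & 0xF;             /* bottom nibble */

    /* Prints: hint=0x52 -> C6, substate 2 */
    printf("hint=0x%02x -> C%u, substate %u\n", eax, cstate, substate);
    return 0;
}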
openjdk/jdk19u
src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp
5
9312
/* * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. * */ #include "precompiled.hpp" #include "asm/assembler.inline.hpp" #include "asm/macroAssembler.inline.hpp" #include "code/vtableStubs.hpp" #include "interp_masm_aarch64.hpp" #include "memory/resourceArea.hpp" #include "oops/compiledICHolder.hpp" #include "oops/instanceKlass.hpp" #include "oops/klassVtable.hpp" #include "runtime/sharedRuntime.hpp" #include "vmreg_aarch64.inline.hpp" #ifdef COMPILER2 #include "opto/runtime.hpp" #endif // machine-dependent part of VtableStubs: create VtableStub of correct size and // initialize its code #define __ masm-> #ifndef PRODUCT extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index); #endif VtableStub* VtableStubs::create_vtable_stub(int vtable_index) { // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing. const int stub_code_length = code_size_limit(true); VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index); // Can be NULL if there is no free space in the code cache. if (s == NULL) { return NULL; } // Count unused bytes in instruction sequences of variable size. // We add them to the computed buffer size in order to avoid // overflow in subsequently generated stubs. address start_pc; int slop_bytes = 0; int slop_delta = 0; ResourceMark rm; CodeBuffer cb(s->entry_point(), stub_code_length); MacroAssembler* masm = new MacroAssembler(&cb); #if (!defined(PRODUCT) && defined(COMPILER2)) if (CountCompiledCalls) { __ lea(r16, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr())); __ increment(Address(r16)); } #endif // get receiver (need to skip return address on top of stack) assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0"); // get receiver klass address npe_addr = __ pc(); __ load_klass(r16, j_rarg0); #ifndef PRODUCT if (DebugVtables) { Label L; // TODO: find upper bound for this debug code. 
start_pc = __ pc(); // check offset vs vtable length __ ldrw(rscratch1, Address(r16, Klass::vtable_length_offset())); __ cmpw(rscratch1, vtable_index * vtableEntry::size()); __ br(Assembler::GT, L); __ enter(); __ mov(r2, vtable_index); __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), j_rarg0, r2); const ptrdiff_t estimate = 256; const ptrdiff_t codesize = __ pc() - start_pc; slop_delta = estimate - codesize; // call_VM varies in length, depending on data slop_bytes += slop_delta; assert(slop_delta >= 0, "vtable #%d: Code size estimate (%d) for DebugVtables too small, required: %d", vtable_index, (int)estimate, (int)codesize); __ leave(); __ bind(L); } #endif // PRODUCT start_pc = __ pc(); __ lookup_virtual_method(r16, vtable_index, rmethod); slop_delta = 8 - (int)(__ pc() - start_pc); slop_bytes += slop_delta; assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta); #ifndef PRODUCT if (DebugVtables) { Label L; __ cbz(rmethod, L); __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset())); __ cbnz(rscratch1, L); __ stop("Vtable entry is NULL"); __ bind(L); } #endif // PRODUCT // r0: receiver klass // rmethod: Method* // r2: receiver address ame_addr = __ pc(); __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset())); __ br(rscratch1); masm->flush(); bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0); return s; } VtableStub* VtableStubs::create_itable_stub(int itable_index) { // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing. const int stub_code_length = code_size_limit(false); VtableStub* s = new(stub_code_length) VtableStub(false, itable_index); // Can be NULL if there is no free space in the code cache. if (s == NULL) { return NULL; } // Count unused bytes in instruction sequences of variable size. // We add them to the computed buffer size in order to avoid // overflow in subsequently generated stubs. address start_pc; int slop_bytes = 0; int slop_delta = 0; ResourceMark rm; CodeBuffer cb(s->entry_point(), stub_code_length); MacroAssembler* masm = new MacroAssembler(&cb); #if (!defined(PRODUCT) && defined(COMPILER2)) if (CountCompiledCalls) { __ lea(r10, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr())); __ increment(Address(r10)); } #endif // get receiver (need to skip return address on top of stack) assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0"); // Entry arguments: // rscratch2: CompiledICHolder // j_rarg0: Receiver // This stub is called from compiled code which has no callee-saved registers, // so all registers except arguments are free at this point. const Register recv_klass_reg = r10; const Register holder_klass_reg = r16; // declaring interface klass (DECC) const Register resolved_klass_reg = rmethod; // resolved interface klass (REFC) const Register temp_reg = r11; const Register temp_reg2 = r15; const Register icholder_reg = rscratch2; Label L_no_such_interface; __ ldr(resolved_klass_reg, Address(icholder_reg, CompiledICHolder::holder_klass_offset())); __ ldr(holder_klass_reg, Address(icholder_reg, CompiledICHolder::holder_metadata_offset())); start_pc = __ pc(); // get receiver klass (also an implicit null-check) address npe_addr = __ pc(); __ load_klass(recv_klass_reg, j_rarg0); // Receiver subtype check against REFC. __ lookup_interface_method(// inputs: rec. class, interface recv_klass_reg, resolved_klass_reg, noreg, // outputs: scan temp. 
reg1, scan temp. reg2 temp_reg2, temp_reg, L_no_such_interface, /*return_method=*/false); const ptrdiff_t typecheckSize = __ pc() - start_pc; start_pc = __ pc(); // Get selected method from declaring class and itable index __ lookup_interface_method(// inputs: rec. class, interface, itable index recv_klass_reg, holder_klass_reg, itable_index, // outputs: method, scan temp. reg rmethod, temp_reg, L_no_such_interface); const ptrdiff_t lookupSize = __ pc() - start_pc; // Reduce "estimate" such that "padding" does not drop below 8. const ptrdiff_t estimate = 124; const ptrdiff_t codesize = typecheckSize + lookupSize; slop_delta = (int)(estimate - codesize); slop_bytes += slop_delta; assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize); #ifdef ASSERT if (DebugVtables) { Label L2; __ cbz(rmethod, L2); __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset())); __ cbnz(rscratch1, L2); __ stop("compiler entrypoint is null"); __ bind(L2); } #endif // ASSERT // rmethod: Method* // j_rarg0: receiver address ame_addr = __ pc(); __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset())); __ br(rscratch1); __ bind(L_no_such_interface); // Handle IncompatibleClassChangeError in itable stubs. // More detailed error message. // We force resolving of the call site by jumping to the "handle // wrong method" stub, and so let the interpreter runtime do all the // dirty work. assert(SharedRuntime::get_handle_wrong_method_stub() != NULL, "check initialization order"); __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); masm->flush(); bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, 0); return s; } int VtableStub::pd_code_alignment() { // aarch64 cache line size is not an architected constant. We just align on 4 bytes (instruction size). const unsigned int icache_line_size = 4; return icache_line_size; }
gpl-2.0
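For readers unfamiliar with what the generated stub does, this is a rough plain-C model of the dispatch sequence it emits (load the receiver's class, index the vtable, jump through the selected entry); the Animal/vtbl names are invented for illustration and nothing here is HotSpot code.

#include <stdio.h>

typedef struct Animal Animal;

struct AnimalVtbl {
    void (*speak)(Animal *self);
};

struct Animal {
    const struct AnimalVtbl *vtbl;  /* stands in for the klass pointer */
};

static void dog_speak(Animal *self)
{
    (void)self;
    puts("woof");
}

static const struct AnimalVtbl dog_vtbl = { dog_speak };

int main(void)
{
    Animal a = { &dog_vtbl };

    /* The stub's three steps: load the class from the receiver,
     * index its vtable, and jump through the entry. */
    a.vtbl->speak(&a);
    return 0;
}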
buaabyl/lm8-gcc
gcc-4.4.3/gcc/testsuite/gcc.dg/tree-ssa/prefetch-6.c
5
1767
/* { dg-do compile { target i?86-*-* x86_64-*-* } } */ /* { dg-require-effective-target ilp32 } */ /* { dg-options "-O2 -fprefetch-loop-arrays -march=athlon -msse2 -mfpmath=sse --param simultaneous-prefetches=100 -fdump-tree-aprefetch-details" } */ #define N 1000 #define K 900 double a[N][N]; double test(void) { unsigned i, j; double sum = 0; /* Here, we should use non-temporal prefetch instruction. */ for (i = 0; i < K; i++) for (j = 0; j < K; j++) sum += a[i][j]; /* Here, we should not use non-temporal prefetch instruction, since the value of a[i+10][j] is reused in L2 cache. */ for (i = 0; i < K; i++) for (j = 0; j < K; j++) sum += a[i][j] * a[i + 10][j]; /* Here, we should use non-temporal prefetch instruction, since the value of a[i+100][j] is too far to be reused in L2 cache. */ for (i = 0; i < K; i++) for (j = 0; j < K; j++) sum += a[i][j] * a[i + 100][j]; /* Here, temporal prefetches should be used, since the volume of the memory accesses is smaller than L2 cache. */ for (i = 0; i < 100; i++) for (j = 0; j < 100; j++) sum += a[i][j] * a[i + 100][j]; /* Temporal prefetches should be used here (even though the accesses to a[j][i] are independent, the same cache line is almost always hit every N iterations). */ for (i = 0; i < N; i++) for (j = 0; j < N; j++) sum += a[j][i]; return sum; } /* { dg-final { scan-tree-dump-times "Issued prefetch" 5 "aprefetch" } } */ /* { dg-final { scan-tree-dump-times "Issued nontemporal prefetch" 3 "aprefetch" } } */ /* { dg-final { scan-assembler-times "prefetcht" 5 } } */ /* { dg-final { scan-assembler-times "prefetchnta" 3 } } */ /* { dg-final { cleanup-tree-dump "aprefetch" } } */
gpl-2.0
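The manual equivalent of what -fprefetch-loop-arrays inserts for the first loop above can be sketched with GCC's __builtin_prefetch; locality hint 0 requests a non-temporal prefetch (prefetchnta on x86), and the lookahead distance of 16 iterations is an arbitrary example, not a tuned value.

#include <stddef.h>

double sum_with_prefetch(const double *a, size_t n)
{
    const size_t dist = 16;  /* example lookahead distance */
    double sum = 0.0;
    size_t i;

    for (i = 0; i < n; i++) {
        if (i + dist < n)
            __builtin_prefetch(&a[i + dist], 0, 0); /* read, non-temporal */
        sum += a[i];
    }
    return sum;
}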
VenT/wow
src/server/scripts/Kalimdor/CavernsOfTime/CullingOfStratholme/instance_culling_of_stratholme.cpp
5
7659
/* * Copyright (C) 2008-2010 TrinityCore <http://www.trinitycore.org/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "ScriptPCH.h" #include "culling_of_stratholme.h" #define MAX_ENCOUNTER 5 /* Culling of Stratholme encounters: 0 - Meathook 1 - Salramm the Fleshcrafter 2 - Chrono-Lord Epoch 3 - Mal'Ganis 4 - Infinite Corruptor (Heroic only) */ struct instance_culling_of_stratholme : public ScriptedInstance { instance_culling_of_stratholme(Map* pMap) : ScriptedInstance(pMap) {Initialize();}; uint64 uiArthas; uint64 uiMeathook; uint64 uiSalramm; uint64 uiEpoch; uint64 uiMalGanis; uint64 uiInfinite; uint64 uiShkafGate; uint64 uiMalGanisGate1; uint64 uiMalGanisGate2; uint64 uiExitGate; uint64 uiMalGanisChest; uint32 m_auiEncounter[MAX_ENCOUNTER]; std::string str_data; bool IsEncounterInProgress() const { for (uint8 i = 0; i < MAX_ENCOUNTER; ++i) if (m_auiEncounter[i] == IN_PROGRESS) return true; return false; } void OnCreatureCreate(Creature* pCreature, bool /*add*/) { switch(pCreature->GetEntry()) { case NPC_ARTHAS: uiArthas = pCreature->GetGUID(); break; case NPC_MEATHOOK: uiMeathook = pCreature->GetGUID(); break; case NPC_SALRAMM: uiSalramm = pCreature->GetGUID(); break; case NPC_EPOCH: uiEpoch = pCreature->GetGUID(); break; case NPC_MAL_GANIS: uiMalGanis = pCreature->GetGUID(); break; case NPC_INFINITE: uiInfinite = pCreature->GetGUID(); break; } } void OnGameObjectCreate(GameObject* pGo, bool /*add*/) { switch(pGo->GetEntry()) { case GO_SHKAF_GATE: uiShkafGate = pGo->GetGUID(); break; case GO_MALGANIS_GATE_1: uiMalGanisGate1 = pGo->GetGUID(); break; case GO_MALGANIS_GATE_2: uiMalGanisGate2 = pGo->GetGUID(); break; case GO_EXIT_GATE: uiExitGate = pGo->GetGUID(); if (m_auiEncounter[3] == DONE) HandleGameObject(uiExitGate,true); break; case GO_MALGANIS_CHEST_N: case GO_MALGANIS_CHEST_H: uiMalGanisChest = pGo->GetGUID(); if (m_auiEncounter[3] == DONE) pGo->RemoveFlag(GAMEOBJECT_FLAGS,GO_FLAG_INTERACT_COND); break; } } void SetData(uint32 type, uint32 data) { switch(type) { case DATA_MEATHOOK_EVENT: m_auiEncounter[0] = data; break; case DATA_SALRAMM_EVENT: m_auiEncounter[1] = data; break; case DATA_EPOCH_EVENT: m_auiEncounter[2] = data; break; case DATA_MAL_GANIS_EVENT: m_auiEncounter[3] = data; switch(m_auiEncounter[3]) { case NOT_STARTED: HandleGameObject(uiMalGanisGate2,true); break; case IN_PROGRESS: HandleGameObject(uiMalGanisGate2,false); break; case DONE: HandleGameObject(uiExitGate, true); if (GameObject *pGo = instance->GetGameObject(uiMalGanisChest)) pGo->RemoveFlag(GAMEOBJECT_FLAGS,GO_FLAG_INTERACT_COND); break; } break; case DATA_INFINITE_EVENT: m_auiEncounter[4] = data; break; } if (data == DONE) SaveToDB(); } uint32 GetData(uint32 type) { switch(type) { case DATA_MEATHOOK_EVENT: return m_auiEncounter[0]; case DATA_SALRAMM_EVENT: return m_auiEncounter[1]; case DATA_EPOCH_EVENT: return m_auiEncounter[2]; case DATA_MAL_GANIS_EVENT: return m_auiEncounter[3]; case DATA_INFINITE_EVENT: return 
m_auiEncounter[4]; } return 0; } uint64 GetData64(uint32 identifier) { switch(identifier) { case DATA_ARTHAS: return uiArthas; case DATA_MEATHOOK: return uiMeathook; case DATA_SALRAMM: return uiSalramm; case DATA_EPOCH: return uiEpoch; case DATA_MAL_GANIS: return uiMalGanis; case DATA_INFINITE: return uiInfinite; case DATA_SHKAF_GATE: return uiShkafGate; case DATA_MAL_GANIS_GATE_1: return uiMalGanisGate1; case DATA_MAL_GANIS_GATE_2: return uiMalGanisGate2; case DATA_EXIT_GATE: return uiExitGate; case DATA_MAL_GANIS_CHEST: return uiMalGanisChest; } return 0; } std::string GetSaveData() { OUT_SAVE_INST_DATA; std::ostringstream saveStream; saveStream << "C S " << m_auiEncounter[0] << " " << m_auiEncounter[1] << " " << m_auiEncounter[2] << " " << m_auiEncounter[3] << " " << m_auiEncounter[4]; str_data = saveStream.str(); OUT_SAVE_INST_DATA_COMPLETE; return str_data; } void Load(const char* in) { if (!in) { OUT_LOAD_INST_DATA_FAIL; return; } OUT_LOAD_INST_DATA(in); char dataHead1, dataHead2; uint16 data0, data1, data2, data3, data4; std::istringstream loadStream(in); loadStream >> dataHead1 >> dataHead2 >> data0 >> data1 >> data2 >> data3 >> data4; if (dataHead1 == 'C' && dataHead2 == 'S') { m_auiEncounter[0] = data0; m_auiEncounter[1] = data1; m_auiEncounter[2] = data2; m_auiEncounter[3] = data3; m_auiEncounter[4] = data4; for (uint8 i = 0; i < MAX_ENCOUNTER; ++i) if (m_auiEncounter[i] == IN_PROGRESS) m_auiEncounter[i] = NOT_STARTED; } else OUT_LOAD_INST_DATA_FAIL; OUT_LOAD_INST_DATA_COMPLETE; } }; InstanceData* GetInstanceData_instance_culling_of_stratholme(Map* pMap) { return new instance_culling_of_stratholme(pMap); } void AddSC_instance_culling_of_stratholme() { Script *newscript; newscript = new Script; newscript->Name = "instance_culling_of_stratholme"; newscript->GetInstanceData = &GetInstanceData_instance_culling_of_stratholme; newscript->RegisterSelf(); }
gpl-2.0
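The save string built by GetSaveData() above is just the literal header "C S" followed by the five encounter states, and Load() checks the header before accepting the values. A plain-C sketch of the same round trip (the script itself uses std::ostringstream/std::istringstream) might look like this; the buffer size and state values are arbitrary examples.

#include <stdio.h>

int main(void)
{
    /* Example only: four encounters DONE (3), heroic one NOT_STARTED (0). */
    unsigned enc[5] = { 3, 3, 3, 3, 0 };
    unsigned in[5];
    char buf[64], h1, h2;

    /* Save: header + five states, as in GetSaveData(). */
    snprintf(buf, sizeof(buf), "C S %u %u %u %u %u",
             enc[0], enc[1], enc[2], enc[3], enc[4]);

    /* Load: verify the "C S" header before trusting the data. */
    if (sscanf(buf, "%c %c %u %u %u %u %u",
               &h1, &h2, &in[0], &in[1], &in[2], &in[3], &in[4]) == 7
        && h1 == 'C' && h2 == 'S')
        printf("loaded: %u %u %u %u %u\n",
               in[0], in[1], in[2], in[3], in[4]);
    return 0;
}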
xvisor/xvisor
libs/common/memcpy.c
5
13040
/******************************************************************** ** File: memcpy.c ** ** Copyright (C) 1999-2010 Daniel Vik ** ** This software is provided 'as-is', without any express or implied ** warranty. In no event will the authors be held liable for any ** damages arising from the use of this software. ** Permission is granted to anyone to use this software for any ** purpose, including commercial applications, and to alter it and ** redistribute it freely, subject to the following restrictions: ** ** 1. The origin of this software must not be misrepresented; you ** must not claim that you wrote the original software. If you ** use this software in a product, an acknowledgment in the ** product documentation would be appreciated but is not ** required. ** ** 2. Altered source versions must be plainly marked as such, and ** must not be misrepresented as being the original software. ** ** 3. This notice may not be removed or altered from any source ** distribution. ** ** ** Description: Implementation of the standard library function memcpy. ** This implementation of memcpy() is ANSI-C89 compatible. ** ** The following configuration options can be set: ** ** CONFIG_CPU_LE - Uses processor with little endian ** addressing. Default is big endian. ** Note: This is defined by menuconfig ** ** PRE_INC_PTRS - Use pre increment of pointers. ** Default is post increment of ** pointers. ** ** INDEXED_COPY - Copying data using array indexing. ** Using this option, disables the ** PRE_INC_PTRS option. ** ** CONFIG_64BIT - Compiles memcpy for 64 bit ** architectures ** Note: This is defined by menuconfig ** ** ** Best Settings: ** ** Intel x86: CONFIG_CPU_LE and INDEXED_COPY ** ARM: TBD ** Other processors: TBD ** *******************************************************************/ /******************************************************************** ** Includes *******************************************************************/ #include <vmm_types.h> #include <vmm_host_io.h> #include <vmm_limits.h> #include <vmm_compiler.h> #include <vmm_modules.h> #include <libs/ctype.h> #include <libs/stringlib.h> #include <libs/mathlib.h> #include <stdarg.h> #if !defined(ARCH_HAS_MEMCPY) /******************************************************************** ** Configuration definitions. *******************************************************************/ #define INDEXED_COPY /******************************************************************** ** Typedefs *******************************************************************/ #ifdef CONFIG_64BIT typedef u64 UIntN; #define TYPE_WIDTH 8L #else typedef u32 UIntN; #define TYPE_WIDTH 4L #endif /******************************************************************** ** Remove definitions when INDEXED_COPY is defined. *******************************************************************/ #if defined (INDEXED_COPY) #if defined (PRE_INC_PTRS) #undef PRE_INC_PTRS #endif /*PRE_INC_PTRS*/ #endif /*INDEXED_COPY*/ /******************************************************************** ** Definitions for pre and post increment of pointers.
*******************************************************************/ #if defined (PRE_INC_PTRS) #define START_VAL(x) (x)-- #define INC_VAL(x) *++(x) #define CAST_TO_U8(p, o) ((u8*)p + o + TYPE_WIDTH) #define WHILE_DEST_BREAK (TYPE_WIDTH - 1) #define PRE_LOOP_ADJUST - (TYPE_WIDTH - 1) #define PRE_SWITCH_ADJUST + 1 #else /*PRE_INC_PTRS*/ #define START_VAL(x) #define INC_VAL(x) *(x)++ #define CAST_TO_U8(p, o) ((u8*)p + o) #define WHILE_DEST_BREAK 0 #define PRE_LOOP_ADJUST #define PRE_SWITCH_ADJUST #endif /*PRE_INC_PTRS*/ /******************************************************************** ** Definitions for endianness *******************************************************************/ #if defined (CONFIG_CPU_LE) #define SHL >> #define SHR << #else /* CONFIG_CPU_LE */ #define SHL << #define SHR >> #endif /* CONFIG_CPU_LE */ /******************************************************************** ** Macros for copying words of different alignment. ** Uses incrementing pointers. *******************************************************************/ #define CP_INCR() { \ INC_VAL(dstN) = INC_VAL(srcN); \ } #define CP_INCR_SH(shl, shr) { \ dstWord = srcWord SHL shl; \ srcWord = INC_VAL(srcN); \ dstWord |= srcWord SHR shr; \ INC_VAL(dstN) = dstWord; \ } /******************************************************************** ** Macros for copying words of different alignment. ** Uses array indexes. *******************************************************************/ #define CP_INDEX(idx) { \ dstN[idx] = srcN[idx]; \ } #define CP_INDEX_SH(x, shl, shr) { \ dstWord = srcWord SHL shl; \ srcWord = srcN[x]; \ dstWord |= srcWord SHR shr; \ dstN[x] = dstWord; \ } /******************************************************************** ** Macros for copying words of different alignment. ** Uses incrementing pointers or array indexes depending on ** configuration.
*******************************************************************/ #if defined (INDEXED_COPY) #define CP(idx) CP_INDEX(idx) #define CP_SH(idx, shl, shr) CP_INDEX_SH(idx, shl, shr) #define INC_INDEX(p, o) ((p) += (o)) #else /* INDEXED_COPY */ #define CP(idx) CP_INCR() #define CP_SH(idx, shl, shr) CP_INCR_SH(shl, shr) #define INC_INDEX(p, o) #endif /* INDEXED_COPY */ #define COPY_REMAINING(count) { \ START_VAL(dst8); \ START_VAL(src8); \ \ switch (count) { \ case 7: INC_VAL(dst8) = INC_VAL(src8); \ case 6: INC_VAL(dst8) = INC_VAL(src8); \ case 5: INC_VAL(dst8) = INC_VAL(src8); \ case 4: INC_VAL(dst8) = INC_VAL(src8); \ case 3: INC_VAL(dst8) = INC_VAL(src8); \ case 2: INC_VAL(dst8) = INC_VAL(src8); \ case 1: INC_VAL(dst8) = INC_VAL(src8); \ case 0: \ default: break; \ } \ } #define COPY_NO_SHIFT() { \ UIntN* dstN = (UIntN*)(dst8 PRE_LOOP_ADJUST); \ UIntN* srcN = (UIntN*)(src8 PRE_LOOP_ADJUST); \ size_t length = count / TYPE_WIDTH; \ \ while (length & 7) { \ CP_INCR(); \ length--; \ } \ \ length /= 8; \ \ while (length--) { \ CP(0); \ CP(1); \ CP(2); \ CP(3); \ CP(4); \ CP(5); \ CP(6); \ CP(7); \ \ INC_INDEX(dstN, 8); \ INC_INDEX(srcN, 8); \ } \ \ src8 = CAST_TO_U8(srcN, 0); \ dst8 = CAST_TO_U8(dstN, 0); \ \ COPY_REMAINING(count & (TYPE_WIDTH - 1)); \ } #define COPY_SHIFT(shift) { \ UIntN* dstN = (UIntN*)((((UIntN)dst8) PRE_LOOP_ADJUST) & \ ~(TYPE_WIDTH - 1)); \ UIntN* srcN = (UIntN*)((((UIntN)src8) PRE_LOOP_ADJUST) & \ ~(TYPE_WIDTH - 1)); \ size_t length = count / TYPE_WIDTH; \ UIntN srcWord = INC_VAL(srcN); \ UIntN dstWord; \ \ while (length & 7) { \ CP_INCR_SH(8 * shift, 8 * (TYPE_WIDTH - shift)); \ length--; \ } \ \ length /= 8; \ \ while (length--) { \ CP_SH(0, 8 * shift, 8 * (TYPE_WIDTH - shift)); \ CP_SH(1, 8 * shift, 8 * (TYPE_WIDTH - shift)); \ CP_SH(2, 8 * shift, 8 * (TYPE_WIDTH - shift)); \ CP_SH(3, 8 * shift, 8 * (TYPE_WIDTH - shift)); \ CP_SH(4, 8 * shift, 8 * (TYPE_WIDTH - shift)); \ CP_SH(5, 8 * shift, 8 * (TYPE_WIDTH - shift)); \ CP_SH(6, 8 * shift, 8 * (TYPE_WIDTH - shift)); \ CP_SH(7, 8 * shift, 8 * (TYPE_WIDTH - shift)); \ \ INC_INDEX(dstN, 8); \ INC_INDEX(srcN, 8); \ } \ \ src8 = CAST_TO_U8(srcN, (shift - TYPE_WIDTH)); \ dst8 = CAST_TO_U8(dstN, 0); \ \ COPY_REMAINING(count & (TYPE_WIDTH - 1)); \ } /******************************************************************** ** ** void *memcpy(void *dest, const void *src, size_t count) ** ** Args: dest - pointer to destination buffer ** src - pointer to source buffer ** count - number of bytes to copy ** ** Return: A pointer to destination buffer ** ** Purpose: Copies count bytes from src to dest. ** No overlap check is performed. ** *******************************************************************/ void *memcpy(void *dest, const void *src, size_t count) { u8* dst8 = (u8*)dest; u8* src8 = (u8*)src; if (count < 8) { COPY_REMAINING(count); return dest; } START_VAL(dst8); START_VAL(src8); while (((UIntN)dst8 & (TYPE_WIDTH - 1)) != WHILE_DEST_BREAK) { INC_VAL(dst8) = INC_VAL(src8); count--; } switch ((((UIntN)src8) PRE_SWITCH_ADJUST) & (TYPE_WIDTH - 1)) { case 0: COPY_NO_SHIFT(); break; case 1: COPY_SHIFT(1); break; case 2: COPY_SHIFT(2); break; case 3: COPY_SHIFT(3); break; #if TYPE_WIDTH > 4 case 4: COPY_SHIFT(4); break; case 5: COPY_SHIFT(5); break; case 6: COPY_SHIFT(6); break; case 7: COPY_SHIFT(7); break; #endif } return dest; } #endif
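/********************************************************************
** A minimal standalone sketch (not part of the build) of the
** shifted-word merge that CP_INCR_SH()/CP_INDEX_SH() perform when
** source and destination have different alignment. Assumes a little
** endian, 32 bit configuration (CONFIG_CPU_LE, TYPE_WIDTH == 4); the
** MEMCPY_SHIFT_DEMO guard and all names inside it are hypothetical
** and exist only for this illustration.
*******************************************************************/
#ifdef MEMCPY_SHIFT_DEMO
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint8_t src[8] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
	uint32_t w0, w1, merged;
	unsigned shift = 1; /* source starts one byte past an aligned word */

	memcpy(&w0, src, 4);     /* aligned word holding src[0..3] */
	memcpy(&w1, src + 4, 4); /* next aligned word, src[4..7] */

	/* On little endian SHL is >> and SHR is <<, so this line is the
	 * expansion of CP_INCR_SH(8 * shift, 8 * (TYPE_WIDTH - shift)). */
	merged = (w0 >> (8 * shift)) | (w1 << (8 * (4 - shift)));

	/* merged now holds the consecutive bytes src[1..4], i.e. the
	 * value 0x44332211 on a little endian machine. */
	printf("merged = 0x%08x\n", merged);
	return 0;
}
#endif /* MEMCPY_SHIFT_DEMO */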
gpl-2.0
BHSPitMonkey/linwizard-strtrk
arch/um/os-Linux/start_up.c
5
12668
/* * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <stdio.h> #include <stdlib.h> #include <stdarg.h> #include <unistd.h> #include <errno.h> #include <fcntl.h> #include <sched.h> #include <signal.h> #include <string.h> #include <sys/mman.h> #include <sys/ptrace.h> #include <sys/stat.h> #include <sys/wait.h> #include <asm/unistd.h> #include "init.h" #include "kern_constants.h" #include "os.h" #include "mem_user.h" #include "ptrace_user.h" #include "registers.h" #include "skas_ptrace.h" static int ptrace_child(void) { int ret; /* Calling os_getpid because some libcs cached getpid incorrectly */ int pid = os_getpid(), ppid = getppid(); int sc_result; change_sig(SIGWINCH, 0); if (ptrace(PTRACE_TRACEME, 0, 0, 0) < 0) { perror("ptrace"); kill(pid, SIGKILL); } kill(pid, SIGSTOP); /* * This syscall will be intercepted by the parent. Don't call more than * once, please. */ sc_result = os_getpid(); if (sc_result == pid) /* Nothing modified by the parent, we are running normally. */ ret = 1; else if (sc_result == ppid) /* * Expected in check_ptrace and check_sysemu when they succeed * in modifying the stack frame */ ret = 0; else /* Serious trouble! This could be caused by a bug in host 2.6 * SKAS3/2.6 patch before release -V6, together with a bug in * the UML code itself. */ ret = 2; exit(ret); } static void fatal_perror(const char *str) { perror(str); exit(1); } static void fatal(char *fmt, ...) { va_list list; va_start(list, fmt); vprintf(fmt, list); va_end(list); fflush(stdout); exit(1); } static void non_fatal(char *fmt, ...) { va_list list; va_start(list, fmt); vprintf(fmt, list); va_end(list); fflush(stdout); } static int start_ptraced_child(void) { int pid, n, status; pid = fork(); if (pid == 0) ptrace_child(); else if (pid < 0) fatal_perror("start_ptraced_child : fork failed"); CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); if (n < 0) fatal_perror("check_ptrace : waitpid failed"); if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP)) fatal("check_ptrace : expected SIGSTOP, got status = %d", status); return pid; } /* When testing for SYSEMU support, if it is one of the broken versions, we * must just avoid using sysemu, not panic, but only if SYSEMU features are * broken. * So only for SYSEMU features we test mustpanic, while normal host features * must work anyway! */ static int stop_ptraced_child(int pid, int exitcode, int mustexit) { int status, n, ret = 0; if (ptrace(PTRACE_CONT, pid, 0, 0) < 0) fatal_perror("stop_ptraced_child : ptrace failed"); CATCH_EINTR(n = waitpid(pid, &status, 0)); if (!WIFEXITED(status) || (WEXITSTATUS(status) != exitcode)) { int exit_with = WEXITSTATUS(status); if (exit_with == 2) non_fatal("check_ptrace : child exited with status 2. " "\nDisabling SYSEMU support.\n"); non_fatal("check_ptrace : child exited with exitcode %d, while " "expecting %d; status 0x%x\n", exit_with, exitcode, status); if (mustexit) exit(1); ret = -1; } return ret; } /* Changed only during early boot */ int ptrace_faultinfo = 1; int ptrace_ldt = 1; int proc_mm = 1; int skas_needs_stub = 0; static int __init skas0_cmd_param(char *str, int* add) { ptrace_faultinfo = proc_mm = 0; return 0; } /* The two __uml_setup would conflict, without this stupid alias. 
*/ static int __init mode_skas0_cmd_param(char *str, int* add) __attribute__((alias("skas0_cmd_param"))); __uml_setup("skas0", skas0_cmd_param, "skas0\n" " Disables SKAS3 usage, so that SKAS0 is used, unless \n" " you specify mode=tt.\n\n"); __uml_setup("mode=skas0", mode_skas0_cmd_param, "mode=skas0\n" " Disables SKAS3 usage, so that SKAS0 is used, unless you \n" " specify mode=tt. Note that this was recently added - on \n" " older kernels you must use simply \"skas0\".\n\n"); /* Changed only during early boot */ static int force_sysemu_disabled = 0; static int __init nosysemu_cmd_param(char *str, int* add) { force_sysemu_disabled = 1; return 0; } __uml_setup("nosysemu", nosysemu_cmd_param, "nosysemu\n" " Turns off the syscall emulation patch for ptrace (SYSEMU).\n" " SYSEMU is a performance patch introduced by Laurent Vivier. It changes\n" " the behaviour of ptrace() and helps reduce the host context switch rate.\n" " To make it work, you need a kernel patch for your host, too.\n" " See http://perso.wanadoo.fr/laurent.vivier/UML/ for further \n" " information.\n\n"); static void __init check_sysemu(void) { unsigned long regs[MAX_REG_NR]; int pid, n, status, count=0; non_fatal("Checking syscall emulation patch for ptrace..."); sysemu_supported = 0; pid = start_ptraced_child(); if (ptrace(PTRACE_SYSEMU, pid, 0, 0) < 0) goto fail; CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); if (n < 0) fatal_perror("check_sysemu : wait failed"); if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGTRAP)) fatal("check_sysemu : expected SIGTRAP, got status = %d", status); if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0) fatal_perror("check_sysemu : PTRACE_GETREGS failed"); if (PT_SYSCALL_NR(regs) != __NR_getpid) { non_fatal("check_sysemu got system call number %d, " "expected %d...", PT_SYSCALL_NR(regs), __NR_getpid); goto fail; } n = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_RET_OFFSET, os_getpid()); if (n < 0) { non_fatal("check_sysemu : failed to modify system call " "return"); goto fail; } if (stop_ptraced_child(pid, 0, 0) < 0) goto fail_stopped; sysemu_supported = 1; non_fatal("OK\n"); set_using_sysemu(!force_sysemu_disabled); non_fatal("Checking advanced syscall emulation patch for ptrace..."); pid = start_ptraced_child(); if ((ptrace(PTRACE_OLDSETOPTIONS, pid, 0, (void *) PTRACE_O_TRACESYSGOOD) < 0)) fatal_perror("check_ptrace: PTRACE_OLDSETOPTIONS failed"); while (1) { count++; if (ptrace(PTRACE_SYSEMU_SINGLESTEP, pid, 0, 0) < 0) goto fail; CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); if (n < 0) fatal_perror("check_ptrace : wait failed"); if (WIFSTOPPED(status) && (WSTOPSIG(status) == (SIGTRAP|0x80))) { if (!count) fatal("check_ptrace : SYSEMU_SINGLESTEP " "doesn't singlestep"); n = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_RET_OFFSET, os_getpid()); if (n < 0) fatal_perror("check_sysemu : failed to modify " "system call return"); break; } else if (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGTRAP)) count++; else fatal("check_ptrace : expected SIGTRAP or " "(SIGTRAP | 0x80), got status = %d", status); } if (stop_ptraced_child(pid, 0, 0) < 0) goto fail_stopped; sysemu_supported = 2; non_fatal("OK\n"); if (!force_sysemu_disabled) set_using_sysemu(sysemu_supported); return; fail: stop_ptraced_child(pid, 1, 0); fail_stopped: non_fatal("missing\n"); } static void __init check_ptrace(void) { int pid, syscall, n, status; non_fatal("Checking that ptrace can change system call numbers..."); pid = start_ptraced_child(); if ((ptrace(PTRACE_OLDSETOPTIONS, pid, 0, (void *) PTRACE_O_TRACESYSGOOD) < 0))
fatal_perror("check_ptrace: PTRACE_OLDSETOPTIONS failed"); while (1) { if (ptrace(PTRACE_SYSCALL, pid, 0, 0) < 0) fatal_perror("check_ptrace : ptrace failed"); CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); if (n < 0) fatal_perror("check_ptrace : wait failed"); if (!WIFSTOPPED(status) || (WSTOPSIG(status) != (SIGTRAP | 0x80))) fatal("check_ptrace : expected (SIGTRAP|0x80), " "got status = %d", status); syscall = ptrace(PTRACE_PEEKUSR, pid, PT_SYSCALL_NR_OFFSET, 0); if (syscall == __NR_getpid) { n = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_NR_OFFSET, __NR_getppid); if (n < 0) fatal_perror("check_ptrace : failed to modify " "system call"); break; } } stop_ptraced_child(pid, 0, 1); non_fatal("OK\n"); check_sysemu(); } extern void check_tmpexec(void); static void __init check_coredump_limit(void) { struct rlimit lim; int err = getrlimit(RLIMIT_CORE, &lim); if (err) { perror("Getting core dump limit"); return; } printf("Core dump limits :\n\tsoft - "); if (lim.rlim_cur == RLIM_INFINITY) printf("NONE\n"); else printf("%lu\n", lim.rlim_cur); printf("\thard - "); if (lim.rlim_max == RLIM_INFINITY) printf("NONE\n"); else printf("%lu\n", lim.rlim_max); } void __init os_early_checks(void) { int pid; /* Print out the core dump limits early */ check_coredump_limit(); check_ptrace(); /* Need to check this early because mmapping happens before the * kernel is running. */ check_tmpexec(); pid = start_ptraced_child(); if (init_registers(pid)) fatal("Failed to initialize default registers"); stop_ptraced_child(pid, 1, 1); } static int __init noprocmm_cmd_param(char *str, int* add) { proc_mm = 0; return 0; } __uml_setup("noprocmm", noprocmm_cmd_param, "noprocmm\n" " Turns off usage of /proc/mm, even if host supports it.\n" " To support /proc/mm, the host needs to be patched using\n" " the current skas3 patch.\n\n"); static int __init noptracefaultinfo_cmd_param(char *str, int* add) { ptrace_faultinfo = 0; return 0; } __uml_setup("noptracefaultinfo", noptracefaultinfo_cmd_param, "noptracefaultinfo\n" " Turns off usage of PTRACE_FAULTINFO, even if host supports\n" " it. 
To support PTRACE_FAULTINFO, the host needs to be patched\n" " using the current skas3 patch.\n\n"); static int __init noptraceldt_cmd_param(char *str, int* add) { ptrace_ldt = 0; return 0; } __uml_setup("noptraceldt", noptraceldt_cmd_param, "noptraceldt\n" " Turns off usage of PTRACE_LDT, even if host supports it.\n" " To support PTRACE_LDT, the host needs to be patched using\n" " the current skas3 patch.\n\n"); static inline void check_skas3_ptrace_faultinfo(void) { struct ptrace_faultinfo fi; int pid, n; non_fatal(" - PTRACE_FAULTINFO..."); pid = start_ptraced_child(); n = ptrace(PTRACE_FAULTINFO, pid, 0, &fi); if (n < 0) { ptrace_faultinfo = 0; if (errno == EIO) non_fatal("not found\n"); else perror("not found"); } else { if (!ptrace_faultinfo) non_fatal("found but disabled on command line\n"); else non_fatal("found\n"); } stop_ptraced_child(pid, 1, 1); } static inline void check_skas3_ptrace_ldt(void) { #ifdef PTRACE_LDT int pid, n; unsigned char ldtbuf[40]; struct ptrace_ldt ldt_op = (struct ptrace_ldt) { .func = 2, /* read default ldt */ .ptr = ldtbuf, .bytecount = sizeof(ldtbuf)}; non_fatal(" - PTRACE_LDT..."); pid = start_ptraced_child(); n = ptrace(PTRACE_LDT, pid, 0, (unsigned long) &ldt_op); if (n < 0) { if (errno == EIO) non_fatal("not found\n"); else { perror("not found"); } ptrace_ldt = 0; } else { if (ptrace_ldt) non_fatal("found\n"); else non_fatal("found, but use is disabled\n"); } stop_ptraced_child(pid, 1, 1); #else /* PTRACE_LDT might be disabled via cmdline option. * We want to override this, else we might use the stub * without real need */ ptrace_ldt = 1; #endif } static inline void check_skas3_proc_mm(void) { non_fatal(" - /proc/mm..."); if (access("/proc/mm", W_OK) < 0) { proc_mm = 0; perror("not found"); } else if (!proc_mm) non_fatal("found but disabled on command line\n"); else non_fatal("found\n"); } void can_do_skas(void) { non_fatal("Checking for the skas3 patch in the host:\n"); check_skas3_proc_mm(); check_skas3_ptrace_faultinfo(); check_skas3_ptrace_ldt(); if (!proc_mm || !ptrace_faultinfo || !ptrace_ldt) skas_needs_stub = 1; } int __init parse_iomem(char *str, int *add) { struct iomem_region *new; struct stat64 buf; char *file, *driver; int fd, size; driver = str; file = strchr(str,','); if (file == NULL) { printf("parse_iomem : failed to parse iomem\n"); goto out; } *file = '\0'; file++; fd = open(file, O_RDWR, 0); if (fd < 0) { perror("parse_iomem - Couldn't open io file"); goto out; } if (fstat64(fd, &buf) < 0) { perror("parse_iomem - cannot stat_fd file"); goto out_close; } new = malloc(sizeof(*new)); if (new == NULL) { perror("Couldn't allocate iomem_region struct"); goto out_close; } size = (buf.st_size + UM_KERN_PAGE_SIZE) & ~(UM_KERN_PAGE_SIZE - 1); *new = ((struct iomem_region) { .next = iomem_regions, .driver = driver, .fd = fd, .size = size, .phys = 0, .virt = 0 }); iomem_regions = new; iomem_size += new->size + UM_KERN_PAGE_SIZE; return 0; out_close: close(fd); out: return 1; }
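/*
 * A minimal standalone sketch of the parent/child ptrace handshake that
 * start_ptraced_child() and stop_ptraced_child() above build on: the
 * child declares itself traced and stops itself, the parent waits for
 * the SIGSTOP and then lets the child run to completion. Error handling
 * and the syscall interception are omitted; the PTRACE_HANDSHAKE_DEMO
 * guard is hypothetical and exists only for this illustration.
 */
#ifdef PTRACE_HANDSHAKE_DEMO
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0) {				/* child */
		ptrace(PTRACE_TRACEME, 0, 0, 0);
		kill(getpid(), SIGSTOP);	/* rendezvous with parent */
		exit(0);
	}
	waitpid(pid, &status, WUNTRACED);	/* child is now stopped */
	if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP))
		fprintf(stderr, "unexpected status 0x%x\n", status);
	ptrace(PTRACE_CONT, pid, 0, 0);		/* resume, no signal */
	waitpid(pid, &status, 0);
	printf("child exited with %d\n", WEXITSTATUS(status));
	return 0;
}
#endif /* PTRACE_HANDSHAKE_DEMO */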
gpl-2.0
PikkonX/T989_ICS_KERNEL-
fs/nfs/nfs4filelayout.c
517
25561
/* * Module for the pnfs nfs4 file layout driver. * Defines all I/O and Policy interface operations, plus code * to register itself with the pNFS client. * * Copyright (c) 2002 * The Regents of the University of Michigan * All Rights Reserved * * Dean Hildebrand <dhildebz@umich.edu> * * Permission is granted to use, copy, create derivative works, and * redistribute this software and such derivative works for any purpose, * so long as the name of the University of Michigan is not used in * any advertising or publicity pertaining to the use or distribution * of this software without specific, written prior authorization. If * the above copyright notice or any other identification of the * University of Michigan is included in any copy of any portion of * this software, then the disclaimer below must also be included. * * This software is provided as is, without representation or warranty * of any kind either express or implied, including without limitation * the implied warranties of merchantability, fitness for a particular * purpose, or noninfringement. The Regents of the University of * Michigan shall not be liable for any damages, including special, * indirect, incidental, or consequential damages, with respect to any * claim arising out of or in connection with the use of the software, * even if it has been or is hereafter advised of the possibility of * such damages. */ #include <linux/nfs_fs.h> #include <linux/nfs_page.h> #include "internal.h" #include "nfs4filelayout.h" #define NFSDBG_FACILITY NFSDBG_PNFS_LD MODULE_LICENSE("GPL"); MODULE_AUTHOR("Dean Hildebrand <dhildebz@umich.edu>"); MODULE_DESCRIPTION("The NFSv4 file layout driver"); #define FILELAYOUT_POLL_RETRY_MAX (15*HZ) static loff_t filelayout_get_dense_offset(struct nfs4_filelayout_segment *flseg, loff_t offset) { u32 stripe_width = flseg->stripe_unit * flseg->dsaddr->stripe_count; u64 tmp; offset -= flseg->pattern_offset; tmp = offset; do_div(tmp, stripe_width); return tmp * flseg->stripe_unit + do_div(offset, flseg->stripe_unit); } /* This function is used by the layout driver to calculate the * offset of the file on the dserver based on whether the * layout type is STRIPE_DENSE or STRIPE_SPARSE */ static loff_t filelayout_get_dserver_offset(struct pnfs_layout_segment *lseg, loff_t offset) { struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg); switch (flseg->stripe_type) { case STRIPE_SPARSE: return offset; case STRIPE_DENSE: return filelayout_get_dense_offset(flseg, offset); } BUG(); } /* For data server errors we don't recover from */ static void filelayout_set_lo_fail(struct pnfs_layout_segment *lseg) { if (lseg->pls_range.iomode == IOMODE_RW) { dprintk("%s Setting layout IOMODE_RW fail bit\n", __func__); set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags); } else { dprintk("%s Setting layout IOMODE_READ fail bit\n", __func__); set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags); } } static int filelayout_async_handle_error(struct rpc_task *task, struct nfs4_state *state, struct nfs_client *clp, int *reset) { if (task->tk_status >= 0) return 0; *reset = 0; switch (task->tk_status) { case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: case -NFS4ERR_BAD_HIGH_SLOT: case -NFS4ERR_DEADSESSION: case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: case -NFS4ERR_SEQ_FALSE_RETRY: case -NFS4ERR_SEQ_MISORDERED: dprintk("%s ERROR %d, Reset session. 
Exchangeid " "flags 0x%x\n", __func__, task->tk_status, clp->cl_exchange_flags); nfs4_schedule_session_recovery(clp->cl_session); break; case -NFS4ERR_DELAY: case -NFS4ERR_GRACE: case -EKEYEXPIRED: rpc_delay(task, FILELAYOUT_POLL_RETRY_MAX); break; case -NFS4ERR_RETRY_UNCACHED_REP: break; default: dprintk("%s DS error. Retry through MDS %d\n", __func__, task->tk_status); *reset = 1; break; } task->tk_status = 0; return -EAGAIN; } /* NFS_PROTO call done callback routines */ static int filelayout_read_done_cb(struct rpc_task *task, struct nfs_read_data *data) { struct nfs_client *clp = data->ds_clp; int reset = 0; dprintk("%s DS read\n", __func__); if (filelayout_async_handle_error(task, data->args.context->state, data->ds_clp, &reset) == -EAGAIN) { dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n", __func__, data->ds_clp, data->ds_clp->cl_session); if (reset) { filelayout_set_lo_fail(data->lseg); nfs4_reset_read(task, data); clp = NFS_SERVER(data->inode)->nfs_client; } nfs_restart_rpc(task, clp); return -EAGAIN; } return 0; } /* * We reference the rpc_cred of the first WRITE that triggers the need for * a LAYOUTCOMMIT, and use it to send the layoutcommit compound. * rfc5661 is not clear about which credential should be used. */ static void filelayout_set_layoutcommit(struct nfs_write_data *wdata) { if (FILELAYOUT_LSEG(wdata->lseg)->commit_through_mds || wdata->res.verf->committed == NFS_FILE_SYNC) return; pnfs_set_layoutcommit(wdata); dprintk("%s ionde %lu pls_end_pos %lu\n", __func__, wdata->inode->i_ino, (unsigned long) NFS_I(wdata->inode)->layout->plh_lwb); } /* * Call ops for the async read/write cases * In the case of dense layouts, the offset needs to be reset to its * original value. */ static void filelayout_read_prepare(struct rpc_task *task, void *data) { struct nfs_read_data *rdata = (struct nfs_read_data *)data; rdata->read_done_cb = filelayout_read_done_cb; if (nfs41_setup_sequence(rdata->ds_clp->cl_session, &rdata->args.seq_args, &rdata->res.seq_res, 0, task)) return; rpc_call_start(task); } static void filelayout_read_call_done(struct rpc_task *task, void *data) { struct nfs_read_data *rdata = (struct nfs_read_data *)data; dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status); /* Note this may cause RPC to be resent */ rdata->mds_ops->rpc_call_done(task, data); } static void filelayout_read_release(void *data) { struct nfs_read_data *rdata = (struct nfs_read_data *)data; rdata->mds_ops->rpc_release(data); } static int filelayout_write_done_cb(struct rpc_task *task, struct nfs_write_data *data) { int reset = 0; if (filelayout_async_handle_error(task, data->args.context->state, data->ds_clp, &reset) == -EAGAIN) { struct nfs_client *clp; dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n", __func__, data->ds_clp, data->ds_clp->cl_session); if (reset) { filelayout_set_lo_fail(data->lseg); nfs4_reset_write(task, data); clp = NFS_SERVER(data->inode)->nfs_client; } else clp = data->ds_clp; nfs_restart_rpc(task, clp); return -EAGAIN; } filelayout_set_layoutcommit(data); return 0; } /* Fake up some data that will cause nfs_commit_release to retry the writes. 
*/ static void prepare_to_resend_writes(struct nfs_write_data *data) { struct nfs_page *first = nfs_list_entry(data->pages.next); data->task.tk_status = 0; memcpy(data->verf.verifier, first->wb_verf.verifier, sizeof(first->wb_verf.verifier)); data->verf.verifier[0]++; /* ensure verifier mismatch */ } static int filelayout_commit_done_cb(struct rpc_task *task, struct nfs_write_data *data) { int reset = 0; if (filelayout_async_handle_error(task, data->args.context->state, data->ds_clp, &reset) == -EAGAIN) { dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n", __func__, data->ds_clp, data->ds_clp->cl_session); if (reset) { prepare_to_resend_writes(data); filelayout_set_lo_fail(data->lseg); } else nfs_restart_rpc(task, data->ds_clp); return -EAGAIN; } return 0; } static void filelayout_write_prepare(struct rpc_task *task, void *data) { struct nfs_write_data *wdata = (struct nfs_write_data *)data; if (nfs41_setup_sequence(wdata->ds_clp->cl_session, &wdata->args.seq_args, &wdata->res.seq_res, 0, task)) return; rpc_call_start(task); } static void filelayout_write_call_done(struct rpc_task *task, void *data) { struct nfs_write_data *wdata = (struct nfs_write_data *)data; /* Note this may cause RPC to be resent */ wdata->mds_ops->rpc_call_done(task, data); } static void filelayout_write_release(void *data) { struct nfs_write_data *wdata = (struct nfs_write_data *)data; wdata->mds_ops->rpc_release(data); } static void filelayout_commit_release(void *data) { struct nfs_write_data *wdata = (struct nfs_write_data *)data; nfs_commit_release_pages(wdata); if (atomic_dec_and_test(&NFS_I(wdata->inode)->commits_outstanding)) nfs_commit_clear_lock(NFS_I(wdata->inode)); nfs_commitdata_release(wdata); } struct rpc_call_ops filelayout_read_call_ops = { .rpc_call_prepare = filelayout_read_prepare, .rpc_call_done = filelayout_read_call_done, .rpc_release = filelayout_read_release, }; struct rpc_call_ops filelayout_write_call_ops = { .rpc_call_prepare = filelayout_write_prepare, .rpc_call_done = filelayout_write_call_done, .rpc_release = filelayout_write_release, }; struct rpc_call_ops filelayout_commit_call_ops = { .rpc_call_prepare = filelayout_write_prepare, .rpc_call_done = filelayout_write_call_done, .rpc_release = filelayout_commit_release, }; static enum pnfs_try_status filelayout_read_pagelist(struct nfs_read_data *data) { struct pnfs_layout_segment *lseg = data->lseg; struct nfs4_pnfs_ds *ds; loff_t offset = data->args.offset; u32 j, idx; struct nfs_fh *fh; int status; dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n", __func__, data->inode->i_ino, data->args.pgbase, (size_t)data->args.count, offset); /* Retrieve the correct rpc_client for the byte range */ j = nfs4_fl_calc_j_index(lseg, offset); idx = nfs4_fl_calc_ds_index(lseg, j); ds = nfs4_fl_prepare_ds(lseg, idx); if (!ds) { /* Either layout fh index faulty, or ds connect failed */ set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags); set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags); return PNFS_NOT_ATTEMPTED; } dprintk("%s USE DS:ip %x %hu\n", __func__, ntohl(ds->ds_ip_addr), ntohs(ds->ds_port)); /* No multipath support. Use first DS */ data->ds_clp = ds->ds_clp; fh = nfs4_fl_select_ds_fh(lseg, j); if (fh) data->args.fh = fh; data->args.offset = filelayout_get_dserver_offset(lseg, offset); data->mds_offset = offset; /* Perform an asynchronous read to ds */ status = nfs_initiate_read(data, ds->ds_clp->cl_rpcclient, &filelayout_read_call_ops); BUG_ON(status != 0); return PNFS_ATTEMPTED; } /* Perform async writes. 
*/ static enum pnfs_try_status filelayout_write_pagelist(struct nfs_write_data *data, int sync) { struct pnfs_layout_segment *lseg = data->lseg; struct nfs4_pnfs_ds *ds; loff_t offset = data->args.offset; u32 j, idx; struct nfs_fh *fh; int status; /* Retrieve the correct rpc_client for the byte range */ j = nfs4_fl_calc_j_index(lseg, offset); idx = nfs4_fl_calc_ds_index(lseg, j); ds = nfs4_fl_prepare_ds(lseg, idx); if (!ds) { printk(KERN_ERR "%s: prepare_ds failed, use MDS\n", __func__); set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags); set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags); return PNFS_NOT_ATTEMPTED; } dprintk("%s ino %lu sync %d req %Zu@%llu DS:%x:%hu\n", __func__, data->inode->i_ino, sync, (size_t) data->args.count, offset, ntohl(ds->ds_ip_addr), ntohs(ds->ds_port)); data->write_done_cb = filelayout_write_done_cb; data->ds_clp = ds->ds_clp; fh = nfs4_fl_select_ds_fh(lseg, j); if (fh) data->args.fh = fh; /* * Get the file offset on the dserver. Set the write offset to * this offset and save the original offset. */ data->args.offset = filelayout_get_dserver_offset(lseg, offset); /* Perform an asynchronous write */ status = nfs_initiate_write(data, ds->ds_clp->cl_rpcclient, &filelayout_write_call_ops, sync); BUG_ON(status != 0); return PNFS_ATTEMPTED; } /* * filelayout_check_layout() * * Make sure layout segment parameters are sane WRT the device. * At this point no generic layer initialization of the lseg has occurred, * and nothing has been added to the layout_hdr cache. * */ static int filelayout_check_layout(struct pnfs_layout_hdr *lo, struct nfs4_filelayout_segment *fl, struct nfs4_layoutget_res *lgr, struct nfs4_deviceid *id, gfp_t gfp_flags) { struct nfs4_deviceid_node *d; struct nfs4_file_layout_dsaddr *dsaddr; int status = -EINVAL; struct nfs_server *nfss = NFS_SERVER(lo->plh_inode); dprintk("--> %s\n", __func__); if (fl->pattern_offset > lgr->range.offset) { dprintk("%s pattern_offset %lld too large\n", __func__, fl->pattern_offset); goto out; } if (!fl->stripe_unit || fl->stripe_unit % PAGE_SIZE) { dprintk("%s Invalid stripe unit (%u)\n", __func__, fl->stripe_unit); goto out; } /* find and reference the deviceid */ d = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode)->pnfs_curr_ld, NFS_SERVER(lo->plh_inode)->nfs_client, id); if (d == NULL) { dsaddr = get_device_info(lo->plh_inode, id, gfp_flags); if (dsaddr == NULL) goto out; } else dsaddr = container_of(d, struct nfs4_file_layout_dsaddr, id_node); fl->dsaddr = dsaddr; if (fl->first_stripe_index < 0 || fl->first_stripe_index >= dsaddr->stripe_count) { dprintk("%s Bad first_stripe_index %d\n", __func__, fl->first_stripe_index); goto out_put; } if ((fl->stripe_type == STRIPE_SPARSE && fl->num_fh > 1 && fl->num_fh != dsaddr->ds_num) || (fl->stripe_type == STRIPE_DENSE && fl->num_fh != dsaddr->stripe_count)) { dprintk("%s num_fh %u not valid for given packing\n", __func__, fl->num_fh); goto out_put; } if (fl->stripe_unit % nfss->rsize || fl->stripe_unit % nfss->wsize) { dprintk("%s Stripe unit (%u) not aligned with rsize %u " "wsize %u\n", __func__, fl->stripe_unit, nfss->rsize, nfss->wsize); } status = 0; out: dprintk("--> %s returns %d\n", __func__, status); return status; out_put: nfs4_fl_put_deviceid(dsaddr); goto out; } static void filelayout_free_fh_array(struct nfs4_filelayout_segment *fl) { int i; for (i = 0; i < fl->num_fh; i++) { if (!fl->fh_array[i]) break; kfree(fl->fh_array[i]); } kfree(fl->fh_array); fl->fh_array = NULL; } static void _filelayout_free_lseg(struct 
nfs4_filelayout_segment *fl) { filelayout_free_fh_array(fl); kfree(fl); } static int filelayout_decode_layout(struct pnfs_layout_hdr *flo, struct nfs4_filelayout_segment *fl, struct nfs4_layoutget_res *lgr, struct nfs4_deviceid *id, gfp_t gfp_flags) { struct xdr_stream stream; struct xdr_buf buf; struct page *scratch; __be32 *p; uint32_t nfl_util; int i; dprintk("%s: set_layout_map Begin\n", __func__); scratch = alloc_page(gfp_flags); if (!scratch) return -ENOMEM; xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages, lgr->layoutp->len); xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE); /* 20 = nfl_util (4), first_stripe_index (4), pattern_offset (8), * num_fh (4) */ p = xdr_inline_decode(&stream, NFS4_DEVICEID4_SIZE + 20); if (unlikely(!p)) goto out_err; memcpy(id, p, sizeof(*id)); p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE); nfs4_print_deviceid(id); nfl_util = be32_to_cpup(p++); if (nfl_util & NFL4_UFLG_COMMIT_THRU_MDS) fl->commit_through_mds = 1; if (nfl_util & NFL4_UFLG_DENSE) fl->stripe_type = STRIPE_DENSE; else fl->stripe_type = STRIPE_SPARSE; fl->stripe_unit = nfl_util & ~NFL4_UFLG_MASK; fl->first_stripe_index = be32_to_cpup(p++); p = xdr_decode_hyper(p, &fl->pattern_offset); fl->num_fh = be32_to_cpup(p++); dprintk("%s: nfl_util 0x%X num_fh %u fsi %u po %llu\n", __func__, nfl_util, fl->num_fh, fl->first_stripe_index, fl->pattern_offset); /* Note that a zero value for num_fh is legal for STRIPE_SPARSE. * Further checking is done in filelayout_check_layout */ if (fl->num_fh < 0 || fl->num_fh > max(NFS4_PNFS_MAX_STRIPE_CNT, NFS4_PNFS_MAX_MULTI_CNT)) goto out_err; if (fl->num_fh > 0) { fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *), gfp_flags); if (!fl->fh_array) goto out_err; } for (i = 0; i < fl->num_fh; i++) { /* Do we want to use a mempool here? */ fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), gfp_flags); if (!fl->fh_array[i]) goto out_err_free; p = xdr_inline_decode(&stream, 4); if (unlikely(!p)) goto out_err_free; fl->fh_array[i]->size = be32_to_cpup(p++); if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) { printk(KERN_ERR "Too big fh %d received %d\n", i, fl->fh_array[i]->size); goto out_err_free; } p = xdr_inline_decode(&stream, fl->fh_array[i]->size); if (unlikely(!p)) goto out_err_free; memcpy(fl->fh_array[i]->data, p, fl->fh_array[i]->size); dprintk("DEBUG: %s: fh len %d\n", __func__, fl->fh_array[i]->size); } __free_page(scratch); return 0; out_err_free: filelayout_free_fh_array(fl); out_err: __free_page(scratch); return -EIO; } static void filelayout_free_lseg(struct pnfs_layout_segment *lseg) { struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg); dprintk("--> %s\n", __func__); nfs4_fl_put_deviceid(fl->dsaddr); kfree(fl->commit_buckets); _filelayout_free_lseg(fl); } static struct pnfs_layout_segment * filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr, gfp_t gfp_flags) { struct nfs4_filelayout_segment *fl; int rc; struct nfs4_deviceid id; dprintk("--> %s\n", __func__); fl = kzalloc(sizeof(*fl), gfp_flags); if (!fl) return NULL; rc = filelayout_decode_layout(layoutid, fl, lgr, &id, gfp_flags); if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id, gfp_flags)) { _filelayout_free_lseg(fl); return NULL; } /* This assumes there is only one IOMODE_RW lseg. What * we really want to do is have a layout_hdr level * dictionary of <multipath_list4, fh> keys, each * associated with a struct list_head, populated by calls * to filelayout_write_pagelist().
* */ if ((!fl->commit_through_mds) && (lgr->range.iomode == IOMODE_RW)) { int i; int size = (fl->stripe_type == STRIPE_SPARSE) ? fl->dsaddr->ds_num : fl->dsaddr->stripe_count; fl->commit_buckets = kcalloc(size, sizeof(struct list_head), gfp_flags); if (!fl->commit_buckets) { filelayout_free_lseg(&fl->generic_hdr); return NULL; } fl->number_of_buckets = size; for (i = 0; i < size; i++) INIT_LIST_HEAD(&fl->commit_buckets[i]); } return &fl->generic_hdr; } /* * filelayout_pg_test(). Called by nfs_can_coalesce_requests() * * return true : coalesce page * return false : don't coalesce page */ bool filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, struct nfs_page *req) { u64 p_stripe, r_stripe; u32 stripe_unit; if (!pnfs_generic_pg_test(pgio, prev, req) || !nfs_generic_pg_test(pgio, prev, req)) return false; if (!pgio->pg_lseg) return 1; p_stripe = (u64)prev->wb_index << PAGE_CACHE_SHIFT; r_stripe = (u64)req->wb_index << PAGE_CACHE_SHIFT; stripe_unit = FILELAYOUT_LSEG(pgio->pg_lseg)->stripe_unit; do_div(p_stripe, stripe_unit); do_div(r_stripe, stripe_unit); return (p_stripe == r_stripe); } static bool filelayout_mark_pnfs_commit(struct pnfs_layout_segment *lseg) { return !FILELAYOUT_LSEG(lseg)->commit_through_mds; } static u32 select_bucket_index(struct nfs4_filelayout_segment *fl, u32 j) { if (fl->stripe_type == STRIPE_SPARSE) return nfs4_fl_calc_ds_index(&fl->generic_hdr, j); else return j; } struct list_head *filelayout_choose_commit_list(struct nfs_page *req) { struct pnfs_layout_segment *lseg = req->wb_commit_lseg; struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg); u32 i, j; struct list_head *list; /* Note that we are calling nfs4_fl_calc_j_index on each page * that ends up being committed to a data server. An attractive * alternative is to add a field to nfs_write_data and nfs_page * to store the value calculated in filelayout_write_pagelist * and just use that here. 
*/ j = nfs4_fl_calc_j_index(lseg, (loff_t)req->wb_index << PAGE_CACHE_SHIFT); i = select_bucket_index(fl, j); list = &fl->commit_buckets[i]; if (list_empty(list)) { /* Non-empty buckets hold a reference on the lseg */ get_lseg(lseg); } return list; } static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i) { struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg); if (flseg->stripe_type == STRIPE_SPARSE) return i; else return nfs4_fl_calc_ds_index(lseg, i); } static struct nfs_fh * select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i) { struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg); if (flseg->stripe_type == STRIPE_SPARSE) { if (flseg->num_fh == 1) i = 0; else if (flseg->num_fh == 0) /* Use the MDS OPEN fh set in nfs_read_rpcsetup */ return NULL; } return flseg->fh_array[i]; } static int filelayout_initiate_commit(struct nfs_write_data *data, int how) { struct pnfs_layout_segment *lseg = data->lseg; struct nfs4_pnfs_ds *ds; u32 idx; struct nfs_fh *fh; idx = calc_ds_index_from_commit(lseg, data->ds_commit_index); ds = nfs4_fl_prepare_ds(lseg, idx); if (!ds) { printk(KERN_ERR "%s: prepare_ds failed, use MDS\n", __func__); set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags); set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags); prepare_to_resend_writes(data); data->mds_ops->rpc_release(data); return -EAGAIN; } dprintk("%s ino %lu, how %d\n", __func__, data->inode->i_ino, how); data->write_done_cb = filelayout_commit_done_cb; data->ds_clp = ds->ds_clp; fh = select_ds_fh_from_commit(lseg, data->ds_commit_index); if (fh) data->args.fh = fh; return nfs_initiate_commit(data, ds->ds_clp->cl_rpcclient, &filelayout_commit_call_ops, how); } /* * This is only useful while we are using whole file layouts. 
*/ static struct pnfs_layout_segment *find_only_write_lseg(struct inode *inode) { struct pnfs_layout_segment *lseg, *rv = NULL; spin_lock(&inode->i_lock); list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) if (lseg->pls_range.iomode == IOMODE_RW) rv = get_lseg(lseg); spin_unlock(&inode->i_lock); return rv; } static int alloc_ds_commits(struct inode *inode, struct list_head *list) { struct pnfs_layout_segment *lseg; struct nfs4_filelayout_segment *fl; struct nfs_write_data *data; int i, j; /* Won't need this when non-whole file layout segments are supported; * instead we will use a pnfs_layout_hdr structure */ lseg = find_only_write_lseg(inode); if (!lseg) return 0; fl = FILELAYOUT_LSEG(lseg); for (i = 0; i < fl->number_of_buckets; i++) { if (list_empty(&fl->commit_buckets[i])) continue; data = nfs_commitdata_alloc(); if (!data) goto out_bad; data->ds_commit_index = i; data->lseg = lseg; list_add(&data->pages, list); } put_lseg(lseg); return 0; out_bad: for (j = i; j < fl->number_of_buckets; j++) { if (list_empty(&fl->commit_buckets[j])) continue; nfs_retry_commit(&fl->commit_buckets[j], lseg); put_lseg(lseg); /* associated with emptying bucket */ } put_lseg(lseg); /* Caller will clean up entries put on list */ return -ENOMEM; } /* This follows nfs_commit_list pretty closely */ static int filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages, int how) { struct nfs_write_data *data, *tmp; LIST_HEAD(list); if (!list_empty(mds_pages)) { data = nfs_commitdata_alloc(); if (!data) goto out_bad; data->lseg = NULL; list_add(&data->pages, &list); } if (alloc_ds_commits(inode, &list)) goto out_bad; list_for_each_entry_safe(data, tmp, &list, pages) { list_del_init(&data->pages); atomic_inc(&NFS_I(inode)->commits_outstanding); if (!data->lseg) { nfs_init_commit(data, mds_pages, NULL); nfs_initiate_commit(data, NFS_CLIENT(inode), data->mds_ops, how); } else { nfs_init_commit(data, &FILELAYOUT_LSEG(data->lseg)->commit_buckets[data->ds_commit_index], data->lseg); filelayout_initiate_commit(data, how); } } return 0; out_bad: list_for_each_entry_safe(data, tmp, &list, pages) { nfs_retry_commit(&data->pages, data->lseg); list_del_init(&data->pages); nfs_commit_free(data); } nfs_retry_commit(mds_pages, NULL); nfs_commit_clear_lock(NFS_I(inode)); return -ENOMEM; } static void filelayout_free_deviceid_node(struct nfs4_deviceid_node *d) { nfs4_fl_free_deviceid(container_of(d, struct nfs4_file_layout_dsaddr, id_node)); } static struct pnfs_layoutdriver_type filelayout_type = { .id = LAYOUT_NFSV4_1_FILES, .name = "LAYOUT_NFSV4_1_FILES", .owner = THIS_MODULE, .alloc_lseg = filelayout_alloc_lseg, .free_lseg = filelayout_free_lseg, .pg_test = filelayout_pg_test, .mark_pnfs_commit = filelayout_mark_pnfs_commit, .choose_commit_list = filelayout_choose_commit_list, .commit_pagelist = filelayout_commit_pagelist, .read_pagelist = filelayout_read_pagelist, .write_pagelist = filelayout_write_pagelist, .free_deviceid_node = filelayout_free_deviceid_node, }; static int __init nfs4filelayout_init(void) { printk(KERN_INFO "%s: NFSv4 File Layout Driver Registering...\n", __func__); return pnfs_register_layoutdriver(&filelayout_type); } static void __exit nfs4filelayout_exit(void) { printk(KERN_INFO "%s: NFSv4 File Layout Driver Unregistering...\n", __func__); pnfs_unregister_layoutdriver(&filelayout_type); } module_init(nfs4filelayout_init); module_exit(nfs4filelayout_exit);
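/*
 * A small userspace sketch of the STRIPE_DENSE arithmetic performed by
 * filelayout_get_dense_offset() above, using plain 64 bit division in
 * place of do_div(). The DENSE_OFFSET_DEMO guard and the sample values
 * are hypothetical, chosen only to make the mapping visible.
 */
#ifdef DENSE_OFFSET_DEMO
#include <stdio.h>
#include <stdint.h>

static uint64_t dense_offset(uint64_t offset, uint32_t stripe_unit,
			     uint32_t stripe_count, uint64_t pattern_offset)
{
	uint64_t stripe_width = (uint64_t)stripe_unit * stripe_count;

	offset -= pattern_offset;
	/* every full stripe below us stores exactly one unit on this
	 * data server, plus however far we are into the current unit */
	return (offset / stripe_width) * stripe_unit + offset % stripe_unit;
}

int main(void)
{
	/* 3 data servers, 4 KiB stripe unit, no pattern offset: file
	 * offset 28672 (= 7 * 4096) is stripe unit #7, the third unit
	 * held by its data server, so the DS-local offset is 2 * 4096. */
	printf("%llu\n",
	       (unsigned long long)dense_offset(28672, 4096, 3, 0));
	return 0;
}
#endif /* DENSE_OFFSET_DEMO */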
gpl-2.0
cmenard/kernel_smdk4412
drivers/char/diag/diagmem.c
517
4808
/* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/mempool.h> #include <linux/mutex.h> #include <linux/atomic.h> #include "diagchar.h" void *diagmem_alloc(struct diagchar_dev *driver, int size, int pool_type) { void *buf = NULL; if (pool_type == POOL_TYPE_COPY) { if (driver->diagpool) { mutex_lock(&driver->diagmem_mutex); if (driver->count < driver->poolsize) { atomic_add(1, (atomic_t *)&driver->count); buf = mempool_alloc(driver->diagpool, GFP_ATOMIC); } mutex_unlock(&driver->diagmem_mutex); } } else if (pool_type == POOL_TYPE_HDLC) { if (driver->diag_hdlc_pool) { if (driver->count_hdlc_pool < driver->poolsize_hdlc) { atomic_add(1, (atomic_t *)&driver->count_hdlc_pool); buf = mempool_alloc(driver->diag_hdlc_pool, GFP_ATOMIC); } } } else if (pool_type == POOL_TYPE_WRITE_STRUCT) { if (driver->diag_write_struct_pool) { if (driver->count_write_struct_pool < driver->poolsize_write_struct) { atomic_add(1, (atomic_t *)&driver->count_write_struct_pool); buf = mempool_alloc( driver->diag_write_struct_pool, GFP_ATOMIC); } } } return buf; } void diagmem_exit(struct diagchar_dev *driver, int pool_type) { if (driver->diagpool) { if (driver->count == 0 && driver->ref_count == 0) { mempool_destroy(driver->diagpool); driver->diagpool = NULL; } else if (driver->ref_count == 0 && pool_type == POOL_TYPE_ALL) printk(KERN_ALERT "Unable to destroy COPY mempool"); } if (driver->diag_hdlc_pool) { if (driver->count_hdlc_pool == 0 && driver->ref_count == 0) { mempool_destroy(driver->diag_hdlc_pool); driver->diag_hdlc_pool = NULL; } else if (driver->ref_count == 0 && pool_type == POOL_TYPE_ALL) printk(KERN_ALERT "Unable to destroy HDLC mempool"); } if (driver->diag_write_struct_pool) { /* Free up struct pool ONLY if there are no outstanding transactions(aggregation buffer) with USB */ if (driver->count_write_struct_pool == 0 && driver->count_hdlc_pool == 0 && driver->ref_count == 0) { mempool_destroy(driver->diag_write_struct_pool); driver->diag_write_struct_pool = NULL; } else if (driver->ref_count == 0 && pool_type == POOL_TYPE_ALL) printk(KERN_ALERT "Unable to destroy STRUCT mempool"); } } void diagmem_free(struct diagchar_dev *driver, void *buf, int pool_type) { if (pool_type == POOL_TYPE_COPY) { if (driver->diagpool != NULL && driver->count > 0) { mempool_free(buf, driver->diagpool); atomic_add(-1, (atomic_t *)&driver->count); } else pr_err("diag: Attempt to free up DIAG driver " "mempool memory which is already free %d", driver->count); } else if (pool_type == POOL_TYPE_HDLC) { if (driver->diag_hdlc_pool != NULL && driver->count_hdlc_pool > 0) { mempool_free(buf, driver->diag_hdlc_pool); atomic_add(-1, (atomic_t *)&driver->count_hdlc_pool); } else pr_err("diag: Attempt to free up DIAG driver " "HDLC mempool which is already free %d ", driver->count_hdlc_pool); } else if (pool_type == POOL_TYPE_WRITE_STRUCT) { if (driver->diag_write_struct_pool != NULL && driver->count_write_struct_pool > 0) { mempool_free(buf, driver->diag_write_struct_pool); atomic_add(-1, (atomic_t 
*)&driver->count_write_struct_pool); } else pr_err("diag: Attempt to free up DIAG driver " "USB structure mempool which is already free %d ", driver->count_write_struct_pool); } diagmem_exit(driver, pool_type); } void diagmem_init(struct diagchar_dev *driver) { mutex_init(&driver->diagmem_mutex); if (driver->count == 0) driver->diagpool = mempool_create_kmalloc_pool( driver->poolsize, driver->itemsize); if (driver->count_hdlc_pool == 0) driver->diag_hdlc_pool = mempool_create_kmalloc_pool( driver->poolsize_hdlc, driver->itemsize_hdlc); if (driver->count_write_struct_pool == 0) driver->diag_write_struct_pool = mempool_create_kmalloc_pool( driver->poolsize_write_struct, driver->itemsize_write_struct); if (!driver->diagpool) printk(KERN_INFO "Cannot allocate diag mempool\n"); if (!driver->diag_hdlc_pool) printk(KERN_INFO "Cannot allocate diag HDLC mempool\n"); if (!driver->diag_write_struct_pool) printk(KERN_INFO "Cannot allocate diag USB struct mempool\n"); }
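/*
 * A minimal sketch of the bare mempool lifecycle that the helpers in
 * this file wrap with usage counting, shown on a single hypothetical
 * pool. The DIAGMEM_POOL_SKETCH guard and the example_* names do not
 * exist in the driver; only the mempool_* calls are real kernel API.
 */
#ifdef DIAGMEM_POOL_SKETCH
static mempool_t *example_pool;

static int example_pool_init(void)
{
	/* pre-allocate 8 elements of 512 bytes each */
	example_pool = mempool_create_kmalloc_pool(8, 512);
	return example_pool ? 0 : -ENOMEM;
}

static void example_pool_use(void)
{
	void *buf = mempool_alloc(example_pool, GFP_ATOMIC);

	if (!buf)
		return;	/* reserve exhausted and atomic alloc failed */
	/* ... fill and hand off the buffer ... */
	mempool_free(buf, example_pool);
}

static void example_pool_exit(void)
{
	/* only safe once every element has been returned */
	mempool_destroy(example_pool);
	example_pool = NULL;
}
#endif /* DIAGMEM_POOL_SKETCH */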
gpl-2.0
fentensoft/kernel_xt701
drivers/ide/ali14xx.c
773
6564
/* * Copyright (C) 1996 Linus Torvalds & author (see below) */ /* * ALI M14xx chipset EIDE controller * * Works for ALI M1439/1443/1445/1487/1489 chipsets. * * Adapted from code developed by derekn@vw.ece.cmu.edu. -ml * Derek's notes follow: * * I think the code should be pretty understandable, * but I'll be happy to (try to) answer questions. * * The critical part is in the setupDrive function. The initRegisters * function doesn't seem to be necessary, but the DOS driver does it, so * I threw it in. * * I've only tested this on my system, which only has one disk. I posted * it to comp.sys.linux.hardware, so maybe some other people will try it * out. * * Derek Noonburg (derekn@ece.cmu.edu) * 95-sep-26 * * Update 96-jul-13: * * I've since upgraded to two disks and a CD-ROM, with no trouble, and * I've also heard from several others who have used it successfully. * This driver appears to work with both the 1443/1445 and the 1487/1489 * chipsets. I've added support for PIO mode 4 for the 1487. This * seems to work just fine on the 1443 also, although I'm not sure it's * advertised as supporting mode 4. (I've been running a WDC AC21200 in * mode 4 for a while now with no trouble.) -Derek */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/mm.h> #include <linux/ioport.h> #include <linux/blkdev.h> #include <linux/ide.h> #include <linux/init.h> #include <asm/io.h> #define DRV_NAME "ali14xx" /* port addresses for auto-detection */ #define ALI_NUM_PORTS 4 static const int ports[ALI_NUM_PORTS] __initdata = { 0x074, 0x0f4, 0x034, 0x0e4 }; /* register initialization data */ typedef struct { u8 reg, data; } RegInitializer; static const RegInitializer initData[] __initdata = { {0x01, 0x0f}, {0x02, 0x00}, {0x03, 0x00}, {0x04, 0x00}, {0x05, 0x00}, {0x06, 0x00}, {0x07, 0x2b}, {0x0a, 0x0f}, {0x25, 0x00}, {0x26, 0x00}, {0x27, 0x00}, {0x28, 0x00}, {0x29, 0x00}, {0x2a, 0x00}, {0x2f, 0x00}, {0x2b, 0x00}, {0x2c, 0x00}, {0x2d, 0x00}, {0x2e, 0x00}, {0x30, 0x00}, {0x31, 0x00}, {0x32, 0x00}, {0x33, 0x00}, {0x34, 0xff}, {0x35, 0x03}, {0x00, 0x00} }; /* timing parameter registers for each drive */ static struct { u8 reg1, reg2, reg3, reg4; } regTab[4] = { {0x03, 0x26, 0x04, 0x27}, /* drive 0 */ {0x05, 0x28, 0x06, 0x29}, /* drive 1 */ {0x2b, 0x30, 0x2c, 0x31}, /* drive 2 */ {0x2d, 0x32, 0x2e, 0x33}, /* drive 3 */ }; static int basePort; /* base port address */ static int regPort; /* port for register number */ static int dataPort; /* port for register data */ static u8 regOn; /* output to base port to access registers */ static u8 regOff; /* output to base port to close registers */ /*------------------------------------------------------------------------*/ /* * Read a controller register. */ static inline u8 inReg(u8 reg) { outb_p(reg, regPort); return inb(dataPort); } /* * Write a controller register. */ static void outReg(u8 data, u8 reg) { outb_p(reg, regPort); outb_p(data, dataPort); } static DEFINE_SPINLOCK(ali14xx_lock); /* * Set PIO mode for the specified drive. * This function computes timing parameters * and sets controller registers accordingly. */ static void ali14xx_set_pio_mode(ide_drive_t *drive, const u8 pio) { int driveNum; int time1, time2; u8 param1, param2, param3, param4; unsigned long flags; int bus_speed = ide_vlb_clk ? 
ide_vlb_clk : 50; struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); /* calculate timing, according to PIO mode */ time1 = ide_pio_cycle_time(drive, pio); time2 = t->active; param3 = param1 = (time2 * bus_speed + 999) / 1000; param4 = param2 = (time1 * bus_speed + 999) / 1000 - param1; if (pio < 3) { param3 += 8; param4 += 8; } printk(KERN_DEBUG "%s: PIO mode%d, t1=%dns, t2=%dns, cycles = %d+%d, %d+%d\n", drive->name, pio, time1, time2, param1, param2, param3, param4); /* stuff timing parameters into controller registers */ driveNum = (drive->hwif->index << 1) + (drive->dn & 1); spin_lock_irqsave(&ali14xx_lock, flags); outb_p(regOn, basePort); outReg(param1, regTab[driveNum].reg1); outReg(param2, regTab[driveNum].reg2); outReg(param3, regTab[driveNum].reg3); outReg(param4, regTab[driveNum].reg4); outb_p(regOff, basePort); spin_unlock_irqrestore(&ali14xx_lock, flags); } /* * Auto-detect the IDE controller port. */ static int __init findPort(void) { int i; u8 t; unsigned long flags; local_irq_save(flags); for (i = 0; i < ALI_NUM_PORTS; ++i) { basePort = ports[i]; regOff = inb(basePort); for (regOn = 0x30; regOn <= 0x33; ++regOn) { outb_p(regOn, basePort); if (inb(basePort) == regOn) { regPort = basePort + 4; dataPort = basePort + 8; t = inReg(0) & 0xf0; outb_p(regOff, basePort); local_irq_restore(flags); if (t != 0x50) return 0; return 1; /* success */ } } outb_p(regOff, basePort); } local_irq_restore(flags); return 0; } /* * Initialize controller registers with default values. */ static int __init initRegisters(void) { const RegInitializer *p; u8 t; unsigned long flags; local_irq_save(flags); outb_p(regOn, basePort); for (p = initData; p->reg != 0; ++p) outReg(p->data, p->reg); outb_p(0x01, regPort); t = inb(regPort) & 0x01; outb_p(regOff, basePort); local_irq_restore(flags); return t; } static const struct ide_port_ops ali14xx_port_ops = { .set_pio_mode = ali14xx_set_pio_mode, }; static const struct ide_port_info ali14xx_port_info = { .name = DRV_NAME, .chipset = ide_ali14xx, .port_ops = &ali14xx_port_ops, .host_flags = IDE_HFLAG_NO_DMA, .pio_mask = ATA_PIO4, }; static int __init ali14xx_probe(void) { printk(KERN_DEBUG "ali14xx: base=0x%03x, regOn=0x%02x.\n", basePort, regOn); /* initialize controller registers */ if (!initRegisters()) { printk(KERN_ERR "ali14xx: Chip initialization failed.\n"); return 1; } return ide_legacy_device_add(&ali14xx_port_info, 0); } static int probe_ali14xx; module_param_named(probe, probe_ali14xx, bool, 0); MODULE_PARM_DESC(probe, "probe for ALI M14xx chipsets"); static int __init ali14xx_init(void) { if (probe_ali14xx == 0) goto out; /* auto-detect IDE controller port */ if (findPort()) { if (ali14xx_probe()) return -ENODEV; return 0; } printk(KERN_ERR "ali14xx: not found.\n"); out: return -ENODEV; } module_init(ali14xx_init); MODULE_AUTHOR("see local file"); MODULE_DESCRIPTION("support of ALI 14XX IDE chipsets"); MODULE_LICENSE("GPL");
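/*
 * Worked example of the rounding arithmetic in ali14xx_set_pio_mode()
 * above: a time in nanoseconds is converted to VLB clock cycles at
 * bus_speed MHz with cycles = time * MHz / 1000, rounded up via the
 * "+ 999" term. A standalone sketch; the ALI14XX_TIMING_DEMO guard and
 * the sample numbers are illustrative only.
 */
#ifdef ALI14XX_TIMING_DEMO
#include <stdio.h>

int main(void)
{
	int bus_speed = 50;	/* VLB clock in MHz, the driver's fallback */
	int time2 = 70;		/* a typical PIO mode 4 active time, ns */

	/* 70 ns * 50 MHz = 3.5 cycles, which must round up to 4 */
	int param = (time2 * bus_speed + 999) / 1000;

	printf("active time %d ns -> %d cycles\n", time2, param);
	return 0;
}
#endif /* ALI14XX_TIMING_DEMO */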
gpl-2.0
CyanogenMod/zte-kernel-msm7x27
drivers/isdn/gigaset/common.c
773
28130
/* * Stuff used by all variants of the driver * * Copyright (c) 2001 by Stefan Eilers, * Hansjoerg Lipp <hjlipp@web.de>, * Tilman Schmidt <tilman@imap.cc>. * * ===================================================================== * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * ===================================================================== */ #include "gigaset.h" #include <linux/module.h> #include <linux/moduleparam.h> /* Version Information */ #define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers" #define DRIVER_DESC "Driver for Gigaset 307x" #ifdef CONFIG_GIGASET_DEBUG #define DRIVER_DESC_DEBUG " (debug build)" #else #define DRIVER_DESC_DEBUG "" #endif /* Module parameters */ int gigaset_debuglevel; EXPORT_SYMBOL_GPL(gigaset_debuglevel); module_param_named(debug, gigaset_debuglevel, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(debug, "debug level"); /* driver state flags */ #define VALID_MINOR 0x01 #define VALID_ID 0x02 /** * gigaset_dbg_buffer() - dump data in ASCII and hex for debugging * @level: debugging level. * @msg: message prefix. * @len: number of bytes to dump. * @buf: data to dump. * * If the current debugging level includes one of the bits set in @level, * @len bytes starting at @buf are logged to dmesg at KERN_DEBUG prio, * prefixed by the text @msg. */ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg, size_t len, const unsigned char *buf) { unsigned char outbuf[80]; unsigned char c; size_t space = sizeof outbuf - 1; unsigned char *out = outbuf; size_t numin = len; while (numin--) { c = *buf++; if (c == '~' || c == '^' || c == '\\') { if (!space--) break; *out++ = '\\'; } if (c & 0x80) { if (!space--) break; *out++ = '~'; c ^= 0x80; } if (c < 0x20 || c == 0x7f) { if (!space--) break; *out++ = '^'; c ^= 0x40; } if (!space--) break; *out++ = c; } *out = 0; gig_dbg(level, "%s (%u bytes): %s", msg, (unsigned) len, outbuf); } EXPORT_SYMBOL_GPL(gigaset_dbg_buffer); static int setflags(struct cardstate *cs, unsigned flags, unsigned delay) { int r; r = cs->ops->set_modem_ctrl(cs, cs->control_state, flags); cs->control_state = flags; if (r < 0) return r; if (delay) { set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(delay * HZ / 1000); } return 0; } int gigaset_enterconfigmode(struct cardstate *cs) { int i, r; cs->control_state = TIOCM_RTS; r = setflags(cs, TIOCM_DTR, 200); if (r < 0) goto error; r = setflags(cs, 0, 200); if (r < 0) goto error; for (i = 0; i < 5; ++i) { r = setflags(cs, TIOCM_RTS, 100); if (r < 0) goto error; r = setflags(cs, 0, 100); if (r < 0) goto error; } r = setflags(cs, TIOCM_RTS|TIOCM_DTR, 800); if (r < 0) goto error; return 0; error: dev_err(cs->dev, "error %d on setuartbits\n", -r); cs->control_state = TIOCM_RTS|TIOCM_DTR; cs->ops->set_modem_ctrl(cs, 0, TIOCM_RTS|TIOCM_DTR); return -1; } static int test_timeout(struct at_state_t *at_state) { if (!at_state->timer_expires) return 0; if (--at_state->timer_expires) { gig_dbg(DEBUG_MCMD, "decreased timer of %p to %lu", at_state, at_state->timer_expires); return 0; } gigaset_add_event(at_state->cs, at_state, EV_TIMEOUT, NULL, at_state->timer_index, NULL); return 1; } static void timer_tick(unsigned long data) { struct cardstate *cs = (struct cardstate *) data; unsigned long flags; unsigned channel; struct at_state_t *at_state; int timeout 
= 0; spin_lock_irqsave(&cs->lock, flags); for (channel = 0; channel < cs->channels; ++channel) if (test_timeout(&cs->bcs[channel].at_state)) timeout = 1; if (test_timeout(&cs->at_state)) timeout = 1; list_for_each_entry(at_state, &cs->temp_at_states, list) if (test_timeout(at_state)) timeout = 1; if (cs->running) { mod_timer(&cs->timer, jiffies + msecs_to_jiffies(GIG_TICK)); if (timeout) { gig_dbg(DEBUG_EVENT, "scheduling timeout"); tasklet_schedule(&cs->event_tasklet); } } spin_unlock_irqrestore(&cs->lock, flags); } int gigaset_get_channel(struct bc_state *bcs) { unsigned long flags; spin_lock_irqsave(&bcs->cs->lock, flags); if (bcs->use_count || !try_module_get(bcs->cs->driver->owner)) { gig_dbg(DEBUG_CHANNEL, "could not allocate channel %d", bcs->channel); spin_unlock_irqrestore(&bcs->cs->lock, flags); return 0; } ++bcs->use_count; bcs->busy = 1; gig_dbg(DEBUG_CHANNEL, "allocated channel %d", bcs->channel); spin_unlock_irqrestore(&bcs->cs->lock, flags); return 1; } struct bc_state *gigaset_get_free_channel(struct cardstate *cs) { unsigned long flags; int i; spin_lock_irqsave(&cs->lock, flags); if (!try_module_get(cs->driver->owner)) { gig_dbg(DEBUG_CHANNEL, "could not get module for allocating channel"); spin_unlock_irqrestore(&cs->lock, flags); return NULL; } for (i = 0; i < cs->channels; ++i) if (!cs->bcs[i].use_count) { ++cs->bcs[i].use_count; cs->bcs[i].busy = 1; spin_unlock_irqrestore(&cs->lock, flags); gig_dbg(DEBUG_CHANNEL, "allocated channel %d", i); return cs->bcs + i; } module_put(cs->driver->owner); spin_unlock_irqrestore(&cs->lock, flags); gig_dbg(DEBUG_CHANNEL, "no free channel"); return NULL; } void gigaset_free_channel(struct bc_state *bcs) { unsigned long flags; spin_lock_irqsave(&bcs->cs->lock, flags); if (!bcs->busy) { gig_dbg(DEBUG_CHANNEL, "could not free channel %d", bcs->channel); spin_unlock_irqrestore(&bcs->cs->lock, flags); return; } --bcs->use_count; bcs->busy = 0; module_put(bcs->cs->driver->owner); gig_dbg(DEBUG_CHANNEL, "freed channel %d", bcs->channel); spin_unlock_irqrestore(&bcs->cs->lock, flags); } int gigaset_get_channels(struct cardstate *cs) { unsigned long flags; int i; spin_lock_irqsave(&cs->lock, flags); for (i = 0; i < cs->channels; ++i) if (cs->bcs[i].use_count) { spin_unlock_irqrestore(&cs->lock, flags); gig_dbg(DEBUG_CHANNEL, "could not allocate all channels"); return 0; } for (i = 0; i < cs->channels; ++i) ++cs->bcs[i].use_count; spin_unlock_irqrestore(&cs->lock, flags); gig_dbg(DEBUG_CHANNEL, "allocated all channels"); return 1; } void gigaset_free_channels(struct cardstate *cs) { unsigned long flags; int i; gig_dbg(DEBUG_CHANNEL, "unblocking all channels"); spin_lock_irqsave(&cs->lock, flags); for (i = 0; i < cs->channels; ++i) --cs->bcs[i].use_count; spin_unlock_irqrestore(&cs->lock, flags); } void gigaset_block_channels(struct cardstate *cs) { unsigned long flags; int i; gig_dbg(DEBUG_CHANNEL, "blocking all channels"); spin_lock_irqsave(&cs->lock, flags); for (i = 0; i < cs->channels; ++i) ++cs->bcs[i].use_count; spin_unlock_irqrestore(&cs->lock, flags); } static void clear_events(struct cardstate *cs) { struct event_t *ev; unsigned head, tail; unsigned long flags; spin_lock_irqsave(&cs->ev_lock, flags); head = cs->ev_head; tail = cs->ev_tail; while (tail != head) { ev = cs->events + head; kfree(ev->ptr); head = (head + 1) % MAX_EVENTS; } cs->ev_head = tail; spin_unlock_irqrestore(&cs->ev_lock, flags); } /** * gigaset_add_event() - add event to device event queue * @cs: device descriptor structure. * @at_state: connection state structure. 
* @type: event type. * @ptr: pointer parameter for event. * @parameter: integer parameter for event. * @arg: pointer parameter for event. * * Allocate an event queue entry from the device's event queue, and set it up * with the parameters given. * * Return value: added event */ struct event_t *gigaset_add_event(struct cardstate *cs, struct at_state_t *at_state, int type, void *ptr, int parameter, void *arg) { unsigned long flags; unsigned next, tail; struct event_t *event = NULL; gig_dbg(DEBUG_EVENT, "queueing event %d", type); spin_lock_irqsave(&cs->ev_lock, flags); tail = cs->ev_tail; next = (tail + 1) % MAX_EVENTS; if (unlikely(next == cs->ev_head)) dev_err(cs->dev, "event queue full\n"); else { event = cs->events + tail; event->type = type; event->at_state = at_state; event->cid = -1; event->ptr = ptr; event->arg = arg; event->parameter = parameter; cs->ev_tail = next; } spin_unlock_irqrestore(&cs->ev_lock, flags); return event; } EXPORT_SYMBOL_GPL(gigaset_add_event); static void free_strings(struct at_state_t *at_state) { int i; for (i = 0; i < STR_NUM; ++i) { kfree(at_state->str_var[i]); at_state->str_var[i] = NULL; } } static void clear_at_state(struct at_state_t *at_state) { free_strings(at_state); } static void dealloc_at_states(struct cardstate *cs) { struct at_state_t *cur, *next; list_for_each_entry_safe(cur, next, &cs->temp_at_states, list) { list_del(&cur->list); free_strings(cur); kfree(cur); } } static void gigaset_freebcs(struct bc_state *bcs) { int i; gig_dbg(DEBUG_INIT, "freeing bcs[%d]->hw", bcs->channel); if (!bcs->cs->ops->freebcshw(bcs)) gig_dbg(DEBUG_INIT, "failed"); gig_dbg(DEBUG_INIT, "clearing bcs[%d]->at_state", bcs->channel); clear_at_state(&bcs->at_state); gig_dbg(DEBUG_INIT, "freeing bcs[%d]->skb", bcs->channel); dev_kfree_skb(bcs->rx_skb); bcs->rx_skb = NULL; for (i = 0; i < AT_NUM; ++i) { kfree(bcs->commands[i]); bcs->commands[i] = NULL; } } static struct cardstate *alloc_cs(struct gigaset_driver *drv) { unsigned long flags; unsigned i; struct cardstate *cs; struct cardstate *ret = NULL; spin_lock_irqsave(&drv->lock, flags); if (drv->blocked) goto exit; for (i = 0; i < drv->minors; ++i) { cs = drv->cs + i; if (!(cs->flags & VALID_MINOR)) { cs->flags = VALID_MINOR; ret = cs; break; } } exit: spin_unlock_irqrestore(&drv->lock, flags); return ret; } static void free_cs(struct cardstate *cs) { cs->flags = 0; } static void make_valid(struct cardstate *cs, unsigned mask) { unsigned long flags; struct gigaset_driver *drv = cs->driver; spin_lock_irqsave(&drv->lock, flags); cs->flags |= mask; spin_unlock_irqrestore(&drv->lock, flags); } static void make_invalid(struct cardstate *cs, unsigned mask) { unsigned long flags; struct gigaset_driver *drv = cs->driver; spin_lock_irqsave(&drv->lock, flags); cs->flags &= ~mask; spin_unlock_irqrestore(&drv->lock, flags); } /** * gigaset_freecs() - free all associated ressources of a device * @cs: device descriptor structure. * * Stops all tasklets and timers, unregisters the device from all * subsystems it was registered to, deallocates the device structure * @cs and all structures referenced from it. * Operations on the device should be stopped before calling this. 
*/ void gigaset_freecs(struct cardstate *cs) { int i; unsigned long flags; if (!cs) return; mutex_lock(&cs->mutex); if (!cs->bcs) goto f_cs; if (!cs->inbuf) goto f_bcs; spin_lock_irqsave(&cs->lock, flags); cs->running = 0; spin_unlock_irqrestore(&cs->lock, flags); /* event handler and timer are not rescheduled below */ tasklet_kill(&cs->event_tasklet); del_timer_sync(&cs->timer); switch (cs->cs_init) { default: /* clear B channel structures */ for (i = 0; i < cs->channels; ++i) { gig_dbg(DEBUG_INIT, "clearing bcs[%d]", i); gigaset_freebcs(cs->bcs + i); } /* clear device sysfs */ gigaset_free_dev_sysfs(cs); gigaset_if_free(cs); gig_dbg(DEBUG_INIT, "clearing hw"); cs->ops->freecshw(cs); /* fall through */ case 2: /* error in initcshw */ /* Deregister from LL */ make_invalid(cs, VALID_ID); gigaset_isdn_unregdev(cs); /* fall through */ case 1: /* error when registering to LL */ gig_dbg(DEBUG_INIT, "clearing at_state"); clear_at_state(&cs->at_state); dealloc_at_states(cs); /* fall through */ case 0: /* error in basic setup */ clear_events(cs); gig_dbg(DEBUG_INIT, "freeing inbuf"); kfree(cs->inbuf); } f_bcs: gig_dbg(DEBUG_INIT, "freeing bcs[]"); kfree(cs->bcs); f_cs: gig_dbg(DEBUG_INIT, "freeing cs"); mutex_unlock(&cs->mutex); free_cs(cs); } EXPORT_SYMBOL_GPL(gigaset_freecs); void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs, struct cardstate *cs, int cid) { int i; INIT_LIST_HEAD(&at_state->list); at_state->waiting = 0; at_state->getstring = 0; at_state->pending_commands = 0; at_state->timer_expires = 0; at_state->timer_active = 0; at_state->timer_index = 0; at_state->seq_index = 0; at_state->ConState = 0; for (i = 0; i < STR_NUM; ++i) at_state->str_var[i] = NULL; at_state->int_var[VAR_ZDLE] = 0; at_state->int_var[VAR_ZCTP] = -1; at_state->int_var[VAR_ZSAU] = ZSAU_NULL; at_state->cs = cs; at_state->bcs = bcs; at_state->cid = cid; if (!cid) at_state->replystruct = cs->tabnocid; else at_state->replystruct = cs->tabcid; } static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct cardstate *cs) /* inbuf->read must be allocated before! */ { inbuf->head = 0; inbuf->tail = 0; inbuf->cs = cs; inbuf->inputstate = INS_command; } /** * gigaset_fill_inbuf() - append received data to input buffer * @inbuf: buffer structure. * @src: received data. * @numbytes: number of bytes received. 
*/ int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src, unsigned numbytes) { unsigned n, head, tail, bytesleft; gig_dbg(DEBUG_INTR, "received %u bytes", numbytes); if (!numbytes) return 0; bytesleft = numbytes; tail = inbuf->tail; head = inbuf->head; gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail); while (bytesleft) { if (head > tail) n = head - 1 - tail; else if (head == 0) n = (RBUFSIZE-1) - tail; else n = RBUFSIZE - tail; if (!n) { dev_err(inbuf->cs->dev, "buffer overflow (%u bytes lost)\n", bytesleft); break; } if (n > bytesleft) n = bytesleft; memcpy(inbuf->data + tail, src, n); bytesleft -= n; tail = (tail + n) % RBUFSIZE; src += n; } gig_dbg(DEBUG_INTR, "setting tail to %u", tail); inbuf->tail = tail; return numbytes != bytesleft; } EXPORT_SYMBOL_GPL(gigaset_fill_inbuf); /* Initialize the b-channel structure */ static struct bc_state *gigaset_initbcs(struct bc_state *bcs, struct cardstate *cs, int channel) { int i; bcs->tx_skb = NULL; skb_queue_head_init(&bcs->squeue); bcs->corrupted = 0; bcs->trans_down = 0; bcs->trans_up = 0; gig_dbg(DEBUG_INIT, "setting up bcs[%d]->at_state", channel); gigaset_at_init(&bcs->at_state, bcs, cs, -1); #ifdef CONFIG_GIGASET_DEBUG bcs->emptycount = 0; #endif bcs->rx_bufsize = 0; bcs->rx_skb = NULL; bcs->rx_fcs = PPP_INITFCS; bcs->inputstate = 0; bcs->channel = channel; bcs->cs = cs; bcs->chstate = 0; bcs->use_count = 1; bcs->busy = 0; bcs->ignore = cs->ignoreframes; for (i = 0; i < AT_NUM; ++i) bcs->commands[i] = NULL; spin_lock_init(&bcs->aplock); bcs->ap = NULL; bcs->apconnstate = 0; gig_dbg(DEBUG_INIT, " setting up bcs[%d]->hw", channel); if (cs->ops->initbcshw(bcs)) return bcs; gig_dbg(DEBUG_INIT, " failed"); return NULL; } /** * gigaset_initcs() - initialize device structure * @drv: hardware driver the device belongs to * @channels: number of B channels supported by device * @onechannel: !=0 if B channel data and AT commands share one * communication channel (M10x), * ==0 if B channels have separate communication channels (base) * @ignoreframes: number of frames to ignore after setting up B channel * @cidmode: !=0: start in CallID mode * @modulename: name of driver module for LL registration * * Allocate and initialize cardstate structure for Gigaset driver * Calls hardware dependent gigaset_initcshw() function * Calls B channel initialization function gigaset_initbcs() for each B channel * * Return value: * pointer to cardstate structure */ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels, int onechannel, int ignoreframes, int cidmode, const char *modulename) { struct cardstate *cs; unsigned long flags; int i; gig_dbg(DEBUG_INIT, "allocating cs"); cs = alloc_cs(drv); if (!cs) { pr_err("maximum number of devices exceeded\n"); return NULL; } gig_dbg(DEBUG_INIT, "allocating bcs[0..%d]", channels - 1); cs->bcs = kmalloc(channels * sizeof(struct bc_state), GFP_KERNEL); if (!cs->bcs) { pr_err("out of memory\n"); goto error; } gig_dbg(DEBUG_INIT, "allocating inbuf"); cs->inbuf = kmalloc(sizeof(struct inbuf_t), GFP_KERNEL); if (!cs->inbuf) { pr_err("out of memory\n"); goto error; } cs->cs_init = 0; cs->channels = channels; cs->onechannel = onechannel; cs->ignoreframes = ignoreframes; INIT_LIST_HEAD(&cs->temp_at_states); cs->running = 0; init_timer(&cs->timer); /* clear next & prev */ spin_lock_init(&cs->ev_lock); cs->ev_tail = 0; cs->ev_head = 0; tasklet_init(&cs->event_tasklet, gigaset_handle_event, (unsigned long) cs); cs->commands_pending = 0; cs->cur_at_seq = 0; cs->gotfwver = -1; cs->open_count = 
0; cs->dev = NULL; cs->tty = NULL; cs->tty_dev = NULL; cs->cidmode = cidmode != 0; cs->tabnocid = gigaset_tab_nocid; cs->tabcid = gigaset_tab_cid; init_waitqueue_head(&cs->waitqueue); cs->waiting = 0; cs->mode = M_UNKNOWN; cs->mstate = MS_UNINITIALIZED; ++cs->cs_init; gig_dbg(DEBUG_INIT, "setting up at_state"); spin_lock_init(&cs->lock); gigaset_at_init(&cs->at_state, NULL, cs, 0); cs->dle = 0; cs->cbytes = 0; gig_dbg(DEBUG_INIT, "setting up inbuf"); gigaset_inbuf_init(cs->inbuf, cs); cs->connected = 0; cs->isdn_up = 0; gig_dbg(DEBUG_INIT, "setting up cmdbuf"); cs->cmdbuf = cs->lastcmdbuf = NULL; spin_lock_init(&cs->cmdlock); cs->curlen = 0; cs->cmdbytes = 0; gig_dbg(DEBUG_INIT, "setting up iif"); if (!gigaset_isdn_regdev(cs, modulename)) { pr_err("error registering ISDN device\n"); goto error; } make_valid(cs, VALID_ID); ++cs->cs_init; gig_dbg(DEBUG_INIT, "setting up hw"); if (!cs->ops->initcshw(cs)) goto error; ++cs->cs_init; /* set up character device */ gigaset_if_init(cs); /* set up device sysfs */ gigaset_init_dev_sysfs(cs); /* set up channel data structures */ for (i = 0; i < channels; ++i) { gig_dbg(DEBUG_INIT, "setting up bcs[%d]", i); if (!gigaset_initbcs(cs->bcs + i, cs, i)) { pr_err("could not allocate channel %d data\n", i); goto error; } } spin_lock_irqsave(&cs->lock, flags); cs->running = 1; spin_unlock_irqrestore(&cs->lock, flags); setup_timer(&cs->timer, timer_tick, (unsigned long) cs); cs->timer.expires = jiffies + msecs_to_jiffies(GIG_TICK); /* FIXME: can jiffies increase too much until the timer is added? * Same problem(?) with mod_timer() in timer_tick(). */ add_timer(&cs->timer); gig_dbg(DEBUG_INIT, "cs initialized"); return cs; error: gig_dbg(DEBUG_INIT, "failed"); gigaset_freecs(cs); return NULL; } EXPORT_SYMBOL_GPL(gigaset_initcs); /* ReInitialize the b-channel structure on hangup */ void gigaset_bcs_reinit(struct bc_state *bcs) { struct sk_buff *skb; struct cardstate *cs = bcs->cs; unsigned long flags; while ((skb = skb_dequeue(&bcs->squeue)) != NULL) dev_kfree_skb(skb); spin_lock_irqsave(&cs->lock, flags); clear_at_state(&bcs->at_state); bcs->at_state.ConState = 0; bcs->at_state.timer_active = 0; bcs->at_state.timer_expires = 0; bcs->at_state.cid = -1; /* No CID defined */ spin_unlock_irqrestore(&cs->lock, flags); bcs->inputstate = 0; #ifdef CONFIG_GIGASET_DEBUG bcs->emptycount = 0; #endif bcs->rx_fcs = PPP_INITFCS; bcs->chstate = 0; bcs->ignore = cs->ignoreframes; dev_kfree_skb(bcs->rx_skb); bcs->rx_skb = NULL; cs->ops->reinitbcshw(bcs); } static void cleanup_cs(struct cardstate *cs) { struct cmdbuf_t *cb, *tcb; int i; unsigned long flags; spin_lock_irqsave(&cs->lock, flags); cs->mode = M_UNKNOWN; cs->mstate = MS_UNINITIALIZED; clear_at_state(&cs->at_state); dealloc_at_states(cs); free_strings(&cs->at_state); gigaset_at_init(&cs->at_state, NULL, cs, 0); cs->inbuf->inputstate = INS_command; cs->inbuf->head = 0; cs->inbuf->tail = 0; cb = cs->cmdbuf; while (cb) { tcb = cb; cb = cb->next; kfree(tcb); } cs->cmdbuf = cs->lastcmdbuf = NULL; cs->curlen = 0; cs->cmdbytes = 0; cs->gotfwver = -1; cs->dle = 0; cs->cur_at_seq = 0; cs->commands_pending = 0; cs->cbytes = 0; spin_unlock_irqrestore(&cs->lock, flags); for (i = 0; i < cs->channels; ++i) { gigaset_freebcs(cs->bcs + i); if (!gigaset_initbcs(cs->bcs + i, cs, i)) pr_err("could not allocate channel %d data\n", i); } if (cs->waiting) { cs->cmd_result = -ENODEV; cs->waiting = 0; wake_up_interruptible(&cs->waitqueue); } } /** * gigaset_start() - start device operations * @cs: device descriptor structure. 
* * Prepares the device for use by setting up communication parameters, * scheduling an EV_START event to initiate device initialization, and * waiting for completion of the initialization. * * Return value: * 1 - success, 0 - error */ int gigaset_start(struct cardstate *cs) { unsigned long flags; if (mutex_lock_interruptible(&cs->mutex)) return 0; spin_lock_irqsave(&cs->lock, flags); cs->connected = 1; spin_unlock_irqrestore(&cs->lock, flags); if (cs->mstate != MS_LOCKED) { cs->ops->set_modem_ctrl(cs, 0, TIOCM_DTR|TIOCM_RTS); cs->ops->baud_rate(cs, B115200); cs->ops->set_line_ctrl(cs, CS8); cs->control_state = TIOCM_DTR|TIOCM_RTS; } cs->waiting = 1; if (!gigaset_add_event(cs, &cs->at_state, EV_START, NULL, 0, NULL)) { cs->waiting = 0; goto error; } gigaset_schedule_event(cs); wait_event(cs->waitqueue, !cs->waiting); mutex_unlock(&cs->mutex); return 1; error: mutex_unlock(&cs->mutex); return 0; } EXPORT_SYMBOL_GPL(gigaset_start); /** * gigaset_shutdown() - shut down device operations * @cs: device descriptor structure. * * Deactivates the device by scheduling an EV_SHUTDOWN event and * waiting for completion of the shutdown. * * Return value: * 0 - success, -1 - error (no device associated) */ int gigaset_shutdown(struct cardstate *cs) { mutex_lock(&cs->mutex); if (!(cs->flags & VALID_MINOR)) { mutex_unlock(&cs->mutex); return -1; } cs->waiting = 1; if (!gigaset_add_event(cs, &cs->at_state, EV_SHUTDOWN, NULL, 0, NULL)) goto exit; gigaset_schedule_event(cs); wait_event(cs->waitqueue, !cs->waiting); cleanup_cs(cs); exit: mutex_unlock(&cs->mutex); return 0; } EXPORT_SYMBOL_GPL(gigaset_shutdown); /** * gigaset_stop() - stop device operations * @cs: device descriptor structure. * * Stops operations on the device by scheduling an EV_STOP event and * waiting for completion of the shutdown. 
*/ void gigaset_stop(struct cardstate *cs) { mutex_lock(&cs->mutex); cs->waiting = 1; if (!gigaset_add_event(cs, &cs->at_state, EV_STOP, NULL, 0, NULL)) goto exit; gigaset_schedule_event(cs); wait_event(cs->waitqueue, !cs->waiting); cleanup_cs(cs); exit: mutex_unlock(&cs->mutex); } EXPORT_SYMBOL_GPL(gigaset_stop); static LIST_HEAD(drivers); static DEFINE_SPINLOCK(driver_lock); struct cardstate *gigaset_get_cs_by_id(int id) { unsigned long flags; struct cardstate *ret = NULL; struct cardstate *cs; struct gigaset_driver *drv; unsigned i; spin_lock_irqsave(&driver_lock, flags); list_for_each_entry(drv, &drivers, list) { spin_lock(&drv->lock); for (i = 0; i < drv->minors; ++i) { cs = drv->cs + i; if ((cs->flags & VALID_ID) && cs->myid == id) { ret = cs; break; } } spin_unlock(&drv->lock); if (ret) break; } spin_unlock_irqrestore(&driver_lock, flags); return ret; } void gigaset_debugdrivers(void) { unsigned long flags; static struct cardstate *cs; struct gigaset_driver *drv; unsigned i; spin_lock_irqsave(&driver_lock, flags); list_for_each_entry(drv, &drivers, list) { gig_dbg(DEBUG_DRIVER, "driver %p", drv); spin_lock(&drv->lock); for (i = 0; i < drv->minors; ++i) { gig_dbg(DEBUG_DRIVER, " index %u", i); cs = drv->cs + i; gig_dbg(DEBUG_DRIVER, " cardstate %p", cs); gig_dbg(DEBUG_DRIVER, " flags 0x%02x", cs->flags); gig_dbg(DEBUG_DRIVER, " minor_index %u", cs->minor_index); gig_dbg(DEBUG_DRIVER, " driver %p", cs->driver); gig_dbg(DEBUG_DRIVER, " i4l id %d", cs->myid); } spin_unlock(&drv->lock); } spin_unlock_irqrestore(&driver_lock, flags); } static struct cardstate *gigaset_get_cs_by_minor(unsigned minor) { unsigned long flags; struct cardstate *ret = NULL; struct gigaset_driver *drv; unsigned index; spin_lock_irqsave(&driver_lock, flags); list_for_each_entry(drv, &drivers, list) { if (minor < drv->minor || minor >= drv->minor + drv->minors) continue; index = minor - drv->minor; spin_lock(&drv->lock); if (drv->cs[index].flags & VALID_MINOR) ret = drv->cs + index; spin_unlock(&drv->lock); if (ret) break; } spin_unlock_irqrestore(&driver_lock, flags); return ret; } struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty) { if (tty->index < 0 || tty->index >= tty->driver->num) return NULL; return gigaset_get_cs_by_minor(tty->index + tty->driver->minor_start); } /** * gigaset_freedriver() - free all associated ressources of a driver * @drv: driver descriptor structure. * * Unregisters the driver from the system and deallocates the driver * structure @drv and all structures referenced from it. * All devices should be shut down before calling this. */ void gigaset_freedriver(struct gigaset_driver *drv) { unsigned long flags; spin_lock_irqsave(&driver_lock, flags); list_del(&drv->list); spin_unlock_irqrestore(&driver_lock, flags); gigaset_if_freedriver(drv); kfree(drv->cs); kfree(drv); } EXPORT_SYMBOL_GPL(gigaset_freedriver); /** * gigaset_initdriver() - initialize driver structure * @minor: First minor number * @minors: Number of minors this driver can handle * @procname: Name of the driver * @devname: Name of the device files (prefix without minor number) * * Allocate and initialize gigaset_driver structure. Initialize interface. * * Return value: * Pointer to the gigaset_driver structure on success, NULL on failure. 
*/ struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors, const char *procname, const char *devname, const struct gigaset_ops *ops, struct module *owner) { struct gigaset_driver *drv; unsigned long flags; unsigned i; drv = kmalloc(sizeof *drv, GFP_KERNEL); if (!drv) return NULL; drv->have_tty = 0; drv->minor = minor; drv->minors = minors; spin_lock_init(&drv->lock); drv->blocked = 0; drv->ops = ops; drv->owner = owner; INIT_LIST_HEAD(&drv->list); drv->cs = kmalloc(minors * sizeof *drv->cs, GFP_KERNEL); if (!drv->cs) goto error; for (i = 0; i < minors; ++i) { drv->cs[i].flags = 0; drv->cs[i].driver = drv; drv->cs[i].ops = drv->ops; drv->cs[i].minor_index = i; mutex_init(&drv->cs[i].mutex); } gigaset_if_initdriver(drv, procname, devname); spin_lock_irqsave(&driver_lock, flags); list_add(&drv->list, &drivers); spin_unlock_irqrestore(&driver_lock, flags); return drv; error: kfree(drv->cs); kfree(drv); return NULL; } EXPORT_SYMBOL_GPL(gigaset_initdriver); /** * gigaset_blockdriver() - block driver * @drv: driver descriptor structure. * * Prevents the driver from attaching new devices, in preparation for * deregistration. */ void gigaset_blockdriver(struct gigaset_driver *drv) { drv->blocked = 1; } EXPORT_SYMBOL_GPL(gigaset_blockdriver); static int __init gigaset_init_module(void) { /* in accordance with the principle of least astonishment, * setting the 'debug' parameter to 1 activates a sensible * set of default debug levels */ if (gigaset_debuglevel == 1) gigaset_debuglevel = DEBUG_DEFAULT; pr_info(DRIVER_DESC DRIVER_DESC_DEBUG "\n"); gigaset_isdn_regdrv(); return 0; } static void __exit gigaset_exit_module(void) { gigaset_isdn_unregdrv(); } module_init(gigaset_init_module); module_exit(gigaset_exit_module); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
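/*
 * A minimal usage sketch of the lifecycle API above, assuming the
 * gigaset headers are in scope.  my_gigaset_ops, the minor numbers and
 * the device names are hypothetical placeholders; real callers are
 * hardware modules in the style of ser-gigaset or usb-gigaset.
 * Compiled out so the file is unchanged for the build.
 */
#if 0
static struct gigaset_driver *my_driver;

static int __init my_hw_init(void)
{
	struct cardstate *cs;

	my_driver = gigaset_initdriver(/* minor */ 0, /* minors */ 1,
				       "my_gigaset", "ttyGMY",
				       &my_gigaset_ops, THIS_MODULE);
	if (!my_driver)
		return -ENOMEM;

	/* one B channel sharing the AT command channel, CID mode on */
	cs = gigaset_initcs(my_driver, 1, 1, 0, 1, "my_gigaset");
	if (!cs) {
		gigaset_freedriver(my_driver);
		return -ENODEV;
	}
	/* gigaset_start() returns 1 on success, 0 on error */
	return gigaset_start(cs) ? 0 : -EIO;
}

static void __exit my_hw_exit(void)
{
	gigaset_blockdriver(my_driver);	/* stop attaching new devices */
	/* ... gigaset_shutdown() + gigaset_freecs() for each device ... */
	gigaset_freedriver(my_driver);
}
#endif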
gpl-2.0
bq/linux-e60q22
drivers/video/offb.c
2053
19513
/* * linux/drivers/video/offb.c -- Open Firmware based frame buffer device * * Copyright (C) 1997 Geert Uytterhoeven * * This driver is partly based on the PowerMac console driver: * * Copyright (C) 1996 Paul Mackerras * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/delay.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/interrupt.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/pci.h> #include <asm/io.h> #ifdef CONFIG_PPC64 #include <asm/pci-bridge.h> #endif #ifdef CONFIG_PPC32 #include <asm/bootx.h> #endif #include "macmodes.h" /* Supported palette hacks */ enum { cmap_unknown, cmap_m64, /* ATI Mach64 */ cmap_r128, /* ATI Rage128 */ cmap_M3A, /* ATI Rage Mobility M3 Head A */ cmap_M3B, /* ATI Rage Mobility M3 Head B */ cmap_radeon, /* ATI Radeon */ cmap_gxt2000, /* IBM GXT2000 */ cmap_avivo, /* ATI R5xx */ }; struct offb_par { volatile void __iomem *cmap_adr; volatile void __iomem *cmap_data; int cmap_type; int blanked; }; struct offb_par default_par; #ifdef CONFIG_PPC32 extern boot_infos_t *boot_infos; #endif /* Definitions used by the Avivo palette hack */ #define AVIVO_DC_LUT_RW_SELECT 0x6480 #define AVIVO_DC_LUT_RW_MODE 0x6484 #define AVIVO_DC_LUT_RW_INDEX 0x6488 #define AVIVO_DC_LUT_SEQ_COLOR 0x648c #define AVIVO_DC_LUT_PWL_DATA 0x6490 #define AVIVO_DC_LUT_30_COLOR 0x6494 #define AVIVO_DC_LUT_READ_PIPE_SELECT 0x6498 #define AVIVO_DC_LUT_WRITE_EN_MASK 0x649c #define AVIVO_DC_LUT_AUTOFILL 0x64a0 #define AVIVO_DC_LUTA_CONTROL 0x64c0 #define AVIVO_DC_LUTA_BLACK_OFFSET_BLUE 0x64c4 #define AVIVO_DC_LUTA_BLACK_OFFSET_GREEN 0x64c8 #define AVIVO_DC_LUTA_BLACK_OFFSET_RED 0x64cc #define AVIVO_DC_LUTA_WHITE_OFFSET_BLUE 0x64d0 #define AVIVO_DC_LUTA_WHITE_OFFSET_GREEN 0x64d4 #define AVIVO_DC_LUTA_WHITE_OFFSET_RED 0x64d8 #define AVIVO_DC_LUTB_CONTROL 0x6cc0 #define AVIVO_DC_LUTB_BLACK_OFFSET_BLUE 0x6cc4 #define AVIVO_DC_LUTB_BLACK_OFFSET_GREEN 0x6cc8 #define AVIVO_DC_LUTB_BLACK_OFFSET_RED 0x6ccc #define AVIVO_DC_LUTB_WHITE_OFFSET_BLUE 0x6cd0 #define AVIVO_DC_LUTB_WHITE_OFFSET_GREEN 0x6cd4 #define AVIVO_DC_LUTB_WHITE_OFFSET_RED 0x6cd8 /* * Set a single color register. The values supplied are already * rounded down to the hardware's capabilities (according to the * entries in the var structure). Return != 0 for invalid regno. 
*/ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info) { struct offb_par *par = (struct offb_par *) info->par; if (info->fix.visual == FB_VISUAL_TRUECOLOR) { u32 *pal = info->pseudo_palette; u32 cr = red >> (16 - info->var.red.length); u32 cg = green >> (16 - info->var.green.length); u32 cb = blue >> (16 - info->var.blue.length); u32 value; if (regno >= 16) return -EINVAL; value = (cr << info->var.red.offset) | (cg << info->var.green.offset) | (cb << info->var.blue.offset); if (info->var.transp.length > 0) { u32 mask = (1 << info->var.transp.length) - 1; mask <<= info->var.transp.offset; value |= mask; } pal[regno] = value; return 0; } if (regno > 255) return -EINVAL; red >>= 8; green >>= 8; blue >>= 8; if (!par->cmap_adr) return 0; switch (par->cmap_type) { case cmap_m64: writeb(regno, par->cmap_adr); writeb(red, par->cmap_data); writeb(green, par->cmap_data); writeb(blue, par->cmap_data); break; case cmap_M3A: /* Clear PALETTE_ACCESS_CNTL in DAC_CNTL */ out_le32(par->cmap_adr + 0x58, in_le32(par->cmap_adr + 0x58) & ~0x20); case cmap_r128: /* Set palette index & data */ out_8(par->cmap_adr + 0xb0, regno); out_le32(par->cmap_adr + 0xb4, (red << 16 | green << 8 | blue)); break; case cmap_M3B: /* Set PALETTE_ACCESS_CNTL in DAC_CNTL */ out_le32(par->cmap_adr + 0x58, in_le32(par->cmap_adr + 0x58) | 0x20); /* Set palette index & data */ out_8(par->cmap_adr + 0xb0, regno); out_le32(par->cmap_adr + 0xb4, (red << 16 | green << 8 | blue)); break; case cmap_radeon: /* Set palette index & data (could be smarter) */ out_8(par->cmap_adr + 0xb0, regno); out_le32(par->cmap_adr + 0xb4, (red << 16 | green << 8 | blue)); break; case cmap_gxt2000: out_le32(((unsigned __iomem *) par->cmap_adr) + regno, (red << 16 | green << 8 | blue)); break; case cmap_avivo: /* Write to both LUTs for now */ writel(1, par->cmap_adr + AVIVO_DC_LUT_RW_SELECT); writeb(regno, par->cmap_adr + AVIVO_DC_LUT_RW_INDEX); writel(((red) << 22) | ((green) << 12) | ((blue) << 2), par->cmap_adr + AVIVO_DC_LUT_30_COLOR); writel(0, par->cmap_adr + AVIVO_DC_LUT_RW_SELECT); writeb(regno, par->cmap_adr + AVIVO_DC_LUT_RW_INDEX); writel(((red) << 22) | ((green) << 12) | ((blue) << 2), par->cmap_adr + AVIVO_DC_LUT_30_COLOR); break; } return 0; } /* * Blank the display. 
*/ static int offb_blank(int blank, struct fb_info *info) { struct offb_par *par = (struct offb_par *) info->par; int i, j; if (!par->cmap_adr) return 0; if (!par->blanked) if (!blank) return 0; par->blanked = blank; if (blank) for (i = 0; i < 256; i++) { switch (par->cmap_type) { case cmap_m64: writeb(i, par->cmap_adr); for (j = 0; j < 3; j++) writeb(0, par->cmap_data); break; case cmap_M3A: /* Clear PALETTE_ACCESS_CNTL in DAC_CNTL */ out_le32(par->cmap_adr + 0x58, in_le32(par->cmap_adr + 0x58) & ~0x20); case cmap_r128: /* Set palette index & data */ out_8(par->cmap_adr + 0xb0, i); out_le32(par->cmap_adr + 0xb4, 0); break; case cmap_M3B: /* Set PALETTE_ACCESS_CNTL in DAC_CNTL */ out_le32(par->cmap_adr + 0x58, in_le32(par->cmap_adr + 0x58) | 0x20); /* Set palette index & data */ out_8(par->cmap_adr + 0xb0, i); out_le32(par->cmap_adr + 0xb4, 0); break; case cmap_radeon: out_8(par->cmap_adr + 0xb0, i); out_le32(par->cmap_adr + 0xb4, 0); break; case cmap_gxt2000: out_le32(((unsigned __iomem *) par->cmap_adr) + i, 0); break; case cmap_avivo: writel(1, par->cmap_adr + AVIVO_DC_LUT_RW_SELECT); writeb(i, par->cmap_adr + AVIVO_DC_LUT_RW_INDEX); writel(0, par->cmap_adr + AVIVO_DC_LUT_30_COLOR); writel(0, par->cmap_adr + AVIVO_DC_LUT_RW_SELECT); writeb(i, par->cmap_adr + AVIVO_DC_LUT_RW_INDEX); writel(0, par->cmap_adr + AVIVO_DC_LUT_30_COLOR); break; } } else fb_set_cmap(&info->cmap, info); return 0; } static int offb_set_par(struct fb_info *info) { struct offb_par *par = (struct offb_par *) info->par; /* On avivo, initialize palette control */ if (par->cmap_type == cmap_avivo) { writel(0, par->cmap_adr + AVIVO_DC_LUTA_CONTROL); writel(0, par->cmap_adr + AVIVO_DC_LUTA_BLACK_OFFSET_BLUE); writel(0, par->cmap_adr + AVIVO_DC_LUTA_BLACK_OFFSET_GREEN); writel(0, par->cmap_adr + AVIVO_DC_LUTA_BLACK_OFFSET_RED); writel(0x0000ffff, par->cmap_adr + AVIVO_DC_LUTA_WHITE_OFFSET_BLUE); writel(0x0000ffff, par->cmap_adr + AVIVO_DC_LUTA_WHITE_OFFSET_GREEN); writel(0x0000ffff, par->cmap_adr + AVIVO_DC_LUTA_WHITE_OFFSET_RED); writel(0, par->cmap_adr + AVIVO_DC_LUTB_CONTROL); writel(0, par->cmap_adr + AVIVO_DC_LUTB_BLACK_OFFSET_BLUE); writel(0, par->cmap_adr + AVIVO_DC_LUTB_BLACK_OFFSET_GREEN); writel(0, par->cmap_adr + AVIVO_DC_LUTB_BLACK_OFFSET_RED); writel(0x0000ffff, par->cmap_adr + AVIVO_DC_LUTB_WHITE_OFFSET_BLUE); writel(0x0000ffff, par->cmap_adr + AVIVO_DC_LUTB_WHITE_OFFSET_GREEN); writel(0x0000ffff, par->cmap_adr + AVIVO_DC_LUTB_WHITE_OFFSET_RED); writel(1, par->cmap_adr + AVIVO_DC_LUT_RW_SELECT); writel(0, par->cmap_adr + AVIVO_DC_LUT_RW_MODE); writel(0x0000003f, par->cmap_adr + AVIVO_DC_LUT_WRITE_EN_MASK); writel(0, par->cmap_adr + AVIVO_DC_LUT_RW_SELECT); writel(0, par->cmap_adr + AVIVO_DC_LUT_RW_MODE); writel(0x0000003f, par->cmap_adr + AVIVO_DC_LUT_WRITE_EN_MASK); } return 0; } static void offb_destroy(struct fb_info *info) { if (info->screen_base) iounmap(info->screen_base); release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size); framebuffer_release(info); } static struct fb_ops offb_ops = { .owner = THIS_MODULE, .fb_destroy = offb_destroy, .fb_setcolreg = offb_setcolreg, .fb_set_par = offb_set_par, .fb_blank = offb_blank, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, }; static void __iomem *offb_map_reg(struct device_node *np, int index, unsigned long offset, unsigned long size) { const u32 *addrp; u64 asize, taddr; unsigned int flags; addrp = of_get_pci_address(np, index, &asize, &flags); if (addrp == NULL) addrp = 
of_get_address(np, index, &asize, &flags); if (addrp == NULL) return NULL; if ((flags & (IORESOURCE_IO | IORESOURCE_MEM)) == 0) return NULL; if ((offset + size) > asize) return NULL; taddr = of_translate_address(np, addrp); if (taddr == OF_BAD_ADDR) return NULL; return ioremap(taddr + offset, size); } static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp, const char *name, unsigned long address) { struct offb_par *par = (struct offb_par *) info->par; if (dp && !strncmp(name, "ATY,Rage128", 11)) { par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); if (par->cmap_adr) par->cmap_type = cmap_r128; } else if (dp && (!strncmp(name, "ATY,RageM3pA", 12) || !strncmp(name, "ATY,RageM3p12A", 14))) { par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); if (par->cmap_adr) par->cmap_type = cmap_M3A; } else if (dp && !strncmp(name, "ATY,RageM3pB", 12)) { par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); if (par->cmap_adr) par->cmap_type = cmap_M3B; } else if (dp && !strncmp(name, "ATY,Rage6", 9)) { par->cmap_adr = offb_map_reg(dp, 1, 0, 0x1fff); if (par->cmap_adr) par->cmap_type = cmap_radeon; } else if (!strncmp(name, "ATY,", 4)) { unsigned long base = address & 0xff000000UL; par->cmap_adr = ioremap(base + 0x7ff000, 0x1000) + 0xcc0; par->cmap_data = par->cmap_adr + 1; par->cmap_type = cmap_m64; } else if (dp && (of_device_is_compatible(dp, "pci1014,b7") || of_device_is_compatible(dp, "pci1014,21c"))) { par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000); if (par->cmap_adr) par->cmap_type = cmap_gxt2000; } else if (dp && !strncmp(name, "vga,Display-", 12)) { /* Look for AVIVO initialized by SLOF */ struct device_node *pciparent = of_get_parent(dp); const u32 *vid, *did; vid = of_get_property(pciparent, "vendor-id", NULL); did = of_get_property(pciparent, "device-id", NULL); /* This will match most R5xx */ if (vid && did && *vid == 0x1002 && ((*did >= 0x7100 && *did < 0x7800) || (*did >= 0x9400))) { par->cmap_adr = offb_map_reg(pciparent, 2, 0, 0x10000); if (par->cmap_adr) par->cmap_type = cmap_avivo; } of_node_put(pciparent); } info->fix.visual = (par->cmap_type != cmap_unknown) ? 
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_STATIC_PSEUDOCOLOR; } static void __init offb_init_fb(const char *name, const char *full_name, int width, int height, int depth, int pitch, unsigned long address, int foreign_endian, struct device_node *dp) { unsigned long res_size = pitch * height; struct offb_par *par = &default_par; unsigned long res_start = address; struct fb_fix_screeninfo *fix; struct fb_var_screeninfo *var; struct fb_info *info; if (!request_mem_region(res_start, res_size, "offb")) return; printk(KERN_INFO "Using unsupported %dx%d %s at %lx, depth=%d, pitch=%d\n", width, height, name, address, depth, pitch); if (depth != 8 && depth != 15 && depth != 16 && depth != 32) { printk(KERN_ERR "%s: can't use depth = %d\n", full_name, depth); release_mem_region(res_start, res_size); return; } info = framebuffer_alloc(sizeof(u32) * 16, NULL); if (info == 0) { release_mem_region(res_start, res_size); return; } fix = &info->fix; var = &info->var; info->par = par; strcpy(fix->id, "OFfb "); strncat(fix->id, name, sizeof(fix->id) - sizeof("OFfb ")); fix->id[sizeof(fix->id) - 1] = '\0'; var->xres = var->xres_virtual = width; var->yres = var->yres_virtual = height; fix->line_length = pitch; fix->smem_start = address; fix->smem_len = pitch * height; fix->type = FB_TYPE_PACKED_PIXELS; fix->type_aux = 0; par->cmap_type = cmap_unknown; if (depth == 8) offb_init_palette_hacks(info, dp, name, address); else fix->visual = FB_VISUAL_TRUECOLOR; var->xoffset = var->yoffset = 0; switch (depth) { case 8: var->bits_per_pixel = 8; var->red.offset = 0; var->red.length = 8; var->green.offset = 0; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; var->transp.offset = 0; var->transp.length = 0; break; case 15: /* RGB 555 */ var->bits_per_pixel = 16; var->red.offset = 10; var->red.length = 5; var->green.offset = 5; var->green.length = 5; var->blue.offset = 0; var->blue.length = 5; var->transp.offset = 0; var->transp.length = 0; break; case 16: /* RGB 565 */ var->bits_per_pixel = 16; var->red.offset = 11; var->red.length = 5; var->green.offset = 5; var->green.length = 6; var->blue.offset = 0; var->blue.length = 5; var->transp.offset = 0; var->transp.length = 0; break; case 32: /* RGB 888 */ var->bits_per_pixel = 32; var->red.offset = 16; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; var->transp.offset = 24; var->transp.length = 8; break; } var->red.msb_right = var->green.msb_right = var->blue.msb_right = var->transp.msb_right = 0; var->grayscale = 0; var->nonstd = 0; var->activate = 0; var->height = var->width = -1; var->pixclock = 10000; var->left_margin = var->right_margin = 16; var->upper_margin = var->lower_margin = 16; var->hsync_len = var->vsync_len = 8; var->sync = 0; var->vmode = FB_VMODE_NONINTERLACED; /* set offb aperture size for generic probing */ info->apertures = alloc_apertures(1); if (!info->apertures) goto out_aper; info->apertures->ranges[0].base = address; info->apertures->ranges[0].size = fix->smem_len; info->fbops = &offb_ops; info->screen_base = ioremap(address, fix->smem_len); info->pseudo_palette = (void *) (info + 1); info->flags = FBINFO_DEFAULT | FBINFO_MISC_FIRMWARE | foreign_endian; fb_alloc_cmap(&info->cmap, 256, 0); if (register_framebuffer(info) < 0) goto out_err; printk(KERN_INFO "fb%d: Open Firmware frame buffer device on %s\n", info->node, full_name); return; out_err: iounmap(info->screen_base); out_aper: iounmap(par->cmap_adr); par->cmap_adr = NULL; framebuffer_release(info); 
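	/* error unwind mirrors setup order: unmap and release the fb first,
	 * then give back the mem region that was claimed at the start */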
release_mem_region(res_start, res_size); } static void __init offb_init_nodriver(struct device_node *dp, int no_real_node) { unsigned int len; int i, width = 640, height = 480, depth = 8, pitch = 640; unsigned int flags, rsize, addr_prop = 0; unsigned long max_size = 0; u64 rstart, address = OF_BAD_ADDR; const u32 *pp, *addrp, *up; u64 asize; int foreign_endian = 0; #ifdef __BIG_ENDIAN if (of_get_property(dp, "little-endian", NULL)) foreign_endian = FBINFO_FOREIGN_ENDIAN; #else if (of_get_property(dp, "big-endian", NULL)) foreign_endian = FBINFO_FOREIGN_ENDIAN; #endif pp = of_get_property(dp, "linux,bootx-depth", &len); if (pp == NULL) pp = of_get_property(dp, "depth", &len); if (pp && len == sizeof(u32)) depth = *pp; pp = of_get_property(dp, "linux,bootx-width", &len); if (pp == NULL) pp = of_get_property(dp, "width", &len); if (pp && len == sizeof(u32)) width = *pp; pp = of_get_property(dp, "linux,bootx-height", &len); if (pp == NULL) pp = of_get_property(dp, "height", &len); if (pp && len == sizeof(u32)) height = *pp; pp = of_get_property(dp, "linux,bootx-linebytes", &len); if (pp == NULL) pp = of_get_property(dp, "linebytes", &len); if (pp && len == sizeof(u32) && (*pp != 0xffffffffu)) pitch = *pp; else pitch = width * ((depth + 7) / 8); rsize = (unsigned long)pitch * (unsigned long)height; /* Ok, now we try to figure out the address of the framebuffer. * * Unfortunately, Open Firmware doesn't provide a standard way to do * so. All we can do is a dodgy heuristic that happens to work in * practice. On most machines, the "address" property contains what * we need, though not on Matrox cards found in IBM machines. What I've * found that appears to give good results is to go through the PCI * ranges and pick one that is both big enough and if possible encloses * the "address" property. If none match, we pick the biggest */ up = of_get_property(dp, "linux,bootx-addr", &len); if (up == NULL) up = of_get_property(dp, "address", &len); if (up && len == sizeof(u32)) addr_prop = *up; /* Hack for when BootX is passing us */ if (no_real_node) goto skip_addr; for (i = 0; (addrp = of_get_address(dp, i, &asize, &flags)) != NULL; i++) { int match_addrp = 0; if (!(flags & IORESOURCE_MEM)) continue; if (asize < rsize) continue; rstart = of_translate_address(dp, addrp); if (rstart == OF_BAD_ADDR) continue; if (addr_prop && (rstart <= addr_prop) && ((rstart + asize) >= (addr_prop + rsize))) match_addrp = 1; if (match_addrp) { address = addr_prop; break; } if (rsize > max_size) { max_size = rsize; address = OF_BAD_ADDR; } if (address == OF_BAD_ADDR) address = rstart; } skip_addr: if (address == OF_BAD_ADDR && addr_prop) address = (u64)addr_prop; if (address != OF_BAD_ADDR) { /* kludge for valkyrie */ if (strcmp(dp->name, "valkyrie") == 0) address += 0x1000; offb_init_fb(no_real_node ? "bootx" : dp->name, no_real_node ? "display" : dp->full_name, width, height, depth, pitch, address, foreign_endian, no_real_node ? NULL : dp); } } static int __init offb_init(void) { struct device_node *dp = NULL, *boot_disp = NULL; if (fb_get_options("offb", NULL)) return -ENODEV; /* Check if we have a MacOS display without a node spec */ if (of_get_property(of_chosen, "linux,bootx-noscreen", NULL) != NULL) { /* The old code tried to work out which node was the MacOS * display based on the address. I'm dropping that since the * lack of a node spec only happens with old BootX versions * (users can update) and with this code, they'll still get * a display (just not the palette hacks). 
*/ offb_init_nodriver(of_chosen, 1); } for (dp = NULL; (dp = of_find_node_by_type(dp, "display"));) { if (of_get_property(dp, "linux,opened", NULL) && of_get_property(dp, "linux,boot-display", NULL)) { boot_disp = dp; offb_init_nodriver(dp, 0); } } for (dp = NULL; (dp = of_find_node_by_type(dp, "display"));) { if (of_get_property(dp, "linux,opened", NULL) && dp != boot_disp) offb_init_nodriver(dp, 0); } return 0; } module_init(offb_init); MODULE_LICENSE("GPL");
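/*
 * A minimal sketch of the truecolor branch of offb_setcolreg() above:
 * 16-bit colour components are shifted down to the visual's bit
 * lengths and packed at the offsets from fb_var_screeninfo (ignoring
 * the transparency bits the original also sets).  pack_truecolor() is
 * an illustration only, not an exported API.
 */
#if 0
static u32 pack_truecolor(const struct fb_var_screeninfo *var,
			  u16 red, u16 green, u16 blue)
{
	u32 cr = red   >> (16 - var->red.length);
	u32 cg = green >> (16 - var->green.length);
	u32 cb = blue  >> (16 - var->blue.length);

	return (cr << var->red.offset) |
	       (cg << var->green.offset) |
	       (cb << var->blue.offset);
}
#endif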
gpl-2.0
SomethingExplosive/android_kernel_samsung_tuna
arch/arm/mach-vt8500/bv07.c
2821
2186
/*
 * arch/arm/mach-vt8500/bv07.c
 *
 * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/io.h>
#include <linux/pm.h>

#include <asm/mach-types.h>
#include <asm/mach/arch.h>

#include "devices.h"

static void __iomem *pmc_hiber;

static struct platform_device *devices[] __initdata = {
	&vt8500_device_uart0,
	&vt8500_device_lcdc,
	&vt8500_device_ehci,
	&vt8500_device_ge_rops,
	&vt8500_device_pwm,
	&vt8500_device_pwmbl,
	&vt8500_device_rtc,
};

static void vt8500_power_off(void)
{
	local_irq_disable();
	writew(5, pmc_hiber);
	/* cp15 c7,c0,4: wait for interrupt, halting the core */
	asm("mcr%? p15, 0, %0, c7, c0, 4" : : "r" (0));
}

void __init bv07_init(void)
{
#ifdef CONFIG_FB_VT8500
	void __iomem *gpio_mux_reg = ioremap(wmt_gpio_base + 0x200, 4);
	if (gpio_mux_reg) {
		writel(readl(gpio_mux_reg) | 1, gpio_mux_reg);
		iounmap(gpio_mux_reg);
	} else {
		printk(KERN_ERR "Could not remap the GPIO mux register, display may not work properly!\n");
	}
#endif
	pmc_hiber = ioremap(wmt_pmc_base + 0x12, 2);
	if (pmc_hiber)
		pm_power_off = &vt8500_power_off;
	else
		printk(KERN_ERR "PMC Hibernation register could not be remapped, not enabling power off!\n");

	vt8500_set_resources();
	platform_add_devices(devices, ARRAY_SIZE(devices));
	vt8500_gpio_init();
}

MACHINE_START(BV07, "Benign BV07 Mini Netbook")
	.boot_params	= 0x00000100,
	.reserve	= vt8500_reserve_mem,
	.map_io		= vt8500_map_io,
	.init_irq	= vt8500_init_irq,
	.timer		= &vt8500_timer,
	.init_machine	= bv07_init,
MACHINE_END
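/*
 * The same ioremap-then-hook power-off pattern in isolation, as a
 * minimal sketch: map the one register, keep the cookie, and point
 * pm_power_off at a helper.  The address below is a hypothetical
 * placeholder, not the real VT8500 PMC offset.
 */
#if 0
static void __iomem *my_pwr_reg;

static void my_power_off(void)
{
	local_irq_disable();
	writew(5, my_pwr_reg);	/* hibernation request, as above */
	cpu_do_idle();		/* halt until power actually drops */
}

static int __init my_poweroff_init(void)
{
	my_pwr_reg = ioremap(0xdeadb000, 2);	/* hypothetical address */
	if (my_pwr_reg)
		pm_power_off = my_power_off;
	return my_pwr_reg ? 0 : -ENOMEM;
}
#endif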
gpl-2.0
aka-mccloud/ployer-momo7-kernel
arch/arm/mach-mxs/pm.c
3077
1068
/*
 * Copyright (C) 2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/suspend.h>
#include <linux/io.h>
#include <mach/system.h>

static int mxs_suspend_enter(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_MEM:
		arch_idle();
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static struct platform_suspend_ops mxs_suspend_ops = {
	.enter = mxs_suspend_enter,
	.valid = suspend_valid_only_mem,
};

static int __init mxs_pm_init(void)
{
	suspend_set_ops(&mxs_suspend_ops);
	return 0;
}
device_initcall(mxs_pm_init);
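/*
 * suspend_set_ops() pairs a .valid check with an .enter hook;
 * suspend_valid_only_mem accepts only PM_SUSPEND_MEM, which is why the
 * switch above needs no PM_SUSPEND_STANDBY case.  A minimal sketch of
 * how a platform supporting standby as well might look
 * (mxs_do_standby() is a hypothetical helper):
 */
#if 0
static int my_suspend_valid(suspend_state_t state)
{
	return state == PM_SUSPEND_STANDBY || state == PM_SUSPEND_MEM;
}

static int my_suspend_enter(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_STANDBY:
		return mxs_do_standby();	/* hypothetical */
	case PM_SUSPEND_MEM:
		arch_idle();
		return 0;
	default:
		return -EINVAL;
	}
}

static struct platform_suspend_ops my_suspend_ops = {
	.enter = my_suspend_enter,
	.valid = my_suspend_valid,
};
#endif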
gpl-2.0
kbc-developers/kernel_samsung_exynos4412
drivers/media/dvb/dvb-usb/gp8psk.c
3077
9105
/* DVB USB compliant Linux driver for the * - GENPIX 8pks/qpsk/DCII USB2.0 DVB-S module * * Copyright (C) 2006,2007 Alan Nisota (alannisota@gmail.com) * Copyright (C) 2006,2007 Genpix Electronics (genpix@genpix-electronics.com) * * Thanks to GENPIX for the sample code used to implement this module. * * This module is based off the vp7045 and vp702x modules * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation, version 2. * * see Documentation/dvb/README.dvb-usb for more information */ #include "gp8psk.h" /* debug */ static char bcm4500_firmware[] = "dvb-usb-gp8psk-02.fw"; int dvb_usb_gp8psk_debug; module_param_named(debug,dvb_usb_gp8psk_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info,xfer=2,rc=4 (or-able))." DVB_USB_DEBUG_STATUS); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); static int gp8psk_get_fw_version(struct dvb_usb_device *d, u8 *fw_vers) { return (gp8psk_usb_in_op(d, GET_FW_VERS, 0, 0, fw_vers, 6)); } static int gp8psk_get_fpga_version(struct dvb_usb_device *d, u8 *fpga_vers) { return (gp8psk_usb_in_op(d, GET_FPGA_VERS, 0, 0, fpga_vers, 1)); } static void gp8psk_info(struct dvb_usb_device *d) { u8 fpga_vers, fw_vers[6]; if (!gp8psk_get_fw_version(d, fw_vers)) info("FW Version = %i.%02i.%i (0x%x) Build %4i/%02i/%02i", fw_vers[2], fw_vers[1], fw_vers[0], GP8PSK_FW_VERS(fw_vers), 2000 + fw_vers[5], fw_vers[4], fw_vers[3]); else info("failed to get FW version"); if (!gp8psk_get_fpga_version(d, &fpga_vers)) info("FPGA Version = %i", fpga_vers); else info("failed to get FPGA version"); } int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen) { int ret = 0,try = 0; if ((ret = mutex_lock_interruptible(&d->usb_mutex))) return ret; while (ret >= 0 && ret != blen && try < 3) { ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev,0), req, USB_TYPE_VENDOR | USB_DIR_IN, value,index,b,blen, 2000); deb_info("reading number %d (ret: %d)\n",try,ret); try++; } if (ret < 0 || ret != blen) { warn("usb in %d operation failed.", req); ret = -EIO; } else ret = 0; deb_xfer("in: req. %x, val: %x, ind: %x, buffer: ",req,value,index); debug_dump(b,blen,deb_xfer); mutex_unlock(&d->usb_mutex); return ret; } int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen) { int ret; deb_xfer("out: req. %x, val: %x, ind: %x, buffer: ",req,value,index); debug_dump(b,blen,deb_xfer); if ((ret = mutex_lock_interruptible(&d->usb_mutex))) return ret; if (usb_control_msg(d->udev, usb_sndctrlpipe(d->udev,0), req, USB_TYPE_VENDOR | USB_DIR_OUT, value,index,b,blen, 2000) != blen) { warn("usb out operation failed."); ret = -EIO; } else ret = 0; mutex_unlock(&d->usb_mutex); return ret; } static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d) { int ret; const struct firmware *fw = NULL; const u8 *ptr; u8 *buf; if ((ret = request_firmware(&fw, bcm4500_firmware, &d->udev->dev)) != 0) { err("did not find the bcm4500 firmware file. (%s) " "Please see linux/Documentation/dvb/ for more details on firmware-problems. 
(%d)", bcm4500_firmware,ret); return ret; } ret = -EINVAL; if (gp8psk_usb_out_op(d, LOAD_BCM4500,1,0,NULL, 0)) goto out_rel_fw; info("downloading bcm4500 firmware from file '%s'",bcm4500_firmware); ptr = fw->data; buf = kmalloc(64, GFP_KERNEL | GFP_DMA); if (!buf) { ret = -ENOMEM; goto out_rel_fw; } while (ptr[0] != 0xff) { u16 buflen = ptr[0] + 4; if (ptr + buflen >= fw->data + fw->size) { err("failed to load bcm4500 firmware."); goto out_free; } memcpy(buf, ptr, buflen); if (dvb_usb_generic_write(d, buf, buflen)) { err("failed to load bcm4500 firmware."); goto out_free; } ptr += buflen; } ret = 0; out_free: kfree(buf); out_rel_fw: release_firmware(fw); return ret; } static int gp8psk_power_ctrl(struct dvb_usb_device *d, int onoff) { u8 status, buf; int gp_product_id = le16_to_cpu(d->udev->descriptor.idProduct); if (onoff) { gp8psk_usb_in_op(d, GET_8PSK_CONFIG,0,0,&status,1); if (! (status & bm8pskStarted)) { /* started */ if(gp_product_id == USB_PID_GENPIX_SKYWALKER_CW3K) gp8psk_usb_out_op(d, CW3K_INIT, 1, 0, NULL, 0); if (gp8psk_usb_in_op(d, BOOT_8PSK, 1, 0, &buf, 1)) return -EINVAL; gp8psk_info(d); } if (gp_product_id == USB_PID_GENPIX_8PSK_REV_1_WARM) if (! (status & bm8pskFW_Loaded)) /* BCM4500 firmware loaded */ if(gp8psk_load_bcm4500fw(d)) return -EINVAL; if (! (status & bmIntersilOn)) /* LNB Power */ if (gp8psk_usb_in_op(d, START_INTERSIL, 1, 0, &buf, 1)) return -EINVAL; /* Set DVB mode to 1 */ if (gp_product_id == USB_PID_GENPIX_8PSK_REV_1_WARM) if (gp8psk_usb_out_op(d, SET_DVB_MODE, 1, 0, NULL, 0)) return -EINVAL; /* Abort possible TS (if previous tune crashed) */ if (gp8psk_usb_out_op(d, ARM_TRANSFER, 0, 0, NULL, 0)) return -EINVAL; } else { /* Turn off LNB power */ if (gp8psk_usb_in_op(d, START_INTERSIL, 0, 0, &buf, 1)) return -EINVAL; /* Turn off 8psk power */ if (gp8psk_usb_in_op(d, BOOT_8PSK, 0, 0, &buf, 1)) return -EINVAL; if(gp_product_id == USB_PID_GENPIX_SKYWALKER_CW3K) gp8psk_usb_out_op(d, CW3K_INIT, 0, 0, NULL, 0); } return 0; } int gp8psk_bcm4500_reload(struct dvb_usb_device *d) { u8 buf; int gp_product_id = le16_to_cpu(d->udev->descriptor.idProduct); /* Turn off 8psk power */ if (gp8psk_usb_in_op(d, BOOT_8PSK, 0, 0, &buf, 1)) return -EINVAL; /* Turn On 8psk power */ if (gp8psk_usb_in_op(d, BOOT_8PSK, 1, 0, &buf, 1)) return -EINVAL; /* load BCM4500 firmware */ if (gp_product_id == USB_PID_GENPIX_8PSK_REV_1_WARM) if (gp8psk_load_bcm4500fw(d)) return -EINVAL; return 0; } static int gp8psk_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) { return gp8psk_usb_out_op(adap->dev, ARM_TRANSFER, onoff, 0 , NULL, 0); } static int gp8psk_frontend_attach(struct dvb_usb_adapter *adap) { adap->fe = gp8psk_fe_attach(adap->dev); return 0; } static struct dvb_usb_device_properties gp8psk_properties; static int gp8psk_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { int ret; struct usb_device *udev = interface_to_usbdev(intf); ret = dvb_usb_device_init(intf, &gp8psk_properties, THIS_MODULE, NULL, adapter_nr); if (ret == 0) { info("found Genpix USB device pID = %x (hex)", le16_to_cpu(udev->descriptor.idProduct)); } return ret; } static struct usb_device_id gp8psk_usb_table [] = { { USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_8PSK_REV_1_COLD) }, { USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_8PSK_REV_1_WARM) }, { USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_8PSK_REV_2) }, { USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_SKYWALKER_1) }, { USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_SKYWALKER_2) }, /* { USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_SKYWALKER_CW3K) }, */ { 0 }, 
}; MODULE_DEVICE_TABLE(usb, gp8psk_usb_table); static struct dvb_usb_device_properties gp8psk_properties = { .usb_ctrl = CYPRESS_FX2, .firmware = "dvb-usb-gp8psk-01.fw", .num_adapters = 1, .adapter = { { .streaming_ctrl = gp8psk_streaming_ctrl, .frontend_attach = gp8psk_frontend_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 7, .endpoint = 0x82, .u = { .bulk = { .buffersize = 8192, } } }, } }, .power_ctrl = gp8psk_power_ctrl, .generic_bulk_ctrl_endpoint = 0x01, .num_device_descs = 4, .devices = { { .name = "Genpix 8PSK-to-USB2 Rev.1 DVB-S receiver", .cold_ids = { &gp8psk_usb_table[0], NULL }, .warm_ids = { &gp8psk_usb_table[1], NULL }, }, { .name = "Genpix 8PSK-to-USB2 Rev.2 DVB-S receiver", .cold_ids = { NULL }, .warm_ids = { &gp8psk_usb_table[2], NULL }, }, { .name = "Genpix SkyWalker-1 DVB-S receiver", .cold_ids = { NULL }, .warm_ids = { &gp8psk_usb_table[3], NULL }, }, { .name = "Genpix SkyWalker-2 DVB-S receiver", .cold_ids = { NULL }, .warm_ids = { &gp8psk_usb_table[4], NULL }, }, { NULL }, } }; /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver gp8psk_usb_driver = { .name = "dvb_usb_gp8psk", .probe = gp8psk_usb_probe, .disconnect = dvb_usb_device_exit, .id_table = gp8psk_usb_table, }; /* module stuff */ static int __init gp8psk_usb_module_init(void) { int result; if ((result = usb_register(&gp8psk_usb_driver))) { err("usb_register failed. (%d)",result); return result; } return 0; } static void __exit gp8psk_usb_module_exit(void) { /* deregister this driver from the USB subsystem */ usb_deregister(&gp8psk_usb_driver); } module_init(gp8psk_usb_module_init); module_exit(gp8psk_usb_module_exit); MODULE_AUTHOR("Alan Nisota <alannisota@gamil.com>"); MODULE_DESCRIPTION("Driver for Genpix DVB-S"); MODULE_VERSION("1.1"); MODULE_LICENSE("GPL");
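/*
 * The image walked by gp8psk_load_bcm4500fw() above is a sequence of
 * records, each treated as ptr[0] + 4 bytes (the length byte, three
 * further header bytes, then the payload), terminated by a record
 * whose length byte is 0xff.  A minimal sketch of that walk with the
 * same bounds check; send_record() is a hypothetical stand-in for the
 * dvb_usb_generic_write() call in the original.
 */
#if 0
static int walk_bcm4500_image(const u8 *data, size_t size)
{
	const u8 *ptr = data;
	const u8 *end = data + size;

	while (ptr < end && ptr[0] != 0xff) {
		u16 reclen = ptr[0] + 4;

		if (ptr + reclen > end)
			return -EINVAL;		/* truncated record */
		if (send_record(ptr, reclen))	/* hypothetical */
			return -EIO;
		ptr += reclen;
	}
	return 0;
}
#endif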
gpl-2.0
vantinh1991/F240L-JB
arch/tile/kernel/stack.c
4613
13777
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/kprobes.h> #include <linux/module.h> #include <linux/pfn.h> #include <linux/kallsyms.h> #include <linux/stacktrace.h> #include <linux/uaccess.h> #include <linux/mmzone.h> #include <linux/dcache.h> #include <linux/fs.h> #include <asm/backtrace.h> #include <asm/page.h> #include <asm/ucontext.h> #include <asm/switch_to.h> #include <asm/sigframe.h> #include <asm/stack.h> #include <arch/abi.h> #include <arch/interrupts.h> #define KBT_ONGOING 0 /* Backtrace still ongoing */ #define KBT_DONE 1 /* Backtrace cleanly completed */ #define KBT_RUNNING 2 /* Can't run backtrace on a running task */ #define KBT_LOOP 3 /* Backtrace entered a loop */ /* Is address on the specified kernel stack? */ static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp) { ulong kstack_base = (ulong) kbt->task->stack; if (kstack_base == 0) /* corrupt task pointer; just follow stack... */ return sp >= PAGE_OFFSET && sp < (unsigned long)high_memory; return sp >= kstack_base && sp < kstack_base + THREAD_SIZE; } /* Callback for backtracer; basically a glorified memcpy */ static bool read_memory_func(void *result, unsigned long address, unsigned int size, void *vkbt) { int retval; struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt; if (address == 0) return 0; if (__kernel_text_address(address)) { /* OK to read kernel code. 
*/ } else if (address >= PAGE_OFFSET) { /* We only tolerate kernel-space reads of this task's stack */ if (!in_kernel_stack(kbt, address)) return 0; } else if (!kbt->is_current) { return 0; /* can't read from other user address spaces */ } pagefault_disable(); retval = __copy_from_user_inatomic(result, (void __user __force *)address, size); pagefault_enable(); return (retval == 0); } /* Return a pt_regs pointer for a valid fault handler frame */ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt) { const char *fault = NULL; /* happy compiler */ char fault_buf[64]; unsigned long sp = kbt->it.sp; struct pt_regs *p; if (sp % sizeof(long) != 0) return NULL; if (!in_kernel_stack(kbt, sp)) return NULL; if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1)) return NULL; p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE); if (p->faultnum == INT_SWINT_1 || p->faultnum == INT_SWINT_1_SIGRETURN) fault = "syscall"; else { if (kbt->verbose) { /* else we aren't going to use it */ snprintf(fault_buf, sizeof(fault_buf), "interrupt %ld", p->faultnum); fault = fault_buf; } } if (EX1_PL(p->ex1) == KERNEL_PL && __kernel_text_address(p->pc) && in_kernel_stack(kbt, p->sp) && p->sp >= sp) { if (kbt->verbose) pr_err(" <%s while in kernel mode>\n", fault); } else if (EX1_PL(p->ex1) == USER_PL && p->pc < PAGE_OFFSET && p->sp < PAGE_OFFSET) { if (kbt->verbose) pr_err(" <%s while in user mode>\n", fault); } else if (kbt->verbose) { pr_err(" (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n", p->pc, p->sp, p->ex1); p = NULL; } if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0) return p; return NULL; } /* Is the pc pointing to a sigreturn trampoline? */ static int is_sigreturn(unsigned long pc) { return (pc == VDSO_BASE); } /* Return a pt_regs pointer for a valid signal handler frame */ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt, struct rt_sigframe* kframe) { BacktraceIterator *b = &kbt->it; if (b->pc == VDSO_BASE && b->sp < PAGE_OFFSET && b->sp % sizeof(long) == 0) { int retval; pagefault_disable(); retval = __copy_from_user_inatomic( kframe, (void __user __force *)b->sp, sizeof(*kframe)); pagefault_enable(); if (retval != 0 || (unsigned int)(kframe->info.si_signo) >= _NSIG) return NULL; if (kbt->verbose) { pr_err(" <received signal %d>\n", kframe->info.si_signo); } return (struct pt_regs *)&kframe->uc.uc_mcontext; } return NULL; } static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt) { return is_sigreturn(kbt->it.pc); } static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt) { struct pt_regs *p; struct rt_sigframe kframe; p = valid_fault_handler(kbt); if (p == NULL) p = valid_sigframe(kbt, &kframe); if (p == NULL) return 0; backtrace_init(&kbt->it, read_memory_func, kbt, p->pc, p->lr, p->sp, p->regs[52]); kbt->new_context = 1; return 1; } /* Find a frame that isn't a sigreturn, if there is one. */ static int KBacktraceIterator_next_item_inclusive( struct KBacktraceIterator *kbt) { for (;;) { do { if (!KBacktraceIterator_is_sigreturn(kbt)) return KBT_ONGOING; } while (backtrace_next(&kbt->it)); if (!KBacktraceIterator_restart(kbt)) return KBT_DONE; } } /* * If the current sp is on a page different than what we recorded * as the top-of-kernel-stack last time we context switched, we have * probably blown the stack, and nothing is going to work out well. 
* If we can at least get out a warning, that may help the debug, * though we probably won't be able to backtrace into the code that * actually did the recursive damage. */ static void validate_stack(struct pt_regs *regs) { int cpu = smp_processor_id(); unsigned long ksp0 = get_current_ksp0(); unsigned long ksp0_base = ksp0 - THREAD_SIZE; unsigned long sp = stack_pointer; if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) { pr_err("WARNING: cpu %d: kernel stack page %#lx underrun!\n" " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n", cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr); } else if (sp < ksp0_base + sizeof(struct thread_info)) { pr_err("WARNING: cpu %d: kernel stack page %#lx overrun!\n" " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n", cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr); } } void KBacktraceIterator_init(struct KBacktraceIterator *kbt, struct task_struct *t, struct pt_regs *regs) { unsigned long pc, lr, sp, r52; int is_current; /* * Set up callback information. We grab the kernel stack base * so we will allow reads of that address range. */ is_current = (t == NULL || t == current); kbt->is_current = is_current; if (is_current) t = validate_current(); kbt->task = t; kbt->verbose = 0; /* override in caller if desired */ kbt->profile = 0; /* override in caller if desired */ kbt->end = KBT_ONGOING; kbt->new_context = 1; if (is_current) validate_stack(regs); if (regs == NULL) { if (is_current || t->state == TASK_RUNNING) { /* Can't do this; we need registers */ kbt->end = KBT_RUNNING; return; } pc = get_switch_to_pc(); lr = t->thread.pc; sp = t->thread.ksp; r52 = 0; } else { pc = regs->pc; lr = regs->lr; sp = regs->sp; r52 = regs->regs[52]; } backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52); kbt->end = KBacktraceIterator_next_item_inclusive(kbt); } EXPORT_SYMBOL(KBacktraceIterator_init); int KBacktraceIterator_end(struct KBacktraceIterator *kbt) { return kbt->end != KBT_ONGOING; } EXPORT_SYMBOL(KBacktraceIterator_end); void KBacktraceIterator_next(struct KBacktraceIterator *kbt) { unsigned long old_pc = kbt->it.pc, old_sp = kbt->it.sp; kbt->new_context = 0; if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) { kbt->end = KBT_DONE; return; } kbt->end = KBacktraceIterator_next_item_inclusive(kbt); if (old_pc == kbt->it.pc && old_sp == kbt->it.sp) { /* Trapped in a loop; give up. */ kbt->end = KBT_LOOP; } } EXPORT_SYMBOL(KBacktraceIterator_next); static void describe_addr(struct KBacktraceIterator *kbt, unsigned long address, int have_mmap_sem, char *buf, size_t bufsize) { struct vm_area_struct *vma; size_t namelen, remaining; unsigned long size, offset, adjust; char *p, *modname; const char *name; int rc; /* * Look one byte back for every caller frame (i.e. those that * aren't a new context) so we look up symbol data for the * call itself, not the following instruction, which may be on * a different line (or in a different function). */ adjust = !kbt->new_context; address -= adjust; if (address >= PAGE_OFFSET) { /* Handle kernel symbols. */ BUG_ON(bufsize < KSYM_NAME_LEN); name = kallsyms_lookup(address, &size, &offset, &modname, buf); if (name == NULL) { buf[0] = '\0'; return; } namelen = strlen(buf); remaining = (bufsize - 1) - namelen; p = buf + namelen; rc = snprintf(p, remaining, "+%#lx/%#lx ", offset + adjust, size); if (modname && rc < remaining) snprintf(p + rc, remaining - rc, "[%s] ", modname); buf[bufsize-1] = '\0'; return; } /* If we don't have the mmap_sem, we can't show any more info. 
*/ buf[0] = '\0'; if (!have_mmap_sem) return; /* Find vma info. */ vma = find_vma(kbt->task->mm, address); if (vma == NULL || address < vma->vm_start) { snprintf(buf, bufsize, "[unmapped address] "); return; } if (vma->vm_file) { char *s; p = d_path(&vma->vm_file->f_path, buf, bufsize); if (IS_ERR(p)) p = "?"; s = strrchr(p, '/'); if (s) p = s+1; } else { p = "anon"; } /* Generate a string description of the vma info. */ namelen = strlen(p); remaining = (bufsize - 1) - namelen; memmove(buf, p, namelen); snprintf(buf + namelen, remaining, "[%lx+%lx] ", vma->vm_start, vma->vm_end - vma->vm_start); } /* * This method wraps the backtracer's more generic support. * It is only invoked from the architecture-specific code; show_stack() * and dump_stack() (in entry.S) are architecture-independent entry points. */ void tile_show_stack(struct KBacktraceIterator *kbt, int headers) { int i; int have_mmap_sem = 0; if (headers) { /* * Add a blank line since if we are called from panic(), * then bust_spinlocks() spit out a space in front of us * and it will mess up our KERN_ERR. */ pr_err("\n"); pr_err("Starting stack dump of tid %d, pid %d (%s)" " on cpu %d at cycle %lld\n", kbt->task->pid, kbt->task->tgid, kbt->task->comm, smp_processor_id(), get_cycles()); } kbt->verbose = 1; i = 0; for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) { char namebuf[KSYM_NAME_LEN+100]; unsigned long address = kbt->it.pc; /* Try to acquire the mmap_sem as we pass into userspace. */ if (address < PAGE_OFFSET && !have_mmap_sem && kbt->task->mm) have_mmap_sem = down_read_trylock(&kbt->task->mm->mmap_sem); describe_addr(kbt, address, have_mmap_sem, namebuf, sizeof(namebuf)); pr_err(" frame %d: 0x%lx %s(sp 0x%lx)\n", i++, address, namebuf, (unsigned long)(kbt->it.sp)); if (i >= 100) { pr_err("Stack dump truncated" " (%d frames)\n", i); break; } } if (kbt->end == KBT_LOOP) pr_err("Stack dump stopped; next frame identical to this one\n"); if (headers) pr_err("Stack dump complete\n"); if (have_mmap_sem) up_read(&kbt->task->mm->mmap_sem); } EXPORT_SYMBOL(tile_show_stack); /* This is called from show_regs() and _dump_stack() */ void dump_stack_regs(struct pt_regs *regs) { struct KBacktraceIterator kbt; KBacktraceIterator_init(&kbt, NULL, regs); tile_show_stack(&kbt, 1); } EXPORT_SYMBOL(dump_stack_regs); static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs, ulong pc, ulong lr, ulong sp, ulong r52) { memset(regs, 0, sizeof(struct pt_regs)); regs->pc = pc; regs->lr = lr; regs->sp = sp; regs->regs[52] = r52; return regs; } /* This is called from dump_stack() and just converts to pt_regs */ void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52) { struct pt_regs regs; dump_stack_regs(regs_to_pt_regs(&regs, pc, lr, sp, r52)); } /* This is called from KBacktraceIterator_init_current() */ void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc, ulong lr, ulong sp, ulong r52) { struct pt_regs regs; KBacktraceIterator_init(kbt, NULL, regs_to_pt_regs(&regs, pc, lr, sp, r52)); } /* This is called only from kernel/sched.c, with esp == NULL */ void show_stack(struct task_struct *task, unsigned long *esp) { struct KBacktraceIterator kbt; if (task == NULL || task == current) KBacktraceIterator_init_current(&kbt); else KBacktraceIterator_init(&kbt, task, NULL); tile_show_stack(&kbt, 0); } #ifdef CONFIG_STACKTRACE /* Support generic Linux stack API too */ void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace) { struct KBacktraceIterator kbt; int skip = trace->skip; 
int i = 0; if (task == NULL || task == current) KBacktraceIterator_init_current(&kbt); else KBacktraceIterator_init(&kbt, task, NULL); for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) { if (skip) { --skip; continue; } if (i >= trace->max_entries || kbt.it.pc < PAGE_OFFSET) break; trace->entries[i++] = kbt.it.pc; } trace->nr_entries = i; } EXPORT_SYMBOL(save_stack_trace_tsk); void save_stack_trace(struct stack_trace *trace) { save_stack_trace_tsk(NULL, trace); } #endif /* In entry.S */ EXPORT_SYMBOL(KBacktraceIterator_init_current);
gpl-2.0
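The tile stack walker above follows a collect-then-describe pattern: it steps from frame to frame gathering raw PCs, then turns each address into a symbol+offset string (describe_addr). As an illustration only, here is a minimal user-space C sketch of the same pattern using glibc's backtrace() and backtrace_symbols(); build with -rdynamic so names resolve, and note the function names here are invented, not part of the kernel code above.

#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>

/* Collect raw PCs for the current call chain, then describe each one,
 * mirroring the walk + describe_addr() split in the code above. */
static void show_stack_demo(void)
{
	void *pcs[64];
	int n = backtrace(pcs, 64);               /* step through the frames */
	char **names = backtrace_symbols(pcs, n); /* symbol+offset strings */

	for (int i = 0; i < n; i++)
		printf(" frame %d: %p %s\n", i, pcs[i],
		       names ? names[i] : "?");
	free(names); /* backtrace_symbols returns one malloc'd block */
}

static void inner(void) { show_stack_demo(); }

int main(void)
{
	inner();
	return 0;
}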
SamueleCiprietti/nova_kernel
fs/yaffs2/yaffs_yaffs1.c
7941
11299
/* * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. * * Copyright (C) 2002-2010 Aleph One Ltd. * for Toby Churchill Ltd and Brightstar Engineering * * Created by Charles Manning <charles@aleph1.co.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include "yaffs_yaffs1.h" #include "yportenv.h" #include "yaffs_trace.h" #include "yaffs_bitmap.h" #include "yaffs_getblockinfo.h" #include "yaffs_nand.h" #include "yaffs_attribs.h" int yaffs1_scan(struct yaffs_dev *dev) { struct yaffs_ext_tags tags; int blk; int result; int chunk; int c; int deleted; enum yaffs_block_state state; struct yaffs_obj *hard_list = NULL; struct yaffs_block_info *bi; u32 seq_number; struct yaffs_obj_hdr *oh; struct yaffs_obj *in; struct yaffs_obj *parent; int alloc_failed = 0; struct yaffs_shadow_fixer *shadow_fixers = NULL; u8 *chunk_data; yaffs_trace(YAFFS_TRACE_SCAN, "yaffs1_scan starts intstartblk %d intendblk %d...", dev->internal_start_block, dev->internal_end_block); chunk_data = yaffs_get_temp_buffer(dev, __LINE__); dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER; /* Scan all the blocks to determine their state */ bi = dev->block_info; for (blk = dev->internal_start_block; blk <= dev->internal_end_block; blk++) { yaffs_clear_chunk_bits(dev, blk); bi->pages_in_use = 0; bi->soft_del_pages = 0; yaffs_query_init_block_state(dev, blk, &state, &seq_number); bi->block_state = state; bi->seq_number = seq_number; if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK) bi->block_state = state = YAFFS_BLOCK_STATE_DEAD; yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block scanning block %d state %d seq %d", blk, state, seq_number); if (state == YAFFS_BLOCK_STATE_DEAD) { yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "block %d is bad", blk); } else if (state == YAFFS_BLOCK_STATE_EMPTY) { yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty "); dev->n_erased_blocks++; dev->n_free_chunks += dev->param.chunks_per_block; } bi++; } /* For each block.... */ for (blk = dev->internal_start_block; !alloc_failed && blk <= dev->internal_end_block; blk++) { cond_resched(); bi = yaffs_get_block_info(dev, blk); state = bi->block_state; deleted = 0; /* For each chunk in each block that needs scanning.... */ for (c = 0; !alloc_failed && c < dev->param.chunks_per_block && state == YAFFS_BLOCK_STATE_NEEDS_SCANNING; c++) { /* Read the tags and decide what to do */ chunk = blk * dev->param.chunks_per_block + c; result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL, &tags); /* Let's have a good look at this chunk... */ if (tags.ecc_result == YAFFS_ECC_RESULT_UNFIXED || tags.is_deleted) { /* YAFFS1 only... * A deleted chunk */ deleted++; dev->n_free_chunks++; /*T((" %d %d deleted\n",blk,c)); */ } else if (!tags.chunk_used) { /* An unassigned chunk in the block * This means that either the block is empty or * this is the one being allocated from */ if (c == 0) { /* We're looking at the first chunk in the block so the block is unused */ state = YAFFS_BLOCK_STATE_EMPTY; dev->n_erased_blocks++; } else { /* this is the block being allocated from */ yaffs_trace(YAFFS_TRACE_SCAN, " Allocating from %d %d", blk, c); state = YAFFS_BLOCK_STATE_ALLOCATING; dev->alloc_block = blk; dev->alloc_page = c; dev->alloc_block_finder = blk; /* Set block finder here to encourage the allocator to go forth from here. 
*/ } dev->n_free_chunks += (dev->param.chunks_per_block - c); } else if (tags.chunk_id > 0) { /* chunk_id > 0 so it is a data chunk... */ unsigned int endpos; yaffs_set_chunk_bit(dev, blk, c); bi->pages_in_use++; in = yaffs_find_or_create_by_number(dev, tags.obj_id, YAFFS_OBJECT_TYPE_FILE); /* PutChunkIntoFile checks for a clash (two data chunks with * the same chunk_id). */ if (!in) alloc_failed = 1; if (in) { if (!yaffs_put_chunk_in_file (in, tags.chunk_id, chunk, 1)) alloc_failed = 1; } endpos = (tags.chunk_id - 1) * dev->data_bytes_per_chunk + tags.n_bytes; if (in && in->variant_type == YAFFS_OBJECT_TYPE_FILE && in->variant.file_variant.scanned_size < endpos) { in->variant.file_variant.scanned_size = endpos; if (!dev->param.use_header_file_size) { in->variant. file_variant.file_size = in->variant. file_variant.scanned_size; } } /* T((" %d %d data %d %d\n",blk,c,tags.obj_id,tags.chunk_id)); */ } else { /* chunk_id == 0, so it is an ObjectHeader. * Thus, we read in the object header and make the object */ yaffs_set_chunk_bit(dev, blk, c); bi->pages_in_use++; result = yaffs_rd_chunk_tags_nand(dev, chunk, chunk_data, NULL); oh = (struct yaffs_obj_hdr *)chunk_data; in = yaffs_find_by_number(dev, tags.obj_id); if (in && in->variant_type != oh->type) { /* This should not happen, but somehow * we've ended up with an obj_id that has been reused but not yet * deleted, and worse still it has changed type. Delete the old object. */ yaffs_del_obj(in); in = 0; } in = yaffs_find_or_create_by_number(dev, tags.obj_id, oh->type); if (!in) alloc_failed = 1; if (in && oh->shadows_obj > 0) { struct yaffs_shadow_fixer *fixer; fixer = kmalloc(sizeof (struct yaffs_shadow_fixer), GFP_NOFS); if (fixer) { fixer->next = shadow_fixers; shadow_fixers = fixer; fixer->obj_id = tags.obj_id; fixer->shadowed_id = oh->shadows_obj; yaffs_trace(YAFFS_TRACE_SCAN, " Shadow fixer: %d shadows %d", fixer->obj_id, fixer->shadowed_id); } } if (in && in->valid) { /* We have already filled this one. We have a duplicate and need to resolve it. */ unsigned existing_serial = in->serial; unsigned new_serial = tags.serial_number; if (((existing_serial + 1) & 3) == new_serial) { /* Use new one - destroy the existing one */ yaffs_chunk_del(dev, in->hdr_chunk, 1, __LINE__); in->valid = 0; } else { /* Use existing - destroy this one. */ yaffs_chunk_del(dev, chunk, 1, __LINE__); } } if (in && !in->valid && (tags.obj_id == YAFFS_OBJECTID_ROOT || tags.obj_id == YAFFS_OBJECTID_LOSTNFOUND)) { /* We only load some info, don't fiddle with directory structure */ in->valid = 1; in->variant_type = oh->type; in->yst_mode = oh->yst_mode; yaffs_load_attribs(in, oh); in->hdr_chunk = chunk; in->serial = tags.serial_number; } else if (in && !in->valid) { /* we need to load this info */ in->valid = 1; in->variant_type = oh->type; in->yst_mode = oh->yst_mode; yaffs_load_attribs(in, oh); in->hdr_chunk = chunk; in->serial = tags.serial_number; yaffs_set_obj_name_from_oh(in, oh); in->dirty = 0; /* directory stuff... * hook up to parent */ parent = yaffs_find_or_create_by_number (dev, oh->parent_obj_id, YAFFS_OBJECT_TYPE_DIRECTORY); if (!parent) alloc_failed = 1; if (parent && parent->variant_type == YAFFS_OBJECT_TYPE_UNKNOWN) { /* Set up as a directory */ parent->variant_type = YAFFS_OBJECT_TYPE_DIRECTORY; INIT_LIST_HEAD(&parent-> variant.dir_variant.children); } else if (!parent || parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) { /* Hoosterman, another problem....
* We're trying to use a non-directory as a directory */ yaffs_trace(YAFFS_TRACE_ERROR, "yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found." ); parent = dev->lost_n_found; } yaffs_add_obj_to_dir(parent, in); if (0 && (parent == dev->del_dir || parent == dev->unlinked_dir)) { in->deleted = 1; /* If it is unlinked at start up then it wants deleting */ dev->n_deleted_files++; } /* Note re hardlinks. * Since we might scan a hardlink before its equivalent object is scanned * we put them all in a list. * After scanning is complete, we should have all the objects, so we run through this * list and fix up all the chains. */ switch (in->variant_type) { case YAFFS_OBJECT_TYPE_UNKNOWN: /* Todo got a problem */ break; case YAFFS_OBJECT_TYPE_FILE: if (dev->param. use_header_file_size) in->variant. file_variant.file_size = oh->file_size; break; case YAFFS_OBJECT_TYPE_HARDLINK: in->variant. hardlink_variant.equiv_id = oh->equiv_id; in->hard_links.next = (struct list_head *) hard_list; hard_list = in; break; case YAFFS_OBJECT_TYPE_DIRECTORY: /* Do nothing */ break; case YAFFS_OBJECT_TYPE_SPECIAL: /* Do nothing */ break; case YAFFS_OBJECT_TYPE_SYMLINK: in->variant.symlink_variant. alias = yaffs_clone_str(oh->alias); if (!in->variant. symlink_variant.alias) alloc_failed = 1; break; } } } } if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) { /* If we got this far while scanning, then the block is fully allocated. */ state = YAFFS_BLOCK_STATE_FULL; } if (state == YAFFS_BLOCK_STATE_ALLOCATING) { /* If the block was partially allocated then treat it as fully allocated. */ state = YAFFS_BLOCK_STATE_FULL; dev->alloc_block = -1; } bi->block_state = state; /* Now let's see if it was dirty */ if (bi->pages_in_use == 0 && !bi->has_shrink_hdr && bi->block_state == YAFFS_BLOCK_STATE_FULL) { yaffs_block_became_dirty(dev, blk); } } /* Ok, we've done all the scanning. * Fix up the hard link chains. * We should now have scanned all the objects, now it's time to add these * hardlinks. */ yaffs_link_fixup(dev, hard_list); /* Fix up any shadowed objects */ { struct yaffs_shadow_fixer *fixer; struct yaffs_obj *obj; while (shadow_fixers) { fixer = shadow_fixers; shadow_fixers = fixer->next; /* Complete the rename transaction by deleting the shadowed object * then setting the object header to unshadowed. */ obj = yaffs_find_by_number(dev, fixer->shadowed_id); if (obj) yaffs_del_obj(obj); obj = yaffs_find_by_number(dev, fixer->obj_id); if (obj) yaffs_update_oh(obj, NULL, 1, 0, 0, NULL); kfree(fixer); } } yaffs_release_temp_buffer(dev, chunk_data, __LINE__); if (alloc_failed) return YAFFS_FAIL; yaffs_trace(YAFFS_TRACE_SCAN, "yaffs1_scan ends"); return YAFFS_OK; }
gpl-2.0
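A subtle detail in yaffs1_scan above: when two object headers claim the same obj_id, the scanner keeps the copy whose 2-bit serial number is exactly one step ahead (mod 4) of the other. A standalone C sketch of that arbitration rule, separate from the driver code itself:

#include <stdio.h>

/* Serial numbers wrap 0,1,2,3,0,... so "newer" means exactly one
 * step ahead modulo 4 -- the same test yaffs1_scan applies. */
static int serial_is_newer(unsigned existing_serial, unsigned new_serial)
{
	return ((existing_serial + 1) & 3) == new_serial;
}

int main(void)
{
	printf("%d\n", serial_is_newer(3, 0)); /* 1: 0 supersedes 3 across the wrap */
	printf("%d\n", serial_is_newer(1, 3)); /* 0: 3 != 1+1, keep the existing header */
	return 0;
}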
dotCipher/xcrypt-linux-kernel-module
drivers/char/ppdev.c
7941
19633
/* * linux/drivers/char/ppdev.c * * This is the code behind /dev/parport* -- it allows a user-space * application to use the parport subsystem. * * Copyright (C) 1998-2000, 2002 Tim Waugh <tim@cyberelk.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * A /dev/parportx device node represents an arbitrary device * on port 'x'. The following operations are possible: * * open do nothing, set up default IEEE 1284 protocol to be COMPAT * close release port and unregister device (if necessary) * ioctl * EXCL register device exclusively (may fail) * CLAIM (register device first time) parport_claim_or_block * RELEASE parport_release * SETMODE set the IEEE 1284 protocol to use for read/write * SETPHASE set the IEEE 1284 phase of a particular mode. Not to be * confused with ioctl(fd, SETPHASER, &stun). ;-) * DATADIR data_forward / data_reverse * WDATA write_data * RDATA read_data * WCONTROL write_control * RCONTROL read_control * FCONTROL frob_control * RSTATUS read_status * NEGOT parport_negotiate * YIELD parport_yield_blocking * WCTLONIRQ on interrupt, set control lines * CLRIRQ clear (and return) interrupt count * SETTIME sets device timeout (struct timeval) * GETTIME gets device timeout (struct timeval) * GETMODES gets hardware supported modes (unsigned int) * GETMODE gets the current IEEE1284 mode * GETPHASE gets the current IEEE1284 phase * GETFLAGS gets current (user-visible) flags * SETFLAGS sets current (user-visible) flags * read/write read or write in current IEEE 1284 protocol * select wait for interrupt (in readfds) * * Changes: * Added SETTIME/GETTIME ioctl, Fred Barnes, 1999. * * Arnaldo Carvalho de Melo <acme@conectiva.com.br> 2000/08/25 * - On error, copy_from_user and copy_to_user do not return -EFAULT, * They return the positive number of bytes *not* copied due to address * space errors. * * Added GETMODES/GETMODE/GETPHASE ioctls, Fred Barnes <frmb2@ukc.ac.uk>, 03/01/2001. 
* Added GETFLAGS/SETFLAGS ioctls, Fred Barnes, 04/2001 */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/device.h> #include <linux/ioctl.h> #include <linux/parport.h> #include <linux/ctype.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/major.h> #include <linux/ppdev.h> #include <linux/mutex.h> #include <linux/uaccess.h> #define PP_VERSION "ppdev: user-space parallel port driver" #define CHRDEV "ppdev" struct pp_struct { struct pardevice * pdev; wait_queue_head_t irq_wait; atomic_t irqc; unsigned int flags; int irqresponse; unsigned char irqctl; struct ieee1284_info state; struct ieee1284_info saved_state; long default_inactivity; }; /* pp_struct.flags bitfields */ #define PP_CLAIMED (1<<0) #define PP_EXCL (1<<1) /* Other constants */ #define PP_INTERRUPT_TIMEOUT (10 * HZ) /* 10s */ #define PP_BUFFER_SIZE 1024 #define PARDEVICE_MAX 8 /* ROUND_UP macro from fs/select.c */ #define ROUND_UP(x,y) (((x)+(y)-1)/(y)) static DEFINE_MUTEX(pp_do_mutex); static inline void pp_enable_irq (struct pp_struct *pp) { struct parport *port = pp->pdev->port; port->ops->enable_irq (port); } static ssize_t pp_read (struct file * file, char __user * buf, size_t count, loff_t * ppos) { unsigned int minor = iminor(file->f_path.dentry->d_inode); struct pp_struct *pp = file->private_data; char * kbuffer; ssize_t bytes_read = 0; struct parport *pport; int mode; if (!(pp->flags & PP_CLAIMED)) { /* Don't have the port claimed */ pr_debug(CHRDEV "%x: claim the port first\n", minor); return -EINVAL; } /* Trivial case. */ if (count == 0) return 0; kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL); if (!kbuffer) { return -ENOMEM; } pport = pp->pdev->port; mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR); parport_set_timeout (pp->pdev, (file->f_flags & O_NONBLOCK) ? 
PARPORT_INACTIVITY_O_NONBLOCK : pp->default_inactivity); while (bytes_read == 0) { ssize_t need = min_t(unsigned long, count, PP_BUFFER_SIZE); if (mode == IEEE1284_MODE_EPP) { /* various specials for EPP mode */ int flags = 0; size_t (*fn)(struct parport *, void *, size_t, int); if (pp->flags & PP_W91284PIC) { flags |= PARPORT_W91284PIC; } if (pp->flags & PP_FASTREAD) { flags |= PARPORT_EPP_FAST; } if (pport->ieee1284.mode & IEEE1284_ADDR) { fn = pport->ops->epp_read_addr; } else { fn = pport->ops->epp_read_data; } bytes_read = (*fn)(pport, kbuffer, need, flags); } else { bytes_read = parport_read (pport, kbuffer, need); } if (bytes_read != 0) break; if (file->f_flags & O_NONBLOCK) { bytes_read = -EAGAIN; break; } if (signal_pending (current)) { bytes_read = -ERESTARTSYS; break; } cond_resched(); } parport_set_timeout (pp->pdev, pp->default_inactivity); if (bytes_read > 0 && copy_to_user (buf, kbuffer, bytes_read)) bytes_read = -EFAULT; kfree (kbuffer); pp_enable_irq (pp); return bytes_read; } static ssize_t pp_write (struct file * file, const char __user * buf, size_t count, loff_t * ppos) { unsigned int minor = iminor(file->f_path.dentry->d_inode); struct pp_struct *pp = file->private_data; char * kbuffer; ssize_t bytes_written = 0; ssize_t wrote; int mode; struct parport *pport; if (!(pp->flags & PP_CLAIMED)) { /* Don't have the port claimed */ pr_debug(CHRDEV "%x: claim the port first\n", minor); return -EINVAL; } kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL); if (!kbuffer) { return -ENOMEM; } pport = pp->pdev->port; mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR); parport_set_timeout (pp->pdev, (file->f_flags & O_NONBLOCK) ? PARPORT_INACTIVITY_O_NONBLOCK : pp->default_inactivity); while (bytes_written < count) { ssize_t n = min_t(unsigned long, count - bytes_written, PP_BUFFER_SIZE); if (copy_from_user (kbuffer, buf + bytes_written, n)) { bytes_written = -EFAULT; break; } if ((pp->flags & PP_FASTWRITE) && (mode == IEEE1284_MODE_EPP)) { /* do a fast EPP write */ if (pport->ieee1284.mode & IEEE1284_ADDR) { wrote = pport->ops->epp_write_addr (pport, kbuffer, n, PARPORT_EPP_FAST); } else { wrote = pport->ops->epp_write_data (pport, kbuffer, n, PARPORT_EPP_FAST); } } else { wrote = parport_write (pp->pdev->port, kbuffer, n); } if (wrote <= 0) { if (!bytes_written) { bytes_written = wrote; } break; } bytes_written += wrote; if (file->f_flags & O_NONBLOCK) { if (!bytes_written) bytes_written = -EAGAIN; break; } if (signal_pending (current)) { if (!bytes_written) { bytes_written = -EINTR; } break; } cond_resched(); } parport_set_timeout (pp->pdev, pp->default_inactivity); kfree (kbuffer); pp_enable_irq (pp); return bytes_written; } static void pp_irq (void *private) { struct pp_struct *pp = private; if (pp->irqresponse) { parport_write_control (pp->pdev->port, pp->irqctl); pp->irqresponse = 0; } atomic_inc (&pp->irqc); wake_up_interruptible (&pp->irq_wait); } static int register_device (int minor, struct pp_struct *pp) { struct parport *port; struct pardevice * pdev = NULL; char *name; int fl; name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor); if (name == NULL) return -ENOMEM; port = parport_find_number (minor); if (!port) { printk (KERN_WARNING "%s: no associated port!\n", name); kfree (name); return -ENXIO; } fl = (pp->flags & PP_EXCL) ? 
PARPORT_FLAG_EXCL : 0; pdev = parport_register_device (port, name, NULL, NULL, pp_irq, fl, pp); parport_put_port (port); if (!pdev) { printk (KERN_WARNING "%s: failed to register device!\n", name); kfree (name); return -ENXIO; } pp->pdev = pdev; pr_debug("%s: registered pardevice\n", name); return 0; } static enum ieee1284_phase init_phase (int mode) { switch (mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR)) { case IEEE1284_MODE_NIBBLE: case IEEE1284_MODE_BYTE: return IEEE1284_PH_REV_IDLE; } return IEEE1284_PH_FWD_IDLE; } static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { unsigned int minor = iminor(file->f_path.dentry->d_inode); struct pp_struct *pp = file->private_data; struct parport * port; void __user *argp = (void __user *)arg; /* First handle the cases that don't take arguments. */ switch (cmd) { case PPCLAIM: { struct ieee1284_info *info; int ret; if (pp->flags & PP_CLAIMED) { pr_debug(CHRDEV "%x: you've already got it!\n", minor); return -EINVAL; } /* Deferred device registration. */ if (!pp->pdev) { int err = register_device (minor, pp); if (err) { return err; } } ret = parport_claim_or_block (pp->pdev); if (ret < 0) return ret; pp->flags |= PP_CLAIMED; /* For interrupt-reporting to work, we need to be * informed of each interrupt. */ pp_enable_irq (pp); /* We may need to fix up the state machine. */ info = &pp->pdev->port->ieee1284; pp->saved_state.mode = info->mode; pp->saved_state.phase = info->phase; info->mode = pp->state.mode; info->phase = pp->state.phase; pp->default_inactivity = parport_set_timeout (pp->pdev, 0); parport_set_timeout (pp->pdev, pp->default_inactivity); return 0; } case PPEXCL: if (pp->pdev) { pr_debug(CHRDEV "%x: too late for PPEXCL; " "already registered\n", minor); if (pp->flags & PP_EXCL) /* But it's not really an error. */ return 0; /* There's no chance of making the driver happy. */ return -EINVAL; } /* Just remember to register the device exclusively * when we finally do the registration. 
*/ pp->flags |= PP_EXCL; return 0; case PPSETMODE: { int mode; if (copy_from_user (&mode, argp, sizeof (mode))) return -EFAULT; /* FIXME: validate mode */ pp->state.mode = mode; pp->state.phase = init_phase (mode); if (pp->flags & PP_CLAIMED) { pp->pdev->port->ieee1284.mode = mode; pp->pdev->port->ieee1284.phase = pp->state.phase; } return 0; } case PPGETMODE: { int mode; if (pp->flags & PP_CLAIMED) { mode = pp->pdev->port->ieee1284.mode; } else { mode = pp->state.mode; } if (copy_to_user (argp, &mode, sizeof (mode))) { return -EFAULT; } return 0; } case PPSETPHASE: { int phase; if (copy_from_user (&phase, argp, sizeof (phase))) { return -EFAULT; } /* FIXME: validate phase */ pp->state.phase = phase; if (pp->flags & PP_CLAIMED) { pp->pdev->port->ieee1284.phase = phase; } return 0; } case PPGETPHASE: { int phase; if (pp->flags & PP_CLAIMED) { phase = pp->pdev->port->ieee1284.phase; } else { phase = pp->state.phase; } if (copy_to_user (argp, &phase, sizeof (phase))) { return -EFAULT; } return 0; } case PPGETMODES: { unsigned int modes; port = parport_find_number (minor); if (!port) return -ENODEV; modes = port->modes; parport_put_port(port); if (copy_to_user (argp, &modes, sizeof (modes))) { return -EFAULT; } return 0; } case PPSETFLAGS: { int uflags; if (copy_from_user (&uflags, argp, sizeof (uflags))) { return -EFAULT; } pp->flags &= ~PP_FLAGMASK; pp->flags |= (uflags & PP_FLAGMASK); return 0; } case PPGETFLAGS: { int uflags; uflags = pp->flags & PP_FLAGMASK; if (copy_to_user (argp, &uflags, sizeof (uflags))) { return -EFAULT; } return 0; } } /* end switch() */ /* Everything else requires the port to be claimed, so check * that now. */ if ((pp->flags & PP_CLAIMED) == 0) { pr_debug(CHRDEV "%x: claim the port first\n", minor); return -EINVAL; } port = pp->pdev->port; switch (cmd) { struct ieee1284_info *info; unsigned char reg; unsigned char mask; int mode; int ret; struct timeval par_timeout; long to_jiffies; case PPRSTATUS: reg = parport_read_status (port); if (copy_to_user (argp, &reg, sizeof (reg))) return -EFAULT; return 0; case PPRDATA: reg = parport_read_data (port); if (copy_to_user (argp, &reg, sizeof (reg))) return -EFAULT; return 0; case PPRCONTROL: reg = parport_read_control (port); if (copy_to_user (argp, &reg, sizeof (reg))) return -EFAULT; return 0; case PPYIELD: parport_yield_blocking (pp->pdev); return 0; case PPRELEASE: /* Save the state machine's state. 
*/ info = &pp->pdev->port->ieee1284; pp->state.mode = info->mode; pp->state.phase = info->phase; info->mode = pp->saved_state.mode; info->phase = pp->saved_state.phase; parport_release (pp->pdev); pp->flags &= ~PP_CLAIMED; return 0; case PPWCONTROL: if (copy_from_user (&reg, argp, sizeof (reg))) return -EFAULT; parport_write_control (port, reg); return 0; case PPWDATA: if (copy_from_user (&reg, argp, sizeof (reg))) return -EFAULT; parport_write_data (port, reg); return 0; case PPFCONTROL: if (copy_from_user (&mask, argp, sizeof (mask))) return -EFAULT; if (copy_from_user (&reg, 1 + (unsigned char __user *) arg, sizeof (reg))) return -EFAULT; parport_frob_control (port, mask, reg); return 0; case PPDATADIR: if (copy_from_user (&mode, argp, sizeof (mode))) return -EFAULT; if (mode) port->ops->data_reverse (port); else port->ops->data_forward (port); return 0; case PPNEGOT: if (copy_from_user (&mode, argp, sizeof (mode))) return -EFAULT; switch ((ret = parport_negotiate (port, mode))) { case 0: break; case -1: /* handshake failed, peripheral not IEEE 1284 */ ret = -EIO; break; case 1: /* handshake succeeded, peripheral rejected mode */ ret = -ENXIO; break; } pp_enable_irq (pp); return ret; case PPWCTLONIRQ: if (copy_from_user (&reg, argp, sizeof (reg))) return -EFAULT; /* Remember what to set the control lines to, for next * time we get an interrupt. */ pp->irqctl = reg; pp->irqresponse = 1; return 0; case PPCLRIRQ: ret = atomic_read (&pp->irqc); if (copy_to_user (argp, &ret, sizeof (ret))) return -EFAULT; atomic_sub (ret, &pp->irqc); return 0; case PPSETTIME: if (copy_from_user (&par_timeout, argp, sizeof(struct timeval))) { return -EFAULT; } /* Convert to jiffies, place in pp->pdev->timeout */ if ((par_timeout.tv_sec < 0) || (par_timeout.tv_usec < 0)) { return -EINVAL; } to_jiffies = ROUND_UP(par_timeout.tv_usec, 1000000/HZ); to_jiffies += par_timeout.tv_sec * (long)HZ; if (to_jiffies <= 0) { return -EINVAL; } pp->pdev->timeout = to_jiffies; return 0; case PPGETTIME: to_jiffies = pp->pdev->timeout; memset(&par_timeout, 0, sizeof(par_timeout)); par_timeout.tv_sec = to_jiffies / HZ; par_timeout.tv_usec = (to_jiffies % (long)HZ) * (1000000/HZ); if (copy_to_user (argp, &par_timeout, sizeof(struct timeval))) return -EFAULT; return 0; default: pr_debug(CHRDEV "%x: What? (cmd=0x%x)\n", minor, cmd); return -EINVAL; } /* Keep the compiler happy */ return 0; } static long pp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret; mutex_lock(&pp_do_mutex); ret = pp_do_ioctl(file, cmd, arg); mutex_unlock(&pp_do_mutex); return ret; } static int pp_open (struct inode * inode, struct file * file) { unsigned int minor = iminor(inode); struct pp_struct *pp; if (minor >= PARPORT_MAX) return -ENXIO; pp = kmalloc (sizeof (struct pp_struct), GFP_KERNEL); if (!pp) return -ENOMEM; pp->state.mode = IEEE1284_MODE_COMPAT; pp->state.phase = init_phase (pp->state.mode); pp->flags = 0; pp->irqresponse = 0; atomic_set (&pp->irqc, 0); init_waitqueue_head (&pp->irq_wait); /* Defer the actual device registration until the first claim. * That way, we know whether or not the driver wants to have * exclusive access to the port (PPEXCL). 
*/ pp->pdev = NULL; file->private_data = pp; return 0; } static int pp_release (struct inode * inode, struct file * file) { unsigned int minor = iminor(inode); struct pp_struct *pp = file->private_data; int compat_negot; compat_negot = 0; if (!(pp->flags & PP_CLAIMED) && pp->pdev && (pp->state.mode != IEEE1284_MODE_COMPAT)) { struct ieee1284_info *info; /* parport released, but not in compatibility mode */ parport_claim_or_block (pp->pdev); pp->flags |= PP_CLAIMED; info = &pp->pdev->port->ieee1284; pp->saved_state.mode = info->mode; pp->saved_state.phase = info->phase; info->mode = pp->state.mode; info->phase = pp->state.phase; compat_negot = 1; } else if ((pp->flags & PP_CLAIMED) && pp->pdev && (pp->pdev->port->ieee1284.mode != IEEE1284_MODE_COMPAT)) { compat_negot = 2; } if (compat_negot) { parport_negotiate (pp->pdev->port, IEEE1284_MODE_COMPAT); pr_debug(CHRDEV "%x: negotiated back to compatibility " "mode because user-space forgot\n", minor); } if (pp->flags & PP_CLAIMED) { struct ieee1284_info *info; info = &pp->pdev->port->ieee1284; pp->state.mode = info->mode; pp->state.phase = info->phase; info->mode = pp->saved_state.mode; info->phase = pp->saved_state.phase; parport_release (pp->pdev); if (compat_negot != 1) { pr_debug(CHRDEV "%x: released pardevice " "because user-space forgot\n", minor); } } if (pp->pdev) { const char *name = pp->pdev->name; parport_unregister_device (pp->pdev); kfree (name); pp->pdev = NULL; pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); } kfree (pp); return 0; } /* No kernel lock held - fine */ static unsigned int pp_poll (struct file * file, poll_table * wait) { struct pp_struct *pp = file->private_data; unsigned int mask = 0; poll_wait (file, &pp->irq_wait, wait); if (atomic_read (&pp->irqc)) mask |= POLLIN | POLLRDNORM; return mask; } static struct class *ppdev_class; static const struct file_operations pp_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = pp_read, .write = pp_write, .poll = pp_poll, .unlocked_ioctl = pp_ioctl, .open = pp_open, .release = pp_release, }; static void pp_attach(struct parport *port) { device_create(ppdev_class, port->dev, MKDEV(PP_MAJOR, port->number), NULL, "parport%d", port->number); } static void pp_detach(struct parport *port) { device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number)); } static struct parport_driver pp_driver = { .name = CHRDEV, .attach = pp_attach, .detach = pp_detach, }; static int __init ppdev_init (void) { int err = 0; if (register_chrdev (PP_MAJOR, CHRDEV, &pp_fops)) { printk (KERN_WARNING CHRDEV ": unable to get major %d\n", PP_MAJOR); return -EIO; } ppdev_class = class_create(THIS_MODULE, CHRDEV); if (IS_ERR(ppdev_class)) { err = PTR_ERR(ppdev_class); goto out_chrdev; } if (parport_register_driver(&pp_driver)) { printk (KERN_WARNING CHRDEV ": unable to register with parport\n"); goto out_class; } printk (KERN_INFO PP_VERSION "\n"); goto out; out_class: class_destroy(ppdev_class); out_chrdev: unregister_chrdev(PP_MAJOR, CHRDEV); out: return err; } static void __exit ppdev_cleanup (void) { /* Clean up all parport stuff */ parport_unregister_driver(&pp_driver); class_destroy(ppdev_class); unregister_chrdev (PP_MAJOR, CHRDEV); } module_init(ppdev_init); module_exit(ppdev_cleanup); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV_MAJOR(PP_MAJOR);
gpl-2.0
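From user space, the ppdev driver above is exercised entirely through ioctls on /dev/parportN. A minimal sketch of the claim/write/release sequence; PPCLAIM, PPWDATA and PPRELEASE are the ioctls declared in <linux/ppdev.h>, and the example assumes parport0 exists with the ppdev module loaded:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ppdev.h>

int main(void)
{
	unsigned char byte = 0xAA;
	int fd = open("/dev/parport0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/parport0");
		return 1;
	}
	if (ioctl(fd, PPCLAIM) < 0) {      /* register + claim the port */
		perror("PPCLAIM");
		close(fd);
		return 1;
	}
	if (ioctl(fd, PPWDATA, &byte) < 0) /* drive the data lines */
		perror("PPWDATA");
	ioctl(fd, PPRELEASE);              /* let other port users in */
	close(fd);
	return 0;
}

Until PPCLAIM succeeds, read(), write() and most other ioctls fail with EINVAL, which is the "claim the port first" check visible in pp_read() and pp_write() above.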
TimmyTossPot/kernel_endeavoru
arch/powerpc/boot/cuboot-kilauea.c
13829
1275
/* * Old U-boot compatibility for PPC405EX. This image already includes * a dtb. * * Author: Tiejun Chen <tiejun.chen@windriver.com> * * Copyright (C) 2009 Wind River Systems, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include "ops.h" #include "io.h" #include "dcr.h" #include "stdio.h" #include "4xx.h" #include "44x.h" #include "cuboot.h" #define TARGET_4xx #define TARGET_44x #include "ppcboot.h" #define KILAUEA_SYS_EXT_SERIAL_CLOCK 11059200 /* ext. 11.059MHz clk */ static bd_t bd; static void kilauea_fixups(void) { unsigned long sysclk = 33333333; ibm405ex_fixup_clocks(sysclk, KILAUEA_SYS_EXT_SERIAL_CLOCK); dt_fixup_memory(bd.bi_memstart, bd.bi_memsize); ibm4xx_fixup_ebc_ranges("/plb/opb/ebc"); dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr); dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr); } void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7) { CUBOOT_INIT(); platform_ops.fixups = kilauea_fixups; platform_ops.exit = ibm40x_dbcr_reset; fdt_init(_dtb_start); serial_console_init(); }
gpl-2.0
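The kilauea wrapper above shows the cuboot pattern: platform_init() stores a board-specific callback in platform_ops.fixups, and the boot wrapper runs that callback to patch the device tree before starting the kernel proper. A standalone C sketch of the callback-table idea; the struct and values here are simplified stand-ins, not the real bootwrapper API:

#include <stdio.h>

struct platform_ops_demo {
	void (*fixups)(void); /* patch the device tree */
	void (*exit)(void);   /* board reset hook */
};

static unsigned long memstart;              /* would come from the u-boot bd_t */
static unsigned long memsize = 0x10000000;  /* invented: 256 MiB */

static void board_fixups(void)
{
	/* the real code calls dt_fixup_memory(), clock fixups, etc. */
	printf("fixup memory node: start=%#lx size=%#lx\n", memstart, memsize);
}

static void board_reset(void)
{
	printf("reset via DBCR (stub)\n");
}

int main(void)
{
	struct platform_ops_demo ops = {
		.fixups = board_fixups,
		.exit = board_reset,
	};
	ops.fixups(); /* the wrapper invokes this before jumping to the kernel */
	return 0;
}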
kasperhettinga/p4wifi_stock
drivers/ps3/ps3av_cmd.c
14597
24361
/* * Copyright (C) 2006 Sony Computer Entertainment Inc. * Copyright 2006, 2007 Sony Corporation * * AV backend support for PS3 * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published * by the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <asm/ps3av.h> #include <asm/ps3.h> #include <asm/ps3gpu.h> #include "vuart.h" static const struct video_fmt { u32 format; u32 order; } ps3av_video_fmt_table[] = { { PS3AV_CMD_VIDEO_FORMAT_ARGB_8BIT, PS3AV_CMD_VIDEO_ORDER_RGB }, { PS3AV_CMD_VIDEO_FORMAT_ARGB_8BIT, PS3AV_CMD_VIDEO_ORDER_BGR }, }; static const struct { int cs; u32 av; u32 bl; } ps3av_cs_video2av_table[] = { { .cs = PS3AV_CMD_VIDEO_CS_RGB_8, .av = PS3AV_CMD_AV_CS_RGB_8, .bl = PS3AV_CMD_AV_CS_8 }, { .cs = PS3AV_CMD_VIDEO_CS_RGB_10, .av = PS3AV_CMD_AV_CS_RGB_8, .bl = PS3AV_CMD_AV_CS_8 }, { .cs = PS3AV_CMD_VIDEO_CS_RGB_12, .av = PS3AV_CMD_AV_CS_RGB_8, .bl = PS3AV_CMD_AV_CS_8 }, { .cs = PS3AV_CMD_VIDEO_CS_YUV444_8, .av = PS3AV_CMD_AV_CS_YUV444_8, .bl = PS3AV_CMD_AV_CS_8 }, { .cs = PS3AV_CMD_VIDEO_CS_YUV444_10, .av = PS3AV_CMD_AV_CS_YUV444_8, .bl = PS3AV_CMD_AV_CS_10 }, { .cs = PS3AV_CMD_VIDEO_CS_YUV444_12, .av = PS3AV_CMD_AV_CS_YUV444_8, .bl = PS3AV_CMD_AV_CS_10 }, { .cs = PS3AV_CMD_VIDEO_CS_YUV422_8, .av = PS3AV_CMD_AV_CS_YUV422_8, .bl = PS3AV_CMD_AV_CS_10 }, { .cs = PS3AV_CMD_VIDEO_CS_YUV422_10, .av = PS3AV_CMD_AV_CS_YUV422_8, .bl = PS3AV_CMD_AV_CS_10 }, { .cs = PS3AV_CMD_VIDEO_CS_YUV422_12, .av = PS3AV_CMD_AV_CS_YUV422_8, .bl = PS3AV_CMD_AV_CS_12 }, { .cs = PS3AV_CMD_VIDEO_CS_XVYCC_8, .av = PS3AV_CMD_AV_CS_XVYCC_8, .bl = PS3AV_CMD_AV_CS_12 }, { .cs = PS3AV_CMD_VIDEO_CS_XVYCC_10, .av = PS3AV_CMD_AV_CS_XVYCC_8, .bl = PS3AV_CMD_AV_CS_12 }, { .cs = PS3AV_CMD_VIDEO_CS_XVYCC_12, .av = PS3AV_CMD_AV_CS_XVYCC_8, .bl = PS3AV_CMD_AV_CS_12 } }; static u32 ps3av_cs_video2av(int cs) { unsigned int i; for (i = 0; i < ARRAY_SIZE(ps3av_cs_video2av_table); i++) if (ps3av_cs_video2av_table[i].cs == cs) return ps3av_cs_video2av_table[i].av; return PS3AV_CMD_AV_CS_RGB_8; } static u32 ps3av_cs_video2av_bitlen(int cs) { unsigned int i; for (i = 0; i < ARRAY_SIZE(ps3av_cs_video2av_table); i++) if (ps3av_cs_video2av_table[i].cs == cs) return ps3av_cs_video2av_table[i].bl; return PS3AV_CMD_AV_CS_8; } static const struct { int vid; u32 av; } ps3av_vid_video2av_table[] = { { PS3AV_CMD_VIDEO_VID_480I, PS3AV_CMD_AV_VID_480I }, { PS3AV_CMD_VIDEO_VID_480P, PS3AV_CMD_AV_VID_480P }, { PS3AV_CMD_VIDEO_VID_576I, PS3AV_CMD_AV_VID_576I }, { PS3AV_CMD_VIDEO_VID_576P, PS3AV_CMD_AV_VID_576P }, { PS3AV_CMD_VIDEO_VID_1080I_60HZ, PS3AV_CMD_AV_VID_1080I_60HZ }, { PS3AV_CMD_VIDEO_VID_720P_60HZ, PS3AV_CMD_AV_VID_720P_60HZ }, { PS3AV_CMD_VIDEO_VID_1080P_60HZ, PS3AV_CMD_AV_VID_1080P_60HZ }, { PS3AV_CMD_VIDEO_VID_1080I_50HZ, PS3AV_CMD_AV_VID_1080I_50HZ }, { PS3AV_CMD_VIDEO_VID_720P_50HZ, PS3AV_CMD_AV_VID_720P_50HZ }, { PS3AV_CMD_VIDEO_VID_1080P_50HZ, PS3AV_CMD_AV_VID_1080P_50HZ }, { PS3AV_CMD_VIDEO_VID_WXGA, 
PS3AV_CMD_AV_VID_WXGA }, { PS3AV_CMD_VIDEO_VID_SXGA, PS3AV_CMD_AV_VID_SXGA }, { PS3AV_CMD_VIDEO_VID_WUXGA, PS3AV_CMD_AV_VID_WUXGA } }; static u32 ps3av_vid_video2av(int vid) { unsigned int i; for (i = 0; i < ARRAY_SIZE(ps3av_vid_video2av_table); i++) if (ps3av_vid_video2av_table[i].vid == vid) return ps3av_vid_video2av_table[i].av; return PS3AV_CMD_AV_VID_480P; } static int ps3av_hdmi_range(void) { if (ps3_compare_firmware_version(1, 8, 0) < 0) return 0; else return 1; /* supported */ } int ps3av_cmd_init(void) { int res; struct ps3av_pkt_av_init av_init; struct ps3av_pkt_video_init video_init; struct ps3av_pkt_audio_init audio_init; /* video init */ memset(&video_init, 0, sizeof(video_init)); res = ps3av_do_pkt(PS3AV_CID_VIDEO_INIT, sizeof(video_init.send_hdr), sizeof(video_init), &video_init.send_hdr); if (res < 0) return res; res = get_status(&video_init); if (res) { printk(KERN_ERR "PS3AV_CID_VIDEO_INIT: failed %x\n", res); return res; } /* audio init */ memset(&audio_init, 0, sizeof(audio_init)); res = ps3av_do_pkt(PS3AV_CID_AUDIO_INIT, sizeof(audio_init.send_hdr), sizeof(audio_init), &audio_init.send_hdr); if (res < 0) return res; res = get_status(&audio_init); if (res) { printk(KERN_ERR "PS3AV_CID_AUDIO_INIT: failed %x\n", res); return res; } /* av init */ memset(&av_init, 0, sizeof(av_init)); av_init.event_bit = 0; res = ps3av_do_pkt(PS3AV_CID_AV_INIT, sizeof(av_init), sizeof(av_init), &av_init.send_hdr); if (res < 0) return res; res = get_status(&av_init); if (res) printk(KERN_ERR "PS3AV_CID_AV_INIT: failed %x\n", res); return res; } int ps3av_cmd_fin(void) { int res; struct ps3av_pkt_av_fin av_fin; memset(&av_fin, 0, sizeof(av_fin)); res = ps3av_do_pkt(PS3AV_CID_AV_FIN, sizeof(av_fin.send_hdr), sizeof(av_fin), &av_fin.send_hdr); if (res < 0) return res; res = get_status(&av_fin); if (res) printk(KERN_ERR "PS3AV_CID_AV_FIN: failed %x\n", res); return res; } int ps3av_cmd_av_video_mute(int num_of_port, u32 *port, u32 mute) { int i, send_len, res; struct ps3av_pkt_av_video_mute av_video_mute; if (num_of_port > PS3AV_MUTE_PORT_MAX) return -EINVAL; memset(&av_video_mute, 0, sizeof(av_video_mute)); for (i = 0; i < num_of_port; i++) { av_video_mute.mute[i].avport = port[i]; av_video_mute.mute[i].mute = mute; } send_len = sizeof(av_video_mute.send_hdr) + sizeof(struct ps3av_av_mute) * num_of_port; res = ps3av_do_pkt(PS3AV_CID_AV_VIDEO_MUTE, send_len, sizeof(av_video_mute), &av_video_mute.send_hdr); if (res < 0) return res; res = get_status(&av_video_mute); if (res) printk(KERN_ERR "PS3AV_CID_AV_VIDEO_MUTE: failed %x\n", res); return res; } int ps3av_cmd_av_video_disable_sig(u32 port) { int res; struct ps3av_pkt_av_video_disable_sig av_video_sig; memset(&av_video_sig, 0, sizeof(av_video_sig)); av_video_sig.avport = port; res = ps3av_do_pkt(PS3AV_CID_AV_VIDEO_DISABLE_SIG, sizeof(av_video_sig), sizeof(av_video_sig), &av_video_sig.send_hdr); if (res < 0) return res; res = get_status(&av_video_sig); if (res) printk(KERN_ERR "PS3AV_CID_AV_VIDEO_DISABLE_SIG: failed %x port:%x\n", res, port); return res; } int ps3av_cmd_av_tv_mute(u32 avport, u32 mute) { int res; struct ps3av_pkt_av_tv_mute tv_mute; memset(&tv_mute, 0, sizeof(tv_mute)); tv_mute.avport = avport; tv_mute.mute = mute; res = ps3av_do_pkt(PS3AV_CID_AV_TV_MUTE, sizeof(tv_mute), sizeof(tv_mute), &tv_mute.send_hdr); if (res < 0) return res; res = get_status(&tv_mute); if (res) printk(KERN_ERR "PS3AV_CID_AV_TV_MUTE: failed %x port:%x\n", res, avport); return res; } int ps3av_cmd_enable_event(void) { int res; struct ps3av_pkt_av_event 
av_event; memset(&av_event, 0, sizeof(av_event)); av_event.event_bit = PS3AV_CMD_EVENT_BIT_UNPLUGGED | PS3AV_CMD_EVENT_BIT_PLUGGED | PS3AV_CMD_EVENT_BIT_HDCP_DONE; res = ps3av_do_pkt(PS3AV_CID_AV_ENABLE_EVENT, sizeof(av_event), sizeof(av_event), &av_event.send_hdr); if (res < 0) return res; res = get_status(&av_event); if (res) printk(KERN_ERR "PS3AV_CID_AV_ENABLE_EVENT: failed %x\n", res); return res; } int ps3av_cmd_av_hdmi_mode(u8 mode) { int res; struct ps3av_pkt_av_hdmi_mode hdmi_mode; memset(&hdmi_mode, 0, sizeof(hdmi_mode)); hdmi_mode.mode = mode; res = ps3av_do_pkt(PS3AV_CID_AV_HDMI_MODE, sizeof(hdmi_mode), sizeof(hdmi_mode), &hdmi_mode.send_hdr); if (res < 0) return res; res = get_status(&hdmi_mode); if (res && res != PS3AV_STATUS_UNSUPPORTED_HDMI_MODE) printk(KERN_ERR "PS3AV_CID_AV_HDMI_MODE: failed %x\n", res); return res; } u32 ps3av_cmd_set_av_video_cs(void *p, u32 avport, int video_vid, int cs_out, int aspect, u32 id) { struct ps3av_pkt_av_video_cs *av_video_cs; av_video_cs = (struct ps3av_pkt_av_video_cs *)p; if (video_vid == -1) video_vid = PS3AV_CMD_VIDEO_VID_720P_60HZ; if (cs_out == -1) cs_out = PS3AV_CMD_VIDEO_CS_YUV444_8; if (aspect == -1) aspect = 0; memset(av_video_cs, 0, sizeof(*av_video_cs)); ps3av_set_hdr(PS3AV_CID_AV_VIDEO_CS, sizeof(*av_video_cs), &av_video_cs->send_hdr); av_video_cs->avport = avport; /* should be same as video_mode.resolution */ av_video_cs->av_vid = ps3av_vid_video2av(video_vid); av_video_cs->av_cs_out = ps3av_cs_video2av(cs_out); /* should be same as video_mode.video_cs_out */ av_video_cs->av_cs_in = ps3av_cs_video2av(PS3AV_CMD_VIDEO_CS_RGB_8); av_video_cs->bitlen_out = ps3av_cs_video2av_bitlen(cs_out); if ((id & PS3AV_MODE_WHITE) && ps3av_hdmi_range()) av_video_cs->super_white = PS3AV_CMD_AV_SUPER_WHITE_ON; else /* default off */ av_video_cs->super_white = PS3AV_CMD_AV_SUPER_WHITE_OFF; av_video_cs->aspect = aspect; if (id & PS3AV_MODE_DITHER) { av_video_cs->dither = PS3AV_CMD_AV_DITHER_ON | PS3AV_CMD_AV_DITHER_8BIT; } else { /* default off */ av_video_cs->dither = PS3AV_CMD_AV_DITHER_OFF; } return sizeof(*av_video_cs); } u32 ps3av_cmd_set_video_mode(void *p, u32 head, int video_vid, int video_fmt, u32 id) { struct ps3av_pkt_video_mode *video_mode; u32 x, y; video_mode = (struct ps3av_pkt_video_mode *)p; if (video_vid == -1) video_vid = PS3AV_CMD_VIDEO_VID_720P_60HZ; if (video_fmt == -1) video_fmt = PS3AV_CMD_VIDEO_FMT_X8R8G8B8; if (ps3av_video_mode2res(id, &x, &y)) return 0; /* video mode */ memset(video_mode, 0, sizeof(*video_mode)); ps3av_set_hdr(PS3AV_CID_VIDEO_MODE, sizeof(*video_mode), &video_mode->send_hdr); video_mode->video_head = head; if (video_vid == PS3AV_CMD_VIDEO_VID_480I && head == PS3AV_CMD_VIDEO_HEAD_B) video_mode->video_vid = PS3AV_CMD_VIDEO_VID_480I_A; else video_mode->video_vid = video_vid; video_mode->width = (u16) x; video_mode->height = (u16) y; video_mode->pitch = video_mode->width * 4; /* line_length */ video_mode->video_out_format = PS3AV_CMD_VIDEO_OUT_FORMAT_RGB_12BIT; video_mode->video_format = ps3av_video_fmt_table[video_fmt].format; if ((id & PS3AV_MODE_COLOR) && ps3av_hdmi_range()) video_mode->video_cl_cnv = PS3AV_CMD_VIDEO_CL_CNV_DISABLE_LUT; else /* default enable */ video_mode->video_cl_cnv = PS3AV_CMD_VIDEO_CL_CNV_ENABLE_LUT; video_mode->video_order = ps3av_video_fmt_table[video_fmt].order; pr_debug("%s: video_mode:vid:%x width:%d height:%d pitch:%d out_format:%d format:%x order:%x\n", __func__, video_vid, video_mode->width, video_mode->height, video_mode->pitch, video_mode->video_out_format, 
video_mode->video_format, video_mode->video_order); return sizeof(*video_mode); } int ps3av_cmd_video_format_black(u32 head, u32 video_fmt, u32 mute) { int res; struct ps3av_pkt_video_format video_format; memset(&video_format, 0, sizeof(video_format)); video_format.video_head = head; if (mute != PS3AV_CMD_MUTE_OFF) video_format.video_format = PS3AV_CMD_VIDEO_FORMAT_BLACK; else video_format.video_format = ps3av_video_fmt_table[video_fmt].format; video_format.video_order = ps3av_video_fmt_table[video_fmt].order; res = ps3av_do_pkt(PS3AV_CID_VIDEO_FORMAT, sizeof(video_format), sizeof(video_format), &video_format.send_hdr); if (res < 0) return res; res = get_status(&video_format); if (res) printk(KERN_ERR "PS3AV_CID_VIDEO_FORMAT: failed %x\n", res); return res; } int ps3av_cmd_av_audio_mute(int num_of_port, u32 *port, u32 mute) { int i, res; struct ps3av_pkt_av_audio_mute av_audio_mute; if (num_of_port > PS3AV_MUTE_PORT_MAX) return -EINVAL; /* audio mute */ memset(&av_audio_mute, 0, sizeof(av_audio_mute)); for (i = 0; i < num_of_port; i++) { av_audio_mute.mute[i].avport = port[i]; av_audio_mute.mute[i].mute = mute; } res = ps3av_do_pkt(PS3AV_CID_AV_AUDIO_MUTE, sizeof(av_audio_mute.send_hdr) + sizeof(struct ps3av_av_mute) * num_of_port, sizeof(av_audio_mute), &av_audio_mute.send_hdr); if (res < 0) return res; res = get_status(&av_audio_mute); if (res) printk(KERN_ERR "PS3AV_CID_AV_AUDIO_MUTE: failed %x\n", res); return res; } static const struct { u32 fs; u8 mclk; } ps3av_cnv_mclk_table[] = { { PS3AV_CMD_AUDIO_FS_44K, PS3AV_CMD_AV_MCLK_512 }, { PS3AV_CMD_AUDIO_FS_48K, PS3AV_CMD_AV_MCLK_512 }, { PS3AV_CMD_AUDIO_FS_88K, PS3AV_CMD_AV_MCLK_256 }, { PS3AV_CMD_AUDIO_FS_96K, PS3AV_CMD_AV_MCLK_256 }, { PS3AV_CMD_AUDIO_FS_176K, PS3AV_CMD_AV_MCLK_128 }, { PS3AV_CMD_AUDIO_FS_192K, PS3AV_CMD_AV_MCLK_128 } }; static u8 ps3av_cnv_mclk(u32 fs) { unsigned int i; for (i = 0; i < ARRAY_SIZE(ps3av_cnv_mclk_table); i++) if (ps3av_cnv_mclk_table[i].fs == fs) return ps3av_cnv_mclk_table[i].mclk; printk(KERN_ERR "%s failed, fs:%x\n", __func__, fs); return 0; } #define BASE PS3AV_CMD_AUDIO_FS_44K static const u32 ps3av_ns_table[][5] = { /* D1, D2, D3, D4, D5 */ [PS3AV_CMD_AUDIO_FS_44K-BASE] = { 6272, 6272, 17836, 17836, 8918 }, [PS3AV_CMD_AUDIO_FS_48K-BASE] = { 6144, 6144, 11648, 11648, 5824 }, [PS3AV_CMD_AUDIO_FS_88K-BASE] = { 12544, 12544, 35672, 35672, 17836 }, [PS3AV_CMD_AUDIO_FS_96K-BASE] = { 12288, 12288, 23296, 23296, 11648 }, [PS3AV_CMD_AUDIO_FS_176K-BASE] = { 25088, 25088, 71344, 71344, 35672 }, [PS3AV_CMD_AUDIO_FS_192K-BASE] = { 24576, 24576, 46592, 46592, 23296 } }; static void ps3av_cnv_ns(u8 *ns, u32 fs, u32 video_vid) { u32 av_vid, ns_val; int d; d = ns_val = 0; av_vid = ps3av_vid_video2av(video_vid); switch (av_vid) { case PS3AV_CMD_AV_VID_480I: case PS3AV_CMD_AV_VID_576I: d = 0; break; case PS3AV_CMD_AV_VID_480P: case PS3AV_CMD_AV_VID_576P: d = 1; break; case PS3AV_CMD_AV_VID_1080I_60HZ: case PS3AV_CMD_AV_VID_1080I_50HZ: d = 2; break; case PS3AV_CMD_AV_VID_720P_60HZ: case PS3AV_CMD_AV_VID_720P_50HZ: d = 3; break; case PS3AV_CMD_AV_VID_1080P_60HZ: case PS3AV_CMD_AV_VID_1080P_50HZ: case PS3AV_CMD_AV_VID_WXGA: case PS3AV_CMD_AV_VID_SXGA: case PS3AV_CMD_AV_VID_WUXGA: d = 4; break; default: printk(KERN_ERR "%s failed, vid:%x\n", __func__, video_vid); break; } if (fs < PS3AV_CMD_AUDIO_FS_44K || fs > PS3AV_CMD_AUDIO_FS_192K) printk(KERN_ERR "%s failed, fs:%x\n", __func__, fs); else ns_val = ps3av_ns_table[fs-BASE][d]; *ns++ = ns_val & 0x000000FF; *ns++ = (ns_val & 0x0000FF00) >> 8; *ns = 
(ns_val & 0x00FF0000) >> 16; } #undef BASE static u8 ps3av_cnv_enable(u32 source, const u8 *enable) { u8 ret = 0; if (source == PS3AV_CMD_AUDIO_SOURCE_SPDIF) { ret = 0x03; } else if (source == PS3AV_CMD_AUDIO_SOURCE_SERIAL) { ret = ((enable[0] << 4) + (enable[1] << 5) + (enable[2] << 6) + (enable[3] << 7)) | 0x01; } else printk(KERN_ERR "%s failed, source:%x\n", __func__, source); return ret; } static u8 ps3av_cnv_fifomap(const u8 *map) { u8 ret = 0; ret = map[0] + (map[1] << 2) + (map[2] << 4) + (map[3] << 6); return ret; } static u8 ps3av_cnv_inputlen(u32 word_bits) { u8 ret = 0; switch (word_bits) { case PS3AV_CMD_AUDIO_WORD_BITS_16: ret = PS3AV_CMD_AV_INPUTLEN_16; break; case PS3AV_CMD_AUDIO_WORD_BITS_20: ret = PS3AV_CMD_AV_INPUTLEN_20; break; case PS3AV_CMD_AUDIO_WORD_BITS_24: ret = PS3AV_CMD_AV_INPUTLEN_24; break; default: printk(KERN_ERR "%s failed, word_bits:%x\n", __func__, word_bits); break; } return ret; } static u8 ps3av_cnv_layout(u32 num_of_ch) { if (num_of_ch > PS3AV_CMD_AUDIO_NUM_OF_CH_8) { printk(KERN_ERR "%s failed, num_of_ch:%x\n", __func__, num_of_ch); return 0; } return num_of_ch == PS3AV_CMD_AUDIO_NUM_OF_CH_2 ? 0x0 : 0x1; } static void ps3av_cnv_info(struct ps3av_audio_info_frame *info, const struct ps3av_pkt_audio_mode *mode) { info->pb1.cc = mode->audio_num_of_ch + 1; /* CH2:0x01 --- CH8:0x07 */ info->pb1.ct = 0; info->pb2.sf = 0; info->pb2.ss = 0; info->pb3 = 0; /* check mode->audio_format ?? */ info->pb4 = mode->audio_layout; info->pb5.dm = mode->audio_downmix; info->pb5.lsv = mode->audio_downmix_level; } static void ps3av_cnv_chstat(u8 *chstat, const u8 *cs_info) { memcpy(chstat, cs_info, 5); } u32 ps3av_cmd_set_av_audio_param(void *p, u32 port, const struct ps3av_pkt_audio_mode *audio_mode, u32 video_vid) { struct ps3av_pkt_av_audio_param *param; param = (struct ps3av_pkt_av_audio_param *)p; memset(param, 0, sizeof(*param)); ps3av_set_hdr(PS3AV_CID_AV_AUDIO_PARAM, sizeof(*param), &param->send_hdr); param->avport = port; param->mclk = ps3av_cnv_mclk(audio_mode->audio_fs) | 0x80; ps3av_cnv_ns(param->ns, audio_mode->audio_fs, video_vid); param->enable = ps3av_cnv_enable(audio_mode->audio_source, audio_mode->audio_enable); param->swaplr = 0x09; param->fifomap = ps3av_cnv_fifomap(audio_mode->audio_map); param->inputctrl = 0x49; param->inputlen = ps3av_cnv_inputlen(audio_mode->audio_word_bits); param->layout = ps3av_cnv_layout(audio_mode->audio_num_of_ch); ps3av_cnv_info(&param->info, audio_mode); ps3av_cnv_chstat(param->chstat, audio_mode->audio_cs_info); return sizeof(*param); } /* default cs val */ u8 ps3av_mode_cs_info[] = { 0x00, 0x09, 0x00, 0x02, 0x01, 0x00, 0x00, 0x00 }; EXPORT_SYMBOL_GPL(ps3av_mode_cs_info); #define CS_44 0x00 #define CS_48 0x02 #define CS_88 0x08 #define CS_96 0x0a #define CS_176 0x0c #define CS_192 0x0e #define CS_MASK 0x0f #define CS_BIT 0x40 void ps3av_cmd_set_audio_mode(struct ps3av_pkt_audio_mode *audio, u32 avport, u32 ch, u32 fs, u32 word_bits, u32 format, u32 source) { int spdif_through; int i; if (!(ch | fs | format | word_bits | source)) { ch = PS3AV_CMD_AUDIO_NUM_OF_CH_2; fs = PS3AV_CMD_AUDIO_FS_48K; word_bits = PS3AV_CMD_AUDIO_WORD_BITS_16; format = PS3AV_CMD_AUDIO_FORMAT_PCM; source = PS3AV_CMD_AUDIO_SOURCE_SERIAL; } /* audio mode */ memset(audio, 0, sizeof(*audio)); ps3av_set_hdr(PS3AV_CID_AUDIO_MODE, sizeof(*audio), &audio->send_hdr); audio->avport = (u8) avport; audio->mask = 0x0FFF; /* XXX set all */ audio->audio_num_of_ch = ch; audio->audio_fs = fs; audio->audio_word_bits = word_bits; audio->audio_format = format; 
audio->audio_source = source; switch (ch) { case PS3AV_CMD_AUDIO_NUM_OF_CH_8: audio->audio_enable[3] = 1; /* fall through */ case PS3AV_CMD_AUDIO_NUM_OF_CH_6: audio->audio_enable[2] = 1; audio->audio_enable[1] = 1; /* fall through */ case PS3AV_CMD_AUDIO_NUM_OF_CH_2: default: audio->audio_enable[0] = 1; } /* audio swap L/R */ for (i = 0; i < 4; i++) audio->audio_swap[i] = PS3AV_CMD_AUDIO_SWAP_0; /* no swap */ /* audio serial input mapping */ audio->audio_map[0] = PS3AV_CMD_AUDIO_MAP_OUTPUT_0; audio->audio_map[1] = PS3AV_CMD_AUDIO_MAP_OUTPUT_1; audio->audio_map[2] = PS3AV_CMD_AUDIO_MAP_OUTPUT_2; audio->audio_map[3] = PS3AV_CMD_AUDIO_MAP_OUTPUT_3; /* audio speaker layout */ if (avport == PS3AV_CMD_AVPORT_HDMI_0 || avport == PS3AV_CMD_AVPORT_HDMI_1) { switch (ch) { case PS3AV_CMD_AUDIO_NUM_OF_CH_8: audio->audio_layout = PS3AV_CMD_AUDIO_LAYOUT_8CH; break; case PS3AV_CMD_AUDIO_NUM_OF_CH_6: audio->audio_layout = PS3AV_CMD_AUDIO_LAYOUT_6CH; break; case PS3AV_CMD_AUDIO_NUM_OF_CH_2: default: audio->audio_layout = PS3AV_CMD_AUDIO_LAYOUT_2CH; break; } } else { audio->audio_layout = PS3AV_CMD_AUDIO_LAYOUT_2CH; } /* audio downmix permission */ audio->audio_downmix = PS3AV_CMD_AUDIO_DOWNMIX_PERMITTED; /* audio downmix level shift (0:0dB to 15:15dB) */ audio->audio_downmix_level = 0; /* 0dB */ /* set ch status */ for (i = 0; i < 8; i++) audio->audio_cs_info[i] = ps3av_mode_cs_info[i]; switch (fs) { case PS3AV_CMD_AUDIO_FS_44K: audio->audio_cs_info[3] &= ~CS_MASK; audio->audio_cs_info[3] |= CS_44; break; case PS3AV_CMD_AUDIO_FS_88K: audio->audio_cs_info[3] &= ~CS_MASK; audio->audio_cs_info[3] |= CS_88; break; case PS3AV_CMD_AUDIO_FS_96K: audio->audio_cs_info[3] &= ~CS_MASK; audio->audio_cs_info[3] |= CS_96; break; case PS3AV_CMD_AUDIO_FS_176K: audio->audio_cs_info[3] &= ~CS_MASK; audio->audio_cs_info[3] |= CS_176; break; case PS3AV_CMD_AUDIO_FS_192K: audio->audio_cs_info[3] &= ~CS_MASK; audio->audio_cs_info[3] |= CS_192; break; default: break; } /* non-audio bit */ spdif_through = audio->audio_cs_info[0] & 0x02; /* pass through setting */ if (spdif_through && (avport == PS3AV_CMD_AVPORT_SPDIF_0 || avport == PS3AV_CMD_AVPORT_SPDIF_1 || avport == PS3AV_CMD_AVPORT_HDMI_0 || avport == PS3AV_CMD_AVPORT_HDMI_1)) { audio->audio_word_bits = PS3AV_CMD_AUDIO_WORD_BITS_16; audio->audio_format = PS3AV_CMD_AUDIO_FORMAT_BITSTREAM; } } int ps3av_cmd_audio_mode(struct ps3av_pkt_audio_mode *audio_mode) { int res; res = ps3av_do_pkt(PS3AV_CID_AUDIO_MODE, sizeof(*audio_mode), sizeof(*audio_mode), &audio_mode->send_hdr); if (res < 0) return res; res = get_status(audio_mode); if (res) printk(KERN_ERR "PS3AV_CID_AUDIO_MODE: failed %x\n", res); return res; } int ps3av_cmd_audio_mute(int num_of_port, u32 *port, u32 mute) { int i, res; struct ps3av_pkt_audio_mute audio_mute; if (num_of_port > PS3AV_OPT_PORT_MAX) return -EINVAL; /* audio mute */ memset(&audio_mute, 0, sizeof(audio_mute)); for (i = 0; i < num_of_port; i++) { audio_mute.mute[i].avport = port[i]; audio_mute.mute[i].mute = mute; } res = ps3av_do_pkt(PS3AV_CID_AUDIO_MUTE, sizeof(audio_mute.send_hdr) + sizeof(struct ps3av_audio_mute) * num_of_port, sizeof(audio_mute), &audio_mute.send_hdr); if (res < 0) return res; res = get_status(&audio_mute); if (res) printk(KERN_ERR "PS3AV_CID_AUDIO_MUTE: failed %x\n", res); return res; } int ps3av_cmd_audio_active(int active, u32 port) { int res; struct ps3av_pkt_audio_active audio_active; u32 cid; /* audio active */ memset(&audio_active, 0, sizeof(audio_active)); audio_active.audio_port = port; cid = active ? 
PS3AV_CID_AUDIO_ACTIVE : PS3AV_CID_AUDIO_INACTIVE; res = ps3av_do_pkt(cid, sizeof(audio_active), sizeof(audio_active), &audio_active.send_hdr); if (res < 0) return res; res = get_status(&audio_active); if (res) printk(KERN_ERR "PS3AV_CID_AUDIO_ACTIVE:%x failed %x\n", cid, res); return res; } int ps3av_cmd_avb_param(struct ps3av_pkt_avb_param *avb, u32 send_len) { int res; mutex_lock(&ps3_gpu_mutex); /* avb packet */ res = ps3av_do_pkt(PS3AV_CID_AVB_PARAM, send_len, sizeof(*avb), &avb->send_hdr); if (res < 0) goto out; res = get_status(avb); if (res) pr_debug("%s: PS3AV_CID_AVB_PARAM: failed %x\n", __func__, res); out: mutex_unlock(&ps3_gpu_mutex); return res; } int ps3av_cmd_av_get_hw_conf(struct ps3av_pkt_av_get_hw_conf *hw_conf) { int res; memset(hw_conf, 0, sizeof(*hw_conf)); res = ps3av_do_pkt(PS3AV_CID_AV_GET_HW_CONF, sizeof(hw_conf->send_hdr), sizeof(*hw_conf), &hw_conf->send_hdr); if (res < 0) return res; res = get_status(hw_conf); if (res) printk(KERN_ERR "PS3AV_CID_AV_GET_HW_CONF: failed %x\n", res); return res; } int ps3av_cmd_video_get_monitor_info(struct ps3av_pkt_av_get_monitor_info *info, u32 avport) { int res; memset(info, 0, sizeof(*info)); info->avport = avport; res = ps3av_do_pkt(PS3AV_CID_AV_GET_MONITOR_INFO, sizeof(info->send_hdr) + sizeof(info->avport) + sizeof(info->reserved), sizeof(*info), &info->send_hdr); if (res < 0) return res; res = get_status(info); if (res) printk(KERN_ERR "PS3AV_CID_AV_GET_MONITOR_INFO: failed %x\n", res); return res; } #define PS3AV_AV_LAYOUT_0 (PS3AV_CMD_AV_LAYOUT_32 \ | PS3AV_CMD_AV_LAYOUT_44 \ | PS3AV_CMD_AV_LAYOUT_48) #define PS3AV_AV_LAYOUT_1 (PS3AV_AV_LAYOUT_0 \ | PS3AV_CMD_AV_LAYOUT_88 \ | PS3AV_CMD_AV_LAYOUT_96 \ | PS3AV_CMD_AV_LAYOUT_176 \ | PS3AV_CMD_AV_LAYOUT_192)
gpl-2.0
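ps3av_cmd.c above leans on one idiom throughout: a small const table mapping one ID space onto another, scanned linearly, with a safe default when the input is unknown (ps3av_cs_video2av() falls back to PS3AV_CMD_AV_CS_RGB_8, ps3av_vid_video2av() to 480P). A self-contained sketch of that lookup-with-fallback idiom, with invented values:

#include <stdio.h>

struct id_map {
	int from;
	unsigned to;
};

static const struct id_map table[] = {
	{ 1, 0x10 },
	{ 2, 0x20 },
	{ 3, 0x30 },
};

#define FALLBACK 0x10 /* mirrors the RGB_8 / 480P defaults above */

static unsigned translate(int from)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].from == from)
			return table[i].to;
	return FALLBACK; /* unknown input degrades gracefully */
}

int main(void)
{
	printf("%#x %#x\n", translate(2), translate(99));
	return 0;
}

A linear scan is the right call here: the tables hold a handful of entries and the translation happens only on mode changes, never in a hot path.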
Mazout360/lge-kernel-gb
arch/arm/mach-tegra/tegra_gps.c
6
5571
/* * arch/arm/mach-tegra/tegra_gps.c * * GPS device using NVIDIA Tegra ODM kit * board_nvodm.c */ #include <linux/kernel.h> #include <linux/device.h> #include <linux/platform_device.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/gpio.h> #include <linux/wakelock.h> // Added for GPIO control. #include <nvodm_services.h> #include "nvodm_query_discovery.h" //NvOdmPeripheralConnectivity *pConnectivity = NULL; #include "nvos.h" #define GPS_GUID NV_ODM_GUID('N','V','O','D','M','G','P','S') typedef struct GPSDeviceRec { NvOdmServicesGpioHandle hGpio; NvOdmGpioPinHandle s_hResetGPSGpioPin; NvOdmGpioPinHandle s_hStandbyGPSGpioPin; NvOdmGpioPinHandle s_hExtLNAGPSGpioPin; NvU32 pin[3], port[3]; struct wake_lock gps_wakelock; } GPS_Device; static GPS_Device s_hGPSHandle; static ssize_t gps_gpio_reset_show(struct device *dev, struct device_attribute *attr, char *buf) { /* int pinValue = gpio_get_value(GPS_GPIO_RESET); */ NvU32 pinValue; NvOdmGpioGetState(s_hGPSHandle.hGpio, s_hGPSHandle.s_hResetGPSGpioPin, &pinValue); printk(KERN_DEBUG "gps_gpio_reset_show\n"); return sprintf(buf, "%d\n", pinValue); } static ssize_t gps_gpio_reset_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { int value; printk(KERN_DEBUG "gps_gpio_reset_store\n"); sscanf(buf, "%d", &value); /* gpio_set_value(GPS_GPIO_RESET, value); */ NvOdmGpioSetState(s_hGPSHandle.hGpio, s_hGPSHandle.s_hResetGPSGpioPin, value); return size; } static ssize_t gps_gpio_poweron_show(struct device *dev, struct device_attribute *attr, char *buf) { /* int pinValue = gpio_get_value(GPS_GPIO_POWERON); */ NvU32 pinValue; NvOdmGpioGetState(s_hGPSHandle.hGpio, s_hGPSHandle.s_hStandbyGPSGpioPin, &pinValue); printk(KERN_DEBUG "gps_gpio_poweron_show\n"); NvOdmGpioGetState(s_hGPSHandle.hGpio, s_hGPSHandle.s_hExtLNAGPSGpioPin, &pinValue); printk(KERN_DEBUG "gps_gpio_ExtLNA_show\n"); return sprintf(buf, "%d\n", pinValue); } static ssize_t gps_gpio_poweron_store( struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { int value; printk(KERN_DEBUG "gps_gpio_poweron_store\n"); printk(KERN_DEBUG "gps_gpio_ExtLNA_store\n"); sscanf(buf, "%d", &value); /* gpio_set_value(GPS_GPIO_POWERON, value); */ NvOdmGpioSetState(s_hGPSHandle.hGpio, s_hGPSHandle.s_hStandbyGPSGpioPin, value); NvOdmGpioSetState(s_hGPSHandle.hGpio, s_hGPSHandle.s_hExtLNAGPSGpioPin, value); if (value) { wake_lock(&s_hGPSHandle.gps_wakelock); } else { wake_unlock(&s_hGPSHandle.gps_wakelock); } return size; } static DEVICE_ATTR(reset, S_IRUGO | S_IWUSR, gps_gpio_reset_show, gps_gpio_reset_store); static DEVICE_ATTR(poweron, S_IRUGO | S_IWUSR, gps_gpio_poweron_show, gps_gpio_poweron_store); static int tegra_gps_gpio_probe(struct platform_device *pdev) { int retval; const NvOdmPeripheralConnectivity *pConnectivity = NULL; int i,j; printk(KERN_DEBUG "tegra_gps_reset_probe\n"); pConnectivity = NvOdmPeripheralGetGuid(GPS_GUID); if (!pConnectivity) { printk(KERN_ERR "pConnectivity fail.\n"); return -ENODEV; } s_hGPSHandle.hGpio = NvOdmGpioOpen(); for (i = 0,j=0 ; i < pConnectivity->NumAddress; i++) { //only GPIO if (pConnectivity->AddressList[i].Interface ==NvOdmIoModule_Gpio ) { s_hGPSHandle.port[j] = pConnectivity->AddressList[i].Instance; s_hGPSHandle.pin[j] = pConnectivity->AddressList[i].Address; printk("port = %d pin = %d\n",s_hGPSHandle.port[j],s_hGPSHandle.pin[j]); j++; } } s_hGPSHandle.s_hResetGPSGpioPin = NvOdmGpioAcquirePinHandle(s_hGPSHandle.hGpio, s_hGPSHandle.port[0], s_hGPSHandle.pin[0]); 
s_hGPSHandle.s_hStandbyGPSGpioPin = NvOdmGpioAcquirePinHandle(s_hGPSHandle.hGpio, s_hGPSHandle.port[1], s_hGPSHandle.pin[1]); s_hGPSHandle.s_hExtLNAGPSGpioPin = NvOdmGpioAcquirePinHandle(s_hGPSHandle.hGpio, s_hGPSHandle.port[2], s_hGPSHandle.pin[2]); NvOdmGpioConfig(s_hGPSHandle.hGpio, s_hGPSHandle.s_hResetGPSGpioPin, NvOdmGpioPinMode_Output); NvOdmGpioConfig(s_hGPSHandle.hGpio, s_hGPSHandle.s_hStandbyGPSGpioPin, NvOdmGpioPinMode_Output); NvOdmGpioConfig(s_hGPSHandle.hGpio, s_hGPSHandle.s_hExtLNAGPSGpioPin, NvOdmGpioPinMode_Output); retval = device_create_file(&pdev->dev, &dev_attr_reset); if (retval) goto error; retval = device_create_file(&pdev->dev, &dev_attr_poweron); if (retval) goto error; wake_lock_init(&s_hGPSHandle.gps_wakelock, WAKE_LOCK_SUSPEND, "gps_wakelock"); return retval; error: printk(KERN_ERR "tegra_gps_reset_probe -Error\n"); device_remove_file(&pdev->dev, &dev_attr_reset); device_remove_file(&pdev->dev, &dev_attr_poweron); return 0; } static int tegra_gps_gpio_remove(struct platform_device *pdev) { printk(KERN_DEBUG "tegra_gps_reset_remove\n"); device_remove_file(&pdev->dev, &dev_attr_reset); device_remove_file(&pdev->dev, &dev_attr_poweron); return 0; } // platform_driver static struct platform_driver tegra_gps_gpio_driver = { .probe = tegra_gps_gpio_probe, .remove = tegra_gps_gpio_remove, .driver = { .name = "tegra_gps_gpio" }, }; static int __devinit gps_gpio_init(void) { printk(KERN_DEBUG "gps_gpio_init\n"); return platform_driver_register(&tegra_gps_gpio_driver); } static void __exit gps_gpio_exit(void) { printk(KERN_DEBUG "gps_gpio_exit\n"); platform_driver_unregister(&tegra_gps_gpio_driver); } module_init(gps_gpio_init); module_exit(gps_gpio_exit); MODULE_DESCRIPTION("heaven GPS Driver");
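/*
 * Usage sketch (an illustrative addition, not part of the original file):
 * userspace normally drives this device through the sysfs attributes
 * created in probe, e.g. "echo 1 > .../tegra_gps_gpio/poweron" (the exact
 * sysfs path depends on how the board file registers the platform device).
 * A hypothetical in-kernel helper mirroring the poweron store() path could
 * reuse the cached handles like this:
 */
static void __maybe_unused gps_gpio_set_power(int on)
{
	NvU32 value = on ? 1 : 0;

	/* Drive the standby and external-LNA GPIOs together, as store() does. */
	NvOdmGpioSetState(s_hGPSHandle.hGpio,
			s_hGPSHandle.s_hStandbyGPSGpioPin, value);
	NvOdmGpioSetState(s_hGPSHandle.hGpio,
			s_hGPSHandle.s_hExtLNAGPSGpioPin, value);

	/* Hold the suspend wakelock only while the GPS is powered. */
	if (on)
		wake_lock(&s_hGPSHandle.gps_wakelock);
	else
		wake_unlock(&s_hGPSHandle.gps_wakelock);
}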
gpl-2.0
ellert/doxygen
testing/070_ref_variadic_template.cpp
6
2752
// objective: test \ref command with a variadic template function
// check: 070__ref__variadic__template_8cpp.xml

/** \file
 *
 * @attention
 * @parblock
 * At the time of writing, the part between \<\> is totally ignored:
 * %func<Args...>(Args... args) is interpreted as %func(Args... args).
 *
 * Beware that a function parameter with either a \& or \* operator,
 * e.g. 'const Args&... args', requires \\link and \\ref to specify
 * such parameter as verbatim, i.e. 'const Args&... args'. At the
 * time of writing, the form %func(const Args&...) will fail, unless
 * the function parameter was declared just as 'const Args&...'.
 * @endparblock
 *
 * \ref Test::func(int,Args...)const "variadic template method"
 *
 * References to the variadic template function overloads:
 * @li \ref func(int,Args&... args) "First overload"
 * @li \ref func(int,Args&&... args) "Second overload"
 * @li \ref func(int,const Args&... args) "Third overload"
 * @li \ref func(int,const Args&&... args) "Fourth overload"
 * @li \ref func(int,Args*... args) "Fifth overload"
 * @li \ref func(int,Args**... args) "Sixth overload"
 * @li \ref func(int,const Args*... args) "Seventh overload"
 * @li \ref func(int,const Args**... args) "Eighth overload"
 * @li \ref func(int,Args...) "Ninth overload"
 *
 * The following are interpreted the same:
 * @li \ref func(int,const Args&... args) "without template argument"
 * @li \ref func<Args...>(int,const Args&... args) "with template argument"
 *
 * See the \ref Test "test" class.
 */

/** A function */
void func(int p);

/** A variadic template function overload */
template <typename... Args>
void func(int p, Args&... args);

/** A variadic template function overload */
template <typename... Args>
void func(int p, Args&&... args);

/** A variadic template function overload */
template <typename... Args>
void func(int p, const Args&... args);

/** A variadic template function overload */
template <typename... Args>
void func(int p, const Args&&... args);

/** A variadic template function overload */
template <typename... Args>
void func(int p, Args*... args);

/** A variadic template function overload */
template <typename... Args>
void func(int p, Args**... args);

/** A variadic template function overload */
template <typename... Args>
void func(int p, const Args*... args);

/** A variadic template function overload */
template <typename... Args>
void func(int p, const Args**... args);

/** A variadic template function overload */
template <typename... Args>
void func(int p, Args... args);

/** A test */
class Test {
public:
	/** A variadic template method */
	template <typename... Args>
	void func(int p, Args... args) const;
};
gpl-2.0
Vassilko/lichee
drivers/net/wireless/rtl8188eu/hal/OUTSRC/rtl8188e/HalPhyRf_8188e.c
6
114245
/****************************************************************************** * * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * ******************************************************************************/ #include "../odm_precomp.h" /*---------------------------Define Local Constant---------------------------*/ // 2010/04/25 MH Define the max tx power tracking tx agc power. #define ODM_TXPWRTRACK_MAX_IDX_88E 6 #define CALCULATE_SWINGTALBE_OFFSET(_offset, _direction, _size, _deltaThermal) \ do {\ for(_offset = 0; _offset < _size; _offset++)\ {\ if(_deltaThermal < thermalThreshold[_direction][_offset])\ {\ if(_offset != 0)\ _offset--;\ break;\ }\ } \ if(_offset >= _size)\ _offset = _size-1;\ } while(0) //3============================================================ //3 Tx Power Tracking //3============================================================ void setIqkMatrix( PDM_ODM_T pDM_Odm, u1Byte OFDM_index, u1Byte RFPath, s4Byte IqkResult_X, s4Byte IqkResult_Y ) { s4Byte ele_A=0, ele_D, ele_C=0, TempCCk, value32; //printk("%s==> OFDM_index:%d \n",__FUNCTION__,OFDM_index); //if(OFDM_index> OFDM_TABLE_SIZE_92D) //{ //printk("%s==> OFDM_index> 43\n",__FUNCTION__); //} ele_D = (OFDMSwingTable[OFDM_index] & 0xFFC00000)>>22; //new element A = element D x X if((IqkResult_X != 0) && (*(pDM_Odm->pBandType) == ODM_BAND_2_4G)) { if ((IqkResult_X & 0x00000200) != 0) //consider minus IqkResult_X = IqkResult_X | 0xFFFFFC00; ele_A = ((IqkResult_X * ele_D)>>8)&0x000003FF; //new element C = element D x Y if ((IqkResult_Y & 0x00000200) != 0) IqkResult_Y = IqkResult_Y | 0xFFFFFC00; ele_C = ((IqkResult_Y * ele_D)>>8)&0x000003FF; if (RFPath == RF_PATH_A) switch (RFPath) { case RF_PATH_A: //wirte new elements A, C, D to regC80 and regC94, element B is always 0 value32 = (ele_D<<22)|((ele_C&0x3F)<<16)|ele_A; ODM_SetBBReg(pDM_Odm, rOFDM0_XATxIQImbalance, bMaskDWord, value32); value32 = (ele_C&0x000003C0)>>6; ODM_SetBBReg(pDM_Odm, rOFDM0_XCTxAFE, bMaskH4Bits, value32); value32 = ((IqkResult_X * ele_D)>>7)&0x01; ODM_SetBBReg(pDM_Odm, rOFDM0_ECCAThreshold, BIT24, value32); break; case RF_PATH_B: //wirte new elements A, C, D to regC88 and regC9C, element B is always 0 value32=(ele_D<<22)|((ele_C&0x3F)<<16) |ele_A; ODM_SetBBReg(pDM_Odm, rOFDM0_XBTxIQImbalance, bMaskDWord, value32); value32 = (ele_C&0x000003C0)>>6; ODM_SetBBReg(pDM_Odm, rOFDM0_XDTxAFE, bMaskH4Bits, value32); value32 = ((IqkResult_X * ele_D)>>7)&0x01; ODM_SetBBReg(pDM_Odm, rOFDM0_ECCAThreshold, BIT28, value32); break; default: break; } } else { switch (RFPath) { case RF_PATH_A: ODM_SetBBReg(pDM_Odm, rOFDM0_XATxIQImbalance, bMaskDWord, OFDMSwingTable[OFDM_index]); ODM_SetBBReg(pDM_Odm, rOFDM0_XCTxAFE, bMaskH4Bits, 0x00); ODM_SetBBReg(pDM_Odm, rOFDM0_ECCAThreshold, BIT24, 0x00); break; case RF_PATH_B: ODM_SetBBReg(pDM_Odm, rOFDM0_XBTxIQImbalance, bMaskDWord, OFDMSwingTable[OFDM_index]); 
ODM_SetBBReg(pDM_Odm, rOFDM0_XDTxAFE, bMaskH4Bits, 0x00); ODM_SetBBReg(pDM_Odm, rOFDM0_ECCAThreshold, BIT28, 0x00); break; default: break; } } ODM_RT_TRACE(pDM_Odm,ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD, ("TxPwrTracking path B: X = 0x%x, Y = 0x%x ele_A = 0x%x ele_C = 0x%x ele_D = 0x%x 0xeb4 = 0x%x 0xebc = 0x%x\n", (u4Byte)IqkResult_X, (u4Byte)IqkResult_Y, (u4Byte)ele_A, (u4Byte)ele_C, (u4Byte)ele_D, (u4Byte)IqkResult_X, (u4Byte)IqkResult_Y)); } void doIQK( PDM_ODM_T pDM_Odm, u1Byte DeltaThermalIndex, u1Byte ThermalValue, u1Byte Threshold ) { #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) PADAPTER Adapter = pDM_Odm->Adapter; HAL_DATA_TYPE *pHalData = GET_HAL_DATA(Adapter); #endif ODM_ResetIQKResult(pDM_Odm); #if(DM_ODM_SUPPORT_TYPE & ODM_MP) #if (DEV_BUS_TYPE == RT_PCI_INTERFACE) #if USE_WORKITEM PlatformAcquireMutex(&pHalData->mxChnlBwControl); #else PlatformAcquireSpinLock(Adapter, RT_CHANNEL_AND_BANDWIDTH_SPINLOCK); #endif #elif((DEV_BUS_TYPE == RT_USB_INTERFACE) || (DEV_BUS_TYPE == RT_SDIO_INTERFACE)) PlatformAcquireMutex(&pHalData->mxChnlBwControl); #endif #endif pDM_Odm->RFCalibrateInfo.ThermalValue_IQK= ThermalValue; PHY_IQCalibrate_8188E(Adapter, FALSE); #if(DM_ODM_SUPPORT_TYPE & ODM_MP) #if (DEV_BUS_TYPE == RT_PCI_INTERFACE) #if USE_WORKITEM PlatformReleaseMutex(&pHalData->mxChnlBwControl); #else PlatformReleaseSpinLock(Adapter, RT_CHANNEL_AND_BANDWIDTH_SPINLOCK); #endif #elif((DEV_BUS_TYPE == RT_USB_INTERFACE) || (DEV_BUS_TYPE == RT_SDIO_INTERFACE)) PlatformReleaseMutex(&pHalData->mxChnlBwControl); #endif #endif } /*----------------------------------------------------------------------------- * Function: ODM_TxPwrTrackAdjust88E() * * Overview: 88E we can not write 0xc80/c94/c4c/ 0xa2x. Instead of write TX agc. * No matter OFDM & CCK use the same method. * * Input: NONE * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 04/23/2012 MHC Create Version 0. * 04/23/2012 MHC Adjust TX agc directly not throughput BB digital. * *---------------------------------------------------------------------------*/ VOID ODM_TxPwrTrackAdjust88E( PDM_ODM_T pDM_Odm, u1Byte Type, // 0 = OFDM, 1 = CCK pu1Byte pDirection, // 1 = +(increase) 2 = -(decrease) pu4Byte pOutWriteVal // Tx tracking CCK/OFDM BB swing index adjust ) { u1Byte pwr_value = 0; // // Tx power tracking BB swing table. // The base index = 12. +((12-n)/2)dB 13~?? = decrease tx pwr by -((n-12)/2)dB // if (Type == 0) // For OFDM afjust { ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD, ("BbSwingIdxOfdm = %d BbSwingFlagOfdm=%d\n", pDM_Odm->BbSwingIdxOfdm, pDM_Odm->BbSwingFlagOfdm)); //printk("BbSwingIdxOfdm = %d BbSwingFlagOfdm=%d\n", pDM_Odm->BbSwingIdxOfdm, pDM_Odm->BbSwingFlagOfdm); if (pDM_Odm->BbSwingIdxOfdm <= pDM_Odm->BbSwingIdxOfdmBase) { *pDirection = 1; pwr_value = (pDM_Odm->BbSwingIdxOfdmBase - pDM_Odm->BbSwingIdxOfdm); } else { *pDirection = 2; pwr_value = (pDM_Odm->BbSwingIdxOfdm - pDM_Odm->BbSwingIdxOfdmBase); } ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD, ("BbSwingIdxOfdm = %d BbSwingIdxOfdmBase=%d\n", pDM_Odm->BbSwingIdxOfdm, pDM_Odm->BbSwingIdxOfdmBase)); //printk("BbSwingIdxOfdm = %d BbSwingIdxOfdmBase=%d pwr_value=%d\n", pDM_Odm->BbSwingIdxOfdm, pDM_Odm->BbSwingIdxOfdmBase,pwr_value); } else if (Type == 1) // For CCK adjust. 
{ ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD, ("pDM_Odm->BbSwingIdxCck = %d pDM_Odm->BbSwingIdxCckBase = %d\n", pDM_Odm->BbSwingIdxCck, pDM_Odm->BbSwingIdxCckBase)); //printk("pDM_Odm->BbSwingIdxCck = %d pDM_Odm->BbSwingIdxCckBase = %d\n", pDM_Odm->BbSwingIdxCck, pDM_Odm->BbSwingIdxCckBase); if (pDM_Odm->BbSwingIdxCck <= pDM_Odm->BbSwingIdxCckBase) { *pDirection = 1; pwr_value = (pDM_Odm->BbSwingIdxCckBase - pDM_Odm->BbSwingIdxCck); } else { *pDirection = 2; pwr_value = (pDM_Odm->BbSwingIdxCck - pDM_Odm->BbSwingIdxCckBase); } //printk("pDM_Odm->BbSwingIdxCck = %d pDM_Odm->BbSwingIdxCckBase = %d pwr_value:%d\n", pDM_Odm->BbSwingIdxCck, pDM_Odm->BbSwingIdxCckBase,pwr_value); } // // 2012/04/25 MH According to Ed/Luke.Lees estimate for EVM the max tx power tracking // need to be less than 6 power index for 88E. // if (pwr_value >= ODM_TXPWRTRACK_MAX_IDX_88E && *pDirection == 1) pwr_value = ODM_TXPWRTRACK_MAX_IDX_88E; *pOutWriteVal = pwr_value | (pwr_value<<8) | (pwr_value<<16) | (pwr_value<<24); } // ODM_TxPwrTrackAdjust88E /*----------------------------------------------------------------------------- * Function: odm_TxPwrTrackSetPwr88E() * * Overview: 88E change all channel tx power accordign to flag. * OFDM & CCK are all different. * * Input: NONE * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 04/23/2012 MHC Create Version 0. * *---------------------------------------------------------------------------*/ VOID odm_TxPwrTrackSetPwr88E( PDM_ODM_T pDM_Odm, PWRTRACK_METHOD Method, u1Byte RFPath, u1Byte ChannelMappedIndex ) { if (Method == TXAGC) { u1Byte cckPowerLevel[MAX_TX_COUNT], ofdmPowerLevel[MAX_TX_COUNT]; u1Byte BW20PowerLevel[MAX_TX_COUNT], BW40PowerLevel[MAX_TX_COUNT]; u1Byte rf = 0; u4Byte pwr = 0, TxAGC = 0; PADAPTER Adapter = pDM_Odm->Adapter; //printk("odm_TxPwrTrackSetPwr88E CH=%d, modify TXAGC \n", *(pDM_Odm->pChannel)); ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD, ("odm_TxPwrTrackSetPwr88E CH=%d\n", *(pDM_Odm->pChannel))); #if (DM_ODM_SUPPORT_TYPE & (ODM_MP|ODM_CE )) //#if (MP_DRIVER != 1) if ( *(pDM_Odm->mp_mode) != 1){ PHY_SetTxPowerLevel8188E(pDM_Odm->Adapter, *pDM_Odm->pChannel); } else //#else { pwr = PHY_QueryBBReg(Adapter, rTxAGC_A_Rate18_06, 0xFF); pwr += (pDM_Odm->BbSwingIdxCck - pDM_Odm->BbSwingIdxCckBase); PHY_SetBBReg(Adapter, rTxAGC_A_CCK1_Mcs32, bMaskByte1, pwr); TxAGC = (pwr<<16)|(pwr<<8)|(pwr); PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, 0xffffff00, TxAGC); DBG_871X("ODM_TxPwrTrackSetPwr88E: CCK Tx-rf(A) Power = 0x%x\n", TxAGC); pwr = PHY_QueryBBReg(Adapter, rTxAGC_A_Rate18_06, 0xFF); pwr += (pDM_Odm->BbSwingIdxOfdm - pDM_Odm->BbSwingIdxOfdmBase); TxAGC |= ((pwr<<24)|(pwr<<16)|(pwr<<8)|pwr); PHY_SetBBReg(Adapter, rTxAGC_A_Rate18_06, bMaskDWord, TxAGC); PHY_SetBBReg(Adapter, rTxAGC_A_Rate54_24, bMaskDWord, TxAGC); PHY_SetBBReg(Adapter, rTxAGC_A_Mcs03_Mcs00, bMaskDWord, TxAGC); PHY_SetBBReg(Adapter, rTxAGC_A_Mcs07_Mcs04, bMaskDWord, TxAGC); PHY_SetBBReg(Adapter, rTxAGC_A_Mcs11_Mcs08, bMaskDWord, TxAGC); PHY_SetBBReg(Adapter, rTxAGC_A_Mcs15_Mcs12, bMaskDWord, TxAGC); DBG_871X("ODM_TxPwrTrackSetPwr88E: OFDM Tx-rf(A) Power = 0x%x\n", TxAGC); } //#endif #endif #if (DM_ODM_SUPPORT_TYPE & ODM_AP) PHY_RF6052SetCCKTxPower(pDM_Odm->priv, *(pDM_Odm->pChannel)); PHY_RF6052SetOFDMTxPower(pDM_Odm->priv, *(pDM_Odm->pChannel)); #endif } else if (Method == BBSWING) { //printk("odm_TxPwrTrackSetPwr88E CH=%d, modify BBSWING BbSwingIdxCck:%d \n", *(pDM_Odm->pChannel),pDM_Odm->BbSwingIdxCck); // Adjust BB swing by CCK 
filter coefficient //if(!pDM_Odm->RFCalibrateInfo.bCCKinCH14) if(* (pDM_Odm->pChannel) < 14) { ODM_Write1Byte(pDM_Odm, 0xa22, CCKSwingTable_Ch1_Ch13[pDM_Odm->BbSwingIdxCck][0]); ODM_Write1Byte(pDM_Odm, 0xa23, CCKSwingTable_Ch1_Ch13[pDM_Odm->BbSwingIdxCck][1]); ODM_Write1Byte(pDM_Odm, 0xa24, CCKSwingTable_Ch1_Ch13[pDM_Odm->BbSwingIdxCck][2]); ODM_Write1Byte(pDM_Odm, 0xa25, CCKSwingTable_Ch1_Ch13[pDM_Odm->BbSwingIdxCck][3]); ODM_Write1Byte(pDM_Odm, 0xa26, CCKSwingTable_Ch1_Ch13[pDM_Odm->BbSwingIdxCck][4]); ODM_Write1Byte(pDM_Odm, 0xa27, CCKSwingTable_Ch1_Ch13[pDM_Odm->BbSwingIdxCck][5]); ODM_Write1Byte(pDM_Odm, 0xa28, CCKSwingTable_Ch1_Ch13[pDM_Odm->BbSwingIdxCck][6]); ODM_Write1Byte(pDM_Odm, 0xa29, CCKSwingTable_Ch1_Ch13[pDM_Odm->BbSwingIdxCck][7]); } else { ODM_Write1Byte(pDM_Odm, 0xa22, CCKSwingTable_Ch14[pDM_Odm->BbSwingIdxCck][0]); ODM_Write1Byte(pDM_Odm, 0xa23, CCKSwingTable_Ch14[pDM_Odm->BbSwingIdxCck][1]); ODM_Write1Byte(pDM_Odm, 0xa24, CCKSwingTable_Ch14[pDM_Odm->BbSwingIdxCck][2]); ODM_Write1Byte(pDM_Odm, 0xa25, CCKSwingTable_Ch14[pDM_Odm->BbSwingIdxCck][3]); ODM_Write1Byte(pDM_Odm, 0xa26, CCKSwingTable_Ch14[pDM_Odm->BbSwingIdxCck][4]); ODM_Write1Byte(pDM_Odm, 0xa27, CCKSwingTable_Ch14[pDM_Odm->BbSwingIdxCck][5]); ODM_Write1Byte(pDM_Odm, 0xa28, CCKSwingTable_Ch14[pDM_Odm->BbSwingIdxCck][6]); ODM_Write1Byte(pDM_Odm, 0xa29, CCKSwingTable_Ch14[pDM_Odm->BbSwingIdxCck][7]); } // Adjust BB swing by OFDM IQ matrix if (RFPath == RF_PATH_A) { setIqkMatrix(pDM_Odm, pDM_Odm->BbSwingIdxOfdm, RF_PATH_A, pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[ChannelMappedIndex].Value[0][0], pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[ChannelMappedIndex].Value[0][1]); } /* else if (RFPath == RF_PATH_B) { setIqkMatrix(pDM_Odm, pDM_Odm->BbSwingIdxOfdm[RF_PATH_B], RF_PATH_B, pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[ChannelMappedIndex].Value[0][4], pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[ChannelMappedIndex].Value[0][5]); }*/ } else { return; } } // odm_TxPwrTrackSetPwr88E VOID odm_TXPowerTrackingCallback_ThermalMeter_8188E( #if (DM_ODM_SUPPORT_TYPE & ODM_AP) IN PDM_ODM_T pDM_Odm #else IN PADAPTER Adapter #endif ) { #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) HAL_DATA_TYPE *pHalData = GET_HAL_DATA(Adapter); //PMGNT_INFO pMgntInfo = &Adapter->MgntInfo; #endif u1Byte ThermalValue = 0, delta, delta_LCK, delta_IQK, offset; u1Byte ThermalValue_AVG_count = 0; u4Byte ThermalValue_AVG = 0; s4Byte ele_A=0, ele_D, TempCCk, X, value32; s4Byte Y, ele_C=0; s1Byte OFDM_index[2], CCK_index=0, OFDM_index_old[2]={0,0}, CCK_index_old=0, index; s1Byte deltaPowerIndex = 0; u4Byte i = 0, j = 0; BOOLEAN is2T = FALSE; BOOLEAN bInteralPA = FALSE; u1Byte OFDM_min_index = 6, rf = (is2T) ? 2 : 1; //OFDM BB Swing should be less than +3.0dB, which is required by Arthur u1Byte Indexforchannel = 0;/*GetRightChnlPlaceforIQK(pHalData->CurrentChannel)*/ enum _POWER_DEC_INC { POWER_DEC, POWER_INC }; #if (DM_ODM_SUPPORT_TYPE == ODM_CE) PDM_ODM_T pDM_Odm = &pHalData->odmpriv; struct dm_priv *pdmpriv = &pHalData->dmpriv; #endif #if (DM_ODM_SUPPORT_TYPE == ODM_MP) PDM_ODM_T pDM_Odm = &pHalData->DM_OutSrc; #endif //4 0.1 The following TWO tables decide the final index of OFDM/CCK swing table. 
s1Byte deltaSwingTableIdx[2][index_mapping_NUM_88E] = { // {{Power decreasing(lower temperature)}, {Power increasing(higher temperature)}} {0,0,2,3,4,4,5,6,7,7,8,9,10,10,11}, {0,0,-1,-2,-3,-4,-4,-4,-4,-5,-7,-8,-9,-9,-10} }; u1Byte thermalThreshold[2][index_mapping_NUM_88E]={ // {{Power decreasing(lower temperature)}, {Power increasing(higher temperature)}} {0,2,4,6,8,10,12,14,16,18,20,22,24,26,27}, {0,2,4,6,8,10,12,14,16,18,20,22,25,25,25} }; //4 0.1 Initilization ( 7 steps in total ) pDM_Odm->RFCalibrateInfo.TXPowerTrackingCallbackCnt++; //cosa add for debug pDM_Odm->RFCalibrateInfo.bTXPowerTrackingInit = TRUE; #if (MP_DRIVER == 1) #if (DM_ODM_SUPPORT_TYPE == ODM_MP) pDM_Odm->RFCalibrateInfo.TxPowerTrackControl = pHalData->TxPowerTrackControl; // <Kordan> We should keep updating the control variable according to HalData. #endif // <Kordan> RFCalibrateInfo.RegA24 will be initialized when ODM HW configuring, but MP configures with para files. pDM_Odm->RFCalibrateInfo.RegA24 = 0x090e1317; #endif ODM_RT_TRACE(pDM_Odm,ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,("===>odm_TXPowerTrackingCallback_ThermalMeter_8188E, pDM_Odm->BbSwingIdxCckBase: %d, pDM_Odm->BbSwingIdxOfdmBase: %d \n", pDM_Odm->BbSwingIdxCckBase, pDM_Odm->BbSwingIdxOfdmBase)); ThermalValue = (u1Byte)ODM_GetRFReg(pDM_Odm, RF_PATH_A, RF_T_METER_88E, 0xfc00); //0x42: RF Reg[15:10] 88E if( ! ThermalValue || ! pDM_Odm->RFCalibrateInfo.TxPowerTrackControl) return; //4 3. Initialize ThermalValues of RFCalibrateInfo if( ! pDM_Odm->RFCalibrateInfo.ThermalValue) { pDM_Odm->RFCalibrateInfo.ThermalValue_LCK = ThermalValue; pDM_Odm->RFCalibrateInfo.ThermalValue_IQK = ThermalValue; } if(pDM_Odm->RFCalibrateInfo.bReloadtxpowerindex) { ODM_RT_TRACE(pDM_Odm,ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,("reload ofdm index for band switch\n")); } //4 4. Calculate average thermal meter pDM_Odm->RFCalibrateInfo.ThermalValue_AVG[pDM_Odm->RFCalibrateInfo.ThermalValue_AVG_index] = ThermalValue; pDM_Odm->RFCalibrateInfo.ThermalValue_AVG_index++; if(pDM_Odm->RFCalibrateInfo.ThermalValue_AVG_index == AVG_THERMAL_NUM_88E) pDM_Odm->RFCalibrateInfo.ThermalValue_AVG_index = 0; for(i = 0; i < AVG_THERMAL_NUM_88E; i++) { if(pDM_Odm->RFCalibrateInfo.ThermalValue_AVG[i]) { ThermalValue_AVG += pDM_Odm->RFCalibrateInfo.ThermalValue_AVG[i]; ThermalValue_AVG_count++; } } if(ThermalValue_AVG_count) { ThermalValue = (u1Byte)(ThermalValue_AVG / ThermalValue_AVG_count); ODM_RT_TRACE(pDM_Odm,ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,("AVG Thermal Meter = 0x%x \n", ThermalValue)); } //4 5. Calculate delta, delta_LCK, delta_IQK. delta = (ThermalValue > pDM_Odm->RFCalibrateInfo.ThermalValue)?(ThermalValue - pDM_Odm->RFCalibrateInfo.ThermalValue):(pDM_Odm->RFCalibrateInfo.ThermalValue - ThermalValue); delta_LCK = (ThermalValue > pDM_Odm->RFCalibrateInfo.ThermalValue_LCK)?(ThermalValue - pDM_Odm->RFCalibrateInfo.ThermalValue_LCK):(pDM_Odm->RFCalibrateInfo.ThermalValue_LCK - ThermalValue); delta_IQK = (ThermalValue > pDM_Odm->RFCalibrateInfo.ThermalValue_IQK)?(ThermalValue - pDM_Odm->RFCalibrateInfo.ThermalValue_IQK):(pDM_Odm->RFCalibrateInfo.ThermalValue_IQK - ThermalValue); //4 6. If necessary, do LCK. //if((delta_LCK > pHalData->Delta_LCK) && (pHalData->Delta_LCK != 0)) if ((delta_LCK >= 8)) // Delta temperature is equal to or larger than 20 centigrade. { pDM_Odm->RFCalibrateInfo.ThermalValue_LCK = ThermalValue; #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) PHY_LCCalibrate_8188E(Adapter); #else PHY_LCCalibrate_8188E(pDM_Odm); #endif } //3 7. If necessary, move the index of swing table to adjust Tx power. 
if (delta > 0 && pDM_Odm->RFCalibrateInfo.TxPowerTrackControl) { #if (DM_ODM_SUPPORT_TYPE & (ODM_MP|ODM_CE)) delta = ThermalValue > pHalData->EEPROMThermalMeter?(ThermalValue - pHalData->EEPROMThermalMeter):(pHalData->EEPROMThermalMeter - ThermalValue); #else delta = (ThermalValue > pDM_Odm->priv->pmib->dot11RFEntry.ther)?(ThermalValue - pDM_Odm->priv->pmib->dot11RFEntry.ther):(pDM_Odm->priv->pmib->dot11RFEntry.ther - ThermalValue); #endif //4 7.1 The Final Power Index = BaseIndex + PowerIndexOffset #if (DM_ODM_SUPPORT_TYPE & (ODM_MP|ODM_CE)) if(ThermalValue > pHalData->EEPROMThermalMeter) { #else if(ThermalValue > pDM_Odm->priv->pmib->dot11RFEntry.ther) { #endif CALCULATE_SWINGTALBE_OFFSET(offset, POWER_INC, index_mapping_NUM_88E, delta); pDM_Odm->RFCalibrateInfo.DeltaPowerIndexLast = pDM_Odm->RFCalibrateInfo.DeltaPowerIndex; pDM_Odm->RFCalibrateInfo.DeltaPowerIndex = -1 * deltaSwingTableIdx[POWER_INC][offset]; } else { CALCULATE_SWINGTALBE_OFFSET(offset, POWER_DEC, index_mapping_NUM_88E, delta); pDM_Odm->RFCalibrateInfo.DeltaPowerIndexLast = pDM_Odm->RFCalibrateInfo.DeltaPowerIndex; pDM_Odm->RFCalibrateInfo.DeltaPowerIndex = -1 * deltaSwingTableIdx[POWER_DEC][offset]; } if (pDM_Odm->RFCalibrateInfo.DeltaPowerIndex == pDM_Odm->RFCalibrateInfo.DeltaPowerIndexLast) pDM_Odm->RFCalibrateInfo.PowerIndexOffset = 0; else pDM_Odm->RFCalibrateInfo.PowerIndexOffset = pDM_Odm->RFCalibrateInfo.DeltaPowerIndex - pDM_Odm->RFCalibrateInfo.DeltaPowerIndexLast; for(i = 0; i < rf; i++) pDM_Odm->RFCalibrateInfo.OFDM_index[i] = pDM_Odm->BbSwingIdxOfdmBase + pDM_Odm->RFCalibrateInfo.PowerIndexOffset; pDM_Odm->RFCalibrateInfo.CCK_index = pDM_Odm->BbSwingIdxCckBase + pDM_Odm->RFCalibrateInfo.PowerIndexOffset; pDM_Odm->BbSwingIdxCck = pDM_Odm->RFCalibrateInfo.CCK_index; pDM_Odm->BbSwingIdxOfdm = pDM_Odm->RFCalibrateInfo.OFDM_index[RF_PATH_A]; ODM_RT_TRACE(pDM_Odm,ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,("The 'CCK' final index(%d) = BaseIndex(%d) + PowerIndexOffset(%d)\n", pDM_Odm->BbSwingIdxCck, pDM_Odm->BbSwingIdxCckBase, pDM_Odm->RFCalibrateInfo.PowerIndexOffset)); ODM_RT_TRACE(pDM_Odm,ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,("The 'OFDM' final index(%d) = BaseIndex(%d) + PowerIndexOffset(%d)\n", pDM_Odm->BbSwingIdxOfdm, pDM_Odm->BbSwingIdxOfdmBase, pDM_Odm->RFCalibrateInfo.PowerIndexOffset)); //4 7.1 Handle boundary conditions of index. 
for(i = 0; i < rf; i++) { if(pDM_Odm->RFCalibrateInfo.OFDM_index[i] > OFDM_TABLE_SIZE_92D-1) { pDM_Odm->RFCalibrateInfo.OFDM_index[i] = OFDM_TABLE_SIZE_92D-1; } else if (pDM_Odm->RFCalibrateInfo.OFDM_index[i] < OFDM_min_index) { pDM_Odm->RFCalibrateInfo.OFDM_index[i] = OFDM_min_index; } } if(pDM_Odm->RFCalibrateInfo.CCK_index > CCK_TABLE_SIZE-1) pDM_Odm->RFCalibrateInfo.CCK_index = CCK_TABLE_SIZE-1; else if (pDM_Odm->RFCalibrateInfo.CCK_index < 0) pDM_Odm->RFCalibrateInfo.CCK_index = 0; } else { ODM_RT_TRACE(pDM_Odm,ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD, ("The thermal meter is unchanged or TxPowerTracking OFF: ThermalValue: %d , pDM_Odm->RFCalibrateInfo.ThermalValue: %d)\n", ThermalValue, pDM_Odm->RFCalibrateInfo.ThermalValue)); pDM_Odm->RFCalibrateInfo.PowerIndexOffset = 0; } ODM_RT_TRACE(pDM_Odm,ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD, ("TxPowerTracking: [CCK] Swing Current Index: %d, Swing Base Index: %d\n", pDM_Odm->RFCalibrateInfo.CCK_index, pDM_Odm->BbSwingIdxCckBase)); ODM_RT_TRACE(pDM_Odm,ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD, ("TxPowerTracking: [OFDM] Swing Current Index: %d, Swing Base Index: %d\n", pDM_Odm->RFCalibrateInfo.OFDM_index[RF_PATH_A], pDM_Odm->BbSwingIdxOfdmBase)); if (pDM_Odm->RFCalibrateInfo.PowerIndexOffset != 0 && pDM_Odm->RFCalibrateInfo.TxPowerTrackControl) { //4 7.2 Configure the Swing Table to adjust Tx Power. pDM_Odm->RFCalibrateInfo.bTxPowerChanged = TRUE; // Always TRUE after Tx Power is adjusted by power tracking. // // 2012/04/23 MH According to Luke's suggestion, we can not write BB digital // to increase TX power. Otherwise, EVM will be bad. // // 2012/04/25 MH Add for tx power tracking to set tx power in tx agc for 88E. if (ThermalValue > pDM_Odm->RFCalibrateInfo.ThermalValue) { ODM_RT_TRACE(pDM_Odm,ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD, ("Temperature Increasing: delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n", pDM_Odm->RFCalibrateInfo.PowerIndexOffset, delta, ThermalValue, pHalData->EEPROMThermalMeter, pDM_Odm->RFCalibrateInfo.ThermalValue)); } else if (ThermalValue < pDM_Odm->RFCalibrateInfo.ThermalValue)// Low temperature { ODM_RT_TRACE(pDM_Odm,ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD, ("Temperature Decreasing: delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n", pDM_Odm->RFCalibrateInfo.PowerIndexOffset, delta, ThermalValue, pHalData->EEPROMThermalMeter, pDM_Odm->RFCalibrateInfo.ThermalValue)); } if (ThermalValue > pHalData->EEPROMThermalMeter) { ODM_RT_TRACE(pDM_Odm,ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,("Temperature(%d) hugher than PG value(%d), increases the power by TxAGC\n", ThermalValue, pHalData->EEPROMThermalMeter)); odm_TxPwrTrackSetPwr88E(pDM_Odm, TXAGC, 0, 0); } else { ODM_RT_TRACE(pDM_Odm,ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,("Temperature(%d) lower than PG value(%d), increases the power by TxAGC\n", ThermalValue, pHalData->EEPROMThermalMeter)); odm_TxPwrTrackSetPwr88E(pDM_Odm, BBSWING, RF_PATH_A, Indexforchannel); //if(is2T) // odm_TxPwrTrackSetPwr88E(pDM_Odm, BBSWING, RF_PATH_B, Indexforchannel); } pDM_Odm->BbSwingIdxCckBase = pDM_Odm->BbSwingIdxCck; pDM_Odm->BbSwingIdxOfdmBase = pDM_Odm->BbSwingIdxOfdm; pDM_Odm->RFCalibrateInfo.ThermalValue = ThermalValue; } #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) // if((delta_IQK > pHalData->Delta_IQK) && (pHalData->Delta_IQK != 0)) if ((delta_IQK >= 8)){ // Delta temperature is equal to or larger than 20 centigrade. 
//printk("delta_IQK(%d) >=8 do_IQK\n",delta_IQK); doIQK(pDM_Odm, delta_IQK, ThermalValue, 8); } #endif ODM_RT_TRACE(pDM_Odm,ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,("<===dm_TXPowerTrackingCallback_ThermalMeter_8188E\n")); pDM_Odm->RFCalibrateInfo.TXPowercount = 0; } //1 7. IQK #define MAX_TOLERANCE 5 #define IQK_DELAY_TIME 1 //ms u1Byte //bit0 = 1 => Tx OK, bit1 = 1 => Rx OK phy_PathA_IQK_8188E( #if (DM_ODM_SUPPORT_TYPE & ODM_AP) IN PDM_ODM_T pDM_Odm, #else IN PADAPTER pAdapter, #endif IN BOOLEAN configPathB ) { u4Byte regEAC, regE94, regE9C, regEA4; u1Byte result = 0x00; #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter); #if (DM_ODM_SUPPORT_TYPE == ODM_CE) PDM_ODM_T pDM_Odm = &pHalData->odmpriv; #endif #if (DM_ODM_SUPPORT_TYPE == ODM_MP) PDM_ODM_T pDM_Odm = &pHalData->DM_OutSrc; #endif #endif ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A IQK!\n")); //1 Tx IQK //path-A IQK setting ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A IQK setting!\n")); ODM_SetBBReg(pDM_Odm, rTx_IQK_Tone_A, bMaskDWord, 0x10008c1c); ODM_SetBBReg(pDM_Odm, rRx_IQK_Tone_A, bMaskDWord, 0x30008c1c); ODM_SetBBReg(pDM_Odm, rTx_IQK_PI_A, bMaskDWord, 0x8214032a); ODM_SetBBReg(pDM_Odm, rRx_IQK_PI_A, bMaskDWord, 0x28160000); //LO calibration setting ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LO calibration setting!\n")); ODM_SetBBReg(pDM_Odm, rIQK_AGC_Rsp, bMaskDWord, 0x00462911); //One shot, path A LOK & IQK ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path A LOK & IQK!\n")); ODM_SetBBReg(pDM_Odm, rIQK_AGC_Pts, bMaskDWord, 0xf9000000); ODM_SetBBReg(pDM_Odm, rIQK_AGC_Pts, bMaskDWord, 0xf8000000); // delay x ms ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Delay %d ms for One shot, path A LOK & IQK.\n", IQK_DELAY_TIME_88E)); //PlatformStallExecution(IQK_DELAY_TIME_88E*1000); ODM_delay_ms(IQK_DELAY_TIME_88E); // Check failed regEAC = ODM_GetBBReg(pDM_Odm, rRx_Power_After_IQK_A_2, bMaskDWord); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xeac = 0x%x\n", regEAC)); regE94 = ODM_GetBBReg(pDM_Odm, rTx_Power_Before_IQK_A, bMaskDWord); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe94 = 0x%x\n", regE94)); regE9C= ODM_GetBBReg(pDM_Odm, rTx_Power_After_IQK_A, bMaskDWord); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe9c = 0x%x\n", regE9C)); regEA4= ODM_GetBBReg(pDM_Odm, rRx_Power_Before_IQK_A_2, bMaskDWord); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xea4 = 0x%x\n", regEA4)); if(!(regEAC & BIT28) && (((regE94 & 0x03FF0000)>>16) != 0x142) && (((regE9C & 0x03FF0000)>>16) != 0x42) ) result |= 0x01; else //if Tx not OK, ignore Rx return result; #if 0 if(!(regEAC & BIT27) && //if Tx is OK, check whether Rx is OK (((regEA4 & 0x03FF0000)>>16) != 0x132) && (((regEAC & 0x03FF0000)>>16) != 0x36)) result |= 0x02; else RTPRINT(FINIT, INIT_IQK, ("Path A Rx IQK fail!!\n")); #endif return result; } u1Byte //bit0 = 1 => Tx OK, bit1 = 1 => Rx OK phy_PathA_RxIQK( #if (DM_ODM_SUPPORT_TYPE & ODM_AP) IN PDM_ODM_T pDM_Odm, #else IN PADAPTER pAdapter, #endif IN BOOLEAN configPathB ) { u4Byte regEAC, regE94, regE9C, regEA4, u4tmp; u1Byte result = 0x00; #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter); #if (DM_ODM_SUPPORT_TYPE == ODM_CE) PDM_ODM_T pDM_Odm = &pHalData->odmpriv; #endif #if (DM_ODM_SUPPORT_TYPE == ODM_MP) PDM_ODM_T pDM_Odm = &pHalData->DM_OutSrc; #endif #endif ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Rx 
IQK!\n")); //1 Get TXIMR setting //modify RXIQK mode table ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A Rx IQK modify RXIQK mode table!\n")); ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x00000000); ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_WE_LUT, bRFRegOffsetMask, 0x800a0 ); ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x30000 ); ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0000f ); ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xf117B ); //PA,PAD off ODM_SetRFReg(pDM_Odm, RF_PATH_A, 0xdf, bRFRegOffsetMask, 0x980 ); ODM_SetRFReg(pDM_Odm, RF_PATH_A, 0x56, bRFRegOffsetMask, 0x51000 ); ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x80800000); //IQK setting ODM_SetBBReg(pDM_Odm, rTx_IQK, bMaskDWord, 0x01007c00); ODM_SetBBReg(pDM_Odm, rRx_IQK, bMaskDWord, 0x81004800); //path-A IQK setting ODM_SetBBReg(pDM_Odm, rTx_IQK_Tone_A, bMaskDWord, 0x10008c1c); ODM_SetBBReg(pDM_Odm, rRx_IQK_Tone_A, bMaskDWord, 0x30008c1c); ODM_SetBBReg(pDM_Odm, rTx_IQK_PI_A, bMaskDWord, 0x82160c1f); ODM_SetBBReg(pDM_Odm, rRx_IQK_PI_A, bMaskDWord, 0x28160000); //LO calibration setting ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LO calibration setting!\n")); ODM_SetBBReg(pDM_Odm, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911); //One shot, path A LOK & IQK ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path A LOK & IQK!\n")); ODM_SetBBReg(pDM_Odm, rIQK_AGC_Pts, bMaskDWord, 0xf9000000); ODM_SetBBReg(pDM_Odm, rIQK_AGC_Pts, bMaskDWord, 0xf8000000); // delay x ms ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Delay %d ms for One shot, path A LOK & IQK.\n", IQK_DELAY_TIME_88E)); //PlatformStallExecution(IQK_DELAY_TIME_88E*1000); ODM_delay_ms(IQK_DELAY_TIME_88E); // Check failed regEAC = ODM_GetBBReg(pDM_Odm, rRx_Power_After_IQK_A_2, bMaskDWord); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xeac = 0x%x\n", regEAC)); regE94 = ODM_GetBBReg(pDM_Odm, rTx_Power_Before_IQK_A, bMaskDWord); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe94 = 0x%x\n", regE94)); regE9C= ODM_GetBBReg(pDM_Odm, rTx_Power_After_IQK_A, bMaskDWord); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe9c = 0x%x\n", regE9C)); if(!(regEAC & BIT28) && (((regE94 & 0x03FF0000)>>16) != 0x142) && (((regE9C & 0x03FF0000)>>16) != 0x42) ) { result |= 0x01; } else { //reload RF 0xdf ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x00000000); ODM_SetRFReg(pDM_Odm, RF_PATH_A, 0xdf, bRFRegOffsetMask, 0x180 );//if Tx not OK, ignore Rx return result; } u4tmp = 0x80007C00 | (regE94&0x3FF0000) | ((regE9C&0x3FF0000) >> 16); ODM_SetBBReg(pDM_Odm, rTx_IQK, bMaskDWord, u4tmp); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe40 = 0x%x u4tmp = 0x%x \n", ODM_GetBBReg(pDM_Odm, rTx_IQK, bMaskDWord), u4tmp)); //1 RX IQK //modify RXIQK mode table ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A Rx IQK modify RXIQK mode table 2!\n")); ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x00000000); ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_WE_LUT, bRFRegOffsetMask, 0x800a0 ); ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x30000 ); ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0000f ); ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xf7ffa ); ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x80800000); //IQK setting ODM_SetBBReg(pDM_Odm, rRx_IQK, bMaskDWord, 0x01004800); //path-A IQK setting ODM_SetBBReg(pDM_Odm, rTx_IQK_Tone_A, bMaskDWord, 0x38008c1c); ODM_SetBBReg(pDM_Odm, 
rRx_IQK_Tone_A, bMaskDWord, 0x18008c1c); ODM_SetBBReg(pDM_Odm, rTx_IQK_PI_A, bMaskDWord, 0x82160c05); ODM_SetBBReg(pDM_Odm, rRx_IQK_PI_A, bMaskDWord, 0x28160c1f); //LO calibration setting ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LO calibration setting!\n")); ODM_SetBBReg(pDM_Odm, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911); //One shot, path A LOK & IQK ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path A LOK & IQK!\n")); ODM_SetBBReg(pDM_Odm, rIQK_AGC_Pts, bMaskDWord, 0xf9000000); ODM_SetBBReg(pDM_Odm, rIQK_AGC_Pts, bMaskDWord, 0xf8000000); // delay x ms ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Delay %d ms for One shot, path A LOK & IQK.\n", IQK_DELAY_TIME_88E)); //PlatformStallExecution(IQK_DELAY_TIME_88E*1000); ODM_delay_ms(IQK_DELAY_TIME_88E); // Check failed regEAC = ODM_GetBBReg(pDM_Odm, rRx_Power_After_IQK_A_2, bMaskDWord); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xeac = 0x%x\n", regEAC)); regE94 = ODM_GetBBReg(pDM_Odm, rTx_Power_Before_IQK_A, bMaskDWord); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe94 = 0x%x\n", regE94)); regE9C= ODM_GetBBReg(pDM_Odm, rTx_Power_After_IQK_A, bMaskDWord); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe9c = 0x%x\n", regE9C)); regEA4= ODM_GetBBReg(pDM_Odm, rRx_Power_Before_IQK_A_2, bMaskDWord); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xea4 = 0x%x\n", regEA4)); //reload RF 0xdf ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x00000000); ODM_SetRFReg(pDM_Odm, RF_PATH_A, 0xdf, bRFRegOffsetMask, 0x180 ); #if 0 if(!(regEAC & BIT28) && (((regE94 & 0x03FF0000)>>16) != 0x142) && (((regE9C & 0x03FF0000)>>16) != 0x42) ) result |= 0x01; else //if Tx not OK, ignore Rx return result; #endif if(!(regEAC & BIT27) && //if Tx is OK, check whether Rx is OK (((regEA4 & 0x03FF0000)>>16) != 0x132) && (((regEAC & 0x03FF0000)>>16) != 0x36)) result |= 0x02; else ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Rx IQK fail!!\n")); return result; } u1Byte //bit0 = 1 => Tx OK, bit1 = 1 => Rx OK phy_PathB_IQK_8188E( #if (DM_ODM_SUPPORT_TYPE & ODM_AP) IN PDM_ODM_T pDM_Odm #else IN PADAPTER pAdapter #endif ) { u4Byte regEAC, regEB4, regEBC, regEC4, regECC; u1Byte result = 0x00; #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter); #if (DM_ODM_SUPPORT_TYPE == ODM_CE) PDM_ODM_T pDM_Odm = &pHalData->odmpriv; #endif #if (DM_ODM_SUPPORT_TYPE == ODM_MP) PDM_ODM_T pDM_Odm = &pHalData->DM_OutSrc; #endif #endif ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B IQK!\n")); //One shot, path B LOK & IQK ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path A LOK & IQK!\n")); ODM_SetBBReg(pDM_Odm, rIQK_AGC_Cont, bMaskDWord, 0x00000002); ODM_SetBBReg(pDM_Odm, rIQK_AGC_Cont, bMaskDWord, 0x00000000); // delay x ms ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Delay %d ms for One shot, path B LOK & IQK.\n", IQK_DELAY_TIME_88E)); //PlatformStallExecution(IQK_DELAY_TIME_88E*1000); ODM_delay_ms(IQK_DELAY_TIME_88E); // Check failed regEAC = ODM_GetBBReg(pDM_Odm, rRx_Power_After_IQK_A_2, bMaskDWord); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xeac = 0x%x\n", regEAC)); regEB4 = ODM_GetBBReg(pDM_Odm, rTx_Power_Before_IQK_B, bMaskDWord); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xeb4 = 0x%x\n", regEB4)); regEBC= ODM_GetBBReg(pDM_Odm, rTx_Power_After_IQK_B, bMaskDWord); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xebc = 0x%x\n", regEBC)); 
regEC4= ODM_GetBBReg(pDM_Odm, rRx_Power_Before_IQK_B_2, bMaskDWord); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xec4 = 0x%x\n", regEC4)); regECC= ODM_GetBBReg(pDM_Odm, rRx_Power_After_IQK_B_2, bMaskDWord); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xecc = 0x%x\n", regECC)); if(!(regEAC & BIT31) && (((regEB4 & 0x03FF0000)>>16) != 0x142) && (((regEBC & 0x03FF0000)>>16) != 0x42)) result |= 0x01; else return result; if(!(regEAC & BIT30) && (((regEC4 & 0x03FF0000)>>16) != 0x132) && (((regECC & 0x03FF0000)>>16) != 0x36)) result |= 0x02; else ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B Rx IQK fail!!\n")); return result; } VOID _PHY_PathAFillIQKMatrix( #if (DM_ODM_SUPPORT_TYPE & ODM_AP) IN PDM_ODM_T pDM_Odm, #else IN PADAPTER pAdapter, #endif IN BOOLEAN bIQKOK, IN s4Byte result[][8], IN u1Byte final_candidate, IN BOOLEAN bTxOnly ) { u4Byte Oldval_0, X, TX0_A, reg; s4Byte Y, TX0_C; #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter); #if (DM_ODM_SUPPORT_TYPE == ODM_CE) PDM_ODM_T pDM_Odm = &pHalData->odmpriv; #endif #if (DM_ODM_SUPPORT_TYPE == ODM_MP) PDM_ODM_T pDM_Odm = &pHalData->DM_OutSrc; #endif #endif ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A IQ Calibration %s !\n",(bIQKOK)?"Success":"Failed")); if(final_candidate == 0xFF) return; else if(bIQKOK) { Oldval_0 = (ODM_GetBBReg(pDM_Odm, rOFDM0_XATxIQImbalance, bMaskDWord) >> 22) & 0x3FF; X = result[final_candidate][0]; if ((X & 0x00000200) != 0) X = X | 0xFFFFFC00; TX0_A = (X * Oldval_0) >> 8; ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("X = 0x%x, TX0_A = 0x%x, Oldval_0 0x%x\n", X, TX0_A, Oldval_0)); ODM_SetBBReg(pDM_Odm, rOFDM0_XATxIQImbalance, 0x3FF, TX0_A); ODM_SetBBReg(pDM_Odm, rOFDM0_ECCAThreshold, BIT(31), ((X* Oldval_0>>7) & 0x1)); Y = result[final_candidate][1]; if ((Y & 0x00000200) != 0) Y = Y | 0xFFFFFC00; TX0_C = (Y * Oldval_0) >> 8; ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Y = 0x%x, TX = 0x%x\n", Y, TX0_C)); ODM_SetBBReg(pDM_Odm, rOFDM0_XCTxAFE, 0xF0000000, ((TX0_C&0x3C0)>>6)); ODM_SetBBReg(pDM_Odm, rOFDM0_XATxIQImbalance, 0x003F0000, (TX0_C&0x3F)); ODM_SetBBReg(pDM_Odm, rOFDM0_ECCAThreshold, BIT(29), ((Y* Oldval_0>>7) & 0x1)); if(bTxOnly) { ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("_PHY_PathAFillIQKMatrix only Tx OK\n")); return; } reg = result[final_candidate][2]; #if (DM_ODM_SUPPORT_TYPE==ODM_AP) if( RTL_ABS(reg ,0x100) >= 16) reg = 0x100; #endif ODM_SetBBReg(pDM_Odm, rOFDM0_XARxIQImbalance, 0x3FF, reg); reg = result[final_candidate][3] & 0x3F; ODM_SetBBReg(pDM_Odm, rOFDM0_XARxIQImbalance, 0xFC00, reg); reg = (result[final_candidate][3] >> 6) & 0xF; ODM_SetBBReg(pDM_Odm, rOFDM0_RxIQExtAnta, 0xF0000000, reg); } } VOID _PHY_PathBFillIQKMatrix( #if (DM_ODM_SUPPORT_TYPE & ODM_AP) IN PDM_ODM_T pDM_Odm, #else IN PADAPTER pAdapter, #endif IN BOOLEAN bIQKOK, IN s4Byte result[][8], IN u1Byte final_candidate, IN BOOLEAN bTxOnly //do Tx only ) { u4Byte Oldval_1, X, TX1_A, reg; s4Byte Y, TX1_C; #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter); #if (DM_ODM_SUPPORT_TYPE == ODM_CE) PDM_ODM_T pDM_Odm = &pHalData->odmpriv; #endif #if (DM_ODM_SUPPORT_TYPE == ODM_MP) PDM_ODM_T pDM_Odm = &pHalData->DM_OutSrc; #endif #endif ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B IQ Calibration %s !\n",(bIQKOK)?"Success":"Failed")); if(final_candidate == 0xFF) return; else if(bIQKOK) { Oldval_1 = (ODM_GetBBReg(pDM_Odm, rOFDM0_XBTxIQImbalance, 
bMaskDWord) >> 22) & 0x3FF; X = result[final_candidate][4]; if ((X & 0x00000200) != 0) X = X | 0xFFFFFC00; TX1_A = (X * Oldval_1) >> 8; ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("X = 0x%x, TX1_A = 0x%x\n", X, TX1_A)); ODM_SetBBReg(pDM_Odm, rOFDM0_XBTxIQImbalance, 0x3FF, TX1_A); ODM_SetBBReg(pDM_Odm, rOFDM0_ECCAThreshold, BIT(27), ((X* Oldval_1>>7) & 0x1)); Y = result[final_candidate][5]; if ((Y & 0x00000200) != 0) Y = Y | 0xFFFFFC00; TX1_C = (Y * Oldval_1) >> 8; ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Y = 0x%x, TX1_C = 0x%x\n", Y, TX1_C)); ODM_SetBBReg(pDM_Odm, rOFDM0_XDTxAFE, 0xF0000000, ((TX1_C&0x3C0)>>6)); ODM_SetBBReg(pDM_Odm, rOFDM0_XBTxIQImbalance, 0x003F0000, (TX1_C&0x3F)); ODM_SetBBReg(pDM_Odm, rOFDM0_ECCAThreshold, BIT(25), ((Y* Oldval_1>>7) & 0x1)); if(bTxOnly) return; reg = result[final_candidate][6]; ODM_SetBBReg(pDM_Odm, rOFDM0_XBRxIQImbalance, 0x3FF, reg); reg = result[final_candidate][7] & 0x3F; ODM_SetBBReg(pDM_Odm, rOFDM0_XBRxIQImbalance, 0xFC00, reg); reg = (result[final_candidate][7] >> 6) & 0xF; ODM_SetBBReg(pDM_Odm, rOFDM0_AGCRSSITable, 0x0000F000, reg); } } // // 2011/07/26 MH Add an API for testing IQK fail case. // // MP Already declare in odm.c #if !(DM_ODM_SUPPORT_TYPE & ODM_MP) BOOLEAN ODM_CheckPowerStatus( IN PADAPTER Adapter) { /* HAL_DATA_TYPE *pHalData = GET_HAL_DATA(Adapter); PDM_ODM_T pDM_Odm = &pHalData->DM_OutSrc; RT_RF_POWER_STATE rtState; PMGNT_INFO pMgntInfo = &(Adapter->MgntInfo); // 2011/07/27 MH We are not testing ready~~!! We may fail to get correct value when init sequence. if (pMgntInfo->init_adpt_in_progress == TRUE) { ODM_RT_TRACE(pDM_Odm,COMP_INIT, DBG_LOUD, ("ODM_CheckPowerStatus Return TRUE, due to initadapter")); return TRUE; } // // 2011/07/19 MH We can not execute tx pwoer tracking/ LLC calibrate or IQK. 
// Adapter->HalFunc.GetHwRegHandler(Adapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState)); if(Adapter->bDriverStopped || Adapter->bDriverIsGoingToPnpSetPowerSleep || rtState == eRfOff) { ODM_RT_TRACE(pDM_Odm,COMP_INIT, DBG_LOUD, ("ODM_CheckPowerStatus Return FALSE, due to %d/%d/%d\n", Adapter->bDriverStopped, Adapter->bDriverIsGoingToPnpSetPowerSleep, rtState)); return FALSE; } */ return TRUE; } #endif VOID _PHY_SaveADDARegisters( #if (DM_ODM_SUPPORT_TYPE & ODM_AP) IN PDM_ODM_T pDM_Odm, #else IN PADAPTER pAdapter, #endif IN pu4Byte ADDAReg, IN pu4Byte ADDABackup, IN u4Byte RegisterNum ) { u4Byte i; #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter); #if (DM_ODM_SUPPORT_TYPE == ODM_CE) PDM_ODM_T pDM_Odm = &pHalData->odmpriv; #endif #if (DM_ODM_SUPPORT_TYPE == ODM_MP) PDM_ODM_T pDM_Odm = &pHalData->DM_OutSrc; #endif if (ODM_CheckPowerStatus(pAdapter) == FALSE) return; #endif ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Save ADDA parameters.\n")); for( i = 0 ; i < RegisterNum ; i++){ ADDABackup[i] = ODM_GetBBReg(pDM_Odm, ADDAReg[i], bMaskDWord); } } VOID _PHY_SaveMACRegisters( #if (DM_ODM_SUPPORT_TYPE & ODM_AP) IN PDM_ODM_T pDM_Odm, #else IN PADAPTER pAdapter, #endif IN pu4Byte MACReg, IN pu4Byte MACBackup ) { u4Byte i; #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter); #if (DM_ODM_SUPPORT_TYPE == ODM_CE) PDM_ODM_T pDM_Odm = &pHalData->odmpriv; #endif #if (DM_ODM_SUPPORT_TYPE == ODM_MP) PDM_ODM_T pDM_Odm = &pHalData->DM_OutSrc; #endif #endif ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Save MAC parameters.\n")); for( i = 0 ; i < (IQK_MAC_REG_NUM - 1); i++){ MACBackup[i] = ODM_Read1Byte(pDM_Odm, MACReg[i]); } MACBackup[i] = ODM_Read4Byte(pDM_Odm, MACReg[i]); } VOID _PHY_ReloadADDARegisters( #if (DM_ODM_SUPPORT_TYPE & ODM_AP) IN PDM_ODM_T pDM_Odm, #else IN PADAPTER pAdapter, #endif IN pu4Byte ADDAReg, IN pu4Byte ADDABackup, IN u4Byte RegiesterNum ) { u4Byte i; #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter); #if (DM_ODM_SUPPORT_TYPE == ODM_CE) PDM_ODM_T pDM_Odm = &pHalData->odmpriv; #endif #if (DM_ODM_SUPPORT_TYPE == ODM_MP) PDM_ODM_T pDM_Odm = &pHalData->DM_OutSrc; #endif #endif ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Reload ADDA power saving parameters !\n")); for(i = 0 ; i < RegiesterNum; i++) { ODM_SetBBReg(pDM_Odm, ADDAReg[i], bMaskDWord, ADDABackup[i]); } } VOID _PHY_ReloadMACRegisters( #if (DM_ODM_SUPPORT_TYPE & ODM_AP) IN PDM_ODM_T pDM_Odm, #else IN PADAPTER pAdapter, #endif IN pu4Byte MACReg, IN pu4Byte MACBackup ) { u4Byte i; #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter); #if (DM_ODM_SUPPORT_TYPE == ODM_CE) PDM_ODM_T pDM_Odm = &pHalData->odmpriv; #endif #if (DM_ODM_SUPPORT_TYPE == ODM_MP) PDM_ODM_T pDM_Odm = &pHalData->DM_OutSrc; #endif #endif ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Reload MAC parameters !\n")); for(i = 0 ; i < (IQK_MAC_REG_NUM - 1); i++){ ODM_Write1Byte(pDM_Odm, MACReg[i], (u1Byte)MACBackup[i]); } ODM_Write4Byte(pDM_Odm, MACReg[i], MACBackup[i]); } VOID _PHY_PathADDAOn( #if (DM_ODM_SUPPORT_TYPE & ODM_AP) IN PDM_ODM_T pDM_Odm, #else IN PADAPTER pAdapter, #endif IN pu4Byte ADDAReg, IN BOOLEAN isPathAOn, IN BOOLEAN is2T ) { u4Byte pathOn; u4Byte i; #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter); #if (DM_ODM_SUPPORT_TYPE == ODM_CE) PDM_ODM_T pDM_Odm = &pHalData->odmpriv; #endif #if (DM_ODM_SUPPORT_TYPE == ODM_MP) PDM_ODM_T pDM_Odm = 
&pHalData->DM_OutSrc; #endif #endif ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("ADDA ON.\n")); pathOn = isPathAOn ? 0x04db25a4 : 0x0b1b25a4; if(FALSE == is2T){ pathOn = 0x0bdb25a0; ODM_SetBBReg(pDM_Odm, ADDAReg[0], bMaskDWord, 0x0b1b25a0); } else{ ODM_SetBBReg(pDM_Odm,ADDAReg[0], bMaskDWord, pathOn); } for( i = 1 ; i < IQK_ADDA_REG_NUM ; i++){ ODM_SetBBReg(pDM_Odm,ADDAReg[i], bMaskDWord, pathOn); } } VOID _PHY_MACSettingCalibration( #if (DM_ODM_SUPPORT_TYPE & ODM_AP) IN PDM_ODM_T pDM_Odm, #else IN PADAPTER pAdapter, #endif IN pu4Byte MACReg, IN pu4Byte MACBackup ) { u4Byte i = 0; #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter); #if (DM_ODM_SUPPORT_TYPE == ODM_CE) PDM_ODM_T pDM_Odm = &pHalData->odmpriv; #endif #if (DM_ODM_SUPPORT_TYPE == ODM_MP) PDM_ODM_T pDM_Odm = &pHalData->DM_OutSrc; #endif #endif ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("MAC settings for Calibration.\n")); ODM_Write1Byte(pDM_Odm, MACReg[i], 0x3F); for(i = 1 ; i < (IQK_MAC_REG_NUM - 1); i++){ ODM_Write1Byte(pDM_Odm, MACReg[i], (u1Byte)(MACBackup[i]&(~BIT3))); } ODM_Write1Byte(pDM_Odm, MACReg[i], (u1Byte)(MACBackup[i]&(~BIT5))); } VOID _PHY_PathAStandBy( #if (DM_ODM_SUPPORT_TYPE & ODM_AP) IN PDM_ODM_T pDM_Odm #else IN PADAPTER pAdapter #endif ) { #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter); #if (DM_ODM_SUPPORT_TYPE == ODM_CE) PDM_ODM_T pDM_Odm = &pHalData->odmpriv; #endif #if (DM_ODM_SUPPORT_TYPE == ODM_MP) PDM_ODM_T pDM_Odm = &pHalData->DM_OutSrc; #endif #endif ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A standby mode!\n")); ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x0); ODM_SetBBReg(pDM_Odm, 0x840, bMaskDWord, 0x00010000); ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x80800000); } VOID _PHY_PIModeSwitch( #if (DM_ODM_SUPPORT_TYPE & ODM_AP) IN PDM_ODM_T pDM_Odm, #else IN PADAPTER pAdapter, #endif IN BOOLEAN PIMode ) { u4Byte mode; #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter); #if (DM_ODM_SUPPORT_TYPE == ODM_CE) PDM_ODM_T pDM_Odm = &pHalData->odmpriv; #endif #if (DM_ODM_SUPPORT_TYPE == ODM_MP) PDM_ODM_T pDM_Odm = &pHalData->DM_OutSrc; #endif #endif ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("BB Switch to %s mode!\n", (PIMode ? "PI" : "SI"))); mode = PIMode ? 0x01000100 : 0x01000000; ODM_SetBBReg(pDM_Odm, rFPGA0_XA_HSSIParameter1, bMaskDWord, mode); ODM_SetBBReg(pDM_Odm, rFPGA0_XB_HSSIParameter1, bMaskDWord, mode); } BOOLEAN phy_SimularityCompare_8188E( #if (DM_ODM_SUPPORT_TYPE & ODM_AP) IN PDM_ODM_T pDM_Odm, #else IN PADAPTER pAdapter, #endif IN s4Byte result[][8], IN u1Byte c1, IN u1Byte c2 ) { u4Byte i, j, diff, SimularityBitMap, bound = 0; #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter); #if (DM_ODM_SUPPORT_TYPE == ODM_CE) PDM_ODM_T pDM_Odm = &pHalData->odmpriv; #endif #if (DM_ODM_SUPPORT_TYPE == ODM_MP) PDM_ODM_T pDM_Odm = &pHalData->DM_OutSrc; #endif #endif u1Byte final_candidate[2] = {0xFF, 0xFF}; //for path A and path B BOOLEAN bResult = TRUE; BOOLEAN is2T; s4Byte tmp1 = 0,tmp2 = 0; if( (pDM_Odm->RFType ==ODM_2T2R )||(pDM_Odm->RFType ==ODM_2T3R )||(pDM_Odm->RFType ==ODM_2T4R )) is2T = TRUE; else is2T = FALSE; if(is2T) bound = 8; else bound = 4; ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("===> IQK:phy_SimularityCompare_8188E c1 %d c2 %d!!!\n", c1, c2)); SimularityBitMap = 0; for( i = 0; i < bound; i++ ) { // diff = (result[c1][i] > result[c2][i]) ? 
(result[c1][i] - result[c2][i]) : (result[c2][i] - result[c1][i]); if((i==1) || (i==3) || (i==5) || (i==7)) { if((result[c1][i]& 0x00000200) != 0) tmp1 = result[c1][i] | 0xFFFFFC00; else tmp1 = result[c1][i]; if((result[c2][i]& 0x00000200) != 0) tmp2 = result[c2][i] | 0xFFFFFC00; else tmp2 = result[c2][i]; } else { tmp1 = result[c1][i]; tmp2 = result[c2][i]; } diff = (tmp1 > tmp2) ? (tmp1 - tmp2) : (tmp2 - tmp1); if (diff > MAX_TOLERANCE) { ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK:phy_SimularityCompare_8188E differnece overflow index %d compare1 0x%x compare2 0x%x!!!\n", i, result[c1][i], result[c2][i])); if((i == 2 || i == 6) && !SimularityBitMap) { if(result[c1][i]+result[c1][i+1] == 0) final_candidate[(i/4)] = c2; else if (result[c2][i]+result[c2][i+1] == 0) final_candidate[(i/4)] = c1; else SimularityBitMap = SimularityBitMap|(1<<i); } else SimularityBitMap = SimularityBitMap|(1<<i); } } ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK:phy_SimularityCompare_8188E SimularityBitMap %d !!!\n", SimularityBitMap)); if ( SimularityBitMap == 0) { for( i = 0; i < (bound/4); i++ ) { if(final_candidate[i] != 0xFF) { for( j = i*4; j < (i+1)*4-2; j++) result[3][j] = result[final_candidate[i]][j]; bResult = FALSE; } } return bResult; } else { if (!(SimularityBitMap & 0x03)) //path A TX OK { for(i = 0; i < 2; i++) result[3][i] = result[c1][i]; } if (!(SimularityBitMap & 0x0c)) //path A RX OK { for(i = 2; i < 4; i++) result[3][i] = result[c1][i]; } if (!(SimularityBitMap & 0x30)) //path B TX OK { for(i = 4; i < 6; i++) result[3][i] = result[c1][i]; } if (!(SimularityBitMap & 0xc0)) //path B RX OK { for(i = 6; i < 8; i++) result[3][i] = result[c1][i]; } return FALSE; } } VOID phy_IQCalibrate_8188E( #if (DM_ODM_SUPPORT_TYPE & ODM_AP) IN PDM_ODM_T pDM_Odm, #else IN PADAPTER pAdapter, #endif IN s4Byte result[][8], IN u1Byte t, IN BOOLEAN is2T ) { #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter); #if (DM_ODM_SUPPORT_TYPE == ODM_CE) PDM_ODM_T pDM_Odm = &pHalData->odmpriv; #endif #if (DM_ODM_SUPPORT_TYPE == ODM_MP) PDM_ODM_T pDM_Odm = &pHalData->DM_OutSrc; #endif #endif u4Byte i; u1Byte PathAOK, PathBOK; u4Byte ADDA_REG[IQK_ADDA_REG_NUM] = { rFPGA0_XCD_SwitchControl, rBlue_Tooth, rRx_Wait_CCA, rTx_CCK_RFON, rTx_CCK_BBON, rTx_OFDM_RFON, rTx_OFDM_BBON, rTx_To_Rx, rTx_To_Tx, rRx_CCK, rRx_OFDM, rRx_Wait_RIFS, rRx_TO_Rx, rStandby, rSleep, rPMPD_ANAEN }; u4Byte IQK_MAC_REG[IQK_MAC_REG_NUM] = { REG_TXPAUSE, REG_BCN_CTRL, REG_BCN_CTRL_1, REG_GPIO_MUXCFG}; //since 92C & 92D have the different define in IQK_BB_REG u4Byte IQK_BB_REG_92C[IQK_BB_REG_NUM] = { rOFDM0_TRxPathEnable, rOFDM0_TRMuxPar, rFPGA0_XCD_RFInterfaceSW, rConfig_AntA, rConfig_AntB, rFPGA0_XAB_RFInterfaceSW, rFPGA0_XA_RFInterfaceOE, rFPGA0_XB_RFInterfaceOE, rFPGA0_RFMOD }; #if (DM_ODM_SUPPORT_TYPE & (ODM_AP|ODM_ADSL)) u4Byte retryCount = 2; #else #if MP_DRIVER u4Byte retryCount = 9; #else u4Byte retryCount = 2; #endif #endif if ( *(pDM_Odm->mp_mode) == 1) retryCount = 9; else retryCount = 2; // Note: IQ calibration must be performed after loading // PHY_REG.txt , and radio_a, radio_b.txt //u4Byte bbvalue; #if (DM_ODM_SUPPORT_TYPE & (ODM_AP|ODM_ADSL)) #ifdef MP_TEST if(pDM_Odm->priv->pshare->rf_ft_var.mp_specific) retryCount = 9; #endif #endif if(t==0) { // bbvalue = ODM_GetBBReg(pDM_Odm, rFPGA0_RFMOD, bMaskDWord); // RTPRINT(FINIT, INIT_IQK, ("phy_IQCalibrate_8188E()==>0x%08x\n",bbvalue)); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQ Calibration for %s for %d 
times\n", (is2T ? "2T2R" : "1T1R"), t)); // Save ADDA parameters, turn Path A ADDA on #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) _PHY_SaveADDARegisters(pAdapter, ADDA_REG, pDM_Odm->RFCalibrateInfo.ADDA_backup, IQK_ADDA_REG_NUM); _PHY_SaveMACRegisters(pAdapter, IQK_MAC_REG, pDM_Odm->RFCalibrateInfo.IQK_MAC_backup); _PHY_SaveADDARegisters(pAdapter, IQK_BB_REG_92C, pDM_Odm->RFCalibrateInfo.IQK_BB_backup, IQK_BB_REG_NUM); #else _PHY_SaveADDARegisters(pDM_Odm, ADDA_REG, pDM_Odm->RFCalibrateInfo.ADDA_backup, IQK_ADDA_REG_NUM); _PHY_SaveMACRegisters(pDM_Odm, IQK_MAC_REG, pDM_Odm->RFCalibrateInfo.IQK_MAC_backup); _PHY_SaveADDARegisters(pDM_Odm, IQK_BB_REG_92C, pDM_Odm->RFCalibrateInfo.IQK_BB_backup, IQK_BB_REG_NUM); #endif } ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQ Calibration for %s for %d times\n", (is2T ? "2T2R" : "1T1R"), t)); #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) _PHY_PathADDAOn(pAdapter, ADDA_REG, TRUE, is2T); #else _PHY_PathADDAOn(pDM_Odm, ADDA_REG, TRUE, is2T); #endif if(t==0) { pDM_Odm->RFCalibrateInfo.bRfPiEnable = (u1Byte)ODM_GetBBReg(pDM_Odm, rFPGA0_XA_HSSIParameter1, BIT(8)); } if(!pDM_Odm->RFCalibrateInfo.bRfPiEnable){ // Switch BB to PI mode to do IQ Calibration. #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) _PHY_PIModeSwitch(pAdapter, TRUE); #else _PHY_PIModeSwitch(pDM_Odm, TRUE); #endif } //BB setting ODM_SetBBReg(pDM_Odm, rFPGA0_RFMOD, BIT24, 0x00); ODM_SetBBReg(pDM_Odm, rOFDM0_TRxPathEnable, bMaskDWord, 0x03a05600); ODM_SetBBReg(pDM_Odm, rOFDM0_TRMuxPar, bMaskDWord, 0x000800e4); ODM_SetBBReg(pDM_Odm, rFPGA0_XCD_RFInterfaceSW, bMaskDWord, 0x22204000); ODM_SetBBReg(pDM_Odm, rFPGA0_XAB_RFInterfaceSW, BIT10, 0x01); ODM_SetBBReg(pDM_Odm, rFPGA0_XAB_RFInterfaceSW, BIT26, 0x01); ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, BIT10, 0x00); ODM_SetBBReg(pDM_Odm, rFPGA0_XB_RFInterfaceOE, BIT10, 0x00); if(is2T) { ODM_SetBBReg(pDM_Odm, rFPGA0_XA_LSSIParameter, bMaskDWord, 0x00010000); ODM_SetBBReg(pDM_Odm, rFPGA0_XB_LSSIParameter, bMaskDWord, 0x00010000); } //MAC settings #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) _PHY_MACSettingCalibration(pAdapter, IQK_MAC_REG, pDM_Odm->RFCalibrateInfo.IQK_MAC_backup); #else _PHY_MACSettingCalibration(pDM_Odm, IQK_MAC_REG, pDM_Odm->RFCalibrateInfo.IQK_MAC_backup); #endif //Page B init //AP or IQK ODM_SetBBReg(pDM_Odm, rConfig_AntA, bMaskDWord, 0x0f600000); if(is2T) { ODM_SetBBReg(pDM_Odm, rConfig_AntB, bMaskDWord, 0x0f600000); } // IQ calibration setting ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK setting!\n")); ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x80800000); ODM_SetBBReg(pDM_Odm, rTx_IQK, bMaskDWord, 0x01007c00); ODM_SetBBReg(pDM_Odm, rRx_IQK, bMaskDWord, 0x81004800); for(i = 0 ; i < retryCount ; i++){ #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) PathAOK = phy_PathA_IQK_8188E(pAdapter, is2T); #else PathAOK = phy_PathA_IQK_8188E(pDM_Odm, is2T); #endif // if(PathAOK == 0x03){ if(PathAOK == 0x01){ ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Tx IQK Success!!\n")); result[t][0] = (ODM_GetBBReg(pDM_Odm, rTx_Power_Before_IQK_A, bMaskDWord)&0x3FF0000)>>16; result[t][1] = (ODM_GetBBReg(pDM_Odm, rTx_Power_After_IQK_A, bMaskDWord)&0x3FF0000)>>16; break; } #if 0 else if (i == (retryCount-1) && PathAOK == 0x01) //Tx IQK OK { RTPRINT(FINIT, INIT_IQK, ("Path A IQK Only Tx Success!!\n")); result[t][0] = (ODM_GetBBReg(pDM_Odm, rTx_Power_Before_IQK_A, bMaskDWord)&0x3FF0000)>>16; result[t][1] = (ODM_GetBBReg(pDM_Odm, rTx_Power_After_IQK_A, bMaskDWord)&0x3FF0000)>>16; } #endif } for(i = 0 ; i < retryCount ; i++){ #if 
!(DM_ODM_SUPPORT_TYPE & ODM_AP) PathAOK = phy_PathA_RxIQK(pAdapter, is2T); #else PathAOK = phy_PathA_RxIQK(pDM_Odm, is2T); #endif if(PathAOK == 0x03){ ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Rx IQK Success!!\n")); // result[t][0] = (ODM_GetBBReg(pDM_Odm, rTx_Power_Before_IQK_A, bMaskDWord)&0x3FF0000)>>16; // result[t][1] = (ODM_GetBBReg(pDM_Odm, rTx_Power_After_IQK_A, bMaskDWord)&0x3FF0000)>>16; result[t][2] = (ODM_GetBBReg(pDM_Odm, rRx_Power_Before_IQK_A_2, bMaskDWord)&0x3FF0000)>>16; result[t][3] = (ODM_GetBBReg(pDM_Odm, rRx_Power_After_IQK_A_2, bMaskDWord)&0x3FF0000)>>16; break; } else { ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Rx IQK Fail!!\n")); } } if(0x00 == PathAOK){ ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A IQK failed!!\n")); } if(is2T){ #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) _PHY_PathAStandBy(pAdapter); // Turn Path B ADDA on _PHY_PathADDAOn(pAdapter, ADDA_REG, FALSE, is2T); #else _PHY_PathAStandBy(pDM_Odm); // Turn Path B ADDA on _PHY_PathADDAOn(pDM_Odm, ADDA_REG, FALSE, is2T); #endif for(i = 0 ; i < retryCount ; i++){ #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) PathBOK = phy_PathB_IQK_8188E(pAdapter); #else PathBOK = phy_PathB_IQK_8188E(pDM_Odm); #endif if(PathBOK == 0x03){ ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B IQK Success!!\n")); result[t][4] = (ODM_GetBBReg(pDM_Odm, rTx_Power_Before_IQK_B, bMaskDWord)&0x3FF0000)>>16; result[t][5] = (ODM_GetBBReg(pDM_Odm, rTx_Power_After_IQK_B, bMaskDWord)&0x3FF0000)>>16; result[t][6] = (ODM_GetBBReg(pDM_Odm, rRx_Power_Before_IQK_B_2, bMaskDWord)&0x3FF0000)>>16; result[t][7] = (ODM_GetBBReg(pDM_Odm, rRx_Power_After_IQK_B_2, bMaskDWord)&0x3FF0000)>>16; break; } else if (i == (retryCount - 1) && PathBOK == 0x01) //Tx IQK OK { ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B Only Tx IQK Success!!\n")); result[t][4] = (ODM_GetBBReg(pDM_Odm, rTx_Power_Before_IQK_B, bMaskDWord)&0x3FF0000)>>16; result[t][5] = (ODM_GetBBReg(pDM_Odm, rTx_Power_After_IQK_B, bMaskDWord)&0x3FF0000)>>16; } } if(0x00 == PathBOK){ ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B IQK failed!!\n")); } } //Back to BB mode, load original value ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK:Back to BB mode, load original value!\n")); ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0); if(t!=0) { if(!pDM_Odm->RFCalibrateInfo.bRfPiEnable){ // Switch back BB to SI mode after finish IQ Calibration. 
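/*
 * Editor's note -- an illustrative sketch, not part of the original driver.
 * Every IQK readback above follows the same pattern: the 10-bit result sits
 * in bits [25:16] of the BB register, hence the 0x3FF0000 mask and the
 * 16-bit shift. The eight slots of result[t][] are laid out as A-TX
 * before/after (0,1), A-RX before/after (2,3), B-TX before/after (4,5) and
 * B-RX before/after (6,7). A hypothetical helper making the extraction
 * explicit could look like:
 *
 *   static u4Byte iqk_result_from_reg(u4Byte regValue)
 *   {
 *       return (regValue & 0x3FF0000) >> 16;   // bits [25:16]
 *   }
 *
 *   // e.g. result[t][0] = iqk_result_from_reg(
 *   //          ODM_GetBBReg(pDM_Odm, rTx_Power_Before_IQK_A, bMaskDWord));
 */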
#if !(DM_ODM_SUPPORT_TYPE & ODM_AP) _PHY_PIModeSwitch(pAdapter, FALSE); #else _PHY_PIModeSwitch(pDM_Odm, FALSE); #endif } #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) // Reload ADDA power saving parameters _PHY_ReloadADDARegisters(pAdapter, ADDA_REG, pDM_Odm->RFCalibrateInfo.ADDA_backup, IQK_ADDA_REG_NUM); // Reload MAC parameters _PHY_ReloadMACRegisters(pAdapter, IQK_MAC_REG, pDM_Odm->RFCalibrateInfo.IQK_MAC_backup); _PHY_ReloadADDARegisters(pAdapter, IQK_BB_REG_92C, pDM_Odm->RFCalibrateInfo.IQK_BB_backup, IQK_BB_REG_NUM); #else // Reload ADDA power saving parameters _PHY_ReloadADDARegisters(pDM_Odm, ADDA_REG, pDM_Odm->RFCalibrateInfo.ADDA_backup, IQK_ADDA_REG_NUM); // Reload MAC parameters _PHY_ReloadMACRegisters(pDM_Odm, IQK_MAC_REG, pDM_Odm->RFCalibrateInfo.IQK_MAC_backup); _PHY_ReloadADDARegisters(pDM_Odm, IQK_BB_REG_92C, pDM_Odm->RFCalibrateInfo.IQK_BB_backup, IQK_BB_REG_NUM); #endif // Restore RX initial gain ODM_SetBBReg(pDM_Odm, rFPGA0_XA_LSSIParameter, bMaskDWord, 0x00032ed3); if(is2T){ ODM_SetBBReg(pDM_Odm, rFPGA0_XB_LSSIParameter, bMaskDWord, 0x00032ed3); } //load 0xe30 IQC default value ODM_SetBBReg(pDM_Odm, rTx_IQK_Tone_A, bMaskDWord, 0x01008c00); ODM_SetBBReg(pDM_Odm, rRx_IQK_Tone_A, bMaskDWord, 0x01008c00); } ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_IQCalibrate_8188E() <==\n")); } VOID phy_LCCalibrate_8188E( #if (DM_ODM_SUPPORT_TYPE & ODM_AP) IN PDM_ODM_T pDM_Odm, #else IN PADAPTER pAdapter, #endif IN BOOLEAN is2T ) { u1Byte tmpReg; u4Byte RF_Amode=0, RF_Bmode=0, LC_Cal; #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter); #if (DM_ODM_SUPPORT_TYPE == ODM_CE) PDM_ODM_T pDM_Odm = &pHalData->odmpriv; #endif #if (DM_ODM_SUPPORT_TYPE == ODM_MP) PDM_ODM_T pDM_Odm = &pHalData->DM_OutSrc; #endif #endif //Check continuous TX and Packet TX tmpReg = ODM_Read1Byte(pDM_Odm, 0xd03); if((tmpReg&0x70) != 0) //Deal with continuous TX case ODM_Write1Byte(pDM_Odm, 0xd03, tmpReg&0x8F); //disable all continuous TX else // Deal with Packet TX case ODM_Write1Byte(pDM_Odm, REG_TXPAUSE, 0xFF); // block all queues if((tmpReg&0x70) != 0) { //1. Read original RF mode //Path-A #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) RF_Amode = PHY_QueryRFReg(pAdapter, RF_PATH_A, RF_AC, bMask12Bits); //Path-B if(is2T) RF_Bmode = PHY_QueryRFReg(pAdapter, RF_PATH_B, RF_AC, bMask12Bits); #else RF_Amode = ODM_GetRFReg(pDM_Odm, RF_PATH_A, RF_AC, bMask12Bits); //Path-B if(is2T) RF_Bmode = ODM_GetRFReg(pDM_Odm, RF_PATH_B, RF_AC, bMask12Bits); #endif //2. Set RF mode = standby mode //Path-A ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_AC, bMask12Bits, (RF_Amode&0x8FFFF)|0x10000); //Path-B if(is2T) ODM_SetRFReg(pDM_Odm, RF_PATH_B, RF_AC, bMask12Bits, (RF_Bmode&0x8FFFF)|0x10000); } //3. Read RF reg18 #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) LC_Cal = PHY_QueryRFReg(pAdapter, RF_PATH_A, RF_CHNLBW, bMask12Bits); #else LC_Cal = ODM_GetRFReg(pDM_Odm, RF_PATH_A, RF_CHNLBW, bMask12Bits); #endif //4.
Set LC calibration begin bit15 ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_CHNLBW, bMask12Bits, LC_Cal|0x08000); ODM_sleep_ms(100); //Restore original situation if((tmpReg&0x70) != 0) //Deal with continuous TX case { //Path-A ODM_Write1Byte(pDM_Odm, 0xd03, tmpReg); ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_AC, bMask12Bits, RF_Amode); //Path-B if(is2T) ODM_SetRFReg(pDM_Odm, RF_PATH_B, RF_AC, bMask12Bits, RF_Bmode); } else // Deal with Packet TX case { ODM_Write1Byte(pDM_Odm, REG_TXPAUSE, 0x00); } } //Analog Pre-distortion calibration #define APK_BB_REG_NUM 8 #define APK_CURVE_REG_NUM 4 #define PATH_NUM 2 VOID phy_APCalibrate_8188E( #if (DM_ODM_SUPPORT_TYPE & ODM_AP) IN PDM_ODM_T pDM_Odm, #else IN PADAPTER pAdapter, #endif IN s1Byte delta, IN BOOLEAN is2T ) { #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter); #if (DM_ODM_SUPPORT_TYPE == ODM_CE) PDM_ODM_T pDM_Odm = &pHalData->odmpriv; #endif #if (DM_ODM_SUPPORT_TYPE == ODM_MP) PDM_ODM_T pDM_Odm = &pHalData->DM_OutSrc; #endif #endif u4Byte regD[PATH_NUM]; u4Byte tmpReg, index, offset, apkbound; u1Byte path, i, pathbound = PATH_NUM; u4Byte BB_backup[APK_BB_REG_NUM]; u4Byte BB_REG[APK_BB_REG_NUM] = { rFPGA1_TxBlock, rOFDM0_TRxPathEnable, rFPGA0_RFMOD, rOFDM0_TRMuxPar, rFPGA0_XCD_RFInterfaceSW, rFPGA0_XAB_RFInterfaceSW, rFPGA0_XA_RFInterfaceOE, rFPGA0_XB_RFInterfaceOE }; u4Byte BB_AP_MODE[APK_BB_REG_NUM] = { 0x00000020, 0x00a05430, 0x02040000, 0x000800e4, 0x00204000 }; u4Byte BB_normal_AP_MODE[APK_BB_REG_NUM] = { 0x00000020, 0x00a05430, 0x02040000, 0x000800e4, 0x22204000 }; u4Byte AFE_backup[IQK_ADDA_REG_NUM]; u4Byte AFE_REG[IQK_ADDA_REG_NUM] = { rFPGA0_XCD_SwitchControl, rBlue_Tooth, rRx_Wait_CCA, rTx_CCK_RFON, rTx_CCK_BBON, rTx_OFDM_RFON, rTx_OFDM_BBON, rTx_To_Rx, rTx_To_Tx, rRx_CCK, rRx_OFDM, rRx_Wait_RIFS, rRx_TO_Rx, rStandby, rSleep, rPMPD_ANAEN }; u4Byte MAC_backup[IQK_MAC_REG_NUM]; u4Byte MAC_REG[IQK_MAC_REG_NUM] = { REG_TXPAUSE, REG_BCN_CTRL, REG_BCN_CTRL_1, REG_GPIO_MUXCFG}; u4Byte APK_RF_init_value[PATH_NUM][APK_BB_REG_NUM] = { {0x0852c, 0x1852c, 0x5852c, 0x1852c, 0x5852c}, {0x2852e, 0x0852e, 0x3852e, 0x0852e, 0x0852e} }; u4Byte APK_normal_RF_init_value[PATH_NUM][APK_BB_REG_NUM] = { {0x0852c, 0x0a52c, 0x3a52c, 0x5a52c, 0x5a52c}, //path settings equal to path b settings {0x0852c, 0x0a52c, 0x5a52c, 0x5a52c, 0x5a52c} }; u4Byte APK_RF_value_0[PATH_NUM][APK_BB_REG_NUM] = { {0x52019, 0x52014, 0x52013, 0x5200f, 0x5208d}, {0x5201a, 0x52019, 0x52016, 0x52033, 0x52050} }; u4Byte APK_normal_RF_value_0[PATH_NUM][APK_BB_REG_NUM] = { {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a}, //path settings equal to path b settings {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a} }; u4Byte AFE_on_off[PATH_NUM] = { 0x04db25a4, 0x0b1b25a4}; //path A on path B off / path A off path B on u4Byte APK_offset[PATH_NUM] = { rConfig_AntA, rConfig_AntB}; u4Byte APK_normal_offset[PATH_NUM] = { rConfig_Pmpd_AntA, rConfig_Pmpd_AntB}; u4Byte APK_value[PATH_NUM] = { 0x92fc0000, 0x12fc0000}; u4Byte APK_normal_value[PATH_NUM] = { 0x92680000, 0x12680000}; s1Byte APK_delta_mapping[APK_BB_REG_NUM][13] = { {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6}, {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6}, {-6, -4, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6}, {-1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6}, {-11, -9, -7, -5, -3, -1, 0, 0, 0, 0, 0, 0, 0} }; u4Byte APK_normal_setting_value_1[13] = { 0x01017018, 0xf7ed8f84, 0x1b1a1816, 0x2522201e, 0x322e2b28, 0x433f3a36, 0x5b544e49, 0x7b726a62, 0xa69a8f84, 0xdfcfc0b3, 0x12680000, 0x00880000, 0x00880000 }; u4Byte
APK_normal_setting_value_2[16] = { 0x01c7021d, 0x01670183, 0x01000123, 0x00bf00e2, 0x008d00a3, 0x0068007b, 0x004d0059, 0x003a0042, 0x002b0031, 0x001f0025, 0x0017001b, 0x00110014, 0x000c000f, 0x0009000b, 0x00070008, 0x00050006 }; u4Byte APK_result[PATH_NUM][APK_BB_REG_NUM]; //val_1_1a, val_1_2a, val_2a, val_3a, val_4a // u4Byte AP_curve[PATH_NUM][APK_CURVE_REG_NUM]; s4Byte BB_offset, delta_V, delta_offset; #if MP_DRIVER == 1 if ( *(pDM_Odm->mp_mode) == 1) { #if (DM_ODM_SUPPORT_TYPE == ODM_CE) PMPT_CONTEXT pMptCtx = &(pAdapter->mppriv.MptCtx); #else PMPT_CONTEXT pMptCtx = &(pAdapter->MptCtx); #endif pMptCtx->APK_bound[0] = 45; pMptCtx->APK_bound[1] = 52; } #endif ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("==>phy_APCalibrate_8188E() delta %d\n", delta)); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("AP Calibration for %s\n", (is2T ? "2T2R" : "1T1R"))); if(!is2T) pathbound = 1; //2 FOR NORMAL CHIP SETTINGS // Temporarily do not allow normal driver to do the following settings because these offset // and value will cause RF internal PA to be unpredictably disabled by HW, such that RF Tx signal // will disappear after disable/enable card many times on 88CU. RF SD and DD have not found the // root cause, so we remove these actions temporarily. Added by tynli and SD3 Allen. 2010.05.31. //#if MP_DRIVER != 1 if (*(pDM_Odm->mp_mode) != 1) return; //#endif //settings adjust for normal chip for(index = 0; index < PATH_NUM; index ++) { APK_offset[index] = APK_normal_offset[index]; APK_value[index] = APK_normal_value[index]; AFE_on_off[index] = 0x6fdb25a4; } for(index = 0; index < APK_BB_REG_NUM; index ++) { for(path = 0; path < pathbound; path++) { APK_RF_init_value[path][index] = APK_normal_RF_init_value[path][index]; APK_RF_value_0[path][index] = APK_normal_RF_value_0[path][index]; } BB_AP_MODE[index] = BB_normal_AP_MODE[index]; } apkbound = 6; //save BB default value for(index = 0; index < APK_BB_REG_NUM ; index++) { if(index == 0) //skip continue; BB_backup[index] = ODM_GetBBReg(pDM_Odm, BB_REG[index], bMaskDWord); } //save MAC default value #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) _PHY_SaveMACRegisters(pAdapter, MAC_REG, MAC_backup); //save AFE default value _PHY_SaveADDARegisters(pAdapter, AFE_REG, AFE_backup, IQK_ADDA_REG_NUM); #else _PHY_SaveMACRegisters(pDM_Odm, MAC_REG, MAC_backup); //save AFE default value _PHY_SaveADDARegisters(pDM_Odm, AFE_REG, AFE_backup, IQK_ADDA_REG_NUM); #endif for(path = 0; path < pathbound; path++) { if(path == RF_PATH_A) { //path A APK //load APK setting //path-A offset = rPdp_AntA; for(index = 0; index < 11; index ++) { ODM_SetBBReg(pDM_Odm, offset, bMaskDWord, APK_normal_setting_value_1[index]); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n", offset, ODM_GetBBReg(pDM_Odm, offset, bMaskDWord))); offset += 0x04; } ODM_SetBBReg(pDM_Odm, rConfig_Pmpd_AntB, bMaskDWord, 0x12680000); offset = rConfig_AntA; for(; index < 13; index ++) { ODM_SetBBReg(pDM_Odm, offset, bMaskDWord, APK_normal_setting_value_1[index]); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n", offset, ODM_GetBBReg(pDM_Odm, offset, bMaskDWord))); offset += 0x04; } //page-B1 ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x40000000); //path A offset = rPdp_AntA; for(index = 0; index < 16; index++) { ODM_SetBBReg(pDM_Odm, offset, bMaskDWord, APK_normal_setting_value_2[index]); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset
0x%x value 0x%x\n", offset, ODM_GetBBReg(pDM_Odm, offset, bMaskDWord))); offset += 0x04; } ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x00000000); } else if(path == RF_PATH_B) { //path B APK //load APK setting //path-B offset = rPdp_AntB; for(index = 0; index < 10; index ++) { ODM_SetBBReg(pDM_Odm, offset, bMaskDWord, APK_normal_setting_value_1[index]); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n", offset, ODM_GetBBReg(pDM_Odm, offset, bMaskDWord))); offset += 0x04; } ODM_SetBBReg(pDM_Odm, rConfig_Pmpd_AntA, bMaskDWord, 0x12680000); #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) PHY_SetBBReg(pAdapter, rConfig_Pmpd_AntB, bMaskDWord, 0x12680000); #else PHY_SetBBReg(pDM_Odm, rConfig_Pmpd_AntB, bMaskDWord, 0x12680000); #endif offset = rConfig_AntA; index = 11; for(; index < 13; index ++) //offset 0xb68, 0xb6c { ODM_SetBBReg(pDM_Odm, offset, bMaskDWord, APK_normal_setting_value_1[index]); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n", offset, ODM_GetBBReg(pDM_Odm, offset, bMaskDWord))); offset += 0x04; } //page-B1 ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x40000000); //path B offset = 0xb60; for(index = 0; index < 16; index++) { ODM_SetBBReg(pDM_Odm, offset, bMaskDWord, APK_normal_setting_value_2[index]); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n", offset, ODM_GetBBReg(pDM_Odm, offset, bMaskDWord))); offset += 0x04; } ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0); } //save RF default value #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) regD[path] = PHY_QueryRFReg(pAdapter, path, RF_TXBIAS_A, bMaskDWord); #else regD[path] = ODM_GetRFReg(pDM_Odm, path, RF_TXBIAS_A, bMaskDWord); #endif //Path A AFE all on, path B AFE All off or vice versa for(index = 0; index < IQK_ADDA_REG_NUM ; index++) ODM_SetBBReg(pDM_Odm, AFE_REG[index], bMaskDWord, AFE_on_off[path]); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0xe70 %x\n", ODM_GetBBReg(pDM_Odm, rRx_Wait_CCA, bMaskDWord))); //BB to AP mode if(path == 0) { for(index = 0; index < APK_BB_REG_NUM ; index++) { if(index == 0) //skip continue; else if (index < 5) ODM_SetBBReg(pDM_Odm, BB_REG[index], bMaskDWord, BB_AP_MODE[index]); else if (BB_REG[index] == 0x870) ODM_SetBBReg(pDM_Odm, BB_REG[index], bMaskDWord, BB_backup[index]|BIT10|BIT26); else ODM_SetBBReg(pDM_Odm, BB_REG[index], BIT10, 0x0); } ODM_SetBBReg(pDM_Odm, rTx_IQK_Tone_A, bMaskDWord, 0x01008c00); ODM_SetBBReg(pDM_Odm, rRx_IQK_Tone_A, bMaskDWord, 0x01008c00); } else //path B { ODM_SetBBReg(pDM_Odm, rTx_IQK_Tone_B, bMaskDWord, 0x01008c00); ODM_SetBBReg(pDM_Odm, rRx_IQK_Tone_B, bMaskDWord, 0x01008c00); } ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0x800 %x\n", ODM_GetBBReg(pDM_Odm, 0x800, bMaskDWord))); //MAC settings #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) _PHY_MACSettingCalibration(pAdapter, MAC_REG, MAC_backup); #else _PHY_MACSettingCalibration(pDM_Odm, MAC_REG, MAC_backup); #endif if(path == RF_PATH_A) //Path B to standby mode { ODM_SetRFReg(pDM_Odm, RF_PATH_B, RF_AC, bMaskDWord, 0x10000); } else //Path A to standby mode { ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_AC, bMaskDWord, 0x10000); ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_MODE1, bMaskDWord, 0x1000f); ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_MODE2, bMaskDWord, 0x20103); } delta_offset = ((delta+14)/2); if(delta_offset < 0) delta_offset = 0; else if (delta_offset > 12) delta_offset = 12; //AP
calibration for(index = 0; index < APK_BB_REG_NUM; index++) { if(index != 1) //only DO PA11+PAD01001, AP RF setting continue; tmpReg = APK_RF_init_value[path][index]; #if 1 if(!pDM_Odm->RFCalibrateInfo.bAPKThermalMeterIgnore) { BB_offset = (tmpReg & 0xF0000) >> 16; if(!(tmpReg & BIT15)) //sign bit 0 { BB_offset = -BB_offset; } delta_V = APK_delta_mapping[index][delta_offset]; BB_offset += delta_V; ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() APK index %d tmpReg 0x%x delta_V %d delta_offset %d\n", index, tmpReg, delta_V, delta_offset)); if(BB_offset < 0) { tmpReg = tmpReg & (~BIT15); BB_offset = -BB_offset; } else { tmpReg = tmpReg | BIT15; } tmpReg = (tmpReg & 0xFFF0FFFF) | (BB_offset << 16); } #endif ODM_SetRFReg(pDM_Odm, path, RF_IPA_A, bMaskDWord, 0x8992e); #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0xc %x\n", PHY_QueryRFReg(pAdapter, path, RF_IPA_A, bMaskDWord))); ODM_SetRFReg(pDM_Odm, path, RF_AC, bMaskDWord, APK_RF_value_0[path][index]); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0x0 %x\n", PHY_QueryRFReg(pAdapter, path, RF_AC, bMaskDWord))); ODM_SetRFReg(pDM_Odm, path, RF_TXBIAS_A, bMaskDWord, tmpReg); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0xd %x\n", PHY_QueryRFReg(pAdapter, path, RF_TXBIAS_A, bMaskDWord))); #else ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0xc %x\n", ODM_GetRFReg(pDM_Odm, path, RF_IPA_A, bMaskDWord))); ODM_SetRFReg(pDM_Odm, path, RF_AC, bMaskDWord, APK_RF_value_0[path][index]); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0x0 %x\n", ODM_GetRFReg(pDM_Odm, path, RF_AC, bMaskDWord))); ODM_SetRFReg(pDM_Odm, path, RF_TXBIAS_A, bMaskDWord, tmpReg); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0xd %x\n", ODM_GetRFReg(pDM_Odm, path, RF_TXBIAS_A, bMaskDWord))); #endif // PA11+PAD01111, one shot i = 0; do { ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x80000000); { ODM_SetBBReg(pDM_Odm, APK_offset[path], bMaskDWord, APK_value[0]); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n", APK_offset[path], ODM_GetBBReg(pDM_Odm, APK_offset[path], bMaskDWord))); ODM_delay_ms(3); ODM_SetBBReg(pDM_Odm, APK_offset[path], bMaskDWord, APK_value[1]); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n", APK_offset[path], ODM_GetBBReg(pDM_Odm, APK_offset[path], bMaskDWord))); ODM_delay_ms(20); } ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x00000000); if(path == RF_PATH_A) tmpReg = ODM_GetBBReg(pDM_Odm, rAPK, 0x03E00000); else tmpReg = ODM_GetBBReg(pDM_Odm, rAPK, 0xF8000000); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0xbd8[25:21] %x\n", tmpReg)); i++; } while(tmpReg > apkbound && i < 4); APK_result[path][index] = tmpReg; } } //reload MAC default value #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) _PHY_ReloadMACRegisters(pAdapter, MAC_REG, MAC_backup); #else _PHY_ReloadMACRegisters(pDM_Odm, MAC_REG, MAC_backup); #endif //reload BB default value for(index = 0; index < APK_BB_REG_NUM ; index++) { if(index == 0) //skip continue; ODM_SetBBReg(pDM_Odm, BB_REG[index], bMaskDWord, BB_backup[index]); } //reload AFE default value #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) 
_PHY_ReloadADDARegisters(pAdapter, AFE_REG, AFE_backup, IQK_ADDA_REG_NUM); #else _PHY_ReloadADDARegisters(pDM_Odm, AFE_REG, AFE_backup, IQK_ADDA_REG_NUM); #endif //reload RF path default value for(path = 0; path < pathbound; path++) { ODM_SetRFReg(pDM_Odm, path, 0xd, bMaskDWord, regD[path]); if(path == RF_PATH_B) { ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_MODE1, bMaskDWord, 0x1000f); ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_MODE2, bMaskDWord, 0x20101); } //note no index == 0 if (APK_result[path][1] > 6) APK_result[path][1] = 6; ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("apk path %d result %d 0x%x \t", path, 1, APK_result[path][1])); } ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("\n")); for(path = 0; path < pathbound; path++) { ODM_SetRFReg(pDM_Odm, path, 0x3, bMaskDWord, ((APK_result[path][1] << 15) | (APK_result[path][1] << 10) | (APK_result[path][1] << 5) | APK_result[path][1])); if(path == RF_PATH_A) ODM_SetRFReg(pDM_Odm, path, 0x4, bMaskDWord, ((APK_result[path][1] << 15) | (APK_result[path][1] << 10) | (0x00 << 5) | 0x05)); else ODM_SetRFReg(pDM_Odm, path, 0x4, bMaskDWord, ((APK_result[path][1] << 15) | (APK_result[path][1] << 10) | (0x02 << 5) | 0x05)); #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) if(!IS_HARDWARE_TYPE_8723A(pAdapter)) ODM_SetRFReg(pDM_Odm, path, RF_BS_PA_APSET_G9_G11, bMaskDWord, ((0x08 << 15) | (0x08 << 10) | (0x08 << 5) | 0x08)); #endif } pDM_Odm->RFCalibrateInfo.bAPKdone = TRUE; ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("<==phy_APCalibrate_8188E()\n")); } #define DP_BB_REG_NUM 7 #define DP_RF_REG_NUM 1 #define DP_RETRY_LIMIT 10 #define DP_PATH_NUM 2 #define DP_DPK_NUM 3 #define DP_DPK_VALUE_NUM 2 VOID PHY_IQCalibrate_8188E( #if (DM_ODM_SUPPORT_TYPE & ODM_AP) IN PDM_ODM_T pDM_Odm, #else IN PADAPTER pAdapter, #endif IN BOOLEAN bReCovery ) { #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter); #if (DM_ODM_SUPPORT_TYPE == ODM_MP) PDM_ODM_T pDM_Odm = &pHalData->DM_OutSrc; #else // (DM_ODM_SUPPORT_TYPE == ODM_CE) PDM_ODM_T pDM_Odm = &pHalData->odmpriv; #endif #if (MP_DRIVER == 1) #if (DM_ODM_SUPPORT_TYPE == ODM_MP) PMPT_CONTEXT pMptCtx = &(pAdapter->MptCtx); #else// (DM_ODM_SUPPORT_TYPE == ODM_CE) PMPT_CONTEXT pMptCtx = &(pAdapter->mppriv.MptCtx); #endif #endif//(MP_DRIVER == 1) #endif s4Byte result[4][8]; //last is final result u1Byte i, final_candidate, Indexforchannel; u1Byte channelToIQK = 7; BOOLEAN bPathAOK, bPathBOK; s4Byte RegE94, RegE9C, RegEA4, RegEAC, RegEB4, RegEBC, RegEC4, RegECC, RegTmp = 0; BOOLEAN is12simular, is13simular, is23simular; BOOLEAN bStartContTx = FALSE, bSingleTone = FALSE, bCarrierSuppression = FALSE; u4Byte IQK_BB_REG_92C[IQK_BB_REG_NUM] = { rOFDM0_XARxIQImbalance, rOFDM0_XBRxIQImbalance, rOFDM0_ECCAThreshold, rOFDM0_AGCRSSITable, rOFDM0_XATxIQImbalance, rOFDM0_XBTxIQImbalance, rOFDM0_XCTxAFE, rOFDM0_XDTxAFE, rOFDM0_RxIQExtAnta}; BOOLEAN is2T; is2T = (pDM_Odm->RFType == ODM_2T2R)?TRUE:FALSE; #if (DM_ODM_SUPPORT_TYPE & (ODM_MP|ODM_CE) ) if (ODM_CheckPowerStatus(pAdapter) == FALSE) return; #else prtl8192cd_priv priv = pDM_Odm->priv; #ifdef MP_TEST if(priv->pshare->rf_ft_var.mp_specific) { if((OPMODE & WIFI_MP_CTX_PACKET) || (OPMODE & WIFI_MP_CTX_ST)) return; } #endif if(priv->pshare->IQK_88E_done) bReCovery= 1; priv->pshare->IQK_88E_done = 1; #endif #if (DM_ODM_SUPPORT_TYPE == ODM_CE) if(!(pDM_Odm->SupportAbility & ODM_RF_CALIBRATION)) { return; } #endif #if MP_DRIVER == 1 if (*(pDM_Odm->mp_mode) == 1) { bStartContTx = pMptCtx->bStartContTx; bSingleTone = pMptCtx->bSingleTone; 
bCarrierSuppression = pMptCtx->bCarrierSuppression; } #endif // 20120213<Kordan> Turn on when continuous Tx to pass lab testing. (required by Edlu) if(bSingleTone || bCarrierSuppression) return; #if DISABLE_BB_RF return; #endif #if (DM_ODM_SUPPORT_TYPE & (ODM_CE|ODM_AP)) if(bReCovery) #else//for ODM_MP if(bReCovery && (!pAdapter->bInHctTest)) //YJ,add for PowerTest,120405 #endif { ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD, ("PHY_IQCalibrate_8188E: Return due to bReCovery!\n")); #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) _PHY_ReloadADDARegisters(pAdapter, IQK_BB_REG_92C, pDM_Odm->RFCalibrateInfo.IQK_BB_backup_recover, 9); #else _PHY_ReloadADDARegisters(pDM_Odm, IQK_BB_REG_92C, pDM_Odm->RFCalibrateInfo.IQK_BB_backup_recover, 9); #endif return; } ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK:Start!!!\n")); #if 0//Suggested by Edlu,120413 // IQK on channel 7, should switch back when completed. //originChannel = pHalData->CurrentChannel; originChannel = *(pDM_Odm->pChannel); #if (DM_ODM_SUPPORT_TYPE == ODM_MP) pAdapter->HalFunc.SwChnlByTimerHandler(pAdapter, channelToIQK); #elif (DM_ODM_SUPPORT_TYPE == ODM_CE) pAdapter->HalFunc.set_channel_handler(pAdapter, channelToIQK); #endif #endif for(i = 0; i < 8; i++) { result[0][i] = 0; result[1][i] = 0; result[2][i] = 0; if((i==0) ||(i==2) || (i==4) || (i==6)) result[3][i] = 0x100; else result[3][i] = 0; } final_candidate = 0xff; bPathAOK = FALSE; bPathBOK = FALSE; is12simular = FALSE; is23simular = FALSE; is13simular = FALSE; //ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK !!!interface %d currentband %d ishardwareD %d \n", pDM_Odm->interfaceIndex, pHalData->CurrentBandType92D, IS_HARDWARE_TYPE_8192D(pAdapter))); // RT_TRACE(COMP_INIT,DBG_LOUD,("Acquire Mutex in IQCalibrate \n")); for (i=0; i<3; i++) { #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) phy_IQCalibrate_8188E(pAdapter, result, i, is2T); #else phy_IQCalibrate_8188E(pDM_Odm, result, i, is2T); #endif if(i == 1) { #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) is12simular = phy_SimularityCompare_8188E(pAdapter, result, 0, 1); #else is12simular = phy_SimularityCompare_8188E(pDM_Odm, result, 0, 1); #endif if(is12simular) { final_candidate = 0; ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: is12simular final_candidate is %x\n",final_candidate)); break; } } if(i == 2) { #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) is13simular = phy_SimularityCompare_8188E(pAdapter, result, 0, 2); #else is13simular = phy_SimularityCompare_8188E(pDM_Odm, result, 0, 2); #endif if(is13simular) { final_candidate = 0; ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: is13simular final_candidate is %x\n",final_candidate)); break; } #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) is23simular = phy_SimularityCompare_8188E(pAdapter, result, 1, 2); #else is23simular = phy_SimularityCompare_8188E(pDM_Odm, result, 1, 2); #endif if(is23simular) { final_candidate = 1; ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: is23simular final_candidate is %x\n",final_candidate)); } else { /* for(i = 0; i < 8; i++) RegTmp += result[3][i]; if(RegTmp != 0) final_candidate = 3; else final_candidate = 0xFF; */ final_candidate = 3; } } } // RT_TRACE(COMP_INIT,DBG_LOUD,("Release Mutex in IQCalibrate \n")); for (i=0; i<4; i++) { RegE94 = result[i][0]; RegE9C = result[i][1]; RegEA4 = result[i][2]; RegEAC = result[i][3]; RegEB4 = result[i][4]; RegEBC = result[i][5]; RegEC4 = result[i][6]; RegECC = result[i][7]; ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: RegE94=%x RegE9C=%x RegEA4=%x 
RegEAC=%x RegEB4=%x RegEBC=%x RegEC4=%x RegECC=%x\n ", RegE94, RegE9C, RegEA4, RegEAC, RegEB4, RegEBC, RegEC4, RegECC)); } if(final_candidate != 0xff) { pDM_Odm->RFCalibrateInfo.RegE94 = RegE94 = result[final_candidate][0]; pDM_Odm->RFCalibrateInfo.RegE9C = RegE9C = result[final_candidate][1]; RegEA4 = result[final_candidate][2]; RegEAC = result[final_candidate][3]; pDM_Odm->RFCalibrateInfo.RegEB4 = RegEB4 = result[final_candidate][4]; pDM_Odm->RFCalibrateInfo.RegEBC = RegEBC = result[final_candidate][5]; RegEC4 = result[final_candidate][6]; RegECC = result[final_candidate][7]; ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: final_candidate is %x\n",final_candidate)); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: RegE94=%x RegE9C=%x RegEA4=%x RegEAC=%x RegEB4=%x RegEBC=%x RegEC4=%x RegECC=%x\n ", RegE94, RegE9C, RegEA4, RegEAC, RegEB4, RegEBC, RegEC4, RegECC)); bPathAOK = bPathBOK = TRUE; } else { ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: FAIL use default value\n")); pDM_Odm->RFCalibrateInfo.RegE94 = pDM_Odm->RFCalibrateInfo.RegEB4 = 0x100; //X default value pDM_Odm->RFCalibrateInfo.RegE9C = pDM_Odm->RFCalibrateInfo.RegEBC = 0x0; //Y default value } if((RegE94 != 0)/*&&(RegEA4 != 0)*/) { #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) _PHY_PathAFillIQKMatrix(pAdapter, bPathAOK, result, final_candidate, (RegEA4 == 0)); #else _PHY_PathAFillIQKMatrix(pDM_Odm, bPathAOK, result, final_candidate, (RegEA4 == 0)); #endif } #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) if (is2T) { if((RegEB4 != 0)/*&&(RegEC4 != 0)*/) { _PHY_PathBFillIQKMatrix(pAdapter, bPathBOK, result, final_candidate, (RegEC4 == 0)); } } #endif #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) Indexforchannel = ODM_GetRightChnlPlaceforIQK(pHalData->CurrentChannel); #else Indexforchannel = 0; #endif //To Fix BSOD when final_candidate is 0xff //by sherry 20120321 if(final_candidate < 4) { for(i = 0; i < IQK_Matrix_REG_NUM; i++) pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[Indexforchannel].Value[0][i] = result[final_candidate][i]; pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[Indexforchannel].bIQKDone = TRUE; } //RTPRINT(FINIT, INIT_IQK, ("\nIQK OK Indexforchannel %d.\n", Indexforchannel)); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("\nIQK OK Indexforchannel %d.\n", Indexforchannel)); #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) _PHY_SaveADDARegisters(pAdapter, IQK_BB_REG_92C, pDM_Odm->RFCalibrateInfo.IQK_BB_backup_recover, 9); #else _PHY_SaveADDARegisters(pDM_Odm, IQK_BB_REG_92C, pDM_Odm->RFCalibrateInfo.IQK_BB_backup_recover, IQK_BB_REG_NUM); #endif ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK finished\n")); #if 0 //Suggested by Edlu,120413 #if (DM_ODM_SUPPORT_TYPE == ODM_MP) pAdapter->HalFunc.SwChnlByTimerHandler(pAdapter, originChannel); #elif (DM_ODM_SUPPORT_TYPE == ODM_CE) pAdapter->HalFunc.set_channel_handler(pAdapter, originChannel); #endif #endif } VOID PHY_LCCalibrate_8188E( #if (DM_ODM_SUPPORT_TYPE & ODM_AP) IN PDM_ODM_T pDM_Odm #else IN PADAPTER pAdapter #endif ) { BOOLEAN bStartContTx = FALSE, bSingleTone = FALSE, bCarrierSuppression = FALSE; u4Byte timeout = 2000, timecount = 0; #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter); #if (DM_ODM_SUPPORT_TYPE == ODM_MP) PDM_ODM_T pDM_Odm = &pHalData->DM_OutSrc; #else // (DM_ODM_SUPPORT_TYPE == ODM_CE) PDM_ODM_T pDM_Odm = &pHalData->odmpriv; #endif #if (MP_DRIVER == 1) #if (DM_ODM_SUPPORT_TYPE == ODM_MP) PMPT_CONTEXT pMptCtx = &(pAdapter->MptCtx); #else// (DM_ODM_SUPPORT_TYPE == ODM_CE) PMPT_CONTEXT 
pMptCtx = &(pAdapter->mppriv.MptCtx); #endif #endif//(MP_DRIVER == 1) #endif #if MP_DRIVER == 1 if (*(pDM_Odm->mp_mode) == 1) { bStartContTx = pMptCtx->bStartContTx; bSingleTone = pMptCtx->bSingleTone; bCarrierSuppression = pMptCtx->bCarrierSuppression; } #endif #if DISABLE_BB_RF return; #endif #if (DM_ODM_SUPPORT_TYPE == ODM_CE) if(!(pDM_Odm->SupportAbility & ODM_RF_CALIBRATION)) { return; } #endif // 20120213<Kordan> Turn on when continuous Tx to pass lab testing. (required by Edlu) if(bSingleTone || bCarrierSuppression) return; while(*(pDM_Odm->pbScanInProcess) && timecount < timeout) { ODM_delay_ms(50); timecount += 50; } pDM_Odm->RFCalibrateInfo.bLCKInProgress = TRUE; //ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LCK:Start!!!interface %d currentband %x delay %d ms\n", pDM_Odm->interfaceIndex, pHalData->CurrentBandType92D, timecount)); #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) if(pDM_Odm->RFType == ODM_2T2R) { phy_LCCalibrate_8188E(pAdapter, TRUE); } else #endif { // For 88C 1T1R #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) phy_LCCalibrate_8188E(pAdapter, FALSE); #else phy_LCCalibrate_8188E(pDM_Odm, FALSE); #endif } pDM_Odm->RFCalibrateInfo.bLCKInProgress = FALSE; ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LCK:Finish!!!interface %d\n", pDM_Odm->InterfaceIndex)); } VOID PHY_APCalibrate_8188E( #if (DM_ODM_SUPPORT_TYPE & ODM_AP) IN PDM_ODM_T pDM_Odm, #else IN PADAPTER pAdapter, #endif IN s1Byte delta ) { #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter); #if (DM_ODM_SUPPORT_TYPE == ODM_CE) PDM_ODM_T pDM_Odm = &pHalData->odmpriv; #endif #if (DM_ODM_SUPPORT_TYPE == ODM_MP) PDM_ODM_T pDM_Odm = &pHalData->DM_OutSrc; #endif #endif #if DISABLE_BB_RF return; #endif return; #if (DM_ODM_SUPPORT_TYPE == ODM_CE) if(!(pDM_Odm->SupportAbility & ODM_RF_CALIBRATION)) { return; } #endif #if FOR_BRAZIL_PRETEST != 1 if(pDM_Odm->RFCalibrateInfo.bAPKdone) #endif return; #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) if(pDM_Odm->RFType == ODM_2T2R){ phy_APCalibrate_8188E(pAdapter, delta, TRUE); } else #endif { // For 88C 1T1R #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) phy_APCalibrate_8188E(pAdapter, delta, FALSE); #else phy_APCalibrate_8188E(pDM_Odm, delta, FALSE); #endif } } VOID phy_SetRFPathSwitch_8188E( #if (DM_ODM_SUPPORT_TYPE & ODM_AP) IN PDM_ODM_T pDM_Odm, #else IN PADAPTER pAdapter, #endif IN BOOLEAN bMain, IN BOOLEAN is2T ) { #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter); #if (DM_ODM_SUPPORT_TYPE == ODM_CE) PDM_ODM_T pDM_Odm = &pHalData->odmpriv; #elif (DM_ODM_SUPPORT_TYPE == ODM_MP) PDM_ODM_T pDM_Odm = &pHalData->DM_OutSrc; #endif #if (DM_ODM_SUPPORT_TYPE == ODM_MP) if(!pAdapter->bHWInitReady) #elif (DM_ODM_SUPPORT_TYPE == ODM_CE) if(pAdapter->hw_init_completed == _FALSE) #endif { u1Byte u1bTmp; u1bTmp = ODM_Read1Byte(pDM_Odm, REG_LEDCFG2) | BIT7; ODM_Write1Byte(pDM_Odm, REG_LEDCFG2, u1bTmp); //ODM_SetBBReg(pDM_Odm, REG_LEDCFG0, BIT23, 0x01); ODM_SetBBReg(pDM_Odm, rFPGA0_XAB_RFParameter, BIT13, 0x01); } #endif if(is2T) //92C { if(bMain) ODM_SetBBReg(pDM_Odm, rFPGA0_XB_RFInterfaceOE, BIT5|BIT6, 0x1); //92C_Path_A else ODM_SetBBReg(pDM_Odm, rFPGA0_XB_RFInterfaceOE, BIT5|BIT6, 0x2); //BT } else //88C { if(bMain) ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, BIT8|BIT9, 0x2); //Main else ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, BIT8|BIT9, 0x1); //Aux } } VOID PHY_SetRFPathSwitch_8188E( #if (DM_ODM_SUPPORT_TYPE & ODM_AP) IN PDM_ODM_T pDM_Odm, #else IN PADAPTER pAdapter, #endif IN BOOLEAN bMain ) { #if !(DM_ODM_SUPPORT_TYPE & 
ODM_AP) HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter); #if (DM_ODM_SUPPORT_TYPE == ODM_CE) PDM_ODM_T pDM_Odm = &pHalData->odmpriv; #endif #if (DM_ODM_SUPPORT_TYPE == ODM_MP) PDM_ODM_T pDM_Odm = &pHalData->DM_OutSrc; #endif #endif #if DISABLE_BB_RF return; #endif #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) if(pDM_Odm->RFType == ODM_2T2R) { phy_SetRFPathSwitch_8188E(pAdapter, bMain, TRUE); } else #endif { // For 88C 1T1R #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) phy_SetRFPathSwitch_8188E(pAdapter, bMain, FALSE); #else phy_SetRFPathSwitch_8188E(pDM_Odm, bMain, FALSE); #endif } } #if (DM_ODM_SUPPORT_TYPE == ODM_MP) //digital predistortion VOID phy_DigitalPredistortion( #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) IN PADAPTER pAdapter, #else IN PDM_ODM_T pDM_Odm, #endif IN BOOLEAN is2T ) { #if ( RT_PLATFORM == PLATFORM_WINDOWS) #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter); #if (DM_ODM_SUPPORT_TYPE == ODM_CE) PDM_ODM_T pDM_Odm = &pHalData->odmpriv; #endif #if (DM_ODM_SUPPORT_TYPE == ODM_MP) PDM_ODM_T pDM_Odm = &pHalData->DM_OutSrc; #endif #endif u4Byte tmpReg, tmpReg2, index, i; u1Byte path, pathbound = PATH_NUM; u4Byte AFE_backup[IQK_ADDA_REG_NUM]; u4Byte AFE_REG[IQK_ADDA_REG_NUM] = { rFPGA0_XCD_SwitchControl, rBlue_Tooth, rRx_Wait_CCA, rTx_CCK_RFON, rTx_CCK_BBON, rTx_OFDM_RFON, rTx_OFDM_BBON, rTx_To_Rx, rTx_To_Tx, rRx_CCK, rRx_OFDM, rRx_Wait_RIFS, rRx_TO_Rx, rStandby, rSleep, rPMPD_ANAEN }; u4Byte BB_backup[DP_BB_REG_NUM]; u4Byte BB_REG[DP_BB_REG_NUM] = { rOFDM0_TRxPathEnable, rFPGA0_RFMOD, rOFDM0_TRMuxPar, rFPGA0_XCD_RFInterfaceSW, rFPGA0_XAB_RFInterfaceSW, rFPGA0_XA_RFInterfaceOE, rFPGA0_XB_RFInterfaceOE}; u4Byte BB_settings[DP_BB_REG_NUM] = { 0x00a05430, 0x02040000, 0x000800e4, 0x22208000, 0x0, 0x0, 0x0}; u4Byte RF_backup[DP_PATH_NUM][DP_RF_REG_NUM]; u4Byte RF_REG[DP_RF_REG_NUM] = { RF_TXBIAS_A}; u4Byte MAC_backup[IQK_MAC_REG_NUM]; u4Byte MAC_REG[IQK_MAC_REG_NUM] = { REG_TXPAUSE, REG_BCN_CTRL, REG_BCN_CTRL_1, REG_GPIO_MUXCFG}; u4Byte Tx_AGC[DP_DPK_NUM][DP_DPK_VALUE_NUM] = { {0x1e1e1e1e, 0x03901e1e}, {0x18181818, 0x03901818}, {0x0e0e0e0e, 0x03900e0e} }; u4Byte AFE_on_off[PATH_NUM] = { 0x04db25a4, 0x0b1b25a4}; //path A on path B off / path A off path B on u1Byte RetryCount = 0; ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("==>phy_DigitalPredistortion()\n")); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_DigitalPredistortion for %s\n", (is2T ? "2T2R" : "1T1R"))); //save BB default value for(index=0; index<DP_BB_REG_NUM; index++) BB_backup[index] = ODM_GetBBReg(pDM_Odm, BB_REG[index], bMaskDWord); //save MAC default value #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) _PHY_SaveMACRegisters(pAdapter, MAC_REG, MAC_backup); #else _PHY_SaveMACRegisters(pDM_Odm, MAC_REG, MAC_backup); #endif //save RF default value for(path=0; path<DP_PATH_NUM; path++) { for(index=0; index<DP_RF_REG_NUM; index++) #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) RF_backup[path][index] = PHY_QueryRFReg(pAdapter, path, RF_REG[index], bMaskDWord); #else RF_backup[path][index] = ODM_GetRFReg(pDM_Odm, path, RF_REG[index], bMaskDWord); #endif } //save AFE default value #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) _PHY_SaveADDARegisters(pAdapter, AFE_REG, AFE_backup, IQK_ADDA_REG_NUM); #else _PHY_SaveADDARegisters(pDM_Odm, AFE_REG, AFE_backup, IQK_ADDA_REG_NUM); #endif //Path A/B AFE all on for(index = 0; index < IQK_ADDA_REG_NUM ; index++) ODM_SetBBReg(pDM_Odm, AFE_REG[index], bMaskDWord, 0x6fdb25a4); //BB register setting for(index = 0; index < DP_BB_REG_NUM; index++) { if(index < 4) ODM_SetBBReg(pDM_Odm, BB_REG[index], bMaskDWord, BB_settings[index]); else if (index == 4) ODM_SetBBReg(pDM_Odm,BB_REG[index], bMaskDWord, BB_backup[index]|BIT10|BIT26); else ODM_SetBBReg(pDM_Odm, BB_REG[index], BIT10, 0x00); } //MAC register setting #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) _PHY_MACSettingCalibration(pAdapter, MAC_REG, MAC_backup); #else _PHY_MACSettingCalibration(pDM_Odm, MAC_REG, MAC_backup); #endif //PAGE-E IQC setting ODM_SetBBReg(pDM_Odm, rTx_IQK_Tone_A, bMaskDWord, 0x01008c00); ODM_SetBBReg(pDM_Odm, rRx_IQK_Tone_A, bMaskDWord, 0x01008c00); ODM_SetBBReg(pDM_Odm, rTx_IQK_Tone_B, bMaskDWord, 0x01008c00); ODM_SetBBReg(pDM_Odm, rRx_IQK_Tone_B, bMaskDWord, 0x01008c00); //path_A DPK //Path B to standby mode ODM_SetRFReg(pDM_Odm, RF_PATH_B, RF_AC, bMaskDWord, 0x10000); // PA gain = 11 & PAD1 => tx_agc 1f ~11 // PA gain = 11 & PAD2 => tx_agc 10~0e // PA gain = 01 => tx_agc 0b~0d // PA gain = 00 => tx_agc 0a~00 ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x40000000); ODM_SetBBReg(pDM_Odm, 0xbc0, bMaskDWord, 0x0005361f); ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x00000000); //do inner loopback DPK 3 times for(i = 0; i < 3; i++) { //PA gain = 11 & PAD2 => tx_agc = 0x0f/0x0c/0x07 for(index = 0; index < 3; index++) ODM_SetBBReg(pDM_Odm, 0xe00+index*4, bMaskDWord, Tx_AGC[i][0]); ODM_SetBBReg(pDM_Odm,0xe00+index*4, bMaskDWord, Tx_AGC[i][1]); for(index = 0; index < 4; index++) ODM_SetBBReg(pDM_Odm,0xe10+index*4, bMaskDWord, Tx_AGC[i][0]); // PAGE_B for Path-A inner loopback DPK setting ODM_SetBBReg(pDM_Odm,rPdp_AntA, bMaskDWord, 0x02097098); ODM_SetBBReg(pDM_Odm,rPdp_AntA_4, bMaskDWord, 0xf76d9f84); ODM_SetBBReg(pDM_Odm,rConfig_Pmpd_AntA, bMaskDWord, 0x0004ab87); ODM_SetBBReg(pDM_Odm,rConfig_AntA, bMaskDWord, 0x00880000); //----send one shot signal----// // Path A ODM_SetBBReg(pDM_Odm,rConfig_Pmpd_AntA, bMaskDWord, 0x80047788); ODM_delay_ms(1); ODM_SetBBReg(pDM_Odm, rConfig_Pmpd_AntA, bMaskDWord, 0x00047788); ODM_delay_ms(50); } //PA gain = 11 => tx_agc = 1a for(index = 0; index < 3; index++) ODM_SetBBReg(pDM_Odm,0xe00+index*4, bMaskDWord, 0x34343434); ODM_SetBBReg(pDM_Odm,0xe08+index*4, bMaskDWord, 0x03903434); for(index = 0; index < 4; index++) ODM_SetBBReg(pDM_Odm,0xe10+index*4, bMaskDWord, 0x34343434); //==================================== // PAGE_B for Path-A DPK setting //==================================== // open inner loopback @ b00[19]:10 od 0xb00 0x01097018
ODM_SetBBReg(pDM_Odm,rPdp_AntA, bMaskDWord, 0x02017098); ODM_SetBBReg(pDM_Odm,rPdp_AntA_4, bMaskDWord, 0xf76d9f84); ODM_SetBBReg(pDM_Odm,rConfig_Pmpd_AntA, bMaskDWord, 0x0004ab87); ODM_SetBBReg(pDM_Odm,rConfig_AntA, bMaskDWord, 0x00880000); //rf_lpbk_setup //1.rf 00:5205a, rf 0d:0e52c ODM_SetRFReg(pDM_Odm, RF_PATH_A, 0x0c, bMaskDWord, 0x8992b); ODM_SetRFReg(pDM_Odm, RF_PATH_A, 0x0d, bMaskDWord, 0x0e52c); ODM_SetRFReg(pDM_Odm, RF_PATH_A, 0x00, bMaskDWord, 0x5205a ); //----send one shot signal----// // Path A ODM_SetBBReg(pDM_Odm,rConfig_Pmpd_AntA, bMaskDWord, 0x800477c0); ODM_delay_ms(1); ODM_SetBBReg(pDM_Odm,rConfig_Pmpd_AntA, bMaskDWord, 0x000477c0); ODM_delay_ms(50); while(RetryCount < DP_RETRY_LIMIT && !pDM_Odm->RFCalibrateInfo.bDPPathAOK) { //----read back measurement results----// ODM_SetBBReg(pDM_Odm, rPdp_AntA, bMaskDWord, 0x0c297018); tmpReg = ODM_GetBBReg(pDM_Odm, 0xbe0, bMaskDWord); ODM_delay_ms(10); ODM_SetBBReg(pDM_Odm, rPdp_AntA, bMaskDWord, 0x0c29701f); tmpReg2 = ODM_GetBBReg(pDM_Odm, 0xbe8, bMaskDWord); ODM_delay_ms(10); tmpReg = (tmpReg & bMaskHWord) >> 16; tmpReg2 = (tmpReg2 & bMaskHWord) >> 16; if(tmpReg < 0xf0 || tmpReg > 0x105 || tmpReg2 > 0xff ) { ODM_SetBBReg(pDM_Odm, rPdp_AntA, bMaskDWord, 0x02017098); ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x80000000); ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x00000000); ODM_delay_ms(1); ODM_SetBBReg(pDM_Odm, rConfig_Pmpd_AntA, bMaskDWord, 0x800477c0); ODM_delay_ms(1); ODM_SetBBReg(pDM_Odm, rConfig_Pmpd_AntA, bMaskDWord, 0x000477c0); ODM_delay_ms(50); RetryCount++; ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("path A DPK RetryCount %d 0xbe0[31:16] %x 0xbe8[31:16] %x\n", RetryCount, tmpReg, tmpReg2)); } else { ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("path A DPK Success\n")); pDM_Odm->RFCalibrateInfo.bDPPathAOK = TRUE; break; } } RetryCount = 0; //DPP path A if(pDM_Odm->RFCalibrateInfo.bDPPathAOK) { // DP settings ODM_SetBBReg(pDM_Odm, rPdp_AntA, bMaskDWord, 0x01017098); ODM_SetBBReg(pDM_Odm, rPdp_AntA_4, bMaskDWord, 0x776d9f84); ODM_SetBBReg(pDM_Odm, rConfig_Pmpd_AntA, bMaskDWord, 0x0004ab87); ODM_SetBBReg(pDM_Odm, rConfig_AntA, bMaskDWord, 0x00880000); ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x40000000); for(i=rPdp_AntA; i<=0xb3c; i+=4) { ODM_SetBBReg(pDM_Odm, i, bMaskDWord, 0x40004000); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("path A offset = 0x%x\n", i)); } //pwsf ODM_SetBBReg(pDM_Odm, 0xb40, bMaskDWord, 0x40404040); ODM_SetBBReg(pDM_Odm, 0xb44, bMaskDWord, 0x28324040); ODM_SetBBReg(pDM_Odm, 0xb48, bMaskDWord, 0x10141920); for(i=0xb4c; i<=0xb5c; i+=4) { ODM_SetBBReg(pDM_Odm, i, bMaskDWord, 0x0c0c0c0c); } //TX_AGC boundary ODM_SetBBReg(pDM_Odm, 0xbc0, bMaskDWord, 0x0005361f); ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x00000000); } else { ODM_SetBBReg(pDM_Odm, rPdp_AntA, bMaskDWord, 0x00000000); ODM_SetBBReg(pDM_Odm, rPdp_AntA_4, bMaskDWord, 0x00000000); } //DPK path B if(is2T) { //Path A to standby mode ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_AC, bMaskDWord, 0x10000); // LUTs => tx_agc // PA gain = 11 & PAD1, => tx_agc 1f ~11 // PA gain = 11 & PAD2, => tx_agc 10 ~0e // PA gain = 01 => tx_agc 0b ~0d // PA gain = 00 => tx_agc 0a ~00 ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x40000000); ODM_SetBBReg(pDM_Odm, 0xbc4, bMaskDWord, 0x0005361f); ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x00000000); //do inner loopback DPK 3 times for(i = 0; i < 3; i++) { //PA gain = 11 & PAD2 => tx_agc = 0x0f/0x0c/0x07 for(index = 0; index < 4; index++) ODM_SetBBReg(pDM_Odm,
0x830+index*4, bMaskDWord, Tx_AGC[i][0]); for(index = 0; index < 2; index++) ODM_SetBBReg(pDM_Odm, 0x848+index*4, bMaskDWord, Tx_AGC[i][0]); for(index = 0; index < 2; index++) ODM_SetBBReg(pDM_Odm, 0x868+index*4, bMaskDWord, Tx_AGC[i][0]); // PAGE_B for Path-B inner loopback DPK setting ODM_SetBBReg(pDM_Odm, rPdp_AntB, bMaskDWord, 0x02097098); ODM_SetBBReg(pDM_Odm, rPdp_AntB_4, bMaskDWord, 0xf76d9f84); ODM_SetBBReg(pDM_Odm, rConfig_Pmpd_AntB, bMaskDWord, 0x0004ab87); ODM_SetBBReg(pDM_Odm, rConfig_AntB, bMaskDWord, 0x00880000); //----send one shot signal----// // Path B ODM_SetBBReg(pDM_Odm,rConfig_Pmpd_AntB, bMaskDWord, 0x80047788); ODM_delay_ms(1); ODM_SetBBReg(pDM_Odm, rConfig_Pmpd_AntB, bMaskDWord, 0x00047788); ODM_delay_ms(50); } // PA gain = 11 => tx_agc = 1a for(index = 0; index < 4; index++) ODM_SetBBReg(pDM_Odm, 0x830+index*4, bMaskDWord, 0x34343434); for(index = 0; index < 2; index++) ODM_SetBBReg(pDM_Odm, 0x848+index*4, bMaskDWord, 0x34343434); for(index = 0; index < 2; index++) ODM_SetBBReg(pDM_Odm, 0x868+index*4, bMaskDWord, 0x34343434); // PAGE_B for Path-B DPK setting ODM_SetBBReg(pDM_Odm, rPdp_AntB, bMaskDWord, 0x02017098); ODM_SetBBReg(pDM_Odm, rPdp_AntB_4, bMaskDWord, 0xf76d9f84); ODM_SetBBReg(pDM_Odm, rConfig_Pmpd_AntB, bMaskDWord, 0x0004ab87); ODM_SetBBReg(pDM_Odm, rConfig_AntB, bMaskDWord, 0x00880000); // RF lpbk switches on ODM_SetBBReg(pDM_Odm, 0x840, bMaskDWord, 0x0101000f); ODM_SetBBReg(pDM_Odm, 0x840, bMaskDWord, 0x01120103); //Path-B RF lpbk ODM_SetRFReg(pDM_Odm, RF_PATH_B, 0x0c, bMaskDWord, 0x8992b); ODM_SetRFReg(pDM_Odm, RF_PATH_B, 0x0d, bMaskDWord, 0x0e52c); ODM_SetRFReg(pDM_Odm, RF_PATH_B, RF_AC, bMaskDWord, 0x5205a); //----send one shot signal----// ODM_SetBBReg(pDM_Odm, rConfig_Pmpd_AntB, bMaskDWord, 0x800477c0); ODM_delay_ms(1); ODM_SetBBReg(pDM_Odm, rConfig_Pmpd_AntB, bMaskDWord, 0x000477c0); ODM_delay_ms(50); while(RetryCount < DP_RETRY_LIMIT && !pDM_Odm->RFCalibrateInfo.bDPPathBOK) { //----read back measurement results----// ODM_SetBBReg(pDM_Odm, rPdp_AntB, bMaskDWord, 0x0c297018); tmpReg = ODM_GetBBReg(pDM_Odm, 0xbf0, bMaskDWord); ODM_SetBBReg(pDM_Odm, rPdp_AntB, bMaskDWord, 0x0c29701f); tmpReg2 = ODM_GetBBReg(pDM_Odm, 0xbf8, bMaskDWord); tmpReg = (tmpReg & bMaskHWord) >> 16; tmpReg2 = (tmpReg2 & bMaskHWord) >> 16; if(tmpReg < 0xf0 || tmpReg > 0x105 || tmpReg2 > 0xff) { ODM_SetBBReg(pDM_Odm, rPdp_AntB, bMaskDWord, 0x02017098); ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x80000000); ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x00000000); ODM_delay_ms(1); ODM_SetBBReg(pDM_Odm, rConfig_Pmpd_AntB, bMaskDWord, 0x800477c0); ODM_delay_ms(1); ODM_SetBBReg(pDM_Odm, rConfig_Pmpd_AntB, bMaskDWord, 0x000477c0); ODM_delay_ms(50); RetryCount++; ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("path B DPK RetryCount %d 0xbf0[31:16] %x, 0xbf8[31:16] %x\n", RetryCount , tmpReg, tmpReg2)); } else { ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("path B DPK Success\n")); pDM_Odm->RFCalibrateInfo.bDPPathBOK = TRUE; break; } } //DPP path B if(pDM_Odm->RFCalibrateInfo.bDPPathBOK) { // DP setting // LUT by SRAM ODM_SetBBReg(pDM_Odm, rPdp_AntB, bMaskDWord, 0x01017098); ODM_SetBBReg(pDM_Odm, rPdp_AntB_4, bMaskDWord, 0x776d9f84); ODM_SetBBReg(pDM_Odm, rConfig_Pmpd_AntB, bMaskDWord, 0x0004ab87); ODM_SetBBReg(pDM_Odm, rConfig_AntB, bMaskDWord, 0x00880000); ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x40000000); for(i=0xb60; i<=0xb9c; i+=4) { ODM_SetBBReg(pDM_Odm, i, bMaskDWord, 0x40004000); ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("path B ofsset = 0x%x\n", i)); } // PWSF ODM_SetBBReg(pDM_Odm, 0xba0, bMaskDWord, 0x40404040); ODM_SetBBReg(pDM_Odm, 0xba4, bMaskDWord, 0x28324050); ODM_SetBBReg(pDM_Odm, 0xba8, bMaskDWord, 0x0c141920); for(i=0xbac; i<=0xbbc; i+=4) { ODM_SetBBReg(pDM_Odm, i, bMaskDWord, 0x0c0c0c0c); } // tx_agc boundary ODM_SetBBReg(pDM_Odm, 0xbc4, bMaskDWord, 0x0005361f); ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x00000000); } else { ODM_SetBBReg(pDM_Odm, rPdp_AntB, bMaskDWord, 0x00000000); ODM_SetBBReg(pDM_Odm, rPdp_AntB_4, bMaskDWord, 0x00000000); } } //reload BB default value for(index=0; index<DP_BB_REG_NUM; index++) ODM_SetBBReg(pDM_Odm, BB_REG[index], bMaskDWord, BB_backup[index]); //reload RF default value for(path = 0; path<DP_PATH_NUM; path++) { for( i = 0 ; i < DP_RF_REG_NUM ; i++){ ODM_SetRFReg(pDM_Odm, path, RF_REG[i], bMaskDWord, RF_backup[path][i]); } } ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_MODE1, bMaskDWord, 0x1000f); //standby mode ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_MODE2, bMaskDWord, 0x20101); //RF lpbk switches off //reload AFE default value #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) _PHY_ReloadADDARegisters(pAdapter, AFE_REG, AFE_backup, IQK_ADDA_REG_NUM); //reload MAC default value _PHY_ReloadMACRegisters(pAdapter, MAC_REG, MAC_backup); #else _PHY_ReloadADDARegisters(pDM_Odm, AFE_REG, AFE_backup, IQK_ADDA_REG_NUM); //reload MAC default value _PHY_ReloadMACRegisters(pDM_Odm, MAC_REG, MAC_backup); #endif pDM_Odm->RFCalibrateInfo.bDPdone = TRUE; ODM_RT_TRACE(pDM_Odm,ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("<==phy_DigitalPredistortion()\n")); #endif } VOID PHY_DigitalPredistortion_8188E( #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) IN PADAPTER pAdapter #else IN PDM_ODM_T pDM_Odm #endif ) { #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter); #if (DM_ODM_SUPPORT_TYPE == ODM_CE) PDM_ODM_T pDM_Odm = &pHalData->odmpriv; #endif #if (DM_ODM_SUPPORT_TYPE == ODM_MP) PDM_ODM_T pDM_Odm = &pHalData->DM_OutSrc; #endif #endif #if DISABLE_BB_RF return; #endif return; if(pDM_Odm->RFCalibrateInfo.bDPdone) return; #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) if(pDM_Odm->RFType == ODM_2T2R){ phy_DigitalPredistortion(pAdapter, TRUE); } else #endif { // For 88C 1T1R phy_DigitalPredistortion(pAdapter, FALSE); } } //return value TRUE => Main; FALSE => Aux BOOLEAN phy_QueryRFPathSwitch_8188E( #if (DM_ODM_SUPPORT_TYPE & ODM_AP) IN PDM_ODM_T pDM_Odm, #else IN PADAPTER pAdapter, #endif IN BOOLEAN is2T ) { #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter); #if (DM_ODM_SUPPORT_TYPE == ODM_CE) PDM_ODM_T pDM_Odm = &pHalData->odmpriv; #endif #if (DM_ODM_SUPPORT_TYPE == ODM_MP) PDM_ODM_T pDM_Odm = &pHalData->DM_OutSrc; #endif #endif if(!pAdapter->bHWInitReady) { u1Byte u1bTmp; u1bTmp = ODM_Read1Byte(pDM_Odm, REG_LEDCFG2) | BIT7; ODM_Write1Byte(pDM_Odm, REG_LEDCFG2, u1bTmp); //ODM_SetBBReg(pDM_Odm, REG_LEDCFG0, BIT23, 0x01); ODM_SetBBReg(pDM_Odm, rFPGA0_XAB_RFParameter, BIT13, 0x01); } if(is2T) // { if(ODM_GetBBReg(pDM_Odm, rFPGA0_XB_RFInterfaceOE, BIT5|BIT6) == 0x01) return TRUE; else return FALSE; } else { if(ODM_GetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, BIT8|BIT9) == 0x02) return TRUE; else return FALSE; } } //return value TRUE => Main; FALSE => Aux BOOLEAN PHY_QueryRFPathSwitch_8188E( #if (DM_ODM_SUPPORT_TYPE & ODM_AP) IN PDM_ODM_T pDM_Odm #else IN PADAPTER pAdapter #endif ) { #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) HAL_DATA_TYPE *pHalData = GET_HAL_DATA(pAdapter); #if (DM_ODM_SUPPORT_TYPE == ODM_CE) PDM_ODM_T pDM_Odm = &pHalData->odmpriv; #endif #if (DM_ODM_SUPPORT_TYPE == 
ODM_MP) PDM_ODM_T pDM_Odm = &pHalData->DM_OutSrc; #endif #endif #if DISABLE_BB_RF return TRUE; #endif #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) //if(IS_92C_SERIAL( pHalData->VersionID)){ if(pDM_Odm->RFType == ODM_2T2R){ return phy_QueryRFPathSwitch_8188E(pAdapter, TRUE); } else #endif { // For 88C 1T1R #if !(DM_ODM_SUPPORT_TYPE & ODM_AP) return phy_QueryRFPathSwitch_8188E(pAdapter, FALSE); #else return phy_QueryRFPathSwitch_8188E(pDM_Odm, FALSE); #endif } } #endif
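/*
 * Editor's note -- a minimal standalone sketch (assumed names, not part of
 * the original driver) of the candidate-selection policy used by
 * PHY_IQCalibrate_8188E above: IQK runs up to three times, the runs are
 * compared pairwise with phy_SimularityCompare_8188E (passed in here as a
 * callback so the sketch stays self-contained), and the first run of the
 * first agreeing pair wins; with no agreement, slot 3 (the merged/default
 * row) is used. The real loop short-circuits: the third run is skipped when
 * runs 0 and 1 already agree.
 */
static u1Byte iqk_pick_candidate(BOOLEAN (*similar)(s4Byte result[][8], u1Byte c1, u1Byte c2), s4Byte result[][8])
{
	if (similar(result, 0, 1))
		return 0;	/* runs 0 and 1 agree */
	if (similar(result, 0, 2))
		return 0;	/* runs 0 and 2 agree */
	if (similar(result, 1, 2))
		return 1;	/* runs 1 and 2 agree */
	return 3;		/* no agreement: fall back to the merged/default row */
}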
gpl-2.0
LuaxY/TrinityCore
src/server/game/Entities/Creature/GossipDef.cpp
6
30253
/* * Copyright (C) 2008-2015 TrinityCore <http://www.trinitycore.org/> * Copyright (C) 2005-2009 MaNGOS <http://getmangos.com/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "QuestDef.h" #include "GossipDef.h" #include "ObjectMgr.h" #include "Opcodes.h" #include "WorldPacket.h" #include "WorldSession.h" #include "Formulas.h" #include "QuestPackets.h" #include "NPCPackets.h" GossipMenu::GossipMenu() { _menuId = 0; _locale = DEFAULT_LOCALE; _senderGUID.Clear(); } GossipMenu::~GossipMenu() { ClearMenu(); } void GossipMenu::AddMenuItem(int32 menuItemId, uint8 icon, std::string const& message, uint32 sender, uint32 action, std::string const& boxMessage, uint32 boxMoney, bool coded /*= false*/) { ASSERT(_menuItems.size() <= GOSSIP_MAX_MENU_ITEMS); // Find a free new id - script case if (menuItemId == -1) { menuItemId = 0; if (!_menuItems.empty()) { for (GossipMenuItemContainer::const_iterator itr = _menuItems.begin(); itr != _menuItems.end(); ++itr) { if (int32(itr->first) > menuItemId) break; menuItemId = itr->first + 1; } } } GossipMenuItem& menuItem = _menuItems[menuItemId]; menuItem.MenuItemIcon = icon; menuItem.Message = message; menuItem.IsCoded = coded; menuItem.Sender = sender; menuItem.OptionType = action; menuItem.BoxMessage = boxMessage; menuItem.BoxMoney = boxMoney; } /** * @name AddMenuItem * @brief Adds a localized gossip menu item from db by menu id and menu item id. * @param menuId Gossip menu id. * @param menuItemId Gossip menu item id. * @param sender Identifier of the current menu. * @param action Custom action given to OnGossipHello. */ void GossipMenu::AddMenuItem(uint32 menuId, uint32 menuItemId, uint32 sender, uint32 action) { /// Find items for given menu id. GossipMenuItemsMapBounds bounds = sObjectMgr->GetGossipMenuItemsMapBounds(menuId); /// Return if there are none. if (bounds.first == bounds.second) return; /// Iterate over each of them. for (GossipMenuItemsContainer::const_iterator itr = bounds.first; itr != bounds.second; ++itr) { /// Find the one with the given menu item id. if (itr->second.OptionIndex != menuItemId) continue; /// Store texts for localization. std::string strOptionText, strBoxText; BroadcastTextEntry const* optionBroadcastText = sBroadcastTextStore.LookupEntry(itr->second.OptionBroadcastTextId); BroadcastTextEntry const* boxBroadcastText = sBroadcastTextStore.LookupEntry(itr->second.BoxBroadcastTextId); /// OptionText if (optionBroadcastText) strOptionText = DB2Manager::GetBroadcastTextValue(optionBroadcastText, GetLocale()); else strOptionText = itr->second.OptionText; /// BoxText if (boxBroadcastText) strBoxText = DB2Manager::GetBroadcastTextValue(boxBroadcastText, GetLocale()); else strBoxText = itr->second.BoxText; /// Check need of localization. if (GetLocale() != DEFAULT_LOCALE) { if (!optionBroadcastText) { /// Find localizations from database.
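/// Editor's note -- an illustrative sketch of the text-resolution order the
/// code around this point implements, using only calls that already appear
/// in this file; ResolveOptionText itself is a hypothetical helper. A
/// broadcast-text row wins when one exists; otherwise the raw
/// gossip_menu_option text is used and, for non-default locales only,
/// overridden from the locale table:
///
///   std::string ResolveOptionText(uint32 menuId, uint32 menuItemId, GossipMenuItems const& item, LocaleConstant locale)
///   {
///       if (BroadcastTextEntry const* bct = sBroadcastTextStore.LookupEntry(item.OptionBroadcastTextId))
///           return DB2Manager::GetBroadcastTextValue(bct, locale);
///       std::string text = item.OptionText;
///       if (locale != DEFAULT_LOCALE)
///           if (GossipMenuItemsLocale const* loc = sObjectMgr->GetGossipMenuItemsLocale(MAKE_PAIR32(menuId, menuItemId)))
///               ObjectMgr::GetLocaleString(loc->OptionText, locale, text);
///       return text;
///   }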
if (GossipMenuItemsLocale const* gossipMenuLocale = sObjectMgr->GetGossipMenuItemsLocale(MAKE_PAIR32(menuId, menuItemId))) ObjectMgr::GetLocaleString(gossipMenuLocale->OptionText, GetLocale(), strOptionText); } if (!boxBroadcastText) { /// Find localizations from database. if (GossipMenuItemsLocale const* gossipMenuLocale = sObjectMgr->GetGossipMenuItemsLocale(MAKE_PAIR32(menuId, menuItemId))) ObjectMgr::GetLocaleString(gossipMenuLocale->BoxText, GetLocale(), strBoxText); } } /// Add menu item with existing method. Menu item id -1 is also used in ADD_GOSSIP_ITEM macro. AddMenuItem(-1, itr->second.OptionIcon, strOptionText, sender, action, strBoxText, itr->second.BoxMoney, itr->second.BoxCoded); } } void GossipMenu::AddGossipMenuItemData(uint32 menuItemId, uint32 gossipActionMenuId, uint32 gossipActionPoi) { GossipMenuItemData& itemData = _menuItemData[menuItemId]; itemData.GossipActionMenuId = gossipActionMenuId; itemData.GossipActionPoi = gossipActionPoi; } uint32 GossipMenu::GetMenuItemSender(uint32 menuItemId) const { GossipMenuItemContainer::const_iterator itr = _menuItems.find(menuItemId); if (itr == _menuItems.end()) return 0; return itr->second.Sender; } uint32 GossipMenu::GetMenuItemAction(uint32 menuItemId) const { GossipMenuItemContainer::const_iterator itr = _menuItems.find(menuItemId); if (itr == _menuItems.end()) return 0; return itr->second.OptionType; } bool GossipMenu::IsMenuItemCoded(uint32 menuItemId) const { GossipMenuItemContainer::const_iterator itr = _menuItems.find(menuItemId); if (itr == _menuItems.end()) return false; return itr->second.IsCoded; } void GossipMenu::ClearMenu() { _menuItems.clear(); _menuItemData.clear(); } PlayerMenu::PlayerMenu(WorldSession* session) : _session(session) { if (_session) _gossipMenu.SetLocale(_session->GetSessionDbLocaleIndex()); } PlayerMenu::~PlayerMenu() { ClearMenus(); } void PlayerMenu::ClearMenus() { _gossipMenu.ClearMenu(); _questMenu.ClearMenu(); } void PlayerMenu::SendGossipMenu(uint32 titleTextId, ObjectGuid objectGUID) { _gossipMenu.SetSenderGUID(objectGUID); WorldPackets::NPC::GossipMessage packet; packet.GossipGUID = objectGUID; packet.GossipID = _gossipMenu.GetMenuId(); packet.TextID = titleTextId; packet.GossipOptions.resize(_gossipMenu.GetMenuItems().size()); uint32 count = 0; for (GossipMenuItemContainer::const_iterator itr = _gossipMenu.GetMenuItems().begin(); itr != _gossipMenu.GetMenuItems().end(); ++itr) { WorldPackets::NPC::ClientGossipOptions& opt = packet.GossipOptions[count]; GossipMenuItem const& item = itr->second; opt.ClientOption = itr->first; opt.OptionNPC = item.MenuItemIcon; opt.OptionFlags = item.IsCoded; // makes pop up box password opt.OptionCost = item.BoxMoney; // money required to open menu, 2.0.3 opt.Text = item.Message; // text for gossip item opt.Confirm = item.BoxMessage; // accept text (related to money) pop up box, 2.0.3 ++count; } // Store this instead of checking the Singleton every loop iteration bool questLevelInTitle = sWorld->getBoolConfig(CONFIG_UI_QUESTLEVELS_IN_DIALOGS); packet.GossipText.resize(_questMenu.GetMenuItemCount()); count = 0; for (uint8 i = 0; i < _questMenu.GetMenuItemCount(); ++i) { QuestMenuItem const& item = _questMenu.GetItem(i); uint32 questID = item.QuestId; if (Quest const* quest = sObjectMgr->GetQuestTemplate(questID)) { WorldPackets::NPC::ClientGossipText& text = packet.GossipText[count]; text.QuestID = questID; text.QuestType = item.QuestIcon; text.QuestLevel = quest->GetQuestLevel(); text.QuestFlags[0] = quest->GetFlags(); text.QuestFlags[1] = 
quest->GetFlagsEx(); text.Repeatable = quest->IsRepeatable(); std::string title = quest->GetLogTitle(); LocaleConstant localeConstant = _session->GetSessionDbLocaleIndex(); if (localeConstant >= LOCALE_enUS) if (QuestTemplateLocale const* localeData = sObjectMgr->GetQuestLocale(questID)) ObjectMgr::GetLocaleString(localeData->LogTitle, localeConstant, title); if (questLevelInTitle) AddQuestLevelToTitle(title, quest->GetQuestLevel()); text.QuestTitle = title; ++count; } } // Shrink to the real size packet.GossipText.resize(count); _session->SendPacket(packet.Write()); } void PlayerMenu::SendCloseGossip() { _gossipMenu.SetSenderGUID(ObjectGuid::Empty); WorldPackets::NPC::GossipComplete packet; _session->SendPacket(packet.Write()); } void PlayerMenu::SendPointOfInterest(uint32 id) const { PointOfInterest const* pointOfInterest = sObjectMgr->GetPointOfInterest(id); if (!pointOfInterest) { TC_LOG_ERROR("sql.sql", "Request to send non-existing PointOfInterest (Id: %u), ignored.", id); return; } std::string name = pointOfInterest->Name; LocaleConstant localeConstant = _session->GetSessionDbLocaleIndex(); if (localeConstant >= LOCALE_enUS) if (PointOfInterestLocale const* localeData = sObjectMgr->GetPointOfInterestLocale(id)) ObjectMgr::GetLocaleString(localeData->Name, localeConstant, name); WorldPackets::NPC::GossipPOI packet; packet.Flags = pointOfInterest->Flags; packet.Pos = pointOfInterest->Pos; packet.Icon = pointOfInterest->Icon; packet.Importance = pointOfInterest->Importance; packet.Name = name; _session->SendPacket(packet.Write()); } /*********************************************************/ /*** QUEST SYSTEM ***/ /*********************************************************/ QuestMenu::QuestMenu() { _questMenuItems.reserve(16); // can be set for max from most often sizes to speedup push_back and less memory use } QuestMenu::~QuestMenu() { ClearMenu(); } void QuestMenu::AddMenuItem(uint32 QuestId, uint8 Icon) { if (!sObjectMgr->GetQuestTemplate(QuestId)) return; ASSERT(_questMenuItems.size() <= GOSSIP_MAX_MENU_ITEMS); QuestMenuItem questMenuItem; questMenuItem.QuestId = QuestId; questMenuItem.QuestIcon = Icon; _questMenuItems.push_back(questMenuItem); } bool QuestMenu::HasItem(uint32 questId) const { for (QuestMenuItemList::const_iterator i = _questMenuItems.begin(); i != _questMenuItems.end(); ++i) if (i->QuestId == questId) return true; return false; } void QuestMenu::ClearMenu() { _questMenuItems.clear(); } void PlayerMenu::SendQuestGiverQuestList(ObjectGuid guid) { WorldPackets::Quest::QuestGiverQuestList questList; questList.QuestGiverGUID = guid; if (QuestGreeting const* questGreeting = sObjectMgr->GetQuestGreeting(guid)) { questList.GreetEmoteDelay = questGreeting->greetEmoteDelay; questList.GreetEmoteType = questGreeting->greetEmoteType; questList.Greeting = questGreeting->greeting; } else TC_LOG_ERROR("misc", "Guid: %s - No quest greeting found.", guid.ToString().c_str()); // Store this instead of checking the Singleton every loop iteration bool questLevelInTitle = sWorld->getBoolConfig(CONFIG_UI_QUESTLEVELS_IN_DIALOGS); for (uint32 i = 0; i < _questMenu.GetMenuItemCount(); ++i) { QuestMenuItem const& questMenuItem = _questMenu.GetItem(i); uint32 questID = questMenuItem.QuestId; if (Quest const* quest = sObjectMgr->GetQuestTemplate(questID)) { std::string title = quest->GetLogTitle(); LocaleConstant localeConstant = _session->GetSessionDbLocaleIndex(); if (localeConstant >= LOCALE_enUS) if (QuestTemplateLocale const* questTemplateLocale = sObjectMgr->GetQuestLocale(questID)) 
ObjectMgr::GetLocaleString(questTemplateLocale->LogTitle, localeConstant, title); if (questLevelInTitle) AddQuestLevelToTitle(title, quest->GetQuestLevel()); bool repeatable = false; // NYI questList.GossipTexts.push_back(WorldPackets::Quest::GossipTextData(questID, questMenuItem.QuestIcon, quest->GetQuestLevel(), quest->GetFlags(), quest->GetFlagsEx(), repeatable, title)); } } _session->SendPacket(questList.Write()); TC_LOG_DEBUG("network", "WORLD: Sent SMSG_QUEST_GIVER_QUEST_LIST_MESSAGE NPC=%s", guid.ToString().c_str()); } void PlayerMenu::SendQuestGiverStatus(uint32 questStatus, ObjectGuid npcGUID) const { WorldPackets::Quest::QuestGiverStatus packet; packet.QuestGiver.Guid = npcGUID; packet.QuestGiver.Status = questStatus; _session->SendPacket(packet.Write()); TC_LOG_DEBUG("network", "WORLD: Sent SMSG_QUESTGIVER_STATUS NPC=%s, status=%u", npcGUID.ToString().c_str(), questStatus); } void PlayerMenu::SendQuestGiverQuestDetails(Quest const* quest, ObjectGuid npcGUID, bool activateAccept) const { std::string questLogTitle = quest->GetLogTitle(); std::string questLogDescription = quest->GetLogDescription(); std::string questDescription = quest->GetQuestDescription(); std::string portraitGiverText = quest->GetPortraitGiverText(); std::string portraitGiverName = quest->GetPortraitGiverName(); std::string portraitTurnInText = quest->GetPortraitTurnInText(); std::string portraitTurnInName = quest->GetPortraitTurnInName(); LocaleConstant localeConstant = _session->GetSessionDbLocaleIndex(); if (localeConstant >= LOCALE_enUS) { if (QuestTemplateLocale const* questTemplateLocale = sObjectMgr->GetQuestLocale(quest->GetQuestId())) { ObjectMgr::GetLocaleString(questTemplateLocale->LogTitle, localeConstant, questLogTitle); ObjectMgr::GetLocaleString(questTemplateLocale->LogDescription, localeConstant, questLogDescription); ObjectMgr::GetLocaleString(questTemplateLocale->QuestDescription, localeConstant, questDescription); ObjectMgr::GetLocaleString(questTemplateLocale->PortraitGiverText, localeConstant, portraitGiverText); ObjectMgr::GetLocaleString(questTemplateLocale->PortraitGiverName, localeConstant, portraitGiverName); ObjectMgr::GetLocaleString(questTemplateLocale->PortraitTurnInText, localeConstant, portraitTurnInText); ObjectMgr::GetLocaleString(questTemplateLocale->PortraitTurnInName, localeConstant, portraitTurnInName); } } if (sWorld->getBoolConfig(CONFIG_UI_QUESTLEVELS_IN_DIALOGS)) AddQuestLevelToTitle(questLogTitle, quest->GetQuestLevel()); WorldPackets::Quest::QuestGiverQuestDetails packet; packet.QuestGiverGUID = npcGUID; packet.InformUnit = _session->GetPlayer()->GetDivider(); packet.QuestID = quest->GetQuestId(); packet.QuestTitle = questLogTitle; packet.LogDescription = questLogDescription; packet.DescriptionText = questDescription; packet.PortraitGiverText = portraitGiverText; packet.PortraitGiverName = portraitGiverName; packet.PortraitTurnInText = portraitTurnInText; packet.PortraitTurnInName = portraitTurnInName; packet.PortraitGiver = quest->GetQuestGiverPortrait(); packet.PortraitTurnIn = quest->GetQuestTurnInPortrait(); packet.DisplayPopup = activateAccept; packet.QuestFlags[0] = quest->GetFlags(); packet.QuestFlags[1] = quest->GetFlagsEx(); packet.SuggestedPartyMembers = quest->GetSuggestedPlayers(); if (quest->GetSrcSpell()) packet.LearnSpells.push_back(quest->GetSrcSpell()); quest->BuildQuestRewards(packet.Rewards, _session->GetPlayer()); packet.DescEmotes.resize(QUEST_EMOTE_COUNT); for (uint32 i = 0; i < QUEST_EMOTE_COUNT; ++i) { packet.DescEmotes[i].Type = 
quest->DetailsEmote[i]; packet.DescEmotes[i].Delay = quest->DetailsEmoteDelay[i]; } QuestObjectives const& objs = quest->GetObjectives(); packet.Objectives.resize(objs.size()); for (uint32 i = 0; i < objs.size(); ++i) { packet.Objectives[i].ID = objs[i].ID; packet.Objectives[i].ObjectID = objs[i].ObjectID; packet.Objectives[i].Amount = objs[i].Amount; packet.Objectives[i].Type = objs[i].Type; } _session->SendPacket(packet.Write()); TC_LOG_DEBUG("network", "WORLD: Sent SMSG_QUESTGIVER_QUEST_DETAILS NPC=%s, questid=%u", npcGUID.ToString().c_str(), quest->GetQuestId()); } void PlayerMenu::SendQuestQueryResponse(Quest const* quest) const { LocaleConstant localeConstant = _session->GetSessionDbLocaleIndex(); std::string questLogTitle = quest->GetLogTitle(); std::string questLogDescription = quest->GetLogDescription(); std::string questDescription = quest->GetQuestDescription(); std::string areaDescription = quest->GetAreaDescription(); std::string questCompletionLog = quest->GetQuestCompletionLog(); std::string portraitGiverText = quest->GetPortraitGiverText(); std::string portraitGiverName = quest->GetPortraitGiverName(); std::string portraitTurnInText = quest->GetPortraitTurnInText(); std::string portraitTurnInName = quest->GetPortraitTurnInName(); if (localeConstant >= LOCALE_enUS) { if (QuestTemplateLocale const* questTemplateLocale = sObjectMgr->GetQuestLocale(quest->GetQuestId())) { ObjectMgr::GetLocaleString(questTemplateLocale->LogTitle, localeConstant, questLogTitle); ObjectMgr::GetLocaleString(questTemplateLocale->LogDescription, localeConstant, questLogDescription); ObjectMgr::GetLocaleString(questTemplateLocale->QuestDescription, localeConstant, questDescription); ObjectMgr::GetLocaleString(questTemplateLocale->AreaDescription, localeConstant, areaDescription); ObjectMgr::GetLocaleString(questTemplateLocale->QuestCompletionLog, localeConstant, questCompletionLog); ObjectMgr::GetLocaleString(questTemplateLocale->PortraitGiverText, localeConstant, portraitGiverText); ObjectMgr::GetLocaleString(questTemplateLocale->PortraitGiverName, localeConstant, portraitGiverName); ObjectMgr::GetLocaleString(questTemplateLocale->PortraitTurnInText, localeConstant, portraitTurnInText); ObjectMgr::GetLocaleString(questTemplateLocale->PortraitTurnInName, localeConstant, portraitTurnInName); } } WorldPackets::Quest::QueryQuestInfoResponse packet; packet.Allow = true; packet.QuestID = quest->GetQuestId(); packet.Info.QuestID = quest->GetQuestId(); packet.Info.QuestType = quest->GetQuestType(); packet.Info.QuestLevel = quest->GetQuestLevel(); packet.Info.QuestPackageID = quest->GetQuestPackageID(); packet.Info.QuestMinLevel = quest->GetMinLevel(); packet.Info.QuestSortID = quest->GetZoneOrSort(); packet.Info.QuestInfoID = quest->GetQuestInfoID(); packet.Info.SuggestedGroupNum = quest->GetSuggestedPlayers(); packet.Info.RewardNextQuest = quest->GetNextQuestInChain(); packet.Info.RewardXPDifficulty = quest->GetXPDifficulty(); packet.Info.RewardXPMultiplier = quest->GetXPMultiplier(); if (!quest->HasFlag(QUEST_FLAGS_HIDDEN_REWARDS)) packet.Info.RewardMoney = quest->RewardMoney < 0 ? 
quest->RewardMoney : _session->GetPlayer()->GetQuestMoneyReward(quest); packet.Info.RewardMoneyDifficulty = quest->GetRewMoneyDifficulty(); packet.Info.RewardMoneyMultiplier = quest->GetMoneyMultiplier(); packet.Info.RewardBonusMoney = quest->GetRewMoneyMaxLevel(); packet.Info.RewardDisplaySpell = quest->GetRewDisplaySpell(); packet.Info.RewardSpell = quest->GetRewSpell(); packet.Info.RewardHonor = quest->GetRewHonor(); packet.Info.RewardKillHonor = quest->GetRewKillHonor(); packet.Info.StartItem = quest->GetSrcItemId(); packet.Info.Flags = quest->GetFlags(); packet.Info.FlagsEx = quest->GetFlagsEx(); packet.Info.RewardTitle = quest->GetRewTitle(); packet.Info.RewardTalents = quest->GetBonusTalents(); packet.Info.RewardArenaPoints = quest->GetRewArenaPoints(); packet.Info.RewardSkillLineID = quest->GetRewardSkillId(); packet.Info.RewardNumSkillUps = quest->GetRewardSkillPoints(); packet.Info.RewardFactionFlags = quest->GetRewardReputationMask(); packet.Info.PortraitGiver = quest->GetQuestGiverPortrait(); packet.Info.PortraitTurnIn = quest->GetQuestTurnInPortrait(); for (uint8 i = 0; i < QUEST_ITEM_DROP_COUNT; ++i) { packet.Info.ItemDrop[i] = quest->ItemDrop[i]; packet.Info.ItemDropQuantity[i] = quest->ItemDropQuantity[i]; } if (!quest->HasFlag(QUEST_FLAGS_HIDDEN_REWARDS)) { for (uint8 i = 0; i < QUEST_REWARD_ITEM_COUNT; ++i) { packet.Info.RewardItems[i] = quest->RewardItemId[i]; packet.Info.RewardAmount[i] = quest->RewardItemCount[i]; } for (uint8 i = 0; i < QUEST_REWARD_CHOICES_COUNT; ++i) { packet.Info.UnfilteredChoiceItems[i].ItemID = quest->RewardChoiceItemId[i]; packet.Info.UnfilteredChoiceItems[i].Quantity = quest->RewardChoiceItemCount[i]; } } for (uint8 i = 0; i < QUEST_REWARD_REPUTATIONS_COUNT; ++i) { packet.Info.RewardFactionID[i] = quest->RewardFactionId[i]; packet.Info.RewardFactionValue[i] = quest->RewardFactionValue[i]; packet.Info.RewardFactionOverride[i] = quest->RewardFactionOverride[i]; } packet.Info.POIContinent = quest->GetPOIContinent(); packet.Info.POIx = quest->GetPOIx(); packet.Info.POIy = quest->GetPOIy(); packet.Info.POIPriority = quest->GetPOIPriority(); if (sWorld->getBoolConfig(CONFIG_UI_QUESTLEVELS_IN_DIALOGS)) AddQuestLevelToTitle(questLogTitle, quest->GetQuestLevel()); packet.Info.LogTitle = questLogTitle; packet.Info.LogDescription = questLogDescription; packet.Info.QuestDescription = questDescription; packet.Info.AreaDescription = areaDescription; packet.Info.QuestCompletionLog = questCompletionLog; packet.Info.AllowableRaces = quest->GetAllowableRaces(); for (QuestObjective const& questObjective : quest->GetObjectives()) { packet.Info.Objectives.push_back(questObjective); if (localeConstant >= LOCALE_enUS) { if (QuestObjectivesLocale const* questObjectivesLocale = sObjectMgr->GetQuestObjectivesLocale(questObjective.ID)) ObjectMgr::GetLocaleString(questObjectivesLocale->Description, localeConstant, packet.Info.Objectives.back().Description); } } for (uint32 i = 0; i < QUEST_REWARD_CURRENCY_COUNT; ++i) { packet.Info.RewardCurrencyID[i] = quest->RewardCurrencyId[i]; packet.Info.RewardCurrencyQty[i] = quest->RewardCurrencyCount[i]; } packet.Info.PortraitGiverText = portraitGiverText; packet.Info.PortraitGiverName = portraitGiverName; packet.Info.PortraitTurnInText = portraitTurnInText; packet.Info.PortraitTurnInName = portraitTurnInName; packet.Info.AcceptedSoundKitID = quest->GetSoundAccept(); packet.Info.CompleteSoundKitID = quest->GetSoundTurnIn(); packet.Info.AreaGroupID = quest->GetAreaGroupID(); packet.Info.TimeAllowed = quest->GetLimitTime(); 
_session->SendPacket(packet.Write()); TC_LOG_DEBUG("network", "WORLD: Sent SMSG_QUEST_QUERY_RESPONSE questid=%u", quest->GetQuestId()); } void PlayerMenu::SendQuestGiverOfferReward(Quest const* quest, ObjectGuid npcGUID, bool enableNext) const { std::string questTitle = quest->GetLogTitle(); std::string questOfferRewardText = quest->GetOfferRewardText(); std::string portraitGiverText = quest->GetPortraitGiverText(); std::string portraitGiverName = quest->GetPortraitGiverName(); std::string portraitTurnInText = quest->GetPortraitTurnInText(); std::string portraitTurnInName = quest->GetPortraitTurnInName(); LocaleConstant locale = _session->GetSessionDbLocaleIndex(); if (locale >= LOCALE_enUS) { if (QuestTemplateLocale const* questTemplateLocale = sObjectMgr->GetQuestLocale(quest->GetQuestId())) { ObjectMgr::GetLocaleString(questTemplateLocale->LogTitle, locale, questTitle); ObjectMgr::GetLocaleString(questTemplateLocale->OfferRewardText, locale, questOfferRewardText); ObjectMgr::GetLocaleString(questTemplateLocale->PortraitGiverText, locale, portraitGiverText); ObjectMgr::GetLocaleString(questTemplateLocale->PortraitGiverName, locale, portraitGiverName); ObjectMgr::GetLocaleString(questTemplateLocale->PortraitTurnInText, locale, portraitTurnInText); ObjectMgr::GetLocaleString(questTemplateLocale->PortraitTurnInName, locale, portraitTurnInName); } } if (sWorld->getBoolConfig(CONFIG_UI_QUESTLEVELS_IN_DIALOGS)) AddQuestLevelToTitle(questTitle, quest->GetQuestLevel()); WorldPackets::Quest::QuestGiverOfferRewardMessage packet; WorldPackets::Quest::QuestGiverOfferReward& offer = packet.QuestData; quest->BuildQuestRewards(offer.Rewards, _session->GetPlayer()); offer.QuestGiverGUID = npcGUID; // Is there a better way? what about game objects? if (Creature const* creature = sObjectAccessor->GetCreature(*_session->GetPlayer(), npcGUID)) offer.QuestGiverCreatureID = creature->GetCreatureTemplate()->Entry; offer.QuestID = quest->GetQuestId(); offer.AutoLaunched = enableNext; offer.SuggestedPartyMembers = quest->GetSuggestedPlayers(); for (uint32 i = 0; i < QUEST_EMOTE_COUNT && quest->OfferRewardEmote[i]; ++i) offer.Emotes.push_back(WorldPackets::Quest::QuestDescEmote(quest->OfferRewardEmote[i], quest->OfferRewardEmoteDelay[i])); offer.QuestFlags[0] = quest->GetFlags(); offer.QuestFlags[1] = quest->GetFlagsEx(); packet.QuestTitle = questTitle; packet.RewardText = questOfferRewardText; packet.PortraitTurnIn = quest->GetQuestTurnInPortrait(); packet.PortraitGiver = quest->GetQuestGiverPortrait(); packet.PortraitGiverText = portraitGiverText; packet.PortraitGiverName = portraitGiverName; packet.PortraitTurnInText = portraitTurnInText; packet.PortraitTurnInName = portraitTurnInName; packet.QuestPackageID = quest->GetQuestPackageID(); _session->SendPacket(packet.Write()); TC_LOG_DEBUG("network", "WORLD: Sent SMSG_QUESTGIVER_OFFER_REWARD NPC=%s, questid=%u", npcGUID.ToString().c_str(), quest->GetQuestId()); } void PlayerMenu::SendQuestGiverRequestItems(Quest const* quest, ObjectGuid npcGUID, bool canComplete, bool closeOnCancel) const { // We can always call to RequestItems, but this packet only goes out if there are actually // items. 
Otherwise, we'll skip straight to the OfferReward std::string questTitle = quest->GetLogTitle(); std::string requestItemsText = quest->GetRequestItemsText(); LocaleConstant locale = _session->GetSessionDbLocaleIndex(); if (locale >= LOCALE_enUS) { if (QuestTemplateLocale const* questTemplateLocale = sObjectMgr->GetQuestLocale(quest->GetQuestId())) { ObjectMgr::GetLocaleString(questTemplateLocale->LogTitle, locale, questTitle); ObjectMgr::GetLocaleString(questTemplateLocale->RequestItemsText, locale, requestItemsText); } } if (!quest->HasSpecialFlag(QUEST_SPECIAL_FLAGS_DELIVER) && canComplete) { SendQuestGiverOfferReward(quest, npcGUID, true); return; } if (sWorld->getBoolConfig(CONFIG_UI_QUESTLEVELS_IN_DIALOGS)) AddQuestLevelToTitle(questTitle, quest->GetQuestLevel()); WorldPackets::Quest::QuestGiverRequestItems packet; packet.QuestGiverGUID = npcGUID; // Is there a better way? what about game objects? if (Creature const* creature = sObjectAccessor->GetCreature(*_session->GetPlayer(), npcGUID)) packet.QuestGiverCreatureID = creature->GetCreatureTemplate()->Entry; packet.QuestID = quest->GetQuestId(); if (canComplete) { packet.CompEmoteDelay = quest->EmoteOnCompleteDelay; packet.CompEmoteType = quest->EmoteOnComplete; } else { packet.CompEmoteDelay = quest->EmoteOnIncompleteDelay; packet.CompEmoteType = quest->EmoteOnIncomplete; } packet.QuestFlags[0] = quest->GetFlags(); packet.QuestFlags[1] = quest->GetFlagsEx(); packet.SuggestPartyMembers = quest->GetSuggestedPlayers(); packet.StatusFlags = 0xDF; // Unk, send common value packet.MoneyToGet = 0; for (QuestObjective const& obj : quest->GetObjectives()) { switch (obj.Type) { case QUEST_OBJECTIVE_ITEM: packet.Collect.push_back(WorldPackets::Quest::QuestObjectiveCollect(obj.ObjectID, obj.Amount)); break; case QUEST_OBJECTIVE_CURRENCY: packet.Currency.push_back(WorldPackets::Quest::QuestCurrency(obj.ObjectID, obj.Amount)); break; case QUEST_OBJECTIVE_MONEY: packet.MoneyToGet += obj.Amount; break; default: break; } } packet.AutoLaunched = closeOnCancel; packet.QuestTitle = questTitle; packet.CompletionText = requestItemsText; _session->SendPacket(packet.Write()); TC_LOG_DEBUG("network", "WORLD: Sent SMSG_QUESTGIVER_REQUEST_ITEMS NPC=%s, questid=%u", npcGUID.ToString().c_str(), quest->GetQuestId()); } void PlayerMenu::AddQuestLevelToTitle(std::string &title, int32 level) { // Adds the quest level to the front of the quest title // example: [13] Westfall Stew std::stringstream questTitlePretty; questTitlePretty << "[" << level << "] " << title; title = questTitlePretty.str(); }
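/*
 * A minimal sketch (not part of the original file) of the free-id scan that
 * AddMenuItem() performs over the ordered _menuItems map when called with
 * menuItemId == -1: walk the keys in ascending order and stop at the first
 * gap. It assumes keys are visited in ascending order, as they are for a
 * std::map; the helper name and the plain sorted-array form are hypothetical,
 * for illustration only.
 */
static int find_free_menu_item_id(const int *sorted_ids, int count)
{
    int id = 0;
    for (int i = 0; i < count; ++i)
    {
        if (sorted_ids[i] > id)
            break;              /* gap found: 'id' is unused */
        id = sorted_ids[i] + 1; /* no gap yet, try the next candidate */
    }
    return id;
}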
gpl-2.0
liquidware/liquidware_beagleboard_linux
drivers/hwmon/coretemp.c
6
14238
/* * coretemp.c - Linux kernel module for hardware monitoring * * Copyright (C) 2007 Rudolf Marek <r.marek@assembler.cz> * * Inspired from many hwmon drivers * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301 USA. */ #include <linux/module.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/hwmon.h> #include <linux/sysfs.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/list.h> #include <linux/platform_device.h> #include <linux/cpu.h> #include <linux/pci.h> #include <asm/msr.h> #include <asm/processor.h> #define DRVNAME "coretemp" typedef enum { SHOW_TEMP, SHOW_TJMAX, SHOW_TTARGET, SHOW_LABEL, SHOW_NAME } SHOW; /* * Functions declaration */ static struct coretemp_data *coretemp_update_device(struct device *dev); struct coretemp_data { struct device *hwmon_dev; struct mutex update_lock; const char *name; u32 id; char valid; /* zero until following fields are valid */ unsigned long last_updated; /* in jiffies */ int temp; int tjmax; int ttarget; u8 alarm; }; /* * Sysfs stuff */ static ssize_t show_name(struct device *dev, struct device_attribute *devattr, char *buf) { int ret; struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct coretemp_data *data = dev_get_drvdata(dev); if (attr->index == SHOW_NAME) ret = sprintf(buf, "%s\n", data->name); else /* show label */ ret = sprintf(buf, "Core %d\n", data->id); return ret; } static ssize_t show_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { struct coretemp_data *data = coretemp_update_device(dev); /* read the Out-of-spec log, never clear */ return sprintf(buf, "%d\n", data->alarm); } static ssize_t show_temp(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct coretemp_data *data = coretemp_update_device(dev); int err; if (attr->index == SHOW_TEMP) err = data->valid ? 
sprintf(buf, "%d\n", data->temp) : -EAGAIN; else if (attr->index == SHOW_TJMAX) err = sprintf(buf, "%d\n", data->tjmax); else err = sprintf(buf, "%d\n", data->ttarget); return err; } static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, SHOW_TEMP); static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp, NULL, SHOW_TJMAX); static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp, NULL, SHOW_TTARGET); static DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL); static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_name, NULL, SHOW_LABEL); static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, SHOW_NAME); static struct attribute *coretemp_attributes[] = { &sensor_dev_attr_name.dev_attr.attr, &sensor_dev_attr_temp1_label.dev_attr.attr, &dev_attr_temp1_crit_alarm.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_crit.dev_attr.attr, NULL }; static const struct attribute_group coretemp_group = { .attrs = coretemp_attributes, }; static struct coretemp_data *coretemp_update_device(struct device *dev) { struct coretemp_data *data = dev_get_drvdata(dev); mutex_lock(&data->update_lock); if (!data->valid || time_after(jiffies, data->last_updated + HZ)) { u32 eax, edx; data->valid = 0; rdmsr_on_cpu(data->id, MSR_IA32_THERM_STATUS, &eax, &edx); data->alarm = (eax >> 5) & 1; /* update only if data has been valid */ if (eax & 0x80000000) { data->temp = data->tjmax - (((eax >> 16) & 0x7f) * 1000); data->valid = 1; } else { dev_dbg(dev, "Temperature data invalid (0x%x)\n", eax); } data->last_updated = jiffies; } mutex_unlock(&data->update_lock); return data; } static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) { /* The 100C is default for both mobile and non mobile CPUs */ int tjmax = 100000; int tjmax_ee = 85000; int usemsr_ee = 1; int err; u32 eax, edx; struct pci_dev *host_bridge; /* Early chips have no MSR for TjMax */ if ((c->x86_model == 0xf) && (c->x86_mask < 4)) { usemsr_ee = 0; } /* Atom CPUs */ if (c->x86_model == 0x1c) { usemsr_ee = 0; host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL && (host_bridge->device == 0xa000 /* NM10 based nettop */ || host_bridge->device == 0xa010)) /* NM10 based netbook */ tjmax = 100000; else tjmax = 90000; pci_dev_put(host_bridge); } if ((c->x86_model > 0xe) && (usemsr_ee)) { u8 platform_id; /* Now we can detect the mobile CPU using Intel provided table http://softwarecommunity.intel.com/Wiki/Mobility/720.htm For Core2 cores, check MSR 0x17, bit 28 1 = Mobile CPU */ err = rdmsr_safe_on_cpu(id, 0x17, &eax, &edx); if (err) { dev_warn(dev, "Unable to access MSR 0x17, assuming desktop" " CPU\n"); usemsr_ee = 0; } else if (c->x86_model < 0x17 && !(eax & 0x10000000)) { /* Trust bit 28 up to Penryn, I could not find any documentation on that; if you happen to know someone at Intel please ask */ usemsr_ee = 0; } else { /* Platform ID bits 52:50 (EDX starts at bit 32) */ platform_id = (edx >> 18) & 0x7; /* Mobile Penryn CPU seems to be platform ID 7 or 5 (guesswork) */ if ((c->x86_model == 0x17) && ((platform_id == 5) || (platform_id == 7))) { /* If MSR EE bit is set, set it to 90 degrees C, otherwise 105 degrees C */ tjmax_ee = 90000; tjmax = 105000; } } } if (usemsr_ee) { err = rdmsr_safe_on_cpu(id, 0xee, &eax, &edx); if (err) { dev_warn(dev, "Unable to access MSR 0xEE, for Tjmax, left" " at default\n"); } else if (eax & 0x40000000) { tjmax = tjmax_ee; } /* if we dont use msr EE it means we are desktop CPU (with exeception of Atom) */ 
} else if (tjmax == 100000) { dev_warn(dev, "Using relative temperature scale!\n"); } return tjmax; } static int __devinit get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) { /* The 100C is default for both mobile and non mobile CPUs */ int err; u32 eax, edx; u32 val; /* A new feature of current Intel(R) processors, the IA32_TEMPERATURE_TARGET contains the TjMax value */ err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx); if (err) { dev_warn(dev, "Unable to read TjMax from CPU.\n"); } else { val = (eax >> 16) & 0xff; /* * If the TjMax is not plausible, an assumption * will be used */ if ((val > 80) && (val < 120)) { dev_info(dev, "TjMax is %d C.\n", val); return val * 1000; } } /* * An assumption is made for early CPUs and unreadable MSR. * NOTE: the given value may not be correct. */ switch (c->x86_model) { case 0xe: case 0xf: case 0x16: case 0x1a: dev_warn(dev, "TjMax is assumed as 100 C!\n"); return 100000; break; case 0x17: case 0x1c: /* Atom CPUs */ return adjust_tjmax(c, id, dev); break; default: dev_warn(dev, "CPU (model=0x%x) is not supported yet," " using default TjMax of 100C.\n", c->x86_model); return 100000; } } static int __devinit coretemp_probe(struct platform_device *pdev) { struct coretemp_data *data; struct cpuinfo_x86 *c = &cpu_data(pdev->id); int err; u32 eax, edx; if (!(data = kzalloc(sizeof(struct coretemp_data), GFP_KERNEL))) { err = -ENOMEM; dev_err(&pdev->dev, "Out of memory\n"); goto exit; } data->id = pdev->id; data->name = "coretemp"; mutex_init(&data->update_lock); /* test if we can access the THERM_STATUS MSR */ err = rdmsr_safe_on_cpu(data->id, MSR_IA32_THERM_STATUS, &eax, &edx); if (err) { dev_err(&pdev->dev, "Unable to access THERM_STATUS MSR, giving up\n"); goto exit_free; } /* Check if we have problem with errata AE18 of Core processors: Readings might stop update when processor visited too deep sleep, fixed for stepping D0 (6EC). */ if ((c->x86_model == 0xe) && (c->x86_mask < 0xc)) { /* check for microcode update */ rdmsr_on_cpu(data->id, MSR_IA32_UCODE_REV, &eax, &edx); if (edx < 0x39) { err = -ENODEV; dev_err(&pdev->dev, "Errata AE18 not fixed, update BIOS or " "microcode of the CPU!\n"); goto exit_free; } } data->tjmax = get_tjmax(c, data->id, &pdev->dev); platform_set_drvdata(pdev, data); /* * read the still undocumented IA32_TEMPERATURE_TARGET. It exists * on older CPUs but not in this register, * Atoms don't have it either. 
*/ if ((c->x86_model > 0xe) && (c->x86_model != 0x1c)) { err = rdmsr_safe_on_cpu(data->id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx); if (err) { dev_warn(&pdev->dev, "Unable to read" " IA32_TEMPERATURE_TARGET MSR\n"); } else { data->ttarget = data->tjmax - (((eax >> 8) & 0xff) * 1000); err = device_create_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr); if (err) goto exit_free; } } if ((err = sysfs_create_group(&pdev->dev.kobj, &coretemp_group))) goto exit_dev; data->hwmon_dev = hwmon_device_register(&pdev->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); dev_err(&pdev->dev, "Class registration failed (%d)\n", err); goto exit_class; } return 0; exit_class: sysfs_remove_group(&pdev->dev.kobj, &coretemp_group); exit_dev: device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr); exit_free: kfree(data); exit: return err; } static int __devexit coretemp_remove(struct platform_device *pdev) { struct coretemp_data *data = platform_get_drvdata(pdev); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&pdev->dev.kobj, &coretemp_group); device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr); platform_set_drvdata(pdev, NULL); kfree(data); return 0; } static struct platform_driver coretemp_driver = { .driver = { .owner = THIS_MODULE, .name = DRVNAME, }, .probe = coretemp_probe, .remove = __devexit_p(coretemp_remove), }; struct pdev_entry { struct list_head list; struct platform_device *pdev; unsigned int cpu; }; static LIST_HEAD(pdev_list); static DEFINE_MUTEX(pdev_list_mutex); static int __cpuinit coretemp_device_add(unsigned int cpu) { int err; struct platform_device *pdev; struct pdev_entry *pdev_entry; pdev = platform_device_alloc(DRVNAME, cpu); if (!pdev) { err = -ENOMEM; printk(KERN_ERR DRVNAME ": Device allocation failed\n"); goto exit; } pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL); if (!pdev_entry) { err = -ENOMEM; goto exit_device_put; } err = platform_device_add(pdev); if (err) { printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n", err); goto exit_device_free; } pdev_entry->pdev = pdev; pdev_entry->cpu = cpu; mutex_lock(&pdev_list_mutex); list_add_tail(&pdev_entry->list, &pdev_list); mutex_unlock(&pdev_list_mutex); return 0; exit_device_free: kfree(pdev_entry); exit_device_put: platform_device_put(pdev); exit: return err; } #ifdef CONFIG_HOTPLUG_CPU static void coretemp_device_remove(unsigned int cpu) { struct pdev_entry *p, *n; mutex_lock(&pdev_list_mutex); list_for_each_entry_safe(p, n, &pdev_list, list) { if (p->cpu == cpu) { platform_device_unregister(p->pdev); list_del(&p->list); kfree(p); } } mutex_unlock(&pdev_list_mutex); } static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long) hcpu; switch (action) { case CPU_ONLINE: case CPU_DOWN_FAILED: coretemp_device_add(cpu); break; case CPU_DOWN_PREPARE: coretemp_device_remove(cpu); break; } return NOTIFY_OK; } static struct notifier_block coretemp_cpu_notifier __refdata = { .notifier_call = coretemp_cpu_callback, }; #endif /* !CONFIG_HOTPLUG_CPU */ static int __init coretemp_init(void) { int i, err = -ENODEV; struct pdev_entry *p, *n; /* quick check if we run Intel */ if (cpu_data(0).x86_vendor != X86_VENDOR_INTEL) goto exit; err = platform_driver_register(&coretemp_driver); if (err) goto exit; for_each_online_cpu(i) { struct cpuinfo_x86 *c = &cpu_data(i); /* * CPUID.06H.EAX[0] indicates whether the CPU has thermal * sensors. 
We check this bit only, all the early CPUs * without thermal sensors will be filtered out. */ if (c->cpuid_level >= 6 && (cpuid_eax(0x06) & 0x01)) { err = coretemp_device_add(i); if (err) goto exit_devices_unreg; } else { printk(KERN_INFO DRVNAME ": CPU (model=0x%x)" " has no thermal sensor.\n", c->x86_model); } } if (list_empty(&pdev_list)) { err = -ENODEV; goto exit_driver_unreg; } #ifdef CONFIG_HOTPLUG_CPU register_hotcpu_notifier(&coretemp_cpu_notifier); #endif return 0; exit_devices_unreg: mutex_lock(&pdev_list_mutex); list_for_each_entry_safe(p, n, &pdev_list, list) { platform_device_unregister(p->pdev); list_del(&p->list); kfree(p); } mutex_unlock(&pdev_list_mutex); exit_driver_unreg: platform_driver_unregister(&coretemp_driver); exit: return err; } static void __exit coretemp_exit(void) { struct pdev_entry *p, *n; #ifdef CONFIG_HOTPLUG_CPU unregister_hotcpu_notifier(&coretemp_cpu_notifier); #endif mutex_lock(&pdev_list_mutex); list_for_each_entry_safe(p, n, &pdev_list, list) { platform_device_unregister(p->pdev); list_del(&p->list); kfree(p); } mutex_unlock(&pdev_list_mutex); platform_driver_unregister(&coretemp_driver); } MODULE_AUTHOR("Rudolf Marek <r.marek@assembler.cz>"); MODULE_DESCRIPTION("Intel Core temperature monitor"); MODULE_LICENSE("GPL"); module_init(coretemp_init) module_exit(coretemp_exit)
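/*
 * A minimal, self-contained sketch of the IA32_THERM_STATUS decoding done in
 * coretemp_update_device() above: bit 31 flags a valid reading, bits 22:16
 * give the offset below TjMax in degrees C, and bit 5 is the out-of-spec log
 * surfaced as temp1_crit_alarm. The helper name and the standalone userspace
 * form are illustrative assumptions, not part of the driver.
 */
#include <stdint.h>

static int decode_therm_status(uint32_t eax, int tjmax, int *temp, int *alarm)
{
    *alarm = (eax >> 5) & 1;            /* out-of-spec log, never cleared */
    if (!(eax & 0x80000000u))
        return -1;                      /* reading flagged invalid */
    *temp = tjmax - (int)((eax >> 16) & 0x7f) * 1000;   /* millidegrees C */
    return 0;
}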
gpl-2.0
pauloborges/bluez
profiles/sap/manager.c
6
1719
/* * BlueZ - Bluetooth protocol stack for Linux * * Copyright (C) 2010 Instituto Nokia de Tecnologia - INdT * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifdef HAVE_CONFIG_H #include <config.h> #endif #include <stdbool.h> #include "log.h" #include "adapter.h" #include "device.h" #include "profile.h" #include "service.h" #include "manager.h" #include "server.h" static int sap_server_probe(struct btd_profile *p, struct btd_adapter *adapter) { DBG("path %s", adapter_get_path(adapter)); return sap_server_register(adapter); } static void sap_server_remove(struct btd_profile *p, struct btd_adapter *adapter) { const char *path = adapter_get_path(adapter); DBG("path %s", path); sap_server_unregister(path); } static struct btd_profile sap_profile = { .name = "sap-server", .adapter_probe = sap_server_probe, .adapter_remove = sap_server_remove, }; int sap_manager_init(void) { btd_profile_register(&sap_profile); return 0; } void sap_manager_exit(void) { btd_profile_unregister(&sap_profile); }
gpl-2.0
ihacklog/strongswan
src/frontends/android/jni/libandroidbridge/kernel/android_net.c
6
2808
/* * Copyright (C) 2012-2013 Tobias Brunner * Hochschule fuer Technik Rapperswil * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. */ #include "android_net.h" #include "../charonservice.h" #include <hydra.h> #include <processing/jobs/callback_job.h> #include <threading/mutex.h> /** delay before firing roam events (ms) */ #define ROAM_DELAY 100 typedef struct private_android_net_t private_android_net_t; struct private_android_net_t { /** * Public kernel interface */ android_net_t public; /** * Reference to NetworkManager object */ network_manager_t *network_manager; /** * earliest time of the next roam event */ timeval_t next_roam; /** * mutex to check and update roam event time */ mutex_t *mutex; }; /** * callback function that raises the delayed roam event */ static job_requeue_t roam_event() { /* this will fail if no connection is up */ charonservice->bypass_socket(charonservice, -1, 0); hydra->kernel_interface->roam(hydra->kernel_interface, TRUE); return JOB_REQUEUE_NONE; } /** * Listen for connectivity change events and queue a roam event */ static void connectivity_cb(private_android_net_t *this, bool disconnected) { timeval_t now; job_t *job; time_monotonic(&now); this->mutex->lock(this->mutex); if (!timercmp(&now, &this->next_roam, >)) { this->mutex->unlock(this->mutex); return; } timeval_add_ms(&now, ROAM_DELAY); this->next_roam = now; this->mutex->unlock(this->mutex); job = (job_t*)callback_job_create((callback_job_cb_t)roam_event, NULL, NULL, NULL); lib->scheduler->schedule_job_ms(lib->scheduler, job, ROAM_DELAY); } METHOD(android_net_t, destroy, void, private_android_net_t *this) { this->network_manager->remove_connectivity_cb(this->network_manager, (void*)connectivity_cb); this->mutex->destroy(this->mutex); free(this); } /* * Described in header. */ android_net_t *android_net_create() { private_android_net_t *this; INIT(this, .public = { .destroy = _destroy, }, .mutex = mutex_create(MUTEX_TYPE_DEFAULT), .network_manager = charonservice->get_network_manager(charonservice), ); timerclear(&this->next_roam); this->network_manager->add_connectivity_cb(this->network_manager, (void*)connectivity_cb, this); return &this->public; };
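/*
 * A small sketch of the roam-event debounce used by connectivity_cb() above:
 * events arriving before next_roam are dropped; otherwise next_roam is pushed
 * ROAM_DELAY ms into the future and a single delayed roam job is scheduled.
 * POSIX clock_gettime() stands in for time_monotonic(), the helper name is
 * illustrative, and the mutex around next_roam is omitted for brevity.
 */
#include <stdbool.h>
#include <time.h>

#define ROAM_DELAY_MS 100

static struct timespec next_roam;

static bool should_schedule_roam(void)
{
    struct timespec now;
    clock_gettime(CLOCK_MONOTONIC, &now);
    if (now.tv_sec < next_roam.tv_sec ||
        (now.tv_sec == next_roam.tv_sec && now.tv_nsec <= next_roam.tv_nsec))
        return false;                       /* still inside the delay window */
    next_roam = now;
    next_roam.tv_nsec += ROAM_DELAY_MS * 1000000L;
    if (next_roam.tv_nsec >= 1000000000L) { /* normalize the timespec */
        next_roam.tv_sec += 1;
        next_roam.tv_nsec -= 1000000000L;
    }
    return true;    /* caller schedules roam_event() after ROAM_DELAY_MS */
}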
gpl-2.0
Lprigara/KernelLinuxRaspberry
drivers/scsi/megaraid/megaraid_sas_base.c
6
153829
/* * Linux MegaRAID driver for SAS based RAID controllers * * Copyright (c) 2003-2012 LSI Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * FILE: megaraid_sas_base.c * Version : 06.700.06.00-rc1 * * Authors: LSI Corporation * Sreenivas Bagalkote * Sumant Patro * Bo Yang * Adam Radford <linuxraid@lsi.com> * * Send feedback to: <megaraidlinux@lsi.com> * * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 * ATTN: Linuxraid */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/list.h> #include <linux/moduleparam.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/uio.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <linux/fs.h> #include <linux/compat.h> #include <linux/blkdev.h> #include <linux/mutex.h> #include <linux/poll.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include "megaraid_sas_fusion.h" #include "megaraid_sas.h" /* * Number of sectors per IO command * Will be set in megasas_init_mfi if user does not provide */ static unsigned int max_sectors; module_param_named(max_sectors, max_sectors, int, 0); MODULE_PARM_DESC(max_sectors, "Maximum number of sectors per IO command"); static int msix_disable; module_param(msix_disable, int, S_IRUGO); MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0"); static unsigned int msix_vectors; module_param(msix_vectors, int, S_IRUGO); MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW"); static int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH; module_param(throttlequeuedepth, int, S_IRUGO); MODULE_PARM_DESC(throttlequeuedepth, "Adapter queue depth when throttled due to I/O timeout. Default: 16"); int resetwaittime = MEGASAS_RESET_WAIT_TIME; module_param(resetwaittime, int, S_IRUGO); MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout " "before resetting adapter. 
Default: 180"); MODULE_LICENSE("GPL"); MODULE_VERSION(MEGASAS_VERSION); MODULE_AUTHOR("megaraidlinux@lsi.com"); MODULE_DESCRIPTION("LSI MegaRAID SAS Driver"); int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); static int megasas_get_pd_list(struct megasas_instance *instance); static int megasas_ld_list_query(struct megasas_instance *instance, u8 query_type); static int megasas_issue_init_mfi(struct megasas_instance *instance); static int megasas_register_aen(struct megasas_instance *instance, u32 seq_num, u32 class_locale_word); /* * PCI ID table for all supported controllers */ static struct pci_device_id megasas_pci_table[] = { {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)}, /* xscale IOP */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)}, /* ppc IOP */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)}, /* ppc IOP */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)}, /* gen2*/ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)}, /* gen2*/ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)}, /* skinny*/ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)}, /* skinny*/ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)}, /* xscale IOP, vega */ {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)}, /* xscale IOP */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)}, /* Fusion */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)}, /* Invader */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)}, /* Fury */ {} }; MODULE_DEVICE_TABLE(pci, megasas_pci_table); static int megasas_mgmt_majorno; static struct megasas_mgmt_info megasas_mgmt_info; static struct fasync_struct *megasas_async_queue; static DEFINE_MUTEX(megasas_async_queue_mutex); static int megasas_poll_wait_aen; static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait); static u32 support_poll_for_event; u32 megasas_dbg_lvl; static u32 support_device_change; /* define lock for aen poll */ spinlock_t poll_aen_lock; void megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, u8 alt_status); static u32 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs); static int megasas_adp_reset_gen2(struct megasas_instance *instance, struct megasas_register_set __iomem *reg_set); static irqreturn_t megasas_isr(int irq, void *devp); static u32 megasas_init_adapter_mfi(struct megasas_instance *instance); u32 megasas_build_and_issue_cmd(struct megasas_instance *instance, struct scsi_cmnd *scmd); static void megasas_complete_cmd_dpc(unsigned long instance_addr); void megasas_release_fusion(struct megasas_instance *instance); int megasas_ioc_init_fusion(struct megasas_instance *instance); void megasas_free_cmds_fusion(struct megasas_instance *instance); u8 megasas_get_map_info(struct megasas_instance *instance); int megasas_sync_map_info(struct megasas_instance *instance); int wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd); void megasas_reset_reply_desc(struct megasas_instance *instance); int megasas_reset_fusion(struct Scsi_Host *shost); void megasas_fusion_ocr_wq(struct work_struct *work); void megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd) { instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, 0, instance->reg_set); } /** * megasas_get_cmd - Get a command from the free pool * @instance: Adapter soft state * * Returns a free command from 
the pool */ struct megasas_cmd *megasas_get_cmd(struct megasas_instance *instance) { unsigned long flags; struct megasas_cmd *cmd = NULL; spin_lock_irqsave(&instance->cmd_pool_lock, flags); if (!list_empty(&instance->cmd_pool)) { cmd = list_entry((&instance->cmd_pool)->next, struct megasas_cmd, list); list_del_init(&cmd->list); } else { printk(KERN_ERR "megasas: Command pool empty!\n"); } spin_unlock_irqrestore(&instance->cmd_pool_lock, flags); return cmd; } /** * megasas_return_cmd - Return a cmd to free command pool * @instance: Adapter soft state * @cmd: Command packet to be returned to free command pool */ inline void megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) { unsigned long flags; spin_lock_irqsave(&instance->cmd_pool_lock, flags); cmd->scmd = NULL; cmd->frame_count = 0; if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) && (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) && (instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) && (reset_devices)) cmd->frame->hdr.cmd = MFI_CMD_INVALID; list_add_tail(&cmd->list, &instance->cmd_pool); spin_unlock_irqrestore(&instance->cmd_pool_lock, flags); } /** * The following functions are defined for xscale * (deviceid : 1064R, PERC5) controllers */ /** * megasas_enable_intr_xscale - Enables interrupts * @regs: MFI register set */ static inline void megasas_enable_intr_xscale(struct megasas_instance *instance) { struct megasas_register_set __iomem *regs; regs = instance->reg_set; writel(0, &(regs)->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_disable_intr_xscale -Disables interrupt * @regs: MFI register set */ static inline void megasas_disable_intr_xscale(struct megasas_instance *instance) { struct megasas_register_set __iomem *regs; u32 mask = 0x1f; regs = instance->reg_set; writel(mask, &regs->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_read_fw_status_reg_xscale - returns the current FW status value * @regs: MFI register set */ static u32 megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs) { return readl(&(regs)->outbound_msg_0); } /** * megasas_clear_interrupt_xscale - Check & clear interrupt * @regs: MFI register set */ static int megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs) { u32 status; u32 mfiStatus = 0; /* * Check if it is our interrupt */ status = readl(&regs->outbound_intr_status); if (status & MFI_OB_INTR_STATUS_MASK) mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT) mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; /* * Clear the interrupt by writing back the same value */ if (mfiStatus) writel(status, &regs->outbound_intr_status); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_status); return mfiStatus; } /** * megasas_fire_cmd_xscale - Sends command to the FW * @frame_phys_addr : Physical address of cmd * @frame_count : Number of frames for the command * @regs : MFI register set */ static inline void megasas_fire_cmd_xscale(struct megasas_instance *instance, dma_addr_t frame_phys_addr, u32 frame_count, struct megasas_register_set __iomem *regs) { unsigned long flags; spin_lock_irqsave(&instance->hba_lock, flags); writel((frame_phys_addr >> 3)|(frame_count), &(regs)->inbound_queue_port); spin_unlock_irqrestore(&instance->hba_lock, flags); } /** * megasas_adp_reset_xscale - For controller reset * @regs: MFI register set */ static int 
megasas_adp_reset_xscale(struct megasas_instance *instance, struct megasas_register_set __iomem *regs) { u32 i; u32 pcidata; writel(MFI_ADP_RESET, &regs->inbound_doorbell); for (i = 0; i < 3; i++) msleep(1000); /* sleep for 3 secs */ pcidata = 0; pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata); printk(KERN_NOTICE "pcidata = %x\n", pcidata); if (pcidata & 0x2) { printk(KERN_NOTICE "mfi 1068 offset read=%x\n", pcidata); pcidata &= ~0x2; pci_write_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, pcidata); for (i = 0; i < 2; i++) msleep(1000); /* need to wait 2 secs again */ pcidata = 0; pci_read_config_dword(instance->pdev, MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata); printk(KERN_NOTICE "1068 offset handshake read=%x\n", pcidata); if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) { printk(KERN_NOTICE "1068 offset pcidt=%x\n", pcidata); pcidata = 0; pci_write_config_dword(instance->pdev, MFI_1068_FW_HANDSHAKE_OFFSET, pcidata); } } return 0; } /** * megasas_check_reset_xscale - For controller reset check * @regs: MFI register set */ static int megasas_check_reset_xscale(struct megasas_instance *instance, struct megasas_register_set __iomem *regs) { if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) && (le32_to_cpu(*instance->consumer) == MEGASAS_ADPRESET_INPROG_SIGN)) return 1; return 0; } static struct megasas_instance_template megasas_instance_template_xscale = { .fire_cmd = megasas_fire_cmd_xscale, .enable_intr = megasas_enable_intr_xscale, .disable_intr = megasas_disable_intr_xscale, .clear_intr = megasas_clear_intr_xscale, .read_fw_status_reg = megasas_read_fw_status_reg_xscale, .adp_reset = megasas_adp_reset_xscale, .check_reset = megasas_check_reset_xscale, .service_isr = megasas_isr, .tasklet = megasas_complete_cmd_dpc, .init_adapter = megasas_init_adapter_mfi, .build_and_issue_cmd = megasas_build_and_issue_cmd, .issue_dcmd = megasas_issue_dcmd, }; /** * This is the end of set of functions & definitions specific * to xscale (deviceid : 1064R, PERC5) controllers */ /** * The following functions are defined for ppc (deviceid : 0x60) * controllers */ /** * megasas_enable_intr_ppc - Enables interrupts * @regs: MFI register set */ static inline void megasas_enable_intr_ppc(struct megasas_instance *instance) { struct megasas_register_set __iomem *regs; regs = instance->reg_set; writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); writel(~0x80000000, &(regs)->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_disable_intr_ppc - Disable interrupt * @regs: MFI register set */ static inline void megasas_disable_intr_ppc(struct megasas_instance *instance) { struct megasas_register_set __iomem *regs; u32 mask = 0xFFFFFFFF; regs = instance->reg_set; writel(mask, &regs->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_read_fw_status_reg_ppc - returns the current FW status value * @regs: MFI register set */ static u32 megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs) { return readl(&(regs)->outbound_scratch_pad); } /** * megasas_clear_interrupt_ppc - Check & clear interrupt * @regs: MFI register set */ static int megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs) { u32 status, mfiStatus = 0; /* * Check if it is our interrupt */ status = readl(&regs->outbound_intr_status); if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT) mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) mfiStatus 
|= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	writel(status, &regs->outbound_doorbell_clear);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_doorbell_clear);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_ppc -	Sends command to the FW
 * @instance:			Adapter soft state
 * @frame_phys_addr:		Physical address of cmd
 * @frame_count:		Number of frames for the command
 * @regs:			MFI register set
 */
static inline void
megasas_fire_cmd_ppc(struct megasas_instance *instance,
		dma_addr_t frame_phys_addr,
		u32 frame_count,
		struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr | (frame_count << 1)) | 1,
			&(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_check_reset_ppc -	For controller reset check
 * @regs:			MFI register set
 */
static int
megasas_check_reset_ppc(struct megasas_instance *instance,
		struct megasas_register_set __iomem *regs)
{
	if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

static struct megasas_instance_template megasas_instance_template_ppc = {

	.fire_cmd = megasas_fire_cmd_ppc,
	.enable_intr = megasas_enable_intr_ppc,
	.disable_intr = megasas_disable_intr_ppc,
	.clear_intr = megasas_clear_intr_ppc,
	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
	.adp_reset = megasas_adp_reset_xscale,
	.check_reset = megasas_check_reset_ppc,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/**
 * megasas_enable_intr_skinny -	Enables interrupts
 * @regs:			MFI register set
 */
static inline void
megasas_enable_intr_skinny(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);

	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_skinny -	Disables interrupt
 * @regs:				MFI register set
 */
static inline void
megasas_disable_intr_skinny(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_skinny - returns the current FW status value
 * @regs:			MFI register set
 */
static u32
megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
{
	return readl(&(regs)->outbound_scratch_pad);
}

/**
 * megasas_clear_intr_skinny -	Check & clear interrupt
 * @regs:			MFI register set
 */
static int
megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
{
	u32 status;
	u32 mfiStatus = 0;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK))
		return 0;

	/*
	 * Check if the firmware has faulted
	 */
	if ((megasas_read_fw_status_reg_skinny(regs) & MFI_STATE_MASK) ==
	    MFI_STATE_FAULT)
		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
	else
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	writel(status, &regs->outbound_intr_status);

	/*
	 * dummy read to flush PCI
	 */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_skinny -	Sends command to the FW
 * @instance:			Adapter soft state
 * @frame_phys_addr:		Physical address of cmd
 * @frame_count:		Number of frames for the command
 * @regs:			MFI register set
 */
static inline void
megasas_fire_cmd_skinny(struct megasas_instance *instance,
			dma_addr_t frame_phys_addr,
			u32 frame_count,
			struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel(upper_32_bits(frame_phys_addr),
	       &(regs)->inbound_high_queue_port);
	writel((lower_32_bits(frame_phys_addr) | (frame_count << 1)) | 1,
	       &(regs)->inbound_low_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_check_reset_skinny -	For controller reset check
 * @regs:			MFI register set
 */
static int
megasas_check_reset_skinny(struct megasas_instance *instance,
				struct megasas_register_set __iomem *regs)
{
	if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

static struct megasas_instance_template megasas_instance_template_skinny = {

	.fire_cmd = megasas_fire_cmd_skinny,
	.enable_intr = megasas_enable_intr_skinny,
	.disable_intr = megasas_disable_intr_skinny,
	.clear_intr = megasas_clear_intr_skinny,
	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
	.adp_reset = megasas_adp_reset_gen2,
	.check_reset = megasas_check_reset_skinny,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};


/**
 * The following functions are defined for gen2 (deviceid : 0x78 0x79)
 * controllers
 */

/**
 * megasas_enable_intr_gen2 -	Enables interrupts
 * @regs:			MFI register set
 */
static inline void
megasas_enable_intr_gen2(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);

	/* write ~0x00000005 (4 & 1) to the intr mask*/
	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_gen2 -	Disables interrupt
 * @regs:			MFI register set
 */
static inline void
megasas_disable_intr_gen2(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_gen2 - returns the current FW status value
 * @regs:			MFI register set
 */
static u32
megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
{
	return readl(&(regs)->outbound_scratch_pad);
}

/**
 * megasas_clear_intr_gen2 -	Check & clear interrupt
 * @regs:			MFI register set
 */
static int
megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
{
	u32 status;
	u32 mfiStatus = 0;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_INTR_FLAG_REPLY_MESSAGE)
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;

	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	if (mfiStatus)
		writel(status, &regs->outbound_doorbell_clear);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_gen2 -	Sends command to the FW
 * @instance:			Adapter soft state
 * @frame_phys_addr:		Physical address of cmd
 * @frame_count:		Number of frames for the command
 * @regs:			MFI register set
 */
static inline void
megasas_fire_cmd_gen2(struct megasas_instance *instance,
			dma_addr_t frame_phys_addr,
			u32 frame_count,
			struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr | (frame_count << 1)) | 1,
			&(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_adp_reset_gen2 -	For controller reset
 * @regs:			MFI register set
 */
static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
			struct megasas_register_set __iomem *reg_set)
{
	u32 retry = 0;
	u32 HostDiag;
	u32 __iomem *seq_offset = &reg_set->seq_offset;
	u32 __iomem *hostdiag_offset = &reg_set->host_diag;

	if (instance->instancet == &megasas_instance_template_skinny) {
		seq_offset = &reg_set->fusion_seq_offset;
		hostdiag_offset = &reg_set->fusion_host_diag;
	}

	writel(0, seq_offset);
	writel(4, seq_offset);
	writel(0xb, seq_offset);
	writel(2, seq_offset);
	writel(7, seq_offset);
	writel(0xd, seq_offset);

	msleep(1000);

	HostDiag = (u32)readl(hostdiag_offset);

	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
		msleep(100);
		HostDiag = (u32)readl(hostdiag_offset);
		printk(KERN_NOTICE "RESET_GEN2: retry=%x, hostdiag=%x\n",
					retry, HostDiag);

		if (retry++ >= 100)
			return 1;
	}

	printk(KERN_NOTICE "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);

	writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);

	ssleep(10);

	HostDiag = (u32)readl(hostdiag_offset);
	while (HostDiag & DIAG_RESET_ADAPTER) {
		msleep(100);
		HostDiag = (u32)readl(hostdiag_offset);
		printk(KERN_NOTICE "RESET_GEN2: retry=%x, hostdiag=%x\n",
				retry, HostDiag);

		if (retry++ >= 1000)
			return 1;
	}

	return 0;
}

/**
 * megasas_check_reset_gen2 -	For controller reset check
 * @regs:			MFI register set
 */
static int
megasas_check_reset_gen2(struct megasas_instance *instance,
		struct megasas_register_set __iomem *regs)
{
	if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

static struct megasas_instance_template megasas_instance_template_gen2 = {

	.fire_cmd = megasas_fire_cmd_gen2,
	.enable_intr = megasas_enable_intr_gen2,
	.disable_intr = megasas_disable_intr_gen2,
	.clear_intr = megasas_clear_intr_gen2,
	.read_fw_status_reg = megasas_read_fw_status_reg_gen2,
	.adp_reset = megasas_adp_reset_gen2,
	.check_reset = megasas_check_reset_gen2,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/**
 * This is the end of set of functions & definitions
 * specific to gen2 (deviceid : 0x78, 0x79) controllers
 */

/*
 * Template added for TB (Fusion)
 */
extern struct megasas_instance_template megasas_instance_template_fusion;

/**
 * megasas_issue_polled -	Issues a polling command
 * @instance:			Adapter soft state
 * @cmd:			Command packet to be issued
 *
 * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
 */
int
megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	struct megasas_header *frame_hdr = &cmd->frame->hdr;

	frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	/*
	 * Issue the frame using inbound queue port
	 */
	instance->instancet->issue_dcmd(instance, cmd);

	/*
	 * Wait for cmd_status to change
	 */
	return wait_and_poll(instance, cmd);
}
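/*
 * Illustrative sketch only (not part of the driver): the usual shape of a
 * polled DCMD built on megasas_issue_polled(). Opcode selection and DMA
 * buffer setup are elided; megasas_get_pd_list() further below is a real,
 * complete instance of this pattern.
 *
 *	cmd = megasas_get_cmd(instance);
 *	dcmd = &cmd->frame->dcmd;
 *	dcmd->cmd = MFI_CMD_DCMD;
 *	dcmd->cmd_status = 0xFF;
 *	dcmd->opcode = cpu_to_le32(opcode);
 *	ret = megasas_issue_polled(instance, cmd);
 *	megasas_return_cmd(instance, cmd);
 */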
/**
 * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
 * @instance:			Adapter soft state
 * @cmd:			Command to be issued
 *
 * This function waits on an event for the command to be returned from ISR.
 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
 * Used to issue ioctl commands.
 */
static int
megasas_issue_blocked_cmd(struct megasas_instance *instance,
			  struct megasas_cmd *cmd)
{
	cmd->cmd_status = ENODATA;

	instance->instancet->issue_dcmd(instance, cmd);

	wait_event(instance->int_cmd_wait_q, cmd->cmd_status != ENODATA);

	return 0;
}

/**
 * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
 * @instance:				Adapter soft state
 * @cmd_to_abort:			Previously issued cmd to be aborted
 *
 * MFI firmware can abort previously issued AEN command (automatic event
 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
 * cmd and waits for return status.
 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
 */
static int
megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
				struct megasas_cmd *cmd_to_abort)
{
	struct megasas_cmd *cmd;
	struct megasas_abort_frame *abort_fr;

	cmd = megasas_get_cmd(instance);

	if (!cmd)
		return -1;

	abort_fr = &cmd->frame->abort;

	/*
	 * Prepare and issue the abort frame
	 */
	abort_fr->cmd = MFI_CMD_ABORT;
	abort_fr->cmd_status = 0xFF;
	abort_fr->flags = cpu_to_le16(0);
	abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
	abort_fr->abort_mfi_phys_addr_lo =
		cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
	abort_fr->abort_mfi_phys_addr_hi =
		cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));

	cmd->sync_cmd = 1;
	cmd->cmd_status = ENODATA;

	instance->instancet->issue_dcmd(instance, cmd);

	/*
	 * Wait for this cmd to complete
	 */
	wait_event(instance->abort_cmd_wait_q, cmd->cmd_status != 0xFF);
	cmd->sync_cmd = 0;

	megasas_return_cmd(instance, cmd);
	return 0;
}

/**
 * megasas_make_sgl32 -	Prepares 32-bit SGL
 * @instance:		Adapter soft state
 * @scp:		SCSI command from the mid-layer
 * @mfi_sgl:		SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);
	BUG_ON(sge_count < 0);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge32[i].length =
				cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge32[i].phys_addr =
				cpu_to_le32(sg_dma_address(os_sgl));
		}
	}
	return sge_count;
}

/**
 * megasas_make_sgl64 -	Prepares 64-bit SGL
 * @instance:		Adapter soft state
 * @scp:		SCSI command from the mid-layer
 * @mfi_sgl:		SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);
	BUG_ON(sge_count < 0);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge64[i].length =
				cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge64[i].phys_addr =
				cpu_to_le64(sg_dma_address(os_sgl));
		}
	}
	return sge_count;
}

/**
 * megasas_make_sgl_skinny -	Prepares IEEE SGL
 * @instance:			Adapter soft state
 * @scp:			SCSI command from the mid-layer
 * @mfi_sgl:			SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl_skinny(struct megasas_instance *instance,
		struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge_skinny[i].length =
				cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge_skinny[i].phys_addr =
				cpu_to_le64(sg_dma_address(os_sgl));
			mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
		}
	}
	return sge_count;
}

 /**
 * megasas_get_frame_count - Computes the number of frames
 * @sge_count:		number of sg elements
 * @frame_type:		type of frame - io or pthru frame
 *
 * Returns the number of frames required for the number of sge's (sge_count)
 */
static u32 megasas_get_frame_count(struct megasas_instance *instance,
			u8 sge_count, u8 frame_type)
{
	int num_cnt;
	int sge_bytes;
	u32 sge_sz;
	u32 frame_count = 0;

	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
	    sizeof(struct megasas_sge32);

	if (instance->flag_ieee)
		sge_sz = sizeof(struct megasas_sge_skinny);

	/*
	 * Main frame can contain 2 SGEs for 64-bit SGLs and
	 * 3 SGEs for 32-bit SGLs for ldio &
	 * 1 SGEs for 64-bit SGLs and
	 * 2 SGEs for 32-bit SGLs for pthru frame
	 */
	if (unlikely(frame_type == PTHRU_FRAME)) {
		if (instance->flag_ieee == 1)
			num_cnt = sge_count - 1;
		else if (IS_DMA64)
			num_cnt = sge_count - 1;
		else
			num_cnt = sge_count - 2;
	} else {
		if (instance->flag_ieee == 1)
			num_cnt = sge_count - 1;
		else if (IS_DMA64)
			num_cnt = sge_count - 2;
		else
			num_cnt = sge_count - 3;
	}

	if (num_cnt > 0) {
		sge_bytes = sge_sz * num_cnt;

		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
	}
	/* Main frame */
	frame_count += 1;

	if (frame_count > 7)
		frame_count = 8;
	return frame_count;
}
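/*
 * Worked example (illustrative only): an IO_FRAME ldio with sge_count = 10,
 * 64-bit SGLs and no IEEE flag, assuming the packed struct megasas_sge64
 * (__le32 length + __le64 phys_addr) is 12 bytes. The main frame holds 2
 * SGEs, so num_cnt = 8 and sge_bytes = 96; 96 / 64 gives 1 extra frame plus
 * 1 for the 32-byte remainder, and adding the main frame yields
 * frame_count = 3.
 */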
/**
 * megasas_build_dcdb -	Prepares a direct cdb (DCDB) command
 * @instance:		Adapter soft state
 * @scp:		SCSI command
 * @cmd:		Command to be prepared in
 *
 * This function prepares CDB commands. These are typically pass-through
 * commands to the devices.
 */
static int
megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   struct megasas_cmd *cmd)
{
	u32 is_logical;
	u32 device_id;
	u16 flags = 0;
	struct megasas_pthru_frame *pthru;

	is_logical = MEGASAS_IS_LOGICAL(scp);
	device_id = MEGASAS_DEV_INDEX(instance, scp);
	pthru = (struct megasas_pthru_frame *)cmd->frame;

	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
		flags = MFI_FRAME_DIR_WRITE;
	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
		flags = MFI_FRAME_DIR_READ;
	else if (scp->sc_data_direction == PCI_DMA_NONE)
		flags = MFI_FRAME_DIR_NONE;

	if (instance->flag_ieee == 1)
		flags |= MFI_FRAME_IEEE;

	/*
	 * Prepare the DCDB frame
	 */
	pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
	pthru->cmd_status = 0x0;
	pthru->scsi_status = 0x0;
	pthru->target_id = device_id;
	pthru->lun = scp->device->lun;
	pthru->cdb_len = scp->cmd_len;
	pthru->timeout = 0;
	pthru->pad_0 = 0;
	pthru->flags = cpu_to_le16(flags);
	pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));

	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);

	/*
	 * If the command is for the tape device, set the
	 * pthru timeout to the os layer timeout value.
	 */
	if (scp->device->type == TYPE_TAPE) {
		if ((scp->request->timeout / HZ) > 0xFFFF)
			pthru->timeout = cpu_to_le16(0xFFFF);
		else
			pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
	}

	/*
	 * Construct SGL
	 */
	if (instance->flag_ieee == 1) {
		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
						      &pthru->sgl);
	} else if (IS_DMA64) {
		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		pthru->sge_count = megasas_make_sgl64(instance, scp,
						      &pthru->sgl);
	} else
		pthru->sge_count = megasas_make_sgl32(instance, scp,
						      &pthru->sgl);

	if (pthru->sge_count > instance->max_num_sge) {
		printk(KERN_ERR "megasas: DCDB too many SGE NUM=%x\n",
			pthru->sge_count);
		return 0;
	}

	/*
	 * Sense info specific
	 */
	pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
	pthru->sense_buf_phys_addr_hi =
		cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
	pthru->sense_buf_phys_addr_lo =
		cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));

	/*
	 * Compute the total number of frames this command consumes. FW uses
	 * this number to pull sufficient number of frames from host memory.
	 */
	cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
							PTHRU_FRAME);

	return cmd->frame_count;
}

/**
 * megasas_build_ldio -	Prepares IOs to logical devices
 * @instance:		Adapter soft state
 * @scp:		SCSI command
 * @cmd:		Command to be prepared
 *
 * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
 */
static int
megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   struct megasas_cmd *cmd)
{
	u32 device_id;
	u8 sc = scp->cmnd[0];
	u16 flags = 0;
	struct megasas_io_frame *ldio;

	device_id = MEGASAS_DEV_INDEX(instance, scp);
	ldio = (struct megasas_io_frame *)cmd->frame;

	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
		flags = MFI_FRAME_DIR_WRITE;
	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
		flags = MFI_FRAME_DIR_READ;

	if (instance->flag_ieee == 1)
		flags |= MFI_FRAME_IEEE;

	/*
	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
	 */
	ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
	ldio->cmd_status = 0x0;
	ldio->scsi_status = 0x0;
	ldio->target_id = device_id;
	ldio->timeout = 0;
	ldio->reserved_0 = 0;
	ldio->pad_0 = 0;
	ldio->flags = cpu_to_le16(flags);
	ldio->start_lba_hi = 0;
	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;

	/*
	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
	 */
	if (scp->cmd_len == 6) {
		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
						 ((u32) scp->cmnd[2] << 8) |
						 (u32) scp->cmnd[3]);

		ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
	}

	/*
	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
	 */
	else if (scp->cmd_len == 10) {
		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
					      ((u32) scp->cmnd[7] << 8));
		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
						 ((u32) scp->cmnd[3] << 16) |
						 ((u32) scp->cmnd[4] << 8) |
						 (u32) scp->cmnd[5]);
	}

	/*
	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
	 */
	else if (scp->cmd_len == 12) {
		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
					      ((u32) scp->cmnd[7] << 16) |
					      ((u32) scp->cmnd[8] << 8) |
					      (u32) scp->cmnd[9]);
		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
						 ((u32) scp->cmnd[3] << 16) |
						 ((u32) scp->cmnd[4] << 8) |
						 (u32) scp->cmnd[5]);
	}

	/*
	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
	 */
	else if (scp->cmd_len == 16) {
		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
					      ((u32) scp->cmnd[11] << 16) |
					      ((u32) scp->cmnd[12] << 8) |
					      (u32) scp->cmnd[13]);
		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
						 ((u32) scp->cmnd[7] << 16) |
						 ((u32) scp->cmnd[8] << 8) |
						 (u32) scp->cmnd[9]);
		ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
						 ((u32) scp->cmnd[3] << 16) |
						 ((u32) scp->cmnd[4] << 8) |
						 (u32) scp->cmnd[5]);
	}

	/*
	 * Construct SGL
	 */
	if (instance->flag_ieee) {
		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
					      &ldio->sgl);
	} else if (IS_DMA64) {
		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
	} else
		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);

	if (ldio->sge_count > instance->max_num_sge) {
		printk(KERN_ERR "megasas: build_ld_io: sge_count = %x\n",
			ldio->sge_count);
		return 0;
	}

	/*
	 * Sense info specific
	 */
	ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
	ldio->sense_buf_phys_addr_hi = 0;
	ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);

	/*
	 * Compute the total number of frames this command consumes. FW uses
	 * this number to pull sufficient number of frames from host memory.
	 */
	cmd->frame_count = megasas_get_frame_count(instance,
			ldio->sge_count, IO_FRAME);

	return cmd->frame_count;
}
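/*
 * Worked example (illustrative only): a 10-byte READ(0x28) whose CDB is
 * 28 00 00 12 34 56 00 00 08 00 decodes above to
 * start_lba_lo = 0x00123456 (cmnd[2..5]) and lba_count = 0x8 (cmnd[7..8]),
 * i.e. an 8-block read starting at LBA 0x123456.
 */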
/**
 * megasas_is_ldio -		Checks if the cmd is for logical drive
 * @scmd:			SCSI command
 *
 * Called by megasas_queue_command to find out if the command to be queued
 * is a logical drive command
 */
inline int megasas_is_ldio(struct scsi_cmnd *cmd)
{
	if (!MEGASAS_IS_LOGICAL(cmd))
		return 0;
	switch (cmd->cmnd[0]) {
	case READ_10:
	case WRITE_10:
	case READ_12:
	case WRITE_12:
	case READ_6:
	case WRITE_6:
	case READ_16:
	case WRITE_16:
		return 1;
	default:
		return 0;
	}
}

 /**
 * megasas_dump_pending_frames -	Dumps the frame address of all pending cmds
 *					in FW
 * @instance:				Adapter soft state
 */
static inline void
megasas_dump_pending_frames(struct megasas_instance *instance)
{
	struct megasas_cmd *cmd;
	int i, n;
	union megasas_sgl *mfi_sgl;
	struct megasas_io_frame *ldio;
	struct megasas_pthru_frame *pthru;
	u32 sgcount;
	u32 max_cmd = instance->max_fw_cmds;

	printk(KERN_ERR "\nmegasas[%d]: Dumping Frame Phys Address of all pending cmds in FW\n", instance->host->host_no);
	printk(KERN_ERR "megasas[%d]: Total OS Pending cmds : %d\n", instance->host->host_no, atomic_read(&instance->fw_outstanding));
	if (IS_DMA64)
		printk(KERN_ERR "\nmegasas[%d]: 64 bit SGLs were sent to FW\n", instance->host->host_no);
	else
		printk(KERN_ERR "\nmegasas[%d]: 32 bit SGLs were sent to FW\n", instance->host->host_no);

	printk(KERN_ERR "megasas[%d]: Pending OS cmds in FW : \n", instance->host->host_no);
	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		if (!cmd->scmd)
			continue;
		printk(KERN_ERR "megasas[%d]: Frame addr :0x%08lx : ", instance->host->host_no, (unsigned long)cmd->frame_phys_addr);
		if (megasas_is_ldio(cmd->scmd)) {
			ldio = (struct megasas_io_frame *)cmd->frame;
			mfi_sgl = &ldio->sgl;
			sgcount = ldio->sge_count;
			printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x, sge count : 0x%x\n",
			       instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
			       le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
			       le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
		} else {
			pthru = (struct megasas_pthru_frame *) cmd->frame;
			mfi_sgl = &pthru->sgl;
			sgcount = pthru->sge_count;
			printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x, sge count : 0x%x\n",
			       instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
			       pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
			       le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
		}
		if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
			for (n = 0; n < sgcount; n++) {
				if (IS_DMA64)
					printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%llx ",
					       le32_to_cpu(mfi_sgl->sge64[n].length),
					       le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
				else
					printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",
					       le32_to_cpu(mfi_sgl->sge32[n].length),
					       le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
			}
		}
		printk(KERN_ERR "\n");
	} /*for max_cmd*/
	printk(KERN_ERR "\nmegasas[%d]: Pending Internal cmds in FW : \n", instance->host->host_no);
	for (i = 0; i < max_cmd; i++) {

		cmd = instance->cmd_list[i];

		if (cmd->sync_cmd == 1)
			printk(KERN_ERR "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
	}
	printk(KERN_ERR "megasas[%d]: Dumping Done.\n\n", instance->host->host_no);
}

u32
megasas_build_and_issue_cmd(struct megasas_instance *instance,
			    struct scsi_cmnd *scmd)
{
	struct megasas_cmd *cmd;
	u32 frame_count;

	cmd = megasas_get_cmd(instance);
	if (!cmd)
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * Logical drive command
	 */
	if (megasas_is_ldio(scmd))
		frame_count = megasas_build_ldio(instance, scmd, cmd);
	else
		frame_count = megasas_build_dcdb(instance, scmd, cmd);

	if (!frame_count)
		goto out_return_cmd;

	cmd->scmd = scmd;
	scmd->SCp.ptr = (char *)cmd;

	/*
	 * Issue the command to the FW
	 */
	atomic_inc(&instance->fw_outstanding);

	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
				cmd->frame_count-1, instance->reg_set);

	return 0;
out_return_cmd:
	megasas_return_cmd(instance, cmd);
	return 1;
}


/**
 * megasas_queue_command -	Queue entry point
 * @scmd:			SCSI command to be queued
 * @done:			Callback entry point
 */
static int
megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
{
	struct megasas_instance *instance;
	unsigned long flags;

	instance = (struct megasas_instance *)
	    scmd->device->host->hostdata;

	if (instance->issuepend_done == 0)
		return SCSI_MLQUEUE_HOST_BUSY;

	spin_lock_irqsave(&instance->hba_lock, flags);

	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
		spin_unlock_irqrestore(&instance->hba_lock, flags);
		scmd->result = DID_ERROR << 16;
		done(scmd);
		return 0;
	}

	if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
		spin_unlock_irqrestore(&instance->hba_lock, flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	spin_unlock_irqrestore(&instance->hba_lock, flags);

	scmd->scsi_done = done;
	scmd->result = 0;

	if (MEGASAS_IS_LOGICAL(scmd) &&
	    (scmd->device->id >= MEGASAS_MAX_LD || scmd->device->lun)) {
		scmd->result = DID_BAD_TARGET << 16;
		goto out_done;
	}

	switch (scmd->cmnd[0]) {
	case SYNCHRONIZE_CACHE:
		/*
		 * FW takes care of flush cache on its own
		 * No need to send it down
		 */
		scmd->result = DID_OK << 16;
		goto out_done;
	default:
		break;
	}

	if (instance->instancet->build_and_issue_cmd(instance, scmd)) {
		printk(KERN_ERR "megasas: Err returned from build_and_issue_cmd\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	return 0;

 out_done:
	done(scmd);
	return 0;
}

static DEF_SCSI_QCMD(megasas_queue_command)

static struct megasas_instance *megasas_lookup_instance(u16 host_no)
{
	int i;

	for (i = 0; i < megasas_mgmt_info.max_index; i++) {

		if ((megasas_mgmt_info.instance[i]) &&
		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
			return megasas_mgmt_info.instance[i];
	}

	return NULL;
}

static int megasas_slave_configure(struct scsi_device *sdev)
{
	u16 pd_index = 0;
	struct megasas_instance *instance;

	instance = megasas_lookup_instance(sdev->host->host_no);

	/*
	 * Don't export physical disk devices to the disk driver.
	 *
	 * FIXME: Currently we don't export them to the midlayer at all.
	 *	  That will be fixed once LSI engineers have audited the
	 *	  firmware for possible issues.
	 */
	if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
				sdev->type == TYPE_DISK) {
		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
								sdev->id;
		if (instance->pd_list[pd_index].driveState ==
						MR_PD_STATE_SYSTEM) {
			blk_queue_rq_timeout(sdev->request_queue,
				MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
			return 0;
		}
		return -ENXIO;
	}

	/*
	 * The RAID firmware may require extended timeouts.
	 */
	blk_queue_rq_timeout(sdev->request_queue,
		MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
	return 0;
}

static int megasas_slave_alloc(struct scsi_device *sdev)
{
	u16 pd_index = 0;
	struct megasas_instance *instance;

	instance = megasas_lookup_instance(sdev->host->host_no);
	if ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) &&
				(sdev->type == TYPE_DISK)) {
		/*
		 * Open the OS scan to the SYSTEM PD
		 */
		pd_index =
			(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
			sdev->id;
		if ((instance->pd_list[pd_index].driveState ==
					MR_PD_STATE_SYSTEM) &&
			(instance->pd_list[pd_index].driveType ==
						TYPE_DISK)) {
			return 0;
		}
		return -ENXIO;
	}
	return 0;
}

void megaraid_sas_kill_hba(struct megasas_instance *instance)
{
	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
	    (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
		writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
	} else {
		writel(MFI_STOP_ADP, &instance->reg_set->inbound_doorbell);
	}
}

 /**
  * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
  *					restored to max value
  * @instance:			Adapter soft state
  *
  */
void
megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
{
	unsigned long flags;

	if (instance->flag & MEGASAS_FW_BUSY
	    && time_after(jiffies, instance->last_time + 5 * HZ)
	    && atomic_read(&instance->fw_outstanding) <
	    instance->throttlequeuedepth + 1) {

		spin_lock_irqsave(instance->host->host_lock, flags);
		instance->flag &= ~MEGASAS_FW_BUSY;
		if (instance->is_imr) {
			instance->host->can_queue =
				instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS;
		} else
			instance->host->can_queue =
				instance->max_fw_cmds - MEGASAS_INT_CMDS;

		spin_unlock_irqrestore(instance->host->host_lock, flags);
	}
}

/**
 * megasas_complete_cmd_dpc -	Returns FW's controller structure
 * @instance_addr:		Address of adapter soft state
 *
 * Tasklet to complete cmds
 */
static void megasas_complete_cmd_dpc(unsigned long instance_addr)
{
	u32 producer;
	u32 consumer;
	u32 context;
	struct megasas_cmd *cmd;
	struct megasas_instance *instance =
				(struct megasas_instance *)instance_addr;
	unsigned long flags;

	/* If we have already declared adapter dead, do not complete cmds */
	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
		return;

	spin_lock_irqsave(&instance->completion_lock, flags);

	producer = le32_to_cpu(*instance->producer);
	consumer = le32_to_cpu(*instance->consumer);

	while (consumer != producer) {
		context = le32_to_cpu(instance->reply_queue[consumer]);
		if (context >= instance->max_fw_cmds) {
			printk(KERN_ERR "Unexpected context value %x\n",
				context);
			BUG();
		}

		cmd = instance->cmd_list[context];

		megasas_complete_cmd(instance, cmd, DID_OK);

		consumer++;
		if (consumer == (instance->max_fw_cmds + 1)) {
			consumer = 0;
		}
	}

	*instance->consumer = cpu_to_le32(producer);

	spin_unlock_irqrestore(&instance->completion_lock, flags);

	/*
	 * Check if we can restore can_queue
	 */
	megasas_check_and_restore_queue_depth(instance);
}
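/*
 * Illustrative note: the reply queue is a ring of max_fw_cmds + 1 slots.
 * With max_fw_cmds = 4, producer = 1 and consumer = 4, the loop above
 * completes the contexts in slots 4 and 0 (consumer wraps from 5 back to 0)
 * before consumer catches up with producer.
 */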
static void
megasas_internal_reset_defer_cmds(struct megasas_instance *instance);

static void
process_fw_state_change_wq(struct work_struct *work);

void megasas_do_ocr(struct megasas_instance *instance)
{
	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
	(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
	(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
		*instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
	}
	instance->instancet->disable_intr(instance);
	instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
	instance->issuepend_done = 0;

	atomic_set(&instance->fw_outstanding, 0);
	megasas_internal_reset_defer_cmds(instance);
	process_fw_state_change_wq(&instance->work_init);
}

/**
 * megasas_wait_for_outstanding -	Wait for all outstanding cmds
 * @instance:				Adapter soft state
 *
 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
 * complete all its outstanding commands. Returns error if one or more IOs
 * are pending after this time period. It also marks the controller dead.
 */
static int megasas_wait_for_outstanding(struct megasas_instance *instance)
{
	int i;
	u32 reset_index;
	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
	u8 adprecovery;
	unsigned long flags;
	struct list_head clist_local;
	struct megasas_cmd *reset_cmd;
	u32 fw_state;
	u8 kill_adapter_flag;

	spin_lock_irqsave(&instance->hba_lock, flags);
	adprecovery = instance->adprecovery;
	spin_unlock_irqrestore(&instance->hba_lock, flags);

	if (adprecovery != MEGASAS_HBA_OPERATIONAL) {

		INIT_LIST_HEAD(&clist_local);
		spin_lock_irqsave(&instance->hba_lock, flags);
		list_splice_init(&instance->internal_reset_pending_q,
				&clist_local);
		spin_unlock_irqrestore(&instance->hba_lock, flags);

		printk(KERN_NOTICE "megasas: HBA reset wait ...\n");
		for (i = 0; i < wait_time; i++) {
			msleep(1000);
			spin_lock_irqsave(&instance->hba_lock, flags);
			adprecovery = instance->adprecovery;
			spin_unlock_irqrestore(&instance->hba_lock, flags);
			if (adprecovery == MEGASAS_HBA_OPERATIONAL)
				break;
		}

		if (adprecovery != MEGASAS_HBA_OPERATIONAL) {
			printk(KERN_NOTICE "megasas: reset: Stopping HBA.\n");
			spin_lock_irqsave(&instance->hba_lock, flags);
			instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
			spin_unlock_irqrestore(&instance->hba_lock, flags);
			return FAILED;
		}

		reset_index = 0;
		while (!list_empty(&clist_local)) {
			reset_cmd = list_entry((&clist_local)->next,
						struct megasas_cmd, list);
			list_del_init(&reset_cmd->list);
			if (reset_cmd->scmd) {
				reset_cmd->scmd->result = DID_RESET << 16;
				printk(KERN_NOTICE "%d:%p reset [%02x]\n",
					reset_index, reset_cmd,
					reset_cmd->scmd->cmnd[0]);

				reset_cmd->scmd->scsi_done(reset_cmd->scmd);
				megasas_return_cmd(instance, reset_cmd);
			} else if (reset_cmd->sync_cmd) {
				printk(KERN_NOTICE "megasas: %p synch cmds"
						" reset queue\n",
						reset_cmd);

				reset_cmd->cmd_status = ENODATA;
				instance->instancet->fire_cmd(instance,
						reset_cmd->frame_phys_addr,
						0, instance->reg_set);
			} else {
				printk(KERN_NOTICE "megasas: %p unexpected"
					" cmds lst\n",
					reset_cmd);
			}
			reset_index++;
		}

		return SUCCESS;
	}

	for (i = 0; i < resetwaittime; i++) {

		int outstanding = atomic_read(&instance->fw_outstanding);

		if (!outstanding)
			break;

		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
			printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
			       "commands to complete\n", i, outstanding);
			/*
			 * Call cmd completion routine. Cmd to be
			 * completed directly without depending on isr.
			 */
			megasas_complete_cmd_dpc((unsigned long)instance);
		}

		msleep(1000);
	}

	i = 0;
	kill_adapter_flag = 0;
	do {
		fw_state = instance->instancet->read_fw_status_reg(
					instance->reg_set) & MFI_STATE_MASK;
		if ((fw_state == MFI_STATE_FAULT) &&
		    (instance->disableOnlineCtrlReset == 0)) {
			if (i == 3) {
				kill_adapter_flag = 2;
				break;
			}
			megasas_do_ocr(instance);
			kill_adapter_flag = 1;

			/* wait for 1 sec to let FW finish the pending cmds */
			msleep(1000);
		}
		i++;
	} while (i <= 3);

	if (atomic_read(&instance->fw_outstanding) && !kill_adapter_flag) {
		if (instance->disableOnlineCtrlReset == 0) {
			megasas_do_ocr(instance);

			/* wait to let FW finish the pending cmds */
			for (i = 0; i < wait_time; i++) {
				int outstanding =
					atomic_read(&instance->fw_outstanding);
				if (!outstanding)
					return SUCCESS;
				msleep(1000);
			}
		}
	}

	if (atomic_read(&instance->fw_outstanding) ||
					(kill_adapter_flag == 2)) {
		printk(KERN_NOTICE "megaraid_sas: pending cmds after reset\n");
		/*
		 * Send signal to FW to stop processing any pending cmds.
		 * The controller will be taken offline by the OS now.
		 */
		if ((instance->pdev->device ==
			PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
			(instance->pdev->device ==
			PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
			writel(MFI_STOP_ADP,
				&instance->reg_set->doorbell);
		} else {
			writel(MFI_STOP_ADP,
				&instance->reg_set->inbound_doorbell);
		}
		megasas_dump_pending_frames(instance);
		spin_lock_irqsave(&instance->hba_lock, flags);
		instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
		spin_unlock_irqrestore(&instance->hba_lock, flags);
		return FAILED;
	}

	printk(KERN_NOTICE "megaraid_sas: no pending cmds after reset\n");

	return SUCCESS;
}

/**
 * megasas_generic_reset -	Generic reset routine
 * @scmd:			Mid-layer SCSI command
 *
 * This routine implements a generic reset handler for device, bus and host
 * reset requests. Device, bus and host specific reset handlers can use this
 * function after they do their specific tasks.
 */
static int megasas_generic_reset(struct scsi_cmnd *scmd)
{
	int ret_val;
	struct megasas_instance *instance;

	instance = (struct megasas_instance *)scmd->device->host->hostdata;

	scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
		 scmd->cmnd[0], scmd->retries);

	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
		printk(KERN_ERR "megasas: cannot recover from previous reset "
		       "failures\n");
		return FAILED;
	}

	ret_val = megasas_wait_for_outstanding(instance);
	if (ret_val == SUCCESS)
		printk(KERN_NOTICE "megasas: reset successful\n");
	else
		printk(KERN_ERR "megasas: failed to do reset\n");

	return ret_val;
}
/**
 * megasas_reset_timer - quiesce the adapter if required
 * @scmd:		scsi cmnd
 *
 * Sets the FW busy flag and reduces the host->can_queue if the
 * cmd has not been completed within the timeout period.
 */
static enum
blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
{
	struct megasas_instance *instance;
	unsigned long flags;

	if (time_after(jiffies, scmd->jiffies_at_alloc +
				(MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) {
		return BLK_EH_NOT_HANDLED;
	}

	instance = (struct megasas_instance *)scmd->device->host->hostdata;
	if (!(instance->flag & MEGASAS_FW_BUSY)) {
		/* FW is busy, throttle IO */
		spin_lock_irqsave(instance->host->host_lock, flags);

		instance->host->can_queue = instance->throttlequeuedepth;
		instance->last_time = jiffies;
		instance->flag |= MEGASAS_FW_BUSY;

		spin_unlock_irqrestore(instance->host->host_lock, flags);
	}
	return BLK_EH_RESET_TIMER;
}

/**
 * megasas_reset_device -	Device reset handler entry point
 */
static int megasas_reset_device(struct scsi_cmnd *scmd)
{
	int ret;

	/*
	 * First wait for all commands to complete
	 */
	ret = megasas_generic_reset(scmd);

	return ret;
}

/**
 * megasas_reset_bus_host -	Bus & host reset handler entry point
 */
static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
{
	int ret;
	struct megasas_instance *instance;

	instance = (struct megasas_instance *)scmd->device->host->hostdata;

	/*
	 * First wait for all commands to complete
	 */
	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
		ret = megasas_reset_fusion(scmd->device->host);
	else
		ret = megasas_generic_reset(scmd);

	return ret;
}

/**
 * megasas_bios_param - Returns disk geometry for a disk
 * @sdev:		device handle
 * @bdev:		block device
 * @capacity:		drive capacity
 * @geom:		geometry parameters
 */
static int
megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
		 sector_t capacity, int geom[])
{
	int heads;
	int sectors;
	sector_t cylinders;
	unsigned long tmp;

	/* Default heads (64) & sectors (32) */
	heads = 64;
	sectors = 32;

	tmp = heads * sectors;
	cylinders = capacity;

	sector_div(cylinders, tmp);

	/*
	 * Handle extended translation size for logical drives > 1Gb
	 */

	if (capacity >= 0x200000) {
		heads = 255;
		sectors = 63;
		tmp = heads*sectors;
		cylinders = capacity;
		sector_div(cylinders, tmp);
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	return 0;
}
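/*
 * Worked example (illustrative only): a 4 GiB disk has capacity = 8388608
 * 512-byte sectors, which is >= 0x200000, so the extended translation
 * applies: heads = 255, sectors = 63 and
 * cylinders = 8388608 / (255 * 63) = 522 (integer division).
 */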
static void megasas_aen_polling(struct work_struct *work);

/**
 * megasas_service_aen -	Processes an event notification
 * @instance:			Adapter soft state
 * @cmd:			AEN command completed by the ISR
 *
 * For AEN, driver sends a command down to FW that is held by the FW till an
 * event occurs. When an event of interest occurs, FW completes the command
 * that it was previously holding.
 *
 * This routine sends SIGIO signal to processes that have registered with the
 * driver for AEN.
 */
static void
megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	unsigned long flags;

	/*
	 * Don't signal app if it is just an aborted previously registered aen
	 */
	if ((!cmd->abort_aen) && (instance->unload == 0)) {
		spin_lock_irqsave(&poll_aen_lock, flags);
		megasas_poll_wait_aen = 1;
		spin_unlock_irqrestore(&poll_aen_lock, flags);
		wake_up(&megasas_poll_wait);
		kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
	}
	else
		cmd->abort_aen = 0;

	instance->aen_cmd = NULL;
	megasas_return_cmd(instance, cmd);

	if ((instance->unload == 0) &&
		((instance->issuepend_done == 1))) {
		struct megasas_aen_event *ev;

		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
		if (!ev) {
			printk(KERN_ERR "megasas_service_aen: out of memory\n");
		} else {
			ev->instance = instance;
			instance->ev = ev;
			INIT_DELAYED_WORK(&ev->hotplug_work, megasas_aen_polling);
			schedule_delayed_work(&ev->hotplug_work, 0);
		}
	}
}

static int megasas_change_queue_depth(struct scsi_device *sdev,
				      int queue_depth, int reason)
{
	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	if (queue_depth > sdev->host->can_queue)
		queue_depth = sdev->host->can_queue;
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev),
				queue_depth);

	return queue_depth;
}

/*
 * Scsi host template for megaraid_sas driver
 */
static struct scsi_host_template megasas_template = {

	.module = THIS_MODULE,
	.name = "LSI SAS based MegaRAID driver",
	.proc_name = "megaraid_sas",
	.slave_configure = megasas_slave_configure,
	.slave_alloc = megasas_slave_alloc,
	.queuecommand = megasas_queue_command,
	.eh_device_reset_handler = megasas_reset_device,
	.eh_bus_reset_handler = megasas_reset_bus_host,
	.eh_host_reset_handler = megasas_reset_bus_host,
	.eh_timed_out = megasas_reset_timer,
	.bios_param = megasas_bios_param,
	.use_clustering = ENABLE_CLUSTERING,
	.change_queue_depth = megasas_change_queue_depth,
	.no_write_same = 1,
};

/**
 * megasas_complete_int_cmd -	Completes an internal command
 * @instance:			Adapter soft state
 * @cmd:			Command to be completed
 *
 * The megasas_issue_blocked_cmd() function waits for a command to complete
 * after it issues a command. This function wakes up that waiting routine by
 * calling wake_up() on the wait queue.
 */
static void
megasas_complete_int_cmd(struct megasas_instance *instance,
			 struct megasas_cmd *cmd)
{
	cmd->cmd_status = cmd->frame->io.cmd_status;

	if (cmd->cmd_status == ENODATA) {
		cmd->cmd_status = 0;
	}
	wake_up(&instance->int_cmd_wait_q);
}

/**
 * megasas_complete_abort -	Completes aborting a command
 * @instance:			Adapter soft state
 * @cmd:			Cmd that was issued to abort another cmd
 *
 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
 * after it issues an abort on a previously issued command. This function
 * wakes up all functions waiting on the same wait queue.
 */
static void
megasas_complete_abort(struct megasas_instance *instance,
		       struct megasas_cmd *cmd)
{
	if (cmd->sync_cmd) {
		cmd->sync_cmd = 0;
		cmd->cmd_status = 0;
		wake_up(&instance->abort_cmd_wait_q);
	}

	return;
}

/**
 * megasas_complete_cmd -	Completes a command
 * @instance:			Adapter soft state
 * @cmd:			Command to be completed
 * @alt_status:			If non-zero, use this value as status to
 *				SCSI mid-layer instead of the value returned
 *				by the FW.
 *				This should be used if caller wants
 *				an alternate status (as in the case of
 *				aborted commands)
 */
void
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
		     u8 alt_status)
{
	int exception = 0;
	struct megasas_header *hdr = &cmd->frame->hdr;
	unsigned long flags;
	struct fusion_context *fusion = instance->ctrl_context;
	u32 opcode;

	/* flag for the retry reset */
	cmd->retry_for_fw_reset = 0;

	if (cmd->scmd)
		cmd->scmd->SCp.ptr = NULL;

	switch (hdr->cmd) {
	case MFI_CMD_INVALID:
		/* Some older 1068 controller FW may keep a pended
		   MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
		   when booting the kdump kernel. Ignore this command to
		   prevent a kernel panic on shutdown of the kdump kernel. */
		printk(KERN_WARNING "megaraid_sas: MFI_CMD_INVALID command "
		       "completed.\n");
		printk(KERN_WARNING "megaraid_sas: If you have a controller "
		       "other than PERC5, please upgrade your firmware.\n");
		break;
	case MFI_CMD_PD_SCSI_IO:
	case MFI_CMD_LD_SCSI_IO:

		/*
		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
		 * issued either through an IO path or an IOCTL path. If it
		 * was via IOCTL, we will send it to internal completion.
		 */
		if (cmd->sync_cmd) {
			cmd->sync_cmd = 0;
			megasas_complete_int_cmd(instance, cmd);
			break;
		}
		/* fall through */

	case MFI_CMD_LD_READ:
	case MFI_CMD_LD_WRITE:

		if (alt_status) {
			cmd->scmd->result = alt_status << 16;
			exception = 1;
		}

		if (exception) {

			atomic_dec(&instance->fw_outstanding);

			scsi_dma_unmap(cmd->scmd);
			cmd->scmd->scsi_done(cmd->scmd);
			megasas_return_cmd(instance, cmd);

			break;
		}

		switch (hdr->cmd_status) {

		case MFI_STAT_OK:
			cmd->scmd->result = DID_OK << 16;
			break;

		case MFI_STAT_SCSI_IO_FAILED:
		case MFI_STAT_LD_INIT_IN_PROGRESS:
			cmd->scmd->result =
			    (DID_ERROR << 16) | hdr->scsi_status;
			break;

		case MFI_STAT_SCSI_DONE_WITH_ERROR:

			cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;

			if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
				memset(cmd->scmd->sense_buffer, 0,
				       SCSI_SENSE_BUFFERSIZE);
				memcpy(cmd->scmd->sense_buffer, cmd->sense,
				       hdr->sense_len);

				cmd->scmd->result |= DRIVER_SENSE << 24;
			}

			break;

		case MFI_STAT_LD_OFFLINE:
		case MFI_STAT_DEVICE_NOT_FOUND:
			cmd->scmd->result = DID_BAD_TARGET << 16;
			break;

		default:
			printk(KERN_DEBUG "megasas: MFI FW status %#x\n",
			       hdr->cmd_status);
			cmd->scmd->result = DID_ERROR << 16;
			break;
		}

		atomic_dec(&instance->fw_outstanding);

		scsi_dma_unmap(cmd->scmd);
		cmd->scmd->scsi_done(cmd->scmd);
		megasas_return_cmd(instance, cmd);

		break;

	case MFI_CMD_SMP:
	case MFI_CMD_STP:
	case MFI_CMD_DCMD:
		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
		/* Check for LD map update */
		if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
			&& (cmd->frame->dcmd.mbox.b[1] == 1)) {
			fusion->fast_path_io = 0;
			spin_lock_irqsave(instance->host->host_lock, flags);
			if (cmd->frame->hdr.cmd_status != 0) {
				if (cmd->frame->hdr.cmd_status !=
				    MFI_STAT_NOT_FOUND)
					printk(KERN_WARNING "megasas: map sync"
					       " failed, status = 0x%x.\n",
					       cmd->frame->hdr.cmd_status);
				else {
					megasas_return_cmd(instance, cmd);
					spin_unlock_irqrestore(
						instance->host->host_lock,
						flags);
					break;
				}
			} else
				instance->map_id++;
			megasas_return_cmd(instance, cmd);

			/*
			 * Set fast path IO to ZERO.
			 * Validate Map will set proper value.
			 * Meanwhile all IOs will go as LD IO.
			 */
			if (MR_ValidateMapInfo(instance))
				fusion->fast_path_io = 1;
			else
				fusion->fast_path_io = 0;
			megasas_sync_map_info(instance);
			spin_unlock_irqrestore(instance->host->host_lock,
					       flags);
			break;
		}
		if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
		    opcode == MR_DCMD_CTRL_EVENT_GET) {
			spin_lock_irqsave(&poll_aen_lock, flags);
			megasas_poll_wait_aen = 0;
			spin_unlock_irqrestore(&poll_aen_lock, flags);
		}

		/*
		 * See if got an event notification
		 */
		if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
			megasas_service_aen(instance, cmd);
		else
			megasas_complete_int_cmd(instance, cmd);

		break;

	case MFI_CMD_ABORT:
		/*
		 * Cmd issued to abort another cmd returned
		 */
		megasas_complete_abort(instance, cmd);
		break;

	default:
		printk("megasas: Unknown command completed! [0x%X]\n",
		       hdr->cmd);
		break;
	}
}

/**
 * megasas_issue_pending_cmds_again -	issue all pending cmds
 *					in FW again because of the fw reset
 * @instance:				Adapter soft state
 */
static inline void
megasas_issue_pending_cmds_again(struct megasas_instance *instance)
{
	struct megasas_cmd *cmd;
	struct list_head clist_local;
	union megasas_evt_class_locale class_locale;
	unsigned long flags;
	u32 seq_num;

	INIT_LIST_HEAD(&clist_local);
	spin_lock_irqsave(&instance->hba_lock, flags);
	list_splice_init(&instance->internal_reset_pending_q, &clist_local);
	spin_unlock_irqrestore(&instance->hba_lock, flags);

	while (!list_empty(&clist_local)) {
		cmd = list_entry((&clist_local)->next,
					struct megasas_cmd, list);
		list_del_init(&cmd->list);

		if (cmd->sync_cmd || cmd->scmd) {
			printk(KERN_NOTICE "megaraid_sas: command %p, %p:%d"
				" detected to be pending while HBA reset.\n",
					cmd, cmd->scmd, cmd->sync_cmd);

			cmd->retry_for_fw_reset++;

			if (cmd->retry_for_fw_reset == 3) {
				printk(KERN_NOTICE "megaraid_sas: cmd %p, %p:%d"
					" was tried multiple times during reset."
					" Shutting down the HBA\n",
					cmd, cmd->scmd, cmd->sync_cmd);
				megaraid_sas_kill_hba(instance);

				instance->adprecovery =
						MEGASAS_HW_CRITICAL_ERROR;
				return;
			}
		}

		if (cmd->sync_cmd == 1) {
			if (cmd->scmd) {
				printk(KERN_NOTICE "megaraid_sas: unexpected"
					" cmd attached to internal command!\n");
			}
			printk(KERN_NOTICE "megasas: %p synchronous cmd"
						" on the internal reset queue,"
						" issue it again.\n", cmd);
			cmd->cmd_status = ENODATA;
			instance->instancet->fire_cmd(instance,
							cmd->frame_phys_addr,
							0, instance->reg_set);
		} else if (cmd->scmd) {
			printk(KERN_NOTICE "megasas: %p scsi cmd [%02x]"
				" detected on the internal queue, issue again.\n",
				cmd, cmd->scmd->cmnd[0]);

			atomic_inc(&instance->fw_outstanding);
			instance->instancet->fire_cmd(instance,
					cmd->frame_phys_addr,
					cmd->frame_count-1, instance->reg_set);
		} else {
			printk(KERN_NOTICE "megasas: %p unexpected cmd on the"
				" internal reset defer list while re-issue!!\n",
				cmd);
		}
	}

	if (instance->aen_cmd) {
		printk(KERN_NOTICE "megaraid_sas: aen_cmd in def process\n");
		megasas_return_cmd(instance, instance->aen_cmd);

		instance->aen_cmd = NULL;
	}

	/*
	 * Initiate AEN (Asynchronous Event Notification)
	 */
	seq_num = instance->last_seq_num;
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

	megasas_register_aen(instance, seq_num, class_locale.word);
}

/**
 * Move the internal reset pending commands to a deferred queue.
 *
 * We move the commands pending at internal reset time to a
 * pending queue. This queue would be flushed after successful
 * completion of the internal reset sequence. If the internal reset
 * did not complete in time, the kernel reset handler would flush
 * these commands.
 */
static void
megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
{
	struct megasas_cmd *cmd;
	int i;
	u32 max_cmd = instance->max_fw_cmds;
	u32 defer_index;
	unsigned long flags;

	defer_index = 0;
	spin_lock_irqsave(&instance->cmd_pool_lock, flags);
	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		if (cmd->sync_cmd == 1 || cmd->scmd) {
			printk(KERN_NOTICE "megasas: moving cmd[%d]:%p:%d:%p"
					" on the defer queue as internal\n",
				defer_index, cmd, cmd->sync_cmd, cmd->scmd);
			if (!list_empty(&cmd->list)) {
				printk(KERN_NOTICE "megaraid_sas: ERROR while"
					" moving this cmd:%p, %d %p, it was"
					" discovered on some list?\n",
					cmd, cmd->sync_cmd, cmd->scmd);

				list_del_init(&cmd->list);
			}
			defer_index++;
			list_add_tail(&cmd->list,
				&instance->internal_reset_pending_q);
		}
	}
	spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
}


static void
process_fw_state_change_wq(struct work_struct *work)
{
	struct megasas_instance *instance =
		container_of(work, struct megasas_instance, work_init);
	u32 wait;
	unsigned long flags;

	if (instance->adprecovery != MEGASAS_ADPRESET_SM_INFAULT) {
		printk(KERN_NOTICE "megaraid_sas: error, recovery state %x\n",
				instance->adprecovery);
		return;
	}

	if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) {
		printk(KERN_NOTICE "megaraid_sas: FW detected to be in fault"
					" state, restarting it...\n");

		instance->instancet->disable_intr(instance);
		atomic_set(&instance->fw_outstanding, 0);

		atomic_set(&instance->fw_reset_no_pci_access, 1);
		instance->instancet->adp_reset(instance, instance->reg_set);
		atomic_set(&instance->fw_reset_no_pci_access, 0);

		printk(KERN_NOTICE "megaraid_sas: FW restarted successfully,"
					" initiating next stage...\n");

		printk(KERN_NOTICE "megaraid_sas: HBA recovery state machine,"
					" state 2 starting...\n");

		/* wait for about 30 seconds before starting the second init */
		for (wait = 0; wait < 30; wait++) {
			msleep(1000);
		}

		if (megasas_transition_to_ready(instance, 1)) {
			printk(KERN_NOTICE "megaraid_sas: adapter not ready\n");

			megaraid_sas_kill_hba(instance);
			instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
			return;
		}

		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
			(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
			(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
			) {
			*instance->consumer = *instance->producer;
		} else {
			*instance->consumer = 0;
			*instance->producer = 0;
		}

		megasas_issue_init_mfi(instance);

		spin_lock_irqsave(&instance->hba_lock, flags);
		instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
		spin_unlock_irqrestore(&instance->hba_lock, flags);
		instance->instancet->enable_intr(instance);

		megasas_issue_pending_cmds_again(instance);
		instance->issuepend_done = 1;
	}
	return;
}

/**
 * megasas_deplete_reply_queue -	Processes all completed commands
 * @instance:				Adapter soft state
 * @alt_status:				Alternate status to be returned to
 *					SCSI mid-layer instead of the status
 *					returned by the FW
 * Note: this must be called with hba lock held
 */
static int
megasas_deplete_reply_queue(struct megasas_instance *instance,
					u8 alt_status)
{
	u32 mfiStatus;
	u32 fw_state;

	if ((mfiStatus = instance->instancet->check_reset(instance,
					instance->reg_set)) == 1) {
		return IRQ_HANDLED;
	}

	if ((mfiStatus = instance->instancet->clear_intr(
						instance->reg_set)
						) == 0) {
		/* Hardware may not set outbound_intr_status in MSI-X mode */
		if (!instance->msix_vectors)
			return IRQ_NONE;
	}

	instance->mfiStatus = mfiStatus;

	if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
		fw_state = instance->instancet->read_fw_status_reg(
				instance->reg_set) & MFI_STATE_MASK;

		if (fw_state != MFI_STATE_FAULT) {
			printk(KERN_NOTICE "megaraid_sas: fw state:%x\n",
						fw_state);
		}

		if ((fw_state == MFI_STATE_FAULT) &&
				(instance->disableOnlineCtrlReset == 0)) {
			printk(KERN_NOTICE "megaraid_sas: wait adp restart\n");

			if ((instance->pdev->device ==
					PCI_DEVICE_ID_LSI_SAS1064R) ||
				(instance->pdev->device ==
					PCI_DEVICE_ID_DELL_PERC5) ||
				(instance->pdev->device ==
					PCI_DEVICE_ID_LSI_VERDE_ZCR)) {

				*instance->consumer =
					cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
			}


			instance->instancet->disable_intr(instance);
			instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
			instance->issuepend_done = 0;

			atomic_set(&instance->fw_outstanding, 0);
			megasas_internal_reset_defer_cmds(instance);

			printk(KERN_NOTICE "megasas: fwState=%x, stage:%d\n",
					fw_state, instance->adprecovery);

			schedule_work(&instance->work_init);
			return IRQ_HANDLED;

		} else {
			printk(KERN_NOTICE "megasas: fwstate:%x, dis_OCR=%x\n",
				fw_state, instance->disableOnlineCtrlReset);
		}
	}

	tasklet_schedule(&instance->isr_tasklet);
	return IRQ_HANDLED;
}

/**
 * megasas_isr - isr entry point
 */
static irqreturn_t megasas_isr(int irq, void *devp)
{
	struct megasas_irq_context *irq_context = devp;
	struct megasas_instance *instance = irq_context->instance;
	unsigned long flags;
	irqreturn_t rc;

	if (atomic_read(&instance->fw_reset_no_pci_access))
		return IRQ_HANDLED;

	spin_lock_irqsave(&instance->hba_lock, flags);
	rc = megasas_deplete_reply_queue(instance, DID_OK);
	spin_unlock_irqrestore(&instance->hba_lock, flags);

	return rc;
}

/**
 * megasas_transition_to_ready -	Move the FW to READY state
 * @instance:				Adapter soft state
 *
 * During initialization, the FW can potentially be in any one of several
 * possible states. If the FW is in one of the operational or
 * waiting-for-handshake states, the driver must take steps to bring it to
 * ready state. Otherwise, it has to wait for the ready state.
 */
int
megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
{
	int i;
	u8 max_wait;
	u32 fw_state;
	u32 cur_state;
	u32 abs_state, curr_abs_state;

	fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;

	if (fw_state != MFI_STATE_READY)
		printk(KERN_INFO "megasas: Waiting for FW to come to ready"
		       " state\n");

	while (fw_state != MFI_STATE_READY) {

		abs_state =
		instance->instancet->read_fw_status_reg(instance->reg_set);

		switch (fw_state) {

		case MFI_STATE_FAULT:
			printk(KERN_DEBUG "megasas: FW in FAULT state!!\n");
			if (ocr) {
				max_wait = MEGASAS_RESET_WAIT_TIME;
				cur_state = MFI_STATE_FAULT;
				break;
			} else
				return -ENODEV;

		case MFI_STATE_WAIT_HANDSHAKE:
			/*
			 * Set the CLR bit in inbound doorbell
			 */
			if ((instance->pdev->device ==
				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
				(instance->pdev->device ==
				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
				(instance->pdev->device ==
				 PCI_DEVICE_ID_LSI_FUSION) ||
				(instance->pdev->device ==
				PCI_DEVICE_ID_LSI_INVADER) ||
				(instance->pdev->device ==
				PCI_DEVICE_ID_LSI_FURY)) {
				writel(
				  MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
				  &instance->reg_set->doorbell);
			} else {
				writel(
				    MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
					&instance->reg_set->inbound_doorbell);
			}

			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;

		case MFI_STATE_BOOT_MESSAGE_PENDING:
			if ((instance->pdev->device ==
			     PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
				(instance->pdev->device ==
				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
				(instance->pdev->device ==
				 PCI_DEVICE_ID_LSI_FUSION) ||
				(instance->pdev->device ==
				PCI_DEVICE_ID_LSI_INVADER) ||
				(instance->pdev->device ==
				PCI_DEVICE_ID_LSI_FURY)) {
				writel(MFI_INIT_HOTPLUG,
				       &instance->reg_set->doorbell);
			} else
				writel(MFI_INIT_HOTPLUG,
					&instance->reg_set->inbound_doorbell);

			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;

		case MFI_STATE_OPERATIONAL:
			/*
			 * Bring it to READY state; assuming max wait 10 secs
			 */
			instance->instancet->disable_intr(instance);
			if ((instance->pdev->device ==
				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
				(instance->pdev->device ==
				PCI_DEVICE_ID_LSI_SAS0071SKINNY)  ||
				(instance->pdev->device ==
				PCI_DEVICE_ID_LSI_FUSION) ||
				(instance->pdev->device ==
				PCI_DEVICE_ID_LSI_INVADER) ||
				(instance->pdev->device ==
				PCI_DEVICE_ID_LSI_FURY)) {
				writel(MFI_RESET_FLAGS,
					&instance->reg_set->doorbell);
				if ((instance->pdev->device ==
					PCI_DEVICE_ID_LSI_FUSION) ||
					(instance->pdev->device ==
					PCI_DEVICE_ID_LSI_INVADER) ||
					(instance->pdev->device ==
					PCI_DEVICE_ID_LSI_FURY)) {
					for (i = 0; i < (10 * 1000); i += 20) {
						if (readl(
							    &instance->
							    reg_set->
							    doorbell) & 1)
							msleep(20);
						else
							break;
					}
				}
			} else
				writel(MFI_RESET_FLAGS,
					&instance->reg_set->inbound_doorbell);

			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_OPERATIONAL;
			break;

		case MFI_STATE_UNDEFINED:
			/*
			 * This state should not last for more than 2 seconds
			 */
			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_UNDEFINED;
			break;

		case MFI_STATE_BB_INIT:
			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_BB_INIT;
			break;

		case MFI_STATE_FW_INIT:
			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_FW_INIT;
			break;

		case MFI_STATE_FW_INIT_2:
			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_FW_INIT_2;
			break;

		case MFI_STATE_DEVICE_SCAN:
			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;

		case MFI_STATE_FLUSH_CACHE:
			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_FLUSH_CACHE;
			break;

		default:
			printk(KERN_DEBUG "megasas: Unknown state 0x%x\n",
			       fw_state);
			return -ENODEV;
		}

		/*
		 * The cur_state should not last for more than max_wait secs
		 */
		for (i = 0; i < (max_wait * 1000); i++) {
			fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) &
					MFI_STATE_MASK ;
			curr_abs_state =
			instance->instancet->read_fw_status_reg(instance->reg_set);

			if (abs_state == curr_abs_state) {
				msleep(1);
			} else
				break;
		}

		/*
		 * Return error if fw_state hasn't changed after max_wait
		 */
		if (curr_abs_state == abs_state) {
			printk(KERN_DEBUG "FW state [%d] hasn't changed "
			       "in %d secs\n", fw_state, max_wait);
			return -ENODEV;
		}
	}
	printk(KERN_INFO "megasas: FW now in Ready state\n");

	return 0;
}

/**
 * megasas_teardown_frame_pool -	Destroy the cmd frame DMA pool
 * @instance:				Adapter soft state
 */
static void megasas_teardown_frame_pool(struct megasas_instance *instance)
{
	int i;
	u32 max_cmd = instance->max_mfi_cmds;
	struct megasas_cmd *cmd;

	if (!instance->frame_dma_pool)
		return;

	/*
	 * Return all frames to pool
	 */
	for (i = 0; i < max_cmd; i++) {

		cmd = instance->cmd_list[i];

		if (cmd->frame)
			pci_pool_free(instance->frame_dma_pool, cmd->frame,
				      cmd->frame_phys_addr);

		if (cmd->sense)
			pci_pool_free(instance->sense_dma_pool, cmd->sense,
				      cmd->sense_phys_addr);
	}

	/*
	 * Now destroy the pool itself
	 */
	pci_pool_destroy(instance->frame_dma_pool);
	pci_pool_destroy(instance->sense_dma_pool);

	instance->frame_dma_pool = NULL;
	instance->sense_dma_pool = NULL;
}
* By making cmd->index as the context instead of the &cmd, we can * always use 32bit context regardless of the architecture */ for (i = 0; i < max_cmd; i++) { cmd = instance->cmd_list[i]; cmd->frame = pci_pool_alloc(instance->frame_dma_pool, GFP_KERNEL, &cmd->frame_phys_addr); cmd->sense = pci_pool_alloc(instance->sense_dma_pool, GFP_KERNEL, &cmd->sense_phys_addr); /* * megasas_teardown_frame_pool() takes care of freeing * whatever has been allocated */ if (!cmd->frame || !cmd->sense) { printk(KERN_DEBUG "megasas: pci_pool_alloc failed \n"); megasas_teardown_frame_pool(instance); return -ENOMEM; } memset(cmd->frame, 0, total_sz); cmd->frame->io.context = cpu_to_le32(cmd->index); cmd->frame->io.pad_0 = 0; if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) && (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) && (instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) && (reset_devices)) cmd->frame->hdr.cmd = MFI_CMD_INVALID; } return 0; } /** * megasas_free_cmds - Free all the cmds in the free cmd pool * @instance: Adapter soft state */ void megasas_free_cmds(struct megasas_instance *instance) { int i; /* First free the MFI frame pool */ megasas_teardown_frame_pool(instance); /* Free all the commands in the cmd_list */ for (i = 0; i < instance->max_mfi_cmds; i++) kfree(instance->cmd_list[i]); /* Free the cmd_list buffer itself */ kfree(instance->cmd_list); instance->cmd_list = NULL; INIT_LIST_HEAD(&instance->cmd_pool); } /** * megasas_alloc_cmds - Allocates the command packets * @instance: Adapter soft state * * Each command that is issued to the FW, whether IO commands from the OS or * internal commands like IOCTLs, are wrapped in local data structure called * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to * the FW. * * Each frame has a 32-bit field called context (tag). This context is used * to get back the megasas_cmd from the frame when a frame gets completed in * the ISR. Typically the address of the megasas_cmd itself would be used as * the context. But we wanted to keep the differences between 32 and 64 bit * systems to the mininum. We always use 32 bit integers for the context. In * this driver, the 32 bit values are the indices into an array cmd_list. * This array is used only to look up the megasas_cmd given the context. The * free commands themselves are maintained in a linked list called cmd_pool. */ int megasas_alloc_cmds(struct megasas_instance *instance) { int i; int j; u32 max_cmd; struct megasas_cmd *cmd; max_cmd = instance->max_mfi_cmds; /* * instance->cmd_list is an array of struct megasas_cmd pointers. * Allocate the dynamic array first and then allocate individual * commands. 
	 */
	instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd *),
				     GFP_KERNEL);

	if (!instance->cmd_list) {
		printk(KERN_DEBUG "megasas: out of memory\n");
		return -ENOMEM;
	}

	for (i = 0; i < max_cmd; i++) {
		instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
						GFP_KERNEL);

		if (!instance->cmd_list[i]) {

			for (j = 0; j < i; j++)
				kfree(instance->cmd_list[j]);

			kfree(instance->cmd_list);
			instance->cmd_list = NULL;

			return -ENOMEM;
		}
	}

	/*
	 * Add all the commands to command pool (instance->cmd_pool)
	 */
	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		memset(cmd, 0, sizeof(struct megasas_cmd));
		cmd->index = i;
		cmd->scmd = NULL;
		cmd->instance = instance;
		list_add_tail(&cmd->list, &instance->cmd_pool);
	}

	/*
	 * Create a frame pool and assign one frame to each cmd
	 */
	if (megasas_create_frame_pool(instance)) {
		printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n");
		megasas_free_cmds(instance);
		return -ENOMEM;
	}

	return 0;
}

/*
 * megasas_get_pd_list -	Returns FW's pd_list structure
 * @instance:			Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's controller PD
 * list structure. This information is mainly used to find out which
 * physical drives the FW exposes to the host as SYSTEM drives.
 */
static int megasas_get_pd_list(struct megasas_instance *instance)
{
	int ret = 0, pd_index = 0;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	struct MR_PD_LIST *ci;
	struct MR_PD_ADDRESS *pd_addr;
	dma_addr_t ci_h = 0;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		printk(KERN_DEBUG "megasas (get_pd_list): Failed to get cmd\n");
		return -ENOMEM;
	}

	dcmd = &cmd->frame->dcmd;

	ci = pci_alloc_consistent(instance->pdev,
		  MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);

	if (!ci) {
		printk(KERN_DEBUG "Failed to alloc mem for pd_list\n");
		megasas_return_cmd(instance, cmd);
		return -ENOMEM;
	}

	memset(ci, 0, sizeof(*ci));
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
	dcmd->mbox.b[1] = 0;
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD *
					  sizeof(struct MR_PD_LIST));
	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
	dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD *
						sizeof(struct MR_PD_LIST));

	if (!megasas_issue_polled(instance, cmd)) {
		ret = 0;
	} else {
		ret = -1;
	}

	/*
	 * The following loop copies the FW's PD list into
	 * instance->pd_list.
	 */
	pd_addr = ci->addr;
	if (ret == 0 &&
	    (le32_to_cpu(ci->count) <
	     (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) {

		memset(instance->pd_list, 0,
		       MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));

		for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {

			instance->pd_list[pd_addr->deviceId].tid =
				le16_to_cpu(pd_addr->deviceId);
			instance->pd_list[pd_addr->deviceId].driveType =
				pd_addr->scsiDevType;
			instance->pd_list[pd_addr->deviceId].driveState =
				MR_PD_STATE_SYSTEM;
			pd_addr++;
		}
	}

	pci_free_consistent(instance->pdev,
			    MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
			    ci, ci_h);
	megasas_return_cmd(instance, cmd);

	return ret;
}

/*
 * megasas_get_ld_list -	Returns FW's ld_list structure
 * @instance:			Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's controller LD
 * list structure. This information is mainly used to find out the
 * logical drives that the FW exposes to the host.
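 * (Uses the MR_DCMD_LD_GET_LIST opcode; the MR_LD_LIST reply buffer is
 * parsed into instance->ld_ids below.)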
*/ static int megasas_get_ld_list(struct megasas_instance *instance) { int ret = 0, ld_index = 0, ids = 0; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; struct MR_LD_LIST *ci; dma_addr_t ci_h = 0; u32 ld_count; cmd = megasas_get_cmd(instance); if (!cmd) { printk(KERN_DEBUG "megasas_get_ld_list: Failed to get cmd\n"); return -ENOMEM; } dcmd = &cmd->frame->dcmd; ci = pci_alloc_consistent(instance->pdev, sizeof(struct MR_LD_LIST), &ci_h); if (!ci) { printk(KERN_DEBUG "Failed to alloc mem in get_ld_list\n"); megasas_return_cmd(instance, cmd); return -ENOMEM; } memset(ci, 0, sizeof(*ci)); memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST)); dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST); dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST)); dcmd->pad_0 = 0; if (!megasas_issue_polled(instance, cmd)) { ret = 0; } else { ret = -1; } ld_count = le32_to_cpu(ci->ldCount); /* the following function will get the instance PD LIST */ if ((ret == 0) && (ld_count <= MAX_LOGICAL_DRIVES)) { memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); for (ld_index = 0; ld_index < ld_count; ld_index++) { if (ci->ldList[ld_index].state != 0) { ids = ci->ldList[ld_index].ref.targetId; instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId; } } } pci_free_consistent(instance->pdev, sizeof(struct MR_LD_LIST), ci, ci_h); megasas_return_cmd(instance, cmd); return ret; } /** * megasas_ld_list_query - Returns FW's ld_list structure * @instance: Adapter soft state * @ld_list: ld_list structure * * Issues an internal command (DCMD) to get the FW's controller PD * list structure. This information is mainly used to find out SYSTEM * supported by the FW. 
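 * If the FW does not support this newer query DCMD, callers fall back
 * to megasas_get_ld_list(); see megasas_init_fw() and the AEN polling
 * path below.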
*/ static int megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) { int ret = 0, ld_index = 0, ids = 0; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; struct MR_LD_TARGETID_LIST *ci; dma_addr_t ci_h = 0; u32 tgtid_count; cmd = megasas_get_cmd(instance); if (!cmd) { printk(KERN_WARNING "megasas:(megasas_ld_list_query): Failed to get cmd\n"); return -ENOMEM; } dcmd = &cmd->frame->dcmd; ci = pci_alloc_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST), &ci_h); if (!ci) { printk(KERN_WARNING "megasas: Failed to alloc mem for ld_list_query\n"); megasas_return_cmd(instance, cmd); return -ENOMEM; } memset(ci, 0, sizeof(*ci)); memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->mbox.b[0] = query_type; dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY); dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); dcmd->pad_0 = 0; if (!megasas_issue_polled(instance, cmd) && !dcmd->cmd_status) { ret = 0; } else { /* On failure, call older LD list DCMD */ ret = 1; } tgtid_count = le32_to_cpu(ci->count); if ((ret == 0) && (tgtid_count <= (MAX_LOGICAL_DRIVES))) { memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); for (ld_index = 0; ld_index < tgtid_count; ld_index++) { ids = ci->targetId[ld_index]; instance->ld_ids[ids] = ci->targetId[ld_index]; } } pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST), ci, ci_h); megasas_return_cmd(instance, cmd); return ret; } /** * megasas_get_controller_info - Returns FW's controller structure * @instance: Adapter soft state * @ctrl_info: Controller information structure * * Issues an internal command (DCMD) to get the FW's controller structure. * This information is mainly used to find out the maximum IO transfer per * command supported by the FW. 
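 * The same structure also reports stripe size, controller memory size
 * (MR vs iMR) and adapter-operation flags that megasas_init_fw() uses.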
*/ static int megasas_get_ctrl_info(struct megasas_instance *instance, struct megasas_ctrl_info *ctrl_info) { int ret = 0; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; struct megasas_ctrl_info *ci; dma_addr_t ci_h = 0; cmd = megasas_get_cmd(instance); if (!cmd) { printk(KERN_DEBUG "megasas: Failed to get a free cmd\n"); return -ENOMEM; } dcmd = &cmd->frame->dcmd; ci = pci_alloc_consistent(instance->pdev, sizeof(struct megasas_ctrl_info), &ci_h); if (!ci) { printk(KERN_DEBUG "Failed to alloc mem for ctrl info\n"); megasas_return_cmd(instance, cmd); return -ENOMEM; } memset(ci, 0, sizeof(*ci)); memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info)); dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO); dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info)); if (!megasas_issue_polled(instance, cmd)) { ret = 0; memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info)); } else { ret = -1; } pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info), ci, ci_h); megasas_return_cmd(instance, cmd); return ret; } /** * megasas_issue_init_mfi - Initializes the FW * @instance: Adapter soft state * * Issues the INIT MFI cmd */ static int megasas_issue_init_mfi(struct megasas_instance *instance) { u32 context; struct megasas_cmd *cmd; struct megasas_init_frame *init_frame; struct megasas_init_queue_info *initq_info; dma_addr_t init_frame_h; dma_addr_t initq_info_h; /* * Prepare a init frame. Note the init frame points to queue info * structure. Each frame has SGL allocated after first 64 bytes. For * this frame - since we don't need any SGL - we use SGL's space as * queue info structure * * We will not get a NULL command below. We just created the pool. 
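	 * Layout reminder: the MFI init frame occupies the first 64 bytes of
	 * cmd->frame, and the queue info structure is placed immediately
	 * after it, at frame_phys_addr + 64.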
*/ cmd = megasas_get_cmd(instance); init_frame = (struct megasas_init_frame *)cmd->frame; initq_info = (struct megasas_init_queue_info *) ((unsigned long)init_frame + 64); init_frame_h = cmd->frame_phys_addr; initq_info_h = init_frame_h + 64; context = init_frame->context; memset(init_frame, 0, MEGAMFI_FRAME_SIZE); memset(initq_info, 0, sizeof(struct megasas_init_queue_info)); init_frame->context = context; initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1); initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h); initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h); initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h); init_frame->cmd = MFI_CMD_INIT; init_frame->cmd_status = 0xFF; init_frame->queue_info_new_phys_addr_lo = cpu_to_le32(lower_32_bits(initq_info_h)); init_frame->queue_info_new_phys_addr_hi = cpu_to_le32(upper_32_bits(initq_info_h)); init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info)); /* * disable the intr before firing the init frame to FW */ instance->instancet->disable_intr(instance); /* * Issue the init frame in polled mode */ if (megasas_issue_polled(instance, cmd)) { printk(KERN_ERR "megasas: Failed to init firmware\n"); megasas_return_cmd(instance, cmd); goto fail_fw_init; } megasas_return_cmd(instance, cmd); return 0; fail_fw_init: return -EINVAL; } static u32 megasas_init_adapter_mfi(struct megasas_instance *instance) { struct megasas_register_set __iomem *reg_set; u32 context_sz; u32 reply_q_sz; reg_set = instance->reg_set; /* * Get various operational parameters from status register */ instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF; /* * Reduce the max supported cmds by 1. This is to ensure that the * reply_q_sz (1 more than the max cmd that driver may send) * does not exceed max cmds that the FW can support */ instance->max_fw_cmds = instance->max_fw_cmds-1; instance->max_mfi_cmds = instance->max_fw_cmds; instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >> 0x10; /* * Create a pool of commands */ if (megasas_alloc_cmds(instance)) goto fail_alloc_cmds; /* * Allocate memory for reply queue. Length of reply queue should * be _one_ more than the maximum commands handled by the firmware. * * Note: When FW completes commands, it places corresponding contex * values in this circular reply queue. This circular queue is a fairly * typical producer-consumer queue. FW is the producer (of completed * commands) and the driver is the consumer. 
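	 * With context_sz = 4 below, the queue occupies
	 * 4 * (max_fw_cmds + 1) bytes of coherent DMA memory.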
	 */
	context_sz = sizeof(u32);
	reply_q_sz = context_sz * (instance->max_fw_cmds + 1);

	instance->reply_queue = pci_alloc_consistent(instance->pdev,
						     reply_q_sz,
						     &instance->reply_queue_h);

	if (!instance->reply_queue) {
		printk(KERN_DEBUG "megasas: Out of DMA mem for reply queue\n");
		goto fail_reply_queue;
	}

	if (megasas_issue_init_mfi(instance))
		goto fail_fw_init;

	instance->fw_support_ieee =
		instance->instancet->read_fw_status_reg(reg_set) &
		0x04000000;

	printk(KERN_NOTICE "megasas_init_mfi: fw_support_ieee=%d\n",
	       instance->fw_support_ieee);

	if (instance->fw_support_ieee)
		instance->flag_ieee = 1;

	return 0;

fail_fw_init:

	pci_free_consistent(instance->pdev, reply_q_sz,
			    instance->reply_queue, instance->reply_queue_h);
fail_reply_queue:
	megasas_free_cmds(instance);

fail_alloc_cmds:
	return 1;
}

/**
 * megasas_init_fw -	Initializes the FW
 * @instance:		Adapter soft state
 *
 * This is the main function for initializing firmware
 */
static int megasas_init_fw(struct megasas_instance *instance)
{
	u32 max_sectors_1;
	u32 max_sectors_2;
	u32 tmp_sectors, msix_enable, scratch_pad_2;
	struct megasas_register_set __iomem *reg_set;
	struct megasas_ctrl_info *ctrl_info;
	unsigned long bar_list;
	int i, loop, fw_msix_count = 0;

	/* Find first memory bar */
	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
	instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
	instance->base_addr = pci_resource_start(instance->pdev, instance->bar);
	if (pci_request_selected_regions(instance->pdev, instance->bar,
					 "megasas: LSI")) {
		printk(KERN_DEBUG "megasas: IO memory region busy!\n");
		return -EBUSY;
	}

	instance->reg_set = ioremap_nocache(instance->base_addr, 8192);

	if (!instance->reg_set) {
		printk(KERN_DEBUG "megasas: Failed to map IO mem\n");
		goto fail_ioremap;
	}

	reg_set = instance->reg_set;

	switch (instance->pdev->device) {
	case PCI_DEVICE_ID_LSI_FUSION:
	case PCI_DEVICE_ID_LSI_INVADER:
	case PCI_DEVICE_ID_LSI_FURY:
		instance->instancet = &megasas_instance_template_fusion;
		break;
	case PCI_DEVICE_ID_LSI_SAS1078R:
	case PCI_DEVICE_ID_LSI_SAS1078DE:
		instance->instancet = &megasas_instance_template_ppc;
		break;
	case PCI_DEVICE_ID_LSI_SAS1078GEN2:
	case PCI_DEVICE_ID_LSI_SAS0079GEN2:
		instance->instancet = &megasas_instance_template_gen2;
		break;
	case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
	case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
		instance->instancet = &megasas_instance_template_skinny;
		break;
	case PCI_DEVICE_ID_LSI_SAS1064R:
	case PCI_DEVICE_ID_DELL_PERC5:
	default:
		instance->instancet = &megasas_instance_template_xscale;
		break;
	}

	if (megasas_transition_to_ready(instance, 0)) {
		atomic_set(&instance->fw_reset_no_pci_access, 1);
		instance->instancet->adp_reset
			(instance, instance->reg_set);
		atomic_set(&instance->fw_reset_no_pci_access, 0);
		dev_info(&instance->pdev->dev,
			 "megasas: FW restarted successfully from %s!\n",
			 __func__);

		/* wait for about 30 seconds before retrying */
		ssleep(30);

		if (megasas_transition_to_ready(instance, 0))
			goto fail_ready_state;
	}

	/*
	 * MSI-X host index 0 is common for all adapters.
	 * It is used for all MPT-based adapters.
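	 * reply_post_host_index_addr[0] is taken from
	 * MPI2_REPLY_POST_HOST_INDEX_OFFSET here; indices 1 onwards are
	 * filled in further below for controllers with more than 8 vectors.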
*/ instance->reply_post_host_index_addr[0] = (u32 *)((u8 *)instance->reg_set + MPI2_REPLY_POST_HOST_INDEX_OFFSET); /* Check if MSI-X is supported while in ready state */ msix_enable = (instance->instancet->read_fw_status_reg(reg_set) & 0x4000000) >> 0x1a; if (msix_enable && !msix_disable) { scratch_pad_2 = readl (&instance->reg_set->outbound_scratch_pad_2); /* Check max MSI-X vectors */ if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) { instance->msix_vectors = (scratch_pad_2 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; fw_msix_count = instance->msix_vectors; if (msix_vectors) instance->msix_vectors = min(msix_vectors, instance->msix_vectors); } else if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { /* Invader/Fury supports more than 8 MSI-X */ instance->msix_vectors = ((scratch_pad_2 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; fw_msix_count = instance->msix_vectors; /* Save 1-15 reply post index address to local memory * Index 0 is already saved from reg offset * MPI2_REPLY_POST_HOST_INDEX_OFFSET */ for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) { instance->reply_post_host_index_addr[loop] = (u32 *)((u8 *)instance->reg_set + MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET + (loop * 0x10)); } if (msix_vectors) instance->msix_vectors = min(msix_vectors, instance->msix_vectors); } else instance->msix_vectors = 1; /* Don't bother allocating more MSI-X vectors than cpus */ instance->msix_vectors = min(instance->msix_vectors, (unsigned int)num_online_cpus()); for (i = 0; i < instance->msix_vectors; i++) instance->msixentry[i].entry = i; i = pci_enable_msix(instance->pdev, instance->msixentry, instance->msix_vectors); if (i >= 0) { if (i) { if (!pci_enable_msix(instance->pdev, instance->msixentry, i)) instance->msix_vectors = i; else instance->msix_vectors = 0; } } else instance->msix_vectors = 0; dev_info(&instance->pdev->dev, "[scsi%d]: FW supports" "<%d> MSIX vector,Online CPUs: <%d>," "Current MSIX <%d>\n", instance->host->host_no, fw_msix_count, (unsigned int)num_online_cpus(), instance->msix_vectors); } /* Get operational params, sge flags, send init cmd to controller */ if (instance->instancet->init_adapter(instance)) goto fail_init_adapter; printk(KERN_ERR "megasas: INIT adapter done\n"); /** for passthrough * the following function will get the PD LIST. */ memset(instance->pd_list, 0 , (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); megasas_get_pd_list(instance); memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); if (megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) megasas_get_ld_list(instance); ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL); /* * Compute the max allowed sectors per IO: The controller info has two * limits on max sectors. Driver should use the minimum of these two. * * 1 << stripe_sz_ops.min = max sectors per strip * * Note that older firmwares ( < FW ver 30) didn't report information * to calculate max_sectors_1. So the number ended up as zero always. 
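 * tmp_sectors therefore stays zero unless the DCMD below succeeds and
 * reports usable limits, in which case the smaller of the two wins.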
*/ tmp_sectors = 0; if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) { max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * le16_to_cpu(ctrl_info->max_strips_per_io); max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size); tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2); /*Check whether controller is iMR or MR */ if (ctrl_info->memory_size) { instance->is_imr = 0; dev_info(&instance->pdev->dev, "Controller type: MR," "Memory size is: %dMB\n", le16_to_cpu(ctrl_info->memory_size)); } else { instance->is_imr = 1; dev_info(&instance->pdev->dev, "Controller type: iMR\n"); } /* OnOffProperties are converted into CPU arch*/ le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties); instance->disableOnlineCtrlReset = ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; /* adapterOperations2 are converted into CPU arch*/ le32_to_cpus((u32 *)&ctrl_info->adapterOperations2); instance->UnevenSpanSupport = ctrl_info->adapterOperations2.supportUnevenSpans; if (instance->UnevenSpanSupport) { struct fusion_context *fusion = instance->ctrl_context; dev_info(&instance->pdev->dev, "FW supports: " "UnevenSpanSupport=%x\n", instance->UnevenSpanSupport); if (MR_ValidateMapInfo(instance)) fusion->fast_path_io = 1; else fusion->fast_path_io = 0; } } instance->max_sectors_per_req = instance->max_num_sge * PAGE_SIZE / 512; if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) instance->max_sectors_per_req = tmp_sectors; kfree(ctrl_info); /* Check for valid throttlequeuedepth module parameter */ if (instance->is_imr) { if (throttlequeuedepth > (instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS)) instance->throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH; else instance->throttlequeuedepth = throttlequeuedepth; } else { if (throttlequeuedepth > (instance->max_fw_cmds - MEGASAS_INT_CMDS)) instance->throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH; else instance->throttlequeuedepth = throttlequeuedepth; } /* * Setup tasklet for cmd completion */ tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, (unsigned long)instance); return 0; fail_init_adapter: fail_ready_state: iounmap(instance->reg_set); fail_ioremap: pci_release_selected_regions(instance->pdev, instance->bar); return -EINVAL; } /** * megasas_release_mfi - Reverses the FW initialization * @intance: Adapter soft state */ static void megasas_release_mfi(struct megasas_instance *instance) { u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1); if (instance->reply_queue) pci_free_consistent(instance->pdev, reply_q_sz, instance->reply_queue, instance->reply_queue_h); megasas_free_cmds(instance); iounmap(instance->reg_set); pci_release_selected_regions(instance->pdev, instance->bar); } /** * megasas_get_seq_num - Gets latest event sequence numbers * @instance: Adapter soft state * @eli: FW event log sequence numbers information * * FW maintains a log of all events in a non-volatile area. Upper layers would * usually find out the latest sequence number of the events, the seq number at * the boot etc. They would "read" all the events below the latest seq number * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq * number), they would subsribe to AEN (asynchronous event notification) and * wait for the events to happen. 
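 * Typical flow: fetch newest_seq_num with this DCMD, consume the
 * backlog, then register an AEN starting at newest_seq_num + 1 (this
 * is exactly what megasas_start_aen() does below).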
*/ static int megasas_get_seq_num(struct megasas_instance *instance, struct megasas_evt_log_info *eli) { struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; struct megasas_evt_log_info *el_info; dma_addr_t el_info_h = 0; cmd = megasas_get_cmd(instance); if (!cmd) { return -ENOMEM; } dcmd = &cmd->frame->dcmd; el_info = pci_alloc_consistent(instance->pdev, sizeof(struct megasas_evt_log_info), &el_info_h); if (!el_info) { megasas_return_cmd(instance, cmd); return -ENOMEM; } memset(el_info, 0, sizeof(*el_info)); memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 1; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info)); dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO); dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h); dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info)); megasas_issue_blocked_cmd(instance, cmd); /* * Copy the data back into callers buffer */ eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num); eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num); eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num); eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num); eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num); pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info), el_info, el_info_h); megasas_return_cmd(instance, cmd); return 0; } /** * megasas_register_aen - Registers for asynchronous event notification * @instance: Adapter soft state * @seq_num: The starting sequence number * @class_locale: Class of the event * * This function subscribes for AEN for events beyond the @seq_num. It requests * to be notified if and only if the event is of type @class_locale */ static int megasas_register_aen(struct megasas_instance *instance, u32 seq_num, u32 class_locale_word) { int ret_val; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; union megasas_evt_class_locale curr_aen; union megasas_evt_class_locale prev_aen; /* * If there an AEN pending already (aen_cmd), check if the * class_locale of that pending AEN is inclusive of the new * AEN request we currently have. If it is, then we don't have * to do anything. In other words, whichever events the current * AEN request is subscribing to, have already been subscribed * to. * * If the old_cmd is _not_ inclusive, then we have to abort * that command, form a class_locale that is superset of both * old and current and re-issue to the FW */ curr_aen.word = class_locale_word; if (instance->aen_cmd) { prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1]; prev_aen.members.locale = le16_to_cpu(prev_aen.members.locale); /* * A class whose enum value is smaller is inclusive of all * higher values. If a PROGRESS (= -1) was previously * registered, then a new registration requests for higher * classes need not be sent to FW. They are automatically * included. * * Locale numbers don't have such hierarchy. They are bitmap * values */ if ((prev_aen.members.class <= curr_aen.members.class) && !((le16_to_cpu(prev_aen.members.locale) & curr_aen.members.locale) ^ curr_aen.members.locale)) { /* * Previously issued event registration includes * current request. Nothing to do. 
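			 * (For instance, a pending registration with locale
			 * MR_EVT_LOCALE_ALL and the lowest class value
			 * already covers any narrower class/locale
			 * combination.)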
*/ return 0; } else { curr_aen.members.locale |= le16_to_cpu(prev_aen.members.locale); if (prev_aen.members.class < curr_aen.members.class) curr_aen.members.class = prev_aen.members.class; instance->aen_cmd->abort_aen = 1; ret_val = megasas_issue_blocked_abort_cmd(instance, instance-> aen_cmd); if (ret_val) { printk(KERN_DEBUG "megasas: Failed to abort " "previous AEN command\n"); return ret_val; } } } cmd = megasas_get_cmd(instance); if (!cmd) return -ENOMEM; dcmd = &cmd->frame->dcmd; memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail)); /* * Prepare DCMD for aen registration */ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 1; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail)); dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT); dcmd->mbox.w[0] = cpu_to_le32(seq_num); instance->last_seq_num = seq_num; dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word); dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h); dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail)); if (instance->aen_cmd != NULL) { megasas_return_cmd(instance, cmd); return 0; } /* * Store reference to the cmd used to register for AEN. When an * application wants us to register for AEN, we have to abort this * cmd and re-register with a new EVENT LOCALE supplied by that app */ instance->aen_cmd = cmd; /* * Issue the aen registration frame */ instance->instancet->issue_dcmd(instance, cmd); return 0; } /** * megasas_start_aen - Subscribes to AEN during driver load time * @instance: Adapter soft state */ static int megasas_start_aen(struct megasas_instance *instance) { struct megasas_evt_log_info eli; union megasas_evt_class_locale class_locale; /* * Get the latest sequence number from FW */ memset(&eli, 0, sizeof(eli)); if (megasas_get_seq_num(instance, &eli)) return -1; /* * Register AEN with FW for latest sequence number plus 1 */ class_locale.members.reserved = 0; class_locale.members.locale = MR_EVT_LOCALE_ALL; class_locale.members.class = MR_EVT_CLASS_DEBUG; return megasas_register_aen(instance, le32_to_cpu(eli.newest_seq_num) + 1, class_locale.word); } /** * megasas_io_attach - Attaches this driver to SCSI mid-layer * @instance: Adapter soft state */ static int megasas_io_attach(struct megasas_instance *instance) { struct Scsi_Host *host = instance->host; /* * Export parameters required by SCSI mid-layer */ host->irq = instance->pdev->irq; host->unique_id = instance->unique_id; if (instance->is_imr) { host->can_queue = instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS; } else host->can_queue = instance->max_fw_cmds - MEGASAS_INT_CMDS; host->this_id = instance->init_id; host->sg_tablesize = instance->max_num_sge; if (instance->fw_support_ieee) instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE; /* * Check if the module parameter value for max_sectors can be used */ if (max_sectors && max_sectors < instance->max_sectors_per_req) instance->max_sectors_per_req = max_sectors; else { if (max_sectors) { if (((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1078GEN2) || (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0079GEN2)) && (max_sectors <= MEGASAS_MAX_SECTORS)) { instance->max_sectors_per_req = max_sectors; } else { printk(KERN_INFO "megasas: max_sectors should be > 0" "and <= %d (or < 1MB for GEN2 controller)\n", instance->max_sectors_per_req); } } } host->max_sectors = instance->max_sectors_per_req; host->cmd_per_lun = 
MEGASAS_DEFAULT_CMD_PER_LUN; host->max_channel = MEGASAS_MAX_CHANNELS - 1; host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL; host->max_lun = MEGASAS_MAX_LUN; host->max_cmd_len = 16; /* Fusion only supports host reset */ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { host->hostt->eh_device_reset_handler = NULL; host->hostt->eh_bus_reset_handler = NULL; } /* * Notify the mid-layer about the new controller */ if (scsi_add_host(host, &instance->pdev->dev)) { printk(KERN_DEBUG "megasas: scsi_add_host failed\n"); return -ENODEV; } /* * Trigger SCSI to scan our drives */ scsi_scan_host(host); return 0; } static int megasas_set_dma_mask(struct pci_dev *pdev) { /* * All our contollers are capable of performing 64-bit DMA */ if (IS_DMA64) { if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) goto fail_set_dma_mask; } } else { if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) goto fail_set_dma_mask; } return 0; fail_set_dma_mask: return 1; } /** * megasas_probe_one - PCI hotplug entry point * @pdev: PCI device structure * @id: PCI ids of supported hotplugged adapter */ static int megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) { int rval, pos, i, j; struct Scsi_Host *host; struct megasas_instance *instance; u16 control = 0; /* Reset MSI-X in the kdump kernel */ if (reset_devices) { pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); if (pos) { pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &control); if (control & PCI_MSIX_FLAGS_ENABLE) { dev_info(&pdev->dev, "resetting MSI-X\n"); pci_write_config_word(pdev, pos + PCI_MSIX_FLAGS, control & ~PCI_MSIX_FLAGS_ENABLE); } } } /* * Announce PCI information */ printk(KERN_INFO "megasas: %#4.04x:%#4.04x:%#4.04x:%#4.04x: ", pdev->vendor, pdev->device, pdev->subsystem_vendor, pdev->subsystem_device); printk("bus %d:slot %d:func %d\n", pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); /* * PCI prepping: enable device set bus mastering and dma mask */ rval = pci_enable_device_mem(pdev); if (rval) { return rval; } pci_set_master(pdev); if (megasas_set_dma_mask(pdev)) goto fail_set_dma_mask; host = scsi_host_alloc(&megasas_template, sizeof(struct megasas_instance)); if (!host) { printk(KERN_DEBUG "megasas: scsi_host_alloc failed\n"); goto fail_alloc_instance; } instance = (struct megasas_instance *)host->hostdata; memset(instance, 0, sizeof(*instance)); atomic_set( &instance->fw_reset_no_pci_access, 0 ); instance->pdev = pdev; switch (instance->pdev->device) { case PCI_DEVICE_ID_LSI_FUSION: case PCI_DEVICE_ID_LSI_INVADER: case PCI_DEVICE_ID_LSI_FURY: { struct fusion_context *fusion; instance->ctrl_context = kzalloc(sizeof(struct fusion_context), GFP_KERNEL); if (!instance->ctrl_context) { printk(KERN_DEBUG "megasas: Failed to allocate " "memory for Fusion context info\n"); goto fail_alloc_dma_buf; } fusion = instance->ctrl_context; INIT_LIST_HEAD(&fusion->cmd_pool); spin_lock_init(&fusion->cmd_pool_lock); } break; default: /* For all other supported controllers */ instance->producer = pci_alloc_consistent(pdev, sizeof(u32), &instance->producer_h); instance->consumer = pci_alloc_consistent(pdev, sizeof(u32), &instance->consumer_h); if (!instance->producer || !instance->consumer) { printk(KERN_DEBUG "megasas: Failed to allocate" "memory for producer, consumer\n"); goto fail_alloc_dma_buf; } *instance->producer = 0; *instance->consumer = 0; break; } 
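	/* Common soft-state defaults, shared by the MFI and Fusion paths */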
megasas_poll_wait_aen = 0; instance->flag_ieee = 0; instance->ev = NULL; instance->issuepend_done = 1; instance->adprecovery = MEGASAS_HBA_OPERATIONAL; instance->is_imr = 0; megasas_poll_wait_aen = 0; instance->evt_detail = pci_alloc_consistent(pdev, sizeof(struct megasas_evt_detail), &instance->evt_detail_h); if (!instance->evt_detail) { printk(KERN_DEBUG "megasas: Failed to allocate memory for " "event detail structure\n"); goto fail_alloc_dma_buf; } /* * Initialize locks and queues */ INIT_LIST_HEAD(&instance->cmd_pool); INIT_LIST_HEAD(&instance->internal_reset_pending_q); atomic_set(&instance->fw_outstanding,0); init_waitqueue_head(&instance->int_cmd_wait_q); init_waitqueue_head(&instance->abort_cmd_wait_q); spin_lock_init(&instance->cmd_pool_lock); spin_lock_init(&instance->hba_lock); spin_lock_init(&instance->completion_lock); mutex_init(&instance->aen_mutex); mutex_init(&instance->reset_mutex); /* * Initialize PCI related and misc parameters */ instance->host = host; instance->unique_id = pdev->bus->number << 8 | pdev->devfn; instance->init_id = MEGASAS_DEFAULT_INIT_ID; if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { instance->flag_ieee = 1; sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); } else sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS); megasas_dbg_lvl = 0; instance->flag = 0; instance->unload = 1; instance->last_time = 0; instance->disableOnlineCtrlReset = 1; instance->UnevenSpanSupport = 0; if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); else INIT_WORK(&instance->work_init, process_fw_state_change_wq); /* * Initialize MFI Firmware */ if (megasas_init_fw(instance)) goto fail_init_mfi; retry_irq_register: /* * Register IRQ */ if (instance->msix_vectors) { for (i = 0 ; i < instance->msix_vectors; i++) { instance->irq_context[i].instance = instance; instance->irq_context[i].MSIxIndex = i; if (request_irq(instance->msixentry[i].vector, instance->instancet->service_isr, 0, "megasas", &instance->irq_context[i])) { printk(KERN_DEBUG "megasas: Failed to " "register IRQ for vector %d.\n", i); for (j = 0 ; j < i ; j++) free_irq( instance->msixentry[j].vector, &instance->irq_context[j]); /* Retry irq register for IO_APIC */ instance->msix_vectors = 0; goto retry_irq_register; } } } else { instance->irq_context[0].instance = instance; instance->irq_context[0].MSIxIndex = 0; if (request_irq(pdev->irq, instance->instancet->service_isr, IRQF_SHARED, "megasas", &instance->irq_context[0])) { printk(KERN_DEBUG "megasas: Failed to register IRQ\n"); goto fail_irq; } } instance->instancet->enable_intr(instance); /* * Store instance in PCI softstate */ pci_set_drvdata(pdev, instance); /* * Add this controller to megasas_mgmt_info structure so that it * can be exported to management applications */ megasas_mgmt_info.count++; megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance; megasas_mgmt_info.max_index++; /* * Register with SCSI mid-layer */ if (megasas_io_attach(instance)) goto fail_io_attach; instance->unload = 0; /* * Initiate AEN (Asynchronous Event Notification) */ if (megasas_start_aen(instance)) { printk(KERN_DEBUG "megasas: start aen failed\n"); goto fail_start_aen; } return 0; fail_start_aen: fail_io_attach: megasas_mgmt_info.count--; megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; 
megasas_mgmt_info.max_index--; pci_set_drvdata(pdev, NULL); instance->instancet->disable_intr(instance); if (instance->msix_vectors) for (i = 0 ; i < instance->msix_vectors; i++) free_irq(instance->msixentry[i].vector, &instance->irq_context[i]); else free_irq(instance->pdev->irq, &instance->irq_context[0]); fail_irq: if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) megasas_release_fusion(instance); else megasas_release_mfi(instance); fail_init_mfi: if (instance->msix_vectors) pci_disable_msix(instance->pdev); fail_alloc_dma_buf: if (instance->evt_detail) pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), instance->evt_detail, instance->evt_detail_h); if (instance->producer) pci_free_consistent(pdev, sizeof(u32), instance->producer, instance->producer_h); if (instance->consumer) pci_free_consistent(pdev, sizeof(u32), instance->consumer, instance->consumer_h); scsi_host_put(host); fail_alloc_instance: fail_set_dma_mask: pci_disable_device(pdev); return -ENODEV; } /** * megasas_flush_cache - Requests FW to flush all its caches * @instance: Adapter soft state */ static void megasas_flush_cache(struct megasas_instance *instance) { struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) return; cmd = megasas_get_cmd(instance); if (!cmd) return; dcmd = &cmd->frame->dcmd; memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 0; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = 0; dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH); dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; megasas_issue_blocked_cmd(instance, cmd); megasas_return_cmd(instance, cmd); return; } /** * megasas_shutdown_controller - Instructs FW to shutdown the controller * @instance: Adapter soft state * @opcode: Shutdown/Hibernate */ static void megasas_shutdown_controller(struct megasas_instance *instance, u32 opcode) { struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) return; cmd = megasas_get_cmd(instance); if (!cmd) return; if (instance->aen_cmd) megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd); if (instance->map_update_cmd) megasas_issue_blocked_abort_cmd(instance, instance->map_update_cmd); dcmd = &cmd->frame->dcmd; memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 0; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = 0; dcmd->opcode = cpu_to_le32(opcode); megasas_issue_blocked_cmd(instance, cmd); megasas_return_cmd(instance, cmd); return; } #ifdef CONFIG_PM /** * megasas_suspend - driver suspend entry point * @pdev: PCI device structure * @state: PCI power state to suspend routine */ static int megasas_suspend(struct pci_dev *pdev, pm_message_t state) { struct Scsi_Host *host; struct megasas_instance *instance; int i; instance = pci_get_drvdata(pdev); host = instance->host; instance->unload = 1; megasas_flush_cache(instance); megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); /* cancel the delayed work if this work still in queue */ if (instance->ev != NULL) { struct megasas_aen_event *ev = instance->ev; cancel_delayed_work_sync(&ev->hotplug_work); instance->ev = NULL; } tasklet_kill(&instance->isr_tasklet); 
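	/* I/O is quiesced; tear down interrupts before powering down */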
pci_set_drvdata(instance->pdev, instance); instance->instancet->disable_intr(instance); if (instance->msix_vectors) for (i = 0 ; i < instance->msix_vectors; i++) free_irq(instance->msixentry[i].vector, &instance->irq_context[i]); else free_irq(instance->pdev->irq, &instance->irq_context[0]); if (instance->msix_vectors) pci_disable_msix(instance->pdev); pci_save_state(pdev); pci_disable_device(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } /** * megasas_resume- driver resume entry point * @pdev: PCI device structure */ static int megasas_resume(struct pci_dev *pdev) { int rval, i, j; struct Scsi_Host *host; struct megasas_instance *instance; instance = pci_get_drvdata(pdev); host = instance->host; pci_set_power_state(pdev, PCI_D0); pci_enable_wake(pdev, PCI_D0, 0); pci_restore_state(pdev); /* * PCI prepping: enable device set bus mastering and dma mask */ rval = pci_enable_device_mem(pdev); if (rval) { printk(KERN_ERR "megasas: Enable device failed\n"); return rval; } pci_set_master(pdev); if (megasas_set_dma_mask(pdev)) goto fail_set_dma_mask; /* * Initialize MFI Firmware */ atomic_set(&instance->fw_outstanding, 0); /* * We expect the FW state to be READY */ if (megasas_transition_to_ready(instance, 0)) goto fail_ready_state; /* Now re-enable MSI-X */ if (instance->msix_vectors) pci_enable_msix(instance->pdev, instance->msixentry, instance->msix_vectors); switch (instance->pdev->device) { case PCI_DEVICE_ID_LSI_FUSION: case PCI_DEVICE_ID_LSI_INVADER: case PCI_DEVICE_ID_LSI_FURY: { megasas_reset_reply_desc(instance); if (megasas_ioc_init_fusion(instance)) { megasas_free_cmds(instance); megasas_free_cmds_fusion(instance); goto fail_init_mfi; } if (!megasas_get_map_info(instance)) megasas_sync_map_info(instance); } break; default: *instance->producer = 0; *instance->consumer = 0; if (megasas_issue_init_mfi(instance)) goto fail_init_mfi; break; } tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, (unsigned long)instance); /* * Register IRQ */ if (instance->msix_vectors) { for (i = 0 ; i < instance->msix_vectors; i++) { instance->irq_context[i].instance = instance; instance->irq_context[i].MSIxIndex = i; if (request_irq(instance->msixentry[i].vector, instance->instancet->service_isr, 0, "megasas", &instance->irq_context[i])) { printk(KERN_DEBUG "megasas: Failed to " "register IRQ for vector %d.\n", i); for (j = 0 ; j < i ; j++) free_irq( instance->msixentry[j].vector, &instance->irq_context[j]); goto fail_irq; } } } else { instance->irq_context[0].instance = instance; instance->irq_context[0].MSIxIndex = 0; if (request_irq(pdev->irq, instance->instancet->service_isr, IRQF_SHARED, "megasas", &instance->irq_context[0])) { printk(KERN_DEBUG "megasas: Failed to register IRQ\n"); goto fail_irq; } } instance->instancet->enable_intr(instance); instance->unload = 0; /* * Initiate AEN (Asynchronous Event Notification) */ if (megasas_start_aen(instance)) printk(KERN_ERR "megasas: Start AEN failed\n"); return 0; fail_irq: fail_init_mfi: if (instance->evt_detail) pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), instance->evt_detail, instance->evt_detail_h); if (instance->producer) pci_free_consistent(pdev, sizeof(u32), instance->producer, instance->producer_h); if (instance->consumer) pci_free_consistent(pdev, sizeof(u32), instance->consumer, instance->consumer_h); scsi_host_put(host); fail_set_dma_mask: fail_ready_state: pci_disable_device(pdev); return -ENODEV; } #else #define megasas_suspend NULL #define megasas_resume NULL #endif /** * 
megasas_detach_one -	PCI hot-unplug entry point
 * @pdev:		PCI device structure
 */
static void megasas_detach_one(struct pci_dev *pdev)
{
	int i;
	struct Scsi_Host *host;
	struct megasas_instance *instance;
	struct fusion_context *fusion;

	instance = pci_get_drvdata(pdev);
	instance->unload = 1;
	host = instance->host;
	fusion = instance->ctrl_context;

	scsi_remove_host(instance->host);
	megasas_flush_cache(instance);
	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);

	/* cancel the delayed work if it is still queued */
	if (instance->ev != NULL) {
		struct megasas_aen_event *ev = instance->ev;
		cancel_delayed_work_sync(&ev->hotplug_work);
		instance->ev = NULL;
	}

	tasklet_kill(&instance->isr_tasklet);

	/*
	 * Take the instance off the instance array. Note that we will not
	 * decrement the max_index. We let this array be a sparse array.
	 */
	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
		if (megasas_mgmt_info.instance[i] == instance) {
			megasas_mgmt_info.count--;
			megasas_mgmt_info.instance[i] = NULL;

			break;
		}
	}

	pci_set_drvdata(instance->pdev, NULL);

	instance->instancet->disable_intr(instance);

	if (instance->msix_vectors)
		for (i = 0 ; i < instance->msix_vectors; i++)
			free_irq(instance->msixentry[i].vector,
				 &instance->irq_context[i]);
	else
		free_irq(instance->pdev->irq, &instance->irq_context[0]);
	if (instance->msix_vectors)
		pci_disable_msix(instance->pdev);

	switch (instance->pdev->device) {
	case PCI_DEVICE_ID_LSI_FUSION:
	case PCI_DEVICE_ID_LSI_INVADER:
	case PCI_DEVICE_ID_LSI_FURY:
		megasas_release_fusion(instance);
		for (i = 0; i < 2 ; i++)
			if (fusion->ld_map[i])
				dma_free_coherent(&instance->pdev->dev,
						  fusion->map_sz,
						  fusion->ld_map[i],
						  fusion->ld_map_phys[i]);
		kfree(instance->ctrl_context);
		break;
	default:
		megasas_release_mfi(instance);
		pci_free_consistent(pdev, sizeof(u32),
				    instance->producer,
				    instance->producer_h);
		pci_free_consistent(pdev, sizeof(u32),
				    instance->consumer,
				    instance->consumer_h);
		break;
	}

	if (instance->evt_detail)
		pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
				instance->evt_detail, instance->evt_detail_h);
	scsi_host_put(host);

	pci_set_drvdata(pdev, NULL);

	pci_disable_device(pdev);

	return;
}

/**
 * megasas_shutdown -	Shutdown entry point
 * @pdev:		PCI device structure
 */
static void megasas_shutdown(struct pci_dev *pdev)
{
	int i;
	struct megasas_instance *instance = pci_get_drvdata(pdev);

	instance->unload = 1;
	megasas_flush_cache(instance);
	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
	instance->instancet->disable_intr(instance);
	if (instance->msix_vectors)
		for (i = 0 ; i < instance->msix_vectors; i++)
			free_irq(instance->msixentry[i].vector,
				 &instance->irq_context[i]);
	else
		free_irq(instance->pdev->irq, &instance->irq_context[0]);
	if (instance->msix_vectors)
		pci_disable_msix(instance->pdev);
}

/**
 * megasas_mgmt_open -	char node "open" entry point
 */
static int megasas_mgmt_open(struct inode *inode, struct file *filep)
{
	/*
	 * Allow only those users with admin rights
	 */
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	return 0;
}

/**
 * megasas_mgmt_fasync -	Async notifier registration from applications
 *
 * This function adds the calling process to a driver global queue. When an
 * event occurs, SIGIO will be sent to all processes in this queue.
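 * (Userspace typically arms this by opening the megaraid_sas_ioctl node
 * and calling fcntl() with F_SETOWN and FASYNC.)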
*/ static int megasas_mgmt_fasync(int fd, struct file *filep, int mode) { int rc; mutex_lock(&megasas_async_queue_mutex); rc = fasync_helper(fd, filep, mode, &megasas_async_queue); mutex_unlock(&megasas_async_queue_mutex); if (rc >= 0) { /* For sanity check when we get ioctl */ filep->private_data = filep; return 0; } printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc); return rc; } /** * megasas_mgmt_poll - char node "poll" entry point * */ static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait) { unsigned int mask; unsigned long flags; poll_wait(file, &megasas_poll_wait, wait); spin_lock_irqsave(&poll_aen_lock, flags); if (megasas_poll_wait_aen) mask = (POLLIN | POLLRDNORM); else mask = 0; spin_unlock_irqrestore(&poll_aen_lock, flags); return mask; } /** * megasas_mgmt_fw_ioctl - Issues management ioctls to FW * @instance: Adapter soft state * @argp: User's ioctl packet */ static int megasas_mgmt_fw_ioctl(struct megasas_instance *instance, struct megasas_iocpacket __user * user_ioc, struct megasas_iocpacket *ioc) { struct megasas_sge32 *kern_sge32; struct megasas_cmd *cmd; void *kbuff_arr[MAX_IOCTL_SGE]; dma_addr_t buf_handle = 0; int error = 0, i; void *sense = NULL; dma_addr_t sense_handle; unsigned long *sense_ptr; memset(kbuff_arr, 0, sizeof(kbuff_arr)); if (ioc->sge_count > MAX_IOCTL_SGE) { printk(KERN_DEBUG "megasas: SGE count [%d] > max limit [%d]\n", ioc->sge_count, MAX_IOCTL_SGE); return -EINVAL; } cmd = megasas_get_cmd(instance); if (!cmd) { printk(KERN_DEBUG "megasas: Failed to get a cmd packet\n"); return -ENOMEM; } /* * User's IOCTL packet has 2 frames (maximum). Copy those two * frames into our cmd's frames. cmd->frame's context will get * overwritten when we copy from user's frames. So set that value * alone separately */ memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); cmd->frame->hdr.context = cpu_to_le32(cmd->index); cmd->frame->hdr.pad_0 = 0; cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 | MFI_FRAME_SENSE64)); /* * The management interface between applications and the fw uses * MFI frames. E.g, RAID configuration changes, LD property changes * etc are accomplishes through different kinds of MFI frames. The * driver needs to care only about substituting user buffers with * kernel buffers in SGLs. The location of SGL is embedded in the * struct iocpacket itself. */ kern_sge32 = (struct megasas_sge32 *) ((unsigned long)cmd->frame + ioc->sgl_off); /* * For each user buffer, create a mirror buffer and copy in */ for (i = 0; i < ioc->sge_count; i++) { if (!ioc->sgl[i].iov_len) continue; kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev, ioc->sgl[i].iov_len, &buf_handle, GFP_KERNEL); if (!kbuff_arr[i]) { printk(KERN_DEBUG "megasas: Failed to alloc " "kernel SGL buffer for IOCTL \n"); error = -ENOMEM; goto out; } /* * We don't change the dma_coherent_mask, so * pci_alloc_consistent only returns 32bit addresses */ kern_sge32[i].phys_addr = cpu_to_le32(buf_handle); kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len); /* * We created a kernel buffer corresponding to the * user buffer. 
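		 * (one bounce buffer per SGE, at most MAX_IOCTL_SGE of them).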
		 * Now copy in from the user buffer
		 */
		if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
				   (u32) (ioc->sgl[i].iov_len))) {
			error = -EFAULT;
			goto out;
		}
	}

	if (ioc->sense_len) {
		sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
					   &sense_handle, GFP_KERNEL);
		if (!sense) {
			error = -ENOMEM;
			goto out;
		}

		sense_ptr =
		(unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
		*sense_ptr = cpu_to_le32(sense_handle);
	}

	/*
	 * Set the sync_cmd flag so that the ISR knows not to complete this
	 * cmd to the SCSI mid-layer
	 */
	cmd->sync_cmd = 1;
	megasas_issue_blocked_cmd(instance, cmd);
	cmd->sync_cmd = 0;

	/*
	 * copy out the kernel buffers to user buffers
	 */
	for (i = 0; i < ioc->sge_count; i++) {
		if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
				 ioc->sgl[i].iov_len)) {
			error = -EFAULT;
			goto out;
		}
	}

	/*
	 * copy out the sense
	 */
	if (ioc->sense_len) {
		/*
		 * sense_ptr points to the location that has the user
		 * sense buffer address
		 */
		sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
				ioc->sense_off);

		if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)),
				 sense, ioc->sense_len)) {
			printk(KERN_ERR "megasas: Failed to copy out to user "
					"sense data\n");
			error = -EFAULT;
			goto out;
		}
	}

	/*
	 * copy the status codes returned by the fw
	 */
	if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
			 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
		printk(KERN_DEBUG "megasas: Error copying out cmd_status\n");
		error = -EFAULT;
	}

out:
	if (sense) {
		dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
				  sense, sense_handle);
	}

	for (i = 0; i < ioc->sge_count; i++) {
		if (kbuff_arr[i])
			dma_free_coherent(&instance->pdev->dev,
					  le32_to_cpu(kern_sge32[i].length),
					  kbuff_arr[i],
					  le32_to_cpu(kern_sge32[i].phys_addr));
	}

	megasas_return_cmd(instance, cmd);

	return error;
}

static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
{
	struct megasas_iocpacket __user *user_ioc =
	    (struct megasas_iocpacket __user *)arg;
	struct megasas_iocpacket *ioc;
	struct megasas_instance *instance;
	int error;
	int i;
	unsigned long flags;
	u32 wait_time = MEGASAS_RESET_WAIT_TIME;

	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc)
		return -ENOMEM;

	if (copy_from_user(ioc, user_ioc, sizeof(*ioc))) {
		error = -EFAULT;
		goto out_kfree_ioc;
	}

	instance = megasas_lookup_instance(ioc->host_no);
	if (!instance) {
		error = -ENODEV;
		goto out_kfree_ioc;
	}

	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
		printk(KERN_ERR "Controller in crit error\n");
		error = -ENODEV;
		goto out_kfree_ioc;
	}

	if (instance->unload == 1) {
		error = -ENODEV;
		goto out_kfree_ioc;
	}

	/*
	 * We will allow only MEGASAS_INT_CMDS number of parallel ioctl cmds
	 */
	if (down_interruptible(&instance->ioctl_sem)) {
		error = -ERESTARTSYS;
		goto out_kfree_ioc;
	}

	for (i = 0; i < wait_time; i++) {

		spin_lock_irqsave(&instance->hba_lock, flags);
		if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
			spin_unlock_irqrestore(&instance->hba_lock, flags);
			break;
		}
		spin_unlock_irqrestore(&instance->hba_lock, flags);

		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
			printk(KERN_NOTICE "megasas: waiting "
				"for controller reset to finish\n");
		}

		msleep(1000);
	}

	spin_lock_irqsave(&instance->hba_lock, flags);
	if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
		spin_unlock_irqrestore(&instance->hba_lock, flags);

		printk(KERN_ERR "megaraid_sas: timed out while "
			"waiting for HBA to recover\n");
		error = -ENODEV;
		goto out_up;
	}
	spin_unlock_irqrestore(&instance->hba_lock, flags);

	error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);

out_up:
	up(&instance->ioctl_sem);

out_kfree_ioc:
	kfree(ioc);
	return error;
}

static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
{
	struct megasas_instance *instance;
	struct megasas_aen aen;
	int error;
	int i;
	unsigned long flags;
	u32 wait_time = MEGASAS_RESET_WAIT_TIME;

	if (file->private_data != file) {
		printk(KERN_DEBUG "megasas: fasync_helper was not "
		       "called first\n");
		return -EINVAL;
	}

	if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
		return -EFAULT;

	instance = megasas_lookup_instance(aen.host_no);

	if (!instance)
		return -ENODEV;

	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
		return -ENODEV;

	if (instance->unload == 1)
		return -ENODEV;

	for (i = 0; i < wait_time; i++) {

		spin_lock_irqsave(&instance->hba_lock, flags);
		if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
			spin_unlock_irqrestore(&instance->hba_lock, flags);
			break;
		}

		spin_unlock_irqrestore(&instance->hba_lock, flags);

		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
			printk(KERN_NOTICE "megasas: waiting for "
				"controller reset to finish\n");
		}

		msleep(1000);
	}

	spin_lock_irqsave(&instance->hba_lock, flags);
	if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
		spin_unlock_irqrestore(&instance->hba_lock, flags);
		printk(KERN_ERR "megaraid_sas: timed out while waiting "
				"for HBA to recover.\n");
		return -ENODEV;
	}
	spin_unlock_irqrestore(&instance->hba_lock, flags);

	mutex_lock(&instance->aen_mutex);
	error = megasas_register_aen(instance, aen.seq_num,
				     aen.class_locale_word);
	mutex_unlock(&instance->aen_mutex);

	return error;
}

/**
 * megasas_mgmt_ioctl -	char node ioctl entry point
 */
static long
megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case MEGASAS_IOC_FIRMWARE:
		return megasas_mgmt_ioctl_fw(file, arg);

	case MEGASAS_IOC_GET_AEN:
		return megasas_mgmt_ioctl_aen(file, arg);
	}

	return -ENOTTY;
}

#ifdef CONFIG_COMPAT
static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
{
	struct compat_megasas_iocpacket __user *cioc =
	    (struct compat_megasas_iocpacket __user *)arg;
	struct megasas_iocpacket __user *ioc =
	    compat_alloc_user_space(sizeof(struct megasas_iocpacket));
	int i;
	int error = 0;
	compat_uptr_t ptr;

	if (clear_user(ioc, sizeof(*ioc)))
		return -EFAULT;

	if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
	    copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
	    copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
	    copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
	    copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
	    copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
		return -EFAULT;

	/*
	 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
	 * sense_len is not null, so prepare the 64-bit value under
	 * the same condition.
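	 * (compat_uptr_t is only 32 bits wide; compat_ptr() widens it back
	 * into a full user pointer before it is stored in the 64-bit
	 * layout.)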
 */
	if (ioc->sense_len) {
		void __user **sense_ioc_ptr =
			(void __user **)(ioc->frame.raw + ioc->sense_off);
		compat_uptr_t *sense_cioc_ptr =
			(compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
		if (get_user(ptr, sense_cioc_ptr) ||
		    put_user(compat_ptr(ptr), sense_ioc_ptr))
			return -EFAULT;
	}

	for (i = 0; i < MAX_IOCTL_SGE; i++) {
		if (get_user(ptr, &cioc->sgl[i].iov_base) ||
		    put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
		    copy_in_user(&ioc->sgl[i].iov_len,
				 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
			return -EFAULT;
	}

	error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);

	if (copy_in_user(&cioc->frame.hdr.cmd_status,
			 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
		printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
		return -EFAULT;
	}
	return error;
}

static long
megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	switch (cmd) {
	case MEGASAS_IOC_FIRMWARE32:
		return megasas_mgmt_compat_ioctl_fw(file, arg);
	case MEGASAS_IOC_GET_AEN:
		return megasas_mgmt_ioctl_aen(file, arg);
	}

	return -ENOTTY;
}
#endif

/*
 * File operations structure for management interface
 */
static const struct file_operations megasas_mgmt_fops = {
	.owner = THIS_MODULE,
	.open = megasas_mgmt_open,
	.fasync = megasas_mgmt_fasync,
	.unlocked_ioctl = megasas_mgmt_ioctl,
	.poll = megasas_mgmt_poll,
#ifdef CONFIG_COMPAT
	.compat_ioctl = megasas_mgmt_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

/*
 * PCI hotplug support registration structure
 */
static struct pci_driver megasas_pci_driver = {
	.name = "megaraid_sas",
	.id_table = megasas_pci_table,
	.probe = megasas_probe_one,
	.remove = megasas_detach_one,
	.suspend = megasas_suspend,
	.resume = megasas_resume,
	.shutdown = megasas_shutdown,
};

/*
 * Sysfs driver attributes
 */
static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf)
{
	return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
			MEGASAS_VERSION);
}

static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL);

static ssize_t
megasas_sysfs_show_release_date(struct device_driver *dd, char *buf)
{
	return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
			MEGASAS_RELDATE);
}

static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date,
		   NULL);

static ssize_t
megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf)
{
	return sprintf(buf, "%u\n", support_poll_for_event);
}

static DRIVER_ATTR(support_poll_for_event, S_IRUGO,
		   megasas_sysfs_show_support_poll_for_event, NULL);

static ssize_t
megasas_sysfs_show_support_device_change(struct device_driver *dd, char *buf)
{
	return sprintf(buf, "%u\n", support_device_change);
}

static DRIVER_ATTR(support_device_change, S_IRUGO,
		   megasas_sysfs_show_support_device_change, NULL);

static ssize_t
megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf)
{
	return sprintf(buf, "%u\n", megasas_dbg_lvl);
}

static ssize_t
megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf,
			  size_t count)
{
	int retval = count;

	if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
		printk(KERN_ERR "megasas: could not set dbg_lvl\n");
		retval = -EINVAL;
	}
	return retval;
}

static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
		   megasas_sysfs_set_dbg_lvl);

static void
megasas_aen_polling(struct work_struct *work)
{
	struct megasas_aen_event *ev =
		container_of(work, struct megasas_aen_event, hotplug_work.work);
	struct megasas_instance *instance = ev->instance;
	union megasas_evt_class_locale class_locale;
	struct Scsi_Host *host;
	struct scsi_device *sdev1;
	u16 pd_index = 0;
	u16 ld_index = 0;
	int i, j, doscan = 0;
	u32 seq_num;
	int error;

	if (!instance) {
		printk(KERN_ERR "invalid instance!\n");
		kfree(ev);
		return;
	}
	instance->ev = NULL;
	host = instance->host;

	if (instance->evt_detail) {
		switch (le32_to_cpu(instance->evt_detail->code)) {
		case MR_EVT_PD_INSERTED:
			if (megasas_get_pd_list(instance) == 0) {
				for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
					for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
						pd_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
						sdev1 = scsi_device_lookup(host, i, j, 0);
						if (instance->pd_list[pd_index].driveState ==
						    MR_PD_STATE_SYSTEM) {
							if (!sdev1)
								scsi_add_device(host, i, j, 0);
							if (sdev1)
								scsi_device_put(sdev1);
						}
					}
				}
			}
			doscan = 0;
			break;

		case MR_EVT_PD_REMOVED:
			if (megasas_get_pd_list(instance) == 0) {
				for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
					for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
						pd_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
						sdev1 = scsi_device_lookup(host, i, j, 0);
						if (instance->pd_list[pd_index].driveState ==
						    MR_PD_STATE_SYSTEM) {
							if (sdev1)
								scsi_device_put(sdev1);
						} else {
							if (sdev1) {
								scsi_remove_device(sdev1);
								scsi_device_put(sdev1);
							}
						}
					}
				}
			}
			doscan = 0;
			break;

		case MR_EVT_LD_OFFLINE:
		case MR_EVT_CFG_CLEARED:
		case MR_EVT_LD_DELETED:
			if (megasas_ld_list_query(instance,
						  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
				megasas_get_ld_list(instance);
			for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
				for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
					ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
					sdev1 = scsi_device_lookup(host,
								   MEGASAS_MAX_PD_CHANNELS + i,
								   j, 0);
					if (instance->ld_ids[ld_index] != 0xff) {
						if (sdev1)
							scsi_device_put(sdev1);
					} else {
						if (sdev1) {
							scsi_remove_device(sdev1);
							scsi_device_put(sdev1);
						}
					}
				}
			}
			doscan = 0;
			break;

		case MR_EVT_LD_CREATED:
			if (megasas_ld_list_query(instance,
						  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
				megasas_get_ld_list(instance);
			for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
				for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
					ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
					sdev1 = scsi_device_lookup(host,
								   MEGASAS_MAX_PD_CHANNELS + i,
								   j, 0);
					if (instance->ld_ids[ld_index] != 0xff) {
						if (!sdev1)
							scsi_add_device(host,
									MEGASAS_MAX_PD_CHANNELS + i,
									j, 0);
					}
					if (sdev1)
						scsi_device_put(sdev1);
				}
			}
			doscan = 0;
			break;

		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
		case MR_EVT_FOREIGN_CFG_IMPORTED:
		case MR_EVT_LD_STATE_CHANGE:
			doscan = 1;
			break;

		default:
			doscan = 0;
			break;
		}
	} else {
		printk(KERN_ERR "invalid evt_detail!\n");
		kfree(ev);
		return;
	}

	if (doscan) {
		printk(KERN_INFO "scanning ...\n");
		megasas_get_pd_list(instance);
		for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
				pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j;
				sdev1 = scsi_device_lookup(host, i, j, 0);
				if (instance->pd_list[pd_index].driveState ==
				    MR_PD_STATE_SYSTEM) {
					if (!sdev1)
						scsi_add_device(host, i, j, 0);
					if (sdev1)
						scsi_device_put(sdev1);
				} else {
					if (sdev1) {
						scsi_remove_device(sdev1);
						scsi_device_put(sdev1);
					}
				}
			}
		}

		if (megasas_ld_list_query(instance,
					  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
			megasas_get_ld_list(instance);
		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
				ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
				sdev1 = scsi_device_lookup(host,
							   MEGASAS_MAX_PD_CHANNELS + i,
							   j, 0);
				if (instance->ld_ids[ld_index] != 0xff) {
					if (!sdev1)
						scsi_add_device(host,
								MEGASAS_MAX_PD_CHANNELS + i,
								j, 0);
					else
						scsi_device_put(sdev1);
				} else {
					if (sdev1) {
						scsi_remove_device(sdev1);
						scsi_device_put(sdev1);
					}
				}
			}
		}
	}

	if (instance->aen_cmd != NULL) {
		kfree(ev);
		return;
	}

	seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;

	/* Register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;
	mutex_lock(&instance->aen_mutex);
	error = megasas_register_aen(instance, seq_num, class_locale.word);
	mutex_unlock(&instance->aen_mutex);

	if (error)
		printk(KERN_ERR "register aen failed error %x\n", error);

	kfree(ev);
}

/**
 * megasas_init - Driver load entry point
 */
static int __init megasas_init(void)
{
	int rval;

	/*
	 * Announce driver version and other information
	 */
	printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION,
	       MEGASAS_EXT_VERSION);

	spin_lock_init(&poll_aen_lock);

	support_poll_for_event = 2;
	support_device_change = 1;

	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));

	/*
	 * Register character device node
	 */
	rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);

	if (rval < 0) {
		printk(KERN_DEBUG "megasas: failed to open device node\n");
		return rval;
	}

	megasas_mgmt_majorno = rval;

	/*
	 * Register ourselves as PCI hotplug module
	 */
	rval = pci_register_driver(&megasas_pci_driver);

	if (rval) {
		printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
		goto err_pcidrv;
	}

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_version);
	if (rval)
		goto err_dcf_attr_ver;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_release_date);
	if (rval)
		goto err_dcf_rel_date;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_poll_for_event);
	if (rval)
		goto err_dcf_support_poll_for_event;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_dbg_lvl);
	if (rval)
		goto err_dcf_dbg_lvl;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_device_change);
	if (rval)
		goto err_dcf_support_device_change;

	return rval;

err_dcf_support_device_change:
	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_dbg_lvl);
err_dcf_dbg_lvl:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_poll_for_event);
err_dcf_support_poll_for_event:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_release_date);
err_dcf_rel_date:
	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
err_dcf_attr_ver:
	pci_unregister_driver(&megasas_pci_driver);
err_pcidrv:
	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
	return rval;
}

/**
 * megasas_exit - Driver unload entry point
 */
static void __exit megasas_exit(void)
{
	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_dbg_lvl);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_poll_for_event);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_device_change);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_release_date);
	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);

	pci_unregister_driver(&megasas_pci_driver);
	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
}

module_init(megasas_init);
module_exit(megasas_exit);
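/*
 * Illustrative sketch (not part of the original driver): megasas_init()
 * above uses the classic goto-based error-unwind ladder -- resources are
 * acquired in order and, on failure, released in reverse order through a
 * chain of labels. The same pattern shown as a minimal, self-contained
 * user-space C program; all names here (acquire_a, release_a, etc.) are
 * hypothetical stand-ins for the real register/unregister calls.
 */
#include <stdio.h>
#include <stdlib.h>

static int acquire_a(void) { return 0; }  /* 0 on success, like register_chrdev() */
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return -1; } /* simulate a late failure */
static void release_a(void) { puts("release a"); }
static void release_b(void) { puts("release b"); }

static int init_all(void)
{
	int rval;

	rval = acquire_a();
	if (rval)
		return rval;        /* nothing to unwind yet */

	rval = acquire_b();
	if (rval)
		goto err_a;         /* undo only what succeeded */

	rval = acquire_c();
	if (rval)
		goto err_b;

	return 0;                   /* everything acquired */

err_b:
	release_b();                /* reverse order of acquisition */
err_a:
	release_a();
	return rval;
}

int main(void)
{
	return init_all() ? EXIT_FAILURE : EXIT_SUCCESS;
}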
gpl-2.0
tectronics/force-distribution-analysis.gromacs-4-5-3
src/gmxlib/gmx_lapack/dgebd2.c
6
2053
#include "gmx_lapack.h" void F77_FUNC(dgebd2,DGEBD2)(int *m, int *n, double *a, int *lda, double *d, double *e, double *tauq, double *taup, double *work, int *info) { int i,i1,i2,i3; *info = 0; if(*m>=*n) { /* reduce to upper bidiag. form */ for(i=0;i<*n;i++) { i1 = *m - i; i2 = ( (i+1) < (*m-1)) ? (i+1) : (*m-1); i3 = 1; F77_FUNC(dlarfg,DLARFG)(&i1,&(a[i*(*lda)+i]),&(a[i*(*lda)+i2]),&i3,&(tauq[i])); d[i] = a[i*(*lda)+i]; a[i*(*lda)+i] = 1.0; i2 = *n - i - 1; F77_FUNC(dlarf,DLARF)("L",&i1,&i2,&(a[i*(*lda)+i]),&i3,&(tauq[i]),&(a[(i+1)*(*lda)+i]),lda,work); a[i*(*lda)+i] = d[i]; if(i<(*n-1)) { i1 = *n - i -1; i2 = ( (i+2) < (*n-1)) ? (i+2) : (*n-1); F77_FUNC(dlarfg,DLARFG)(&i1,&(a[(i+1)*(*lda)+i]),&(a[i2*(*lda)+i]),lda,&(taup[i])); e[i] = a[(i+1)*(*lda)+i]; a[(i+1)*(*lda)+i] = 1.0; i1 = *m - i - 1; i2 = *n - i - 1; F77_FUNC(dlarf,DLARF)("R",&i1,&i2,&(a[(i+1)*(*lda)+i]),lda,&(taup[i]),&(a[(i+1)*(*lda)+i+1]),lda,work); a[(i+1)*(*lda)+i] = e[i]; } else taup[i] = 0.0; } } else { /* reduce to lower bidiag. form */ for(i=0;i<*m;i++) { i1 = *n - i; i2 = ( (i+1) < (*n-1)) ? (i+1) : (*n-1); i3 = 1; F77_FUNC(dlarfg,DLARFG)(&i1,&(a[i*(*lda)+i]),&(a[i2*(*lda)+i]),lda,&(taup[i])); d[i] = a[i*(*lda)+i]; a[i*(*lda)+i] = 1.0; i2 = *m - i - 1; i3 = ( (i+1) < (*m-1)) ? (i+1) : (*m-1); F77_FUNC(dlarf,DLARF)("R",&i2,&i1,&(a[i*(*lda)+i]),lda,&(taup[i]),&(a[(i)*(*lda)+i3]),lda,work); a[i*(*lda)+i] = d[i]; if(i<(*m-1)) { i1 = *m - i - 1; i2 = ( (i+2) < (*m-1)) ? (i+2) : (*m-1); i3 = 1; F77_FUNC(dlarfg,DLARFG)(&i1,&(a[(i)*(*lda)+i+1]),&(a[i*(*lda)+i2]),&i3,&(tauq[i])); e[i] = a[(i)*(*lda)+i+1]; a[(i)*(*lda)+i+1] = 1.0; i1 = *m - i - 1; i2 = *n - i - 1; i3 = 1; F77_FUNC(dlarf,DLARF)("L",&i1,&i2,&(a[(i)*(*lda)+i+1]),&i3,&(tauq[i]),&(a[(i+1)*(*lda)+i+1]),lda,work); a[(i)*(*lda)+i+1] = e[i]; } else tauq[i] = 0.0; } } return; }
gpl-2.0
teamfx/openjfx-9-dev-rt
modules/javafx.web/src/main/native/Source/WebCore/platform/MainThreadSharedTimer.cpp
6
1917
/*
 * Copyright (C) 2015 Igalia S.L.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MainThreadSharedTimer.h"

namespace WebCore {

MainThreadSharedTimer& MainThreadSharedTimer::singleton()
{
    static NeverDestroyed<MainThreadSharedTimer> instance;
    return instance;
}

#if !PLATFORM(GTK)
MainThreadSharedTimer::MainThreadSharedTimer()
{
}
#endif

void MainThreadSharedTimer::setFiredFunction(std::function<void()>&& firedFunction)
{
    RELEASE_ASSERT(!m_firedFunction || !firedFunction);
    m_firedFunction = WTFMove(firedFunction);
}

void MainThreadSharedTimer::fired()
{
    ASSERT(m_firedFunction);
    m_firedFunction();
}

} // namespace WebCore
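/*
 * Illustrative sketch (not part of the original file): the same
 * singleton-plus-swappable-callback pattern as MainThreadSharedTimer,
 * written as plain, self-contained C++ so it can be compiled outside
 * WebCore. SharedTimerSketch is a hypothetical stand-in; in WebCore the
 * equivalent calls are MainThreadSharedTimer::singleton().setFiredFunction(...)
 * and fired().
 */
#include <cassert>
#include <functional>
#include <iostream>
#include <utility>

class SharedTimerSketch {
public:
    static SharedTimerSketch& singleton()
    {
        // Function-local static: constructed once on first use, like NeverDestroyed.
        static SharedTimerSketch instance;
        return instance;
    }

    // Only install-over-empty or clear-over-installed is allowed,
    // mirroring the RELEASE_ASSERT in the original.
    void setFiredFunction(std::function<void()>&& firedFunction)
    {
        assert(!m_firedFunction || !firedFunction);
        m_firedFunction = std::move(firedFunction);
    }

    void fired()
    {
        assert(m_firedFunction); // a callback must be installed before firing
        m_firedFunction();
    }

private:
    SharedTimerSketch() = default;
    std::function<void()> m_firedFunction;
};

int main()
{
    SharedTimerSketch::singleton().setFiredFunction([] {
        std::cout << "timer fired\n";
    });
    SharedTimerSketch::singleton().fired();
}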
gpl-2.0