Dataset schema:
  code_text  - string, 604 to 999k characters
  repo_name  - string, 4 to 100 characters
  file_path  - string, 4 to 873 characters
  language   - string, 23 distinct values
  license    - string, 15 distinct values
  size       - int32, 1.02k to 999k
/* Copyright (C) 2004 - 2008 Versant Inc. http://www.db4o.com

This file is part of the sharpen open source java to c# translator.

sharpen is free software; you can redistribute it and/or modify it under
the terms of version 2 of the GNU General Public License as published
by the Free Software Foundation and as clarified by db4objects' GPL
interpretation policy, available at
http://www.db4o.com/about/company/legalpolicies/gplinterpretation/
Alternatively you can write to db4objects, Inc., 1900 S Norfolk Street,
Suite 350, San Mateo, CA 94403, USA.

sharpen is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */

package sharpen.core.csharp.ast;

public class CSDeclarationStatement extends CSStatement {

	private CSVariableDeclaration _declaration;

	public CSDeclarationStatement(int startPosition, CSVariableDeclaration declaration) {
		super(startPosition);
		_declaration = declaration;
	}

	public void accept(CSVisitor visitor) {
		visitor.visit(this);
	}

	public CSVariableDeclaration declaration() {
		return _declaration;
	}
}
ydanila/sharpen_imazen
src/main/sharpen/core/csharp/ast/CSDeclarationStatement.java
Java
gpl-2.0
1,422
/* * arch/arm/mach-tegra/tegra_simon.c * * Copyright (c) 2013-2015, NVIDIA CORPORATION. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/hrtimer.h> #include <linux/delay.h> #include <linux/thermal.h> #include <linux/regulator/consumer.h> #include "tegra_simon.h" #include <linux/platform/tegra/clock.h> #include <linux/platform/tegra/dvfs.h> #include "pm.h" #include <linux/platform/tegra/tegra_cl_dvfs.h> static DEFINE_MUTEX(simon_lock); static RAW_NOTIFIER_HEAD(simon_nh); static void tegra_simon_grade_notify(struct work_struct *work); static u32 grading_sec = TEGRA_SIMON_GRADING_INTERVAL_SEC; static u32 timeout_sec = TEGRA_SIMON_GRADING_TIMEOUT_SEC; static struct tegra_simon_grader simon_graders[TEGRA_SIMON_DOMAIN_NUM] = { [TEGRA_SIMON_DOMAIN_CPU] = { .domain_name = "cpu", .domain = TEGRA_SIMON_DOMAIN_CPU, }, [TEGRA_SIMON_DOMAIN_GPU] = { .domain_name = "gpu", .domain = TEGRA_SIMON_DOMAIN_GPU, }, }; static void settle_delay(struct tegra_simon_grader *grader) { int us = grader->desc->settle_us; if (us < MAX_UDELAY_MS * 1000) udelay(us); else usleep_range(us, us + 100); } static inline void mod_wdt_on_grade(struct tegra_simon_grader *grader) { if (grader->grade) { /* restart WDT while at high grade */ struct timespec ts = {timeout_sec, 0}; mod_timer(&grader->grade_wdt, jiffies + timespec_to_jiffies(&ts)); } } static void tegra_simon_reset_grade(unsigned long data) { unsigned long flags; struct tegra_simon_grader *grader = (struct tegra_simon_grader *)data; pr_info("%s: %s grade = 0\n", __func__, grader->domain_name); spin_lock_irqsave(&grader->grade_lock, flags); grader->grade = 0; spin_unlock_irqrestore(&grader->grade_lock, flags); schedule_work(&grader->grade_update_work); } static inline void mod_grading_timer_on_grade(struct tegra_simon_grader *grader) { if (grader->grade) { /* restart timer while at high grade */ struct timespec ts = {grading_sec, 0}; mod_timer(&grader->grade_timer, jiffies + timespec_to_jiffies(&ts)); } } static void tegra_simon_restart_grader(unsigned long data) { unsigned long flags; struct tegra_simon_grader *grader = (struct tegra_simon_grader *)data; spin_lock_irqsave(&grader->grade_lock, flags); if (grader->grade) { /* restart grading while at high grade */ grader->stop_grading = false; pr_info("%s: %s grader restarted\n", __func__, grader->domain_name); } spin_unlock_irqrestore(&grader->grade_lock, flags); } static void tegra_simon_grade_set(struct tegra_simon_grader *grader, int grade) { unsigned long flags; spin_lock_irqsave(&grader->grade_lock, flags); /* once grading is successful, stop grading, and restart timers */ grader->stop_grading = true; if (grader->grade == grade) { mod_wdt_on_grade(grader); mod_grading_timer_on_grade(grader); spin_unlock_irqrestore(&grader->grade_lock, flags); return; } grader->grade = grade; mod_wdt_on_grade(grader); mod_grading_timer_on_grade(grader); spin_unlock_irqrestore(&grader->grade_lock, flags); 
schedule_work(&grader->grade_update_work); } /* * GPU grading is implemented within vdd_gpu post-change notification chain that * guarantees constant voltage during grading. First grading after boot can be * executed anytime set voltage is below specified threshold, next grading is * always separated by the grading interval from the last successful grading. */ static int tegra_simon_gpu_grading_cb( struct notifier_block *nb, unsigned long event, void *v) { int mv = (int)((long)v); struct tegra_simon_grader *grader = container_of( nb, struct tegra_simon_grader, grading_condition_nb); unsigned long t; int grade = 0; if (!(event & REGULATOR_EVENT_OUT_POSTCHANGE)) return NOTIFY_DONE; if (grader->stop_grading) return NOTIFY_OK; mv = (mv > 0) ? mv / 1000 : mv; if ((mv <= 0) || (mv > grader->desc->grading_mv_max)) return NOTIFY_OK; if (grader->tzd->ops->get_temp(grader->tzd, &t)) { pr_err("%s: Failed to get %s temperature\n", __func__, grader->domain_name); return NOTIFY_OK; } if (t < grader->desc->grading_temperature_min) return NOTIFY_OK; if (grader->desc->grade_simon_domain) { settle_delay(grader); /* delay for voltage to settle */ grade = grader->desc->grade_simon_domain(grader->domain, mv, t); if (grade < 0) { pr_err("%s: Failed to grade %s\n", __func__, grader->domain_name); return NOTIFY_OK; } } grader->last_grading = ktime_get(); tegra_simon_grade_set(grader, grade); pr_info("%s: graded %s: v = %d, t = %lu, grade = %d\n", __func__, grader->domain_name, mv, t, grade); return NOTIFY_OK; } static int __init tegra_simon_init_gpu(void) { int ret; struct tegra_simon_grader *grader = &simon_graders[TEGRA_SIMON_DOMAIN_GPU]; spin_lock_init(&grader->grade_lock); setup_timer(&grader->grade_timer, tegra_simon_restart_grader, (unsigned long)grader); setup_timer(&grader->grade_wdt, tegra_simon_reset_grade, (unsigned long)grader); INIT_WORK(&grader->grade_update_work, tegra_simon_grade_notify); grader->tzd = thermal_zone_get_zone_by_name("GPU-therm"); if (IS_ERR(grader->tzd)) { pr_err("%s: Failed to find %s thermal zone\n", __func__, grader->domain_name); grader->tzd = NULL; return -ENOENT; } grader->grading_condition_nb.notifier_call = tegra_simon_gpu_grading_cb; ret = tegra_dvfs_rail_register_notifier( tegra_gpu_rail, &grader->grading_condition_nb); if (ret) { pr_err("%s: Failed to register %s dvfs rail notifier\n", __func__, grader->domain_name); return ret; } return 0; } /* * CPU grading is implemented within CPU rate post-change notification chain * that guarantees constant frequency during grading. Grading is executed only * when running on G-CPU, with DFLL as clock source, at rate low enough for DFLL * to be close to saturation at minimum voltage. To avoid still possible closed * loop voltage fluctuation for sure, DFLL maximum limit is clamped to minimum * during measurements. * * First grading after boot can be executed anytime the conditions above are * met, next grading is always separated by the grading interval from the last * successful grading. 
*/ static int tegra_simon_cpu_grading_cb( struct notifier_block *nb, unsigned long rate, void *v) { struct tegra_simon_grader *grader = container_of( nb, struct tegra_simon_grader, grading_condition_nb); struct clk *dfll_clk; unsigned long t; int mv; int grade = 0; if (grader->stop_grading) return NOTIFY_OK; if (is_lp_cluster() || (rate > grader->desc->grading_rate_max) || !tegra_dvfs_rail_is_dfll_mode(tegra_cpu_rail)) return NOTIFY_OK; if (grader->tzd->ops->get_temp(grader->tzd, &t)) { pr_err("%s: Failed to get %s temperature\n", __func__, grader->domain_name); return NOTIFY_OK; } if (t < grader->desc->grading_temperature_min) return NOTIFY_OK; dfll_clk = clk_get_parent(clk_get_parent(grader->clk)); mv = tegra_dvfs_clamp_dfll_at_vmin(dfll_clk, true); if (mv < 0) { pr_err("%s: Failed to clamp %s voltage\n", __func__, grader->domain_name); return NOTIFY_OK; } if (grader->desc->grade_simon_domain) { settle_delay(grader); /* delay for voltage to settle */ grade = grader->desc->grade_simon_domain(grader->domain, mv, t); if (grade < 0) { pr_err("%s: Failed to grade %s\n", __func__, grader->domain_name); tegra_dvfs_clamp_dfll_at_vmin(dfll_clk, false); return NOTIFY_OK; } } tegra_dvfs_clamp_dfll_at_vmin(dfll_clk, false); grader->last_grading = ktime_get(); tegra_simon_grade_set(grader, grade); pr_info("%s: graded %s: v = %d, t = %lu, grade = %d\n", __func__, grader->domain_name, mv, t, grade); return NOTIFY_OK; } static int __init tegra_simon_init_cpu(void) { struct tegra_simon_grader *grader = &simon_graders[TEGRA_SIMON_DOMAIN_CPU]; struct clk *c; int r; spin_lock_init(&grader->grade_lock); setup_timer(&grader->grade_timer, tegra_simon_restart_grader, (unsigned long)grader); setup_timer(&grader->grade_wdt, tegra_simon_reset_grade, (unsigned long)grader); INIT_WORK(&grader->grade_update_work, tegra_simon_grade_notify); grader->tzd = thermal_zone_get_zone_by_name("CPU-therm"); if (IS_ERR(grader->tzd)) { pr_err("%s: Failed to find %s thermal zone\n", __func__, grader->domain_name); return -ENOENT; } c = clk_get_sys("tegra_simon", "cpu"); if (IS_ERR(c)) { pr_err("%s: Failed to get %s clock\n", __func__, grader->domain_name); return -ENOENT; } grader->grading_condition_nb.notifier_call = tegra_simon_cpu_grading_cb; r = tegra_register_clk_rate_notifier(c, &grader->grading_condition_nb); if (r) { pr_err("%s: Failed to register for %s rate change notify\n", __func__, c->name); return r; } grader->clk = c; return 0; } /* * Interface for grader driver to add grader description */ int tegra_simon_add_grader(struct tegra_simon_grader_desc *desc) { int ret; struct tegra_simon_grader *grader; if (!desc || (desc->domain >= TEGRA_SIMON_DOMAIN_NUM)) { pr_err("%s: Invalid grader data\n", __func__); return -EINVAL; } grader = &simon_graders[desc->domain]; if (grader->desc) { pr_err("%s: Duplicate grader for %s\n", __func__, grader->domain_name); return -EINVAL; } set_mb(grader->desc, desc); switch (grader->domain) { case TEGRA_SIMON_DOMAIN_CPU: ret = tegra_simon_init_cpu(); break; case TEGRA_SIMON_DOMAIN_GPU: ret = tegra_simon_init_gpu(); break; default: pr_err("%s: Grader for %s is not supported\n", __func__, grader->domain_name); return -EINVAL; } if (ret) { grader->desc = NULL; pr_err("%s: Failed to initialize grader for %s\n", __func__, grader->domain_name); return ret; } return 0; } /* * Grade notification chain */ int tegra_register_simon_notifier(struct notifier_block *nb) { int ret; mutex_lock(&simon_lock); ret = raw_notifier_chain_register(&simon_nh, nb); mutex_unlock(&simon_lock); return ret; } void 
tegra_unregister_simon_notifier(struct notifier_block *nb) { mutex_lock(&simon_lock); raw_notifier_chain_unregister(&simon_nh, nb); mutex_unlock(&simon_lock); } static void grade_notify(struct tegra_simon_grader *grader) { mutex_lock(&simon_lock); raw_notifier_call_chain(&simon_nh, grader->grade, (void *)((long)grader->domain)); mutex_unlock(&simon_lock); } static void tegra_simon_grade_notify(struct work_struct *work) { struct tegra_simon_grader *grader = container_of( work, struct tegra_simon_grader, grade_update_work); grade_notify(grader); } #ifdef CONFIG_DEBUG_FS static int grade_get(void *data, u64 *val) { struct tegra_simon_grader *grader = data; if (grader->domain >= TEGRA_SIMON_DOMAIN_NUM) { *val = -EINVAL; return -EINVAL; } *val = grader->grade; return 0; } static int grade_set(void *data, u64 val) { int grade = (int)val; struct tegra_simon_grader *grader = data; if (grader->domain >= TEGRA_SIMON_DOMAIN_NUM) return -EINVAL; if (!grader->desc && (grader->grade != grade)) { grader->grade = grade; grade_notify(grader); } else if (grader->desc) { tegra_simon_grade_set(grader, grade); } return 0; } DEFINE_SIMPLE_ATTRIBUTE(grade_fops, grade_get, grade_set, "%llu\n"); static int __init simon_debugfs_init_domain(struct dentry *dir, struct tegra_simon_grader *grader) { struct dentry *d; d = debugfs_create_dir(grader->domain_name, dir); if (!d) return -ENOMEM; if (!debugfs_create_file("grade", S_IWUSR | S_IRUGO, d, (void *)grader, &grade_fops)) return -ENOMEM; if (!debugfs_create_bool("grading_stopped", S_IRUGO, d, (u32 *)&grader->stop_grading)) return -ENOMEM; return 0; } static int __init simon_debugfs_init(void) { int i; struct tegra_simon_grader *grader; struct dentry *dir; dir = debugfs_create_dir("tegra_simon", NULL); if (!dir) return -ENOMEM; if (!debugfs_create_u32("grading_sec", S_IWUSR | S_IRUGO, dir, &grading_sec)) goto err_out; if (!debugfs_create_u32("timeout_sec", S_IWUSR | S_IRUGO, dir, &timeout_sec)) goto err_out; for (i = 0; i < TEGRA_SIMON_DOMAIN_NUM; i++) { grader = &simon_graders[i]; if (!grader->domain_name) continue; if (simon_debugfs_init_domain(dir, grader)) goto err_out; } return 0; err_out: debugfs_remove_recursive(dir); return -ENOMEM; } late_initcall(simon_debugfs_init); #endif /* FIXME: Add fake graders - to be removed when actual graders are implemnted */ #ifdef CONFIG_ARCH_TEGRA_12x_SOC #ifndef CONFIG_ARCH_TEGRA_13x_SOC static int fake_grader(int domain, int mv, int temperature) { return 0; /* safe low grade */ } static struct tegra_simon_grader_desc gpu_grader_desc = { .domain = TEGRA_SIMON_DOMAIN_GPU, .grading_mv_max = 850, .grading_temperature_min = 20000, .settle_us = 3000, .grade_simon_domain = fake_grader, }; static struct tegra_simon_grader_desc cpu_grader_desc = { .domain = TEGRA_SIMON_DOMAIN_CPU, .grading_rate_max = 850000000, .grading_temperature_min = 20000, .settle_us = 3000, .grade_simon_domain = fake_grader, }; static int __init tegra_simon_add_graders(void) { tegra_simon_add_grader(&gpu_grader_desc); tegra_simon_add_grader(&cpu_grader_desc); return 0; } late_initcall_sync(tegra_simon_add_graders); #endif #endif
Jetson-TX1-AndroidTV/android_kernel_shield_tv_video4linux
arch/arm/mach-tegra/tegra_simon.c
C
gpl-2.0
13,904
/***************************************************************************** * depth.c: bit-depth conversion video filter ***************************************************************************** * Copyright (C) 2010-2011 x264 project * * Authors: Oskar Arvidsson <oskar@irock.se> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA. * * This program is also available under a commercial proprietary license. * For more information, contact us at licensing@x264.com. *****************************************************************************/ #include "video.h" #define NAME "depth" #define FAIL_IF_ERROR( cond, ... ) FAIL_IF_ERR( cond, NAME, __VA_ARGS__ ) cli_vid_filter_t depth_filter; typedef struct { hnd_t prev_hnd; cli_vid_filter_t prev_filter; int bit_depth; int dst_csp; cli_pic_t buffer; int16_t *error_buf; } depth_hnd_t; static int depth_filter_csp_is_supported( int csp ) { int csp_mask = csp & X264_CSP_MASK; return csp_mask == X264_CSP_I420 || csp_mask == X264_CSP_I422 || csp_mask == X264_CSP_I444 || csp_mask == X264_CSP_YV24 || csp_mask == X264_CSP_YV12 || csp_mask == X264_CSP_NV12; } static int csp_num_interleaved( int csp, int plane ) { int csp_mask = csp & X264_CSP_MASK; return ( csp_mask == X264_CSP_NV12 && plane == 1 ) ? 2 : 1; } /* The dithering algorithm is based on Sierra-2-4A error diffusion. It has been * written in such a way so that if the source has been upconverted using the * same algorithm as used in scale_image, dithering down to the source bit * depth again is lossless. 
*/ #define DITHER_PLANE( pitch ) \ static void dither_plane_##pitch( pixel *dst, int dst_stride, uint16_t *src, int src_stride, \ int width, int height, int16_t *errors ) \ { \ const int lshift = 16-BIT_DEPTH; \ const int rshift = 2*BIT_DEPTH-16; \ const int pixel_max = (1 << BIT_DEPTH)-1; \ const int half = 1 << (16-BIT_DEPTH); \ memset( errors, 0, (width+1) * sizeof(int16_t) ); \ for( int y = 0; y < height; y++, src += src_stride, dst += dst_stride ) \ { \ int err = 0; \ for( int x = 0; x < width; x++ ) \ { \ err = err*2 + errors[x] + errors[x+1]; \ dst[x*pitch] = x264_clip3( (((src[x*pitch]+half)<<2)+err)*pixel_max >> 18, 0, pixel_max ); \ errors[x] = err = src[x*pitch] - (dst[x*pitch] << lshift) - (dst[x*pitch] >> rshift); \ } \ } \ } DITHER_PLANE( 1 ) DITHER_PLANE( 2 ) static void dither_image( cli_image_t *out, cli_image_t *img, int16_t *error_buf ) { int csp_mask = img->csp & X264_CSP_MASK; for( int i = 0; i < img->planes; i++ ) { int num_interleaved = csp_num_interleaved( img->csp, i ); int height = x264_cli_csps[csp_mask].height[i] * img->height; int width = x264_cli_csps[csp_mask].width[i] * img->width / num_interleaved; #define CALL_DITHER_PLANE( pitch, off ) \ dither_plane_##pitch( ((pixel*)out->plane[i])+off, out->stride[i]/sizeof(pixel), \ ((uint16_t*)img->plane[i])+off, img->stride[i]/2, width, height, error_buf ) if( num_interleaved == 1 ) { CALL_DITHER_PLANE( 1, 0 ); } else { CALL_DITHER_PLANE( 2, 0 ); CALL_DITHER_PLANE( 2, 1 ); } } } static void scale_image( cli_image_t *output, cli_image_t *img ) { /* this function mimics how swscale does upconversion. 8-bit is converted * to 16-bit through left shifting the orginal value with 8 and then adding * the original value to that. This effectively keeps the full color range * while also being fast. for n-bit we basically do the same thing, but we * discard the lower 16-n bits. 
*/ int csp_mask = img->csp & X264_CSP_MASK; const int shift = 16-BIT_DEPTH; for( int i = 0; i < img->planes; i++ ) { uint8_t *src = img->plane[i]; uint16_t *dst = (uint16_t*)output->plane[i]; int height = x264_cli_csps[csp_mask].height[i] * img->height; int width = x264_cli_csps[csp_mask].width[i] * img->width; for( int j = 0; j < height; j++ ) { for( int k = 0; k < width; k++ ) dst[k] = ((src[k] << 8) + src[k]) >> shift; src += img->stride[i]; dst += output->stride[i]/2; } } } static int get_frame( hnd_t handle, cli_pic_t *output, int frame ) { depth_hnd_t *h = handle; if( h->prev_filter.get_frame( h->prev_hnd, output, frame ) ) return -1; if( h->bit_depth < 16 && output->img.csp & X264_CSP_HIGH_DEPTH ) { dither_image( &h->buffer.img, &output->img, h->error_buf ); output->img = h->buffer.img; } else if( h->bit_depth > 8 && !(output->img.csp & X264_CSP_HIGH_DEPTH) ) { scale_image( &h->buffer.img, &output->img ); output->img = h->buffer.img; } return 0; } static int release_frame( hnd_t handle, cli_pic_t *pic, int frame ) { depth_hnd_t *h = handle; return h->prev_filter.release_frame( h->prev_hnd, pic, frame ); } static void free_filter( hnd_t handle ) { depth_hnd_t *h = handle; h->prev_filter.free( h->prev_hnd ); x264_cli_pic_clean( &h->buffer ); x264_free( h ); } static int init( hnd_t *handle, cli_vid_filter_t *filter, video_info_t *info, x264_param_t *param, char *opt_string ) { int ret = 0; int change_fmt = (info->csp ^ param->i_csp) & X264_CSP_HIGH_DEPTH; int csp = ~(~info->csp ^ change_fmt); int bit_depth = 8*x264_cli_csp_depth_factor( csp ); if( opt_string ) { static const char *optlist[] = { "bit_depth", NULL }; char **opts = x264_split_options( opt_string, optlist ); if( opts ) { char *str_bit_depth = x264_get_option( "bit_depth", opts ); bit_depth = x264_otoi( str_bit_depth, -1 ); ret = bit_depth < 8 || bit_depth > 16; csp = bit_depth > 8 ? csp | X264_CSP_HIGH_DEPTH : csp & ~X264_CSP_HIGH_DEPTH; change_fmt = (info->csp ^ csp) & X264_CSP_HIGH_DEPTH; x264_free_string_array( opts ); } else ret = 1; } FAIL_IF_ERROR( bit_depth != BIT_DEPTH, "this build supports only bit depth %d\n", BIT_DEPTH ) FAIL_IF_ERROR( ret, "unsupported bit depth conversion.\n" ) /* only add the filter to the chain if it's needed */ if( change_fmt || bit_depth != 8 * x264_cli_csp_depth_factor( csp ) ) { FAIL_IF_ERROR( !depth_filter_csp_is_supported(csp), "unsupported colorspace.\n" ) depth_hnd_t *h = x264_malloc( sizeof(depth_hnd_t) + (info->width+1)*sizeof(int16_t) ); if( !h ) return -1; h->error_buf = (int16_t*)(h + 1); h->dst_csp = csp; h->bit_depth = bit_depth; h->prev_hnd = *handle; h->prev_filter = *filter; if( x264_cli_pic_alloc( &h->buffer, h->dst_csp, info->width, info->height ) ) { x264_free( h ); return -1; } *handle = h; *filter = depth_filter; info->csp = h->dst_csp; } return 0; } cli_vid_filter_t depth_filter = { NAME, NULL, init, get_frame, release_frame, free_filter, NULL };
hp-sam/voip-client-ios
submodules/build-i386-apple-darwin/externals/x264/filters/video/depth.c
C
gpl-2.0
8,011
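The scale_image() routine in the depth.c sample above mimics swscale's upconversion: the 8-bit sample is replicated into 16 bits and the low 16-n bits are discarded. A minimal standalone sketch of that arithmetic, assuming a 10-bit target depth purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Replicate an 8-bit sample into 16 bits, then keep only the top
 * bit_depth bits: the ((src << 8) + src) >> (16 - n) trick used by
 * scale_image() in depth.c. */
static uint16_t upconvert_sample(uint8_t src, int bit_depth)
{
    return (uint16_t)(((src << 8) + src) >> (16 - bit_depth));
}

int main(void)
{
    printf("255 -> %u\n", (unsigned)upconvert_sample(255, 10)); /* 1023: full scale preserved */
    printf("128 -> %u\n", (unsigned)upconvert_sample(128, 10)); /* 514 */
    return 0;
}

Replicating the byte instead of plain shifting keeps 255 mapped to the new full-scale value (1023 at 10 bits), which is what lets the dither filter convert back down losslessly, as the original comment notes.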
using System; using System.Collections.Generic; using System.IO; using System.Net; using Server; using Server.Accounting; using Server.Commands; using Server.Engines.Help; using Server.Network; using Server.Regions; namespace Server.Misc { public enum PasswordProtection { None, Crypt, NewCrypt } public class AccountHandler { private static int MaxAccountsPerIP = 1; private static bool AutoAccountCreation = true; private static bool RestrictDeletion = !TestCenter.Enabled; private static TimeSpan DeleteDelay = TimeSpan.FromDays( 7.0 ); public static PasswordProtection ProtectPasswords = PasswordProtection.NewCrypt; private static AccessLevel m_LockdownLevel; public static AccessLevel LockdownLevel { get{ return m_LockdownLevel; } set{ m_LockdownLevel = value; } } private static CityInfo[] StartingCities = new CityInfo[] { new CityInfo( "New Haven", "New Haven Bank", 1150168, 3667, 2625, 0 ), new CityInfo( "Yew", "The Empath Abbey", 1075072, 633, 858, 0 ), new CityInfo( "Minoc", "The Barnacle", 1075073, 2476, 413, 15 ), new CityInfo( "Britain", "The Wayfarer's Inn", 1075074, 1602, 1591, 20 ), new CityInfo( "Moonglow", "The Scholars Inn", 1075075, 4408, 1168, 0 ), new CityInfo( "Trinsic", "The Traveler's Inn", 1075076, 1845, 2745, 0 ), new CityInfo( "Jhelom", "The Mercenary Inn", 1075078, 1374, 3826, 0 ), new CityInfo( "Skara Brae", "The Falconer's Inn", 1075079, 618, 2234, 0 ), new CityInfo( "Vesper", "The Ironwood Inn", 1075080, 2771, 976, 0 ) }; /* Old Haven/Magincia Locations new CityInfo( "Britain", "Sweet Dreams Inn", 1496, 1628, 10 ); // .. // Trinsic new CityInfo( "Magincia", "The Great Horns Tavern", 3734, 2222, 20 ), // Jhelom // .. new CityInfo( "Haven", "Buckler's Hideaway", 3667, 2625, 0 ) if ( Core.AOS ) { //CityInfo haven = new CityInfo( "Haven", "Uzeraan's Mansion", 3618, 2591, 0 ); CityInfo haven = new CityInfo( "Haven", "Uzeraan's Mansion", 3503, 2574, 14 ); StartingCities[StartingCities.Length - 1] = haven; } */ private static bool PasswordCommandEnabled = false; public static void Initialize() { EventSink.DeleteRequest += new DeleteRequestEventHandler( EventSink_DeleteRequest ); EventSink.AccountLogin += new AccountLoginEventHandler( EventSink_AccountLogin ); EventSink.GameLogin += new GameLoginEventHandler( EventSink_GameLogin ); if ( PasswordCommandEnabled ) CommandSystem.Register( "Password", AccessLevel.Player, new CommandEventHandler( Password_OnCommand ) ); } [Usage( "Password <newPassword> <repeatPassword>" )] [Description( "Changes the password of the commanding players account. Requires the same C-class IP address as the account's creator." )] public static void Password_OnCommand( CommandEventArgs e ) { Mobile from = e.Mobile; Account acct = from.Account as Account; if ( acct == null ) return; IPAddress[] accessList = acct.LoginIPs; if ( accessList.Length == 0 ) return; NetState ns = from.NetState; if ( ns == null ) return; if ( e.Length == 0 ) { from.SendMessage( "You must specify the new password." ); return; } else if ( e.Length == 1 ) { from.SendMessage( "To prevent potential typing mistakes, you must type the password twice. Use the format:" ); from.SendMessage( "Password \"(newPassword)\" \"(repeated)\"" ); return; } string pass = e.GetString( 0 ); string pass2 = e.GetString( 1 ); if ( pass != pass2 ) { from.SendMessage( "The passwords do not match." ); return; } bool isSafe = true; for ( int i = 0; isSafe && i < pass.Length; ++i ) isSafe = ( pass[i] >= 0x20 && pass[i] < 0x7F ); if ( !isSafe ) { from.SendMessage( "That is not a valid password." 
); return; } try { IPAddress ipAddress = ns.Address; if ( Utility.IPMatchClassC( accessList[0], ipAddress ) ) { acct.SetPassword( pass ); from.SendMessage( "The password to your account has changed." ); } else { PageEntry entry = PageQueue.GetEntry( from ); if ( entry != null ) { if ( entry.Message.StartsWith( "[Automated: Change Password]" ) ) from.SendMessage( "You already have a password change request in the help system queue." ); else from.SendMessage( "Your IP address does not match that which created this account." ); } else if ( PageQueue.CheckAllowedToPage( from ) ) { from.SendMessage( "Your IP address does not match that which created this account. A page has been entered into the help system on your behalf." ); from.SendLocalizedMessage( 501234, "", 0x35 ); /* The next available Counselor/Game Master will respond as soon as possible. * Please check your Journal for messages every few minutes. */ PageQueue.Enqueue( new PageEntry( from, String.Format( "[Automated: Change Password]<br>Desired password: {0}<br>Current IP address: {1}<br>Account IP address: {2}", pass, ipAddress, accessList[0] ), PageType.Account ) ); } } } catch { } } private static void EventSink_DeleteRequest( DeleteRequestEventArgs e ) { NetState state = e.State; int index = e.Index; Account acct = state.Account as Account; if ( acct == null ) { state.Dispose(); } else if ( index < 0 || index >= acct.Length ) { state.Send( new DeleteResult( DeleteResultType.BadRequest ) ); state.Send( new CharacterListUpdate( acct ) ); } else { Mobile m = acct[index]; if ( m == null ) { state.Send( new DeleteResult( DeleteResultType.CharNotExist ) ); state.Send( new CharacterListUpdate( acct ) ); } else if ( m.NetState != null ) { state.Send( new DeleteResult( DeleteResultType.CharBeingPlayed ) ); state.Send( new CharacterListUpdate( acct ) ); } else if ( RestrictDeletion && DateTime.UtcNow < (m.CreationTime + DeleteDelay) ) { state.Send( new DeleteResult( DeleteResultType.CharTooYoung ) ); state.Send( new CharacterListUpdate( acct ) ); } else if ( m.AccessLevel == AccessLevel.Player && Region.Find( m.LogoutLocation, m.LogoutMap ).GetRegion( typeof( Jail ) ) != null ) //Don't need to check current location, if netstate is null, they're logged out { state.Send( new DeleteResult( DeleteResultType.BadRequest ) ); state.Send( new CharacterListUpdate( acct ) ); } else { Console.WriteLine( "Client: {0}: Deleting character {1} (0x{2:X})", state, index, m.Serial.Value ); acct.Comments.Add( new AccountComment( "System", String.Format( "Character #{0} {1} deleted by {2}", index + 1, m, state ) ) ); m.Delete(); state.Send( new CharacterListUpdate( acct ) ); } } } public static bool CanCreate( IPAddress ip ) { if ( !IPTable.ContainsKey( ip ) ) return true; return ( IPTable[ip] < MaxAccountsPerIP ); } private static Dictionary<IPAddress, Int32> m_IPTable; public static Dictionary<IPAddress, Int32> IPTable { get { if ( m_IPTable == null ) { m_IPTable = new Dictionary<IPAddress, Int32>(); foreach ( Account a in Accounts.GetAccounts() ) if ( a.LoginIPs.Length > 0 ) { IPAddress ip = a.LoginIPs[0]; if ( m_IPTable.ContainsKey( ip ) ) m_IPTable[ip]++; else m_IPTable[ip] = 1; } } return m_IPTable; } } private static readonly char[] m_ForbiddenChars = new char[] { '<', '>', ':', '"', '/', '\\', '|', '?', '*' }; private static bool IsForbiddenChar( char c ) { for ( int i = 0; i < m_ForbiddenChars.Length; ++i ) if ( c == m_ForbiddenChars[i] ) return true; return false; } private static Account CreateAccount( NetState state, string un, string pw ) { if ( 
un.Length == 0 || pw.Length == 0 ) return null; bool isSafe = !( un.StartsWith( " " ) || un.EndsWith( " " ) || un.EndsWith( "." ) ); for ( int i = 0; isSafe && i < un.Length; ++i ) isSafe = ( un[i] >= 0x20 && un[i] < 0x7F && !IsForbiddenChar( un[i] ) ); for ( int i = 0; isSafe && i < pw.Length; ++i ) isSafe = ( pw[i] >= 0x20 && pw[i] < 0x7F ); if ( !isSafe ) return null; if ( !CanCreate( state.Address ) ) { Console.WriteLine( "Login: {0}: Account '{1}' not created, ip already has {2} account{3}.", state, un, MaxAccountsPerIP, MaxAccountsPerIP == 1 ? "" : "s" ); return null; } Console.WriteLine( "Login: {0}: Creating new account '{1}'", state, un ); Account a = new Account( un, pw ); return a; } public static void EventSink_AccountLogin( AccountLoginEventArgs e ) { if ( !IPLimiter.SocketBlock && !IPLimiter.Verify( e.State.Address ) ) { e.Accepted = false; e.RejectReason = ALRReason.InUse; Console.WriteLine( "Login: {0}: Past IP limit threshold", e.State ); using ( StreamWriter op = new StreamWriter( "ipLimits.log", true ) ) op.WriteLine( "{0}\tPast IP limit threshold\t{1}", e.State, DateTime.UtcNow ); return; } string un = e.Username; string pw = e.Password; e.Accepted = false; Account acct = Accounts.GetAccount( un ) as Account; if ( acct == null ) { if ( AutoAccountCreation && un.Trim().Length > 0 ) // To prevent someone from making an account of just '' or a bunch of meaningless spaces { e.State.Account = acct = CreateAccount( e.State, un, pw ); e.Accepted = acct == null ? false : acct.CheckAccess( e.State ); if ( !e.Accepted ) e.RejectReason = ALRReason.BadComm; } else { Console.WriteLine( "Login: {0}: Invalid username '{1}'", e.State, un ); e.RejectReason = ALRReason.Invalid; } } else if ( !acct.HasAccess( e.State ) ) { Console.WriteLine( "Login: {0}: Access denied for '{1}'", e.State, un ); e.RejectReason = ( m_LockdownLevel > AccessLevel.Player ? 
ALRReason.BadComm : ALRReason.BadPass ); } else if ( !acct.CheckPassword( pw ) ) { Console.WriteLine( "Login: {0}: Invalid password for '{1}'", e.State, un ); e.RejectReason = ALRReason.BadPass; } else if ( acct.Banned ) { Console.WriteLine( "Login: {0}: Banned account '{1}'", e.State, un ); e.RejectReason = ALRReason.Blocked; } else { Console.WriteLine( "Login: {0}: Valid credentials for '{1}'", e.State, un ); e.State.Account = acct; e.Accepted = true; acct.LogAccess( e.State ); } if ( !e.Accepted ) AccountAttackLimiter.RegisterInvalidAccess( e.State ); } public static void EventSink_GameLogin( GameLoginEventArgs e ) { if ( !IPLimiter.SocketBlock && !IPLimiter.Verify( e.State.Address ) ) { e.Accepted = false; Console.WriteLine( "Login: {0}: Past IP limit threshold", e.State ); using ( StreamWriter op = new StreamWriter( "ipLimits.log", true ) ) op.WriteLine( "{0}\tPast IP limit threshold\t{1}", e.State, DateTime.UtcNow ); return; } string un = e.Username; string pw = e.Password; Account acct = Accounts.GetAccount( un ) as Account; if ( acct == null ) { e.Accepted = false; } else if ( !acct.HasAccess( e.State ) ) { Console.WriteLine( "Login: {0}: Access denied for '{1}'", e.State, un ); e.Accepted = false; } else if ( !acct.CheckPassword( pw ) ) { Console.WriteLine( "Login: {0}: Invalid password for '{1}'", e.State, un ); e.Accepted = false; } else if ( acct.Banned ) { Console.WriteLine( "Login: {0}: Banned account '{1}'", e.State, un ); e.Accepted = false; } else { acct.LogAccess( e.State ); Console.WriteLine( "Login: {0}: Account '{1}' at character list", e.State, un ); e.State.Account = acct; e.Accepted = true; e.CityInfo = StartingCities; } if ( !e.Accepted ) AccountAttackLimiter.RegisterInvalidAccess( e.State ); } public static bool CheckAccount( Mobile mobCheck, Mobile accCheck ) { if ( accCheck != null ) { Account a = accCheck.Account as Account; if ( a != null ) { for ( int i = 0; i < a.Length; ++i ) { if ( a[i] == mobCheck ) return true; } } } return false; } } }
felladrin/runuo-pt-br
Scripts/Accounting/AccountHandler.cs
C#
gpl-2.0
12,759
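The RunUO AccountHandler sample restricts usernames and passwords to printable ASCII (0x20-0x7E) and additionally rejects characters that are illegal in file names, as well as leading/trailing spaces and a trailing dot in usernames. A rough C re-statement of that validation, with illustrative function names (the forbidden-character list is the one from the sample):

#include <stdbool.h>
#include <string.h>

/* Characters the sample forbids in account names. */
static const char forbidden[] = "<>:\"/\\|?*";

static bool is_safe_char(char c)
{
    /* printable ASCII only, excluding DEL */
    return c >= 0x20 && c < 0x7F;
}

static bool username_is_safe(const char *un)
{
    size_t len = strlen(un);
    if (len == 0 || un[0] == ' ' || un[len - 1] == ' ' || un[len - 1] == '.')
        return false;
    for (size_t i = 0; i < len; i++)
        if (!is_safe_char(un[i]) || strchr(forbidden, un[i]))
            return false;
    return true;
}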
/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 2.0.8 * * Do not make changes to this file unless you know what you are doing--modify * the SWIG interface file instead. * ----------------------------------------------------------------------------- */ using System; using System.Runtime.InteropServices; public class aiBoneVector : IDisposable, System.Collections.IEnumerable #if !SWIG_DOTNET_1 , System.Collections.Generic.IList<aiBone> #endif { private HandleRef swigCPtr; protected bool swigCMemOwn; internal aiBoneVector(IntPtr cPtr, bool cMemoryOwn) { swigCMemOwn = cMemoryOwn; swigCPtr = new HandleRef(this, cPtr); } internal static HandleRef getCPtr(aiBoneVector obj) { return (obj == null) ? new HandleRef(null, IntPtr.Zero) : obj.swigCPtr; } ~aiBoneVector() { Dispose(); } public virtual void Dispose() { lock(this) { if (swigCPtr.Handle != IntPtr.Zero) { if (swigCMemOwn) { swigCMemOwn = false; AssimpPINVOKE.delete_aiBoneVector(swigCPtr); } swigCPtr = new HandleRef(null, IntPtr.Zero); } GC.SuppressFinalize(this); } } public aiBoneVector(System.Collections.ICollection c) : this() { if (c == null) throw new ArgumentNullException("c"); foreach (aiBone element in c) { this.Add(element); } } public bool IsFixedSize { get { return false; } } public bool IsReadOnly { get { return false; } } public aiBone this[int index] { get { return getitem(index); } set { setitem(index, value); } } public int Capacity { get { return (int)capacity(); } set { if (value < size()) throw new ArgumentOutOfRangeException("Capacity"); reserve((uint)value); } } public int Count { get { return (int)size(); } } public bool IsSynchronized { get { return false; } } #if SWIG_DOTNET_1 public void CopyTo(System.Array array) #else public void CopyTo(aiBone[] array) #endif { CopyTo(0, array, 0, this.Count); } #if SWIG_DOTNET_1 public void CopyTo(System.Array array, int arrayIndex) #else public void CopyTo(aiBone[] array, int arrayIndex) #endif { CopyTo(0, array, arrayIndex, this.Count); } #if SWIG_DOTNET_1 public void CopyTo(int index, System.Array array, int arrayIndex, int count) #else public void CopyTo(int index, aiBone[] array, int arrayIndex, int count) #endif { if (array == null) throw new ArgumentNullException("array"); if (index < 0) throw new ArgumentOutOfRangeException("index", "Value is less than zero"); if (arrayIndex < 0) throw new ArgumentOutOfRangeException("arrayIndex", "Value is less than zero"); if (count < 0) throw new ArgumentOutOfRangeException("count", "Value is less than zero"); if (array.Rank > 1) throw new ArgumentException("Multi dimensional array.", "array"); if (index+count > this.Count || arrayIndex+count > array.Length) throw new ArgumentException("Number of elements to copy is too large."); for (int i=0; i<count; i++) array.SetValue(getitemcopy(index+i), arrayIndex+i); } #if !SWIG_DOTNET_1 System.Collections.Generic.IEnumerator<aiBone> System.Collections.Generic.IEnumerable<aiBone>.GetEnumerator() { return new aiBoneVectorEnumerator(this); } #endif System.Collections.IEnumerator System.Collections.IEnumerable.GetEnumerator() { return new aiBoneVectorEnumerator(this); } public aiBoneVectorEnumerator GetEnumerator() { return new aiBoneVectorEnumerator(this); } // Type-safe enumerator /// Note that the IEnumerator documentation requires an InvalidOperationException to be thrown /// whenever the collection is modified. 
This has been done for changes in the size of the /// collection but not when one of the elements of the collection is modified as it is a bit /// tricky to detect unmanaged code that modifies the collection under our feet. public sealed class aiBoneVectorEnumerator : System.Collections.IEnumerator #if !SWIG_DOTNET_1 , System.Collections.Generic.IEnumerator<aiBone> #endif { private aiBoneVector collectionRef; private int currentIndex; private object currentObject; private int currentSize; public aiBoneVectorEnumerator(aiBoneVector collection) { collectionRef = collection; currentIndex = -1; currentObject = null; currentSize = collectionRef.Count; } // Type-safe iterator Current public aiBone Current { get { if (currentIndex == -1) throw new InvalidOperationException("Enumeration not started."); if (currentIndex > currentSize - 1) throw new InvalidOperationException("Enumeration finished."); if (currentObject == null) throw new InvalidOperationException("Collection modified."); return (aiBone)currentObject; } } // Type-unsafe IEnumerator.Current object System.Collections.IEnumerator.Current { get { return Current; } } public bool MoveNext() { int size = collectionRef.Count; bool moveOkay = (currentIndex+1 < size) && (size == currentSize); if (moveOkay) { currentIndex++; currentObject = collectionRef[currentIndex]; } else { currentObject = null; } return moveOkay; } public void Reset() { currentIndex = -1; currentObject = null; if (collectionRef.Count != currentSize) { throw new InvalidOperationException("Collection modified."); } } #if !SWIG_DOTNET_1 public void Dispose() { currentIndex = -1; currentObject = null; } #endif } public void Clear() { AssimpPINVOKE.aiBoneVector_Clear(swigCPtr); } public void Add(aiBone x) { AssimpPINVOKE.aiBoneVector_Add(swigCPtr, aiBone.getCPtr(x)); } private uint size() { uint ret = AssimpPINVOKE.aiBoneVector_size(swigCPtr); return ret; } private uint capacity() { uint ret = AssimpPINVOKE.aiBoneVector_capacity(swigCPtr); return ret; } private void reserve(uint n) { AssimpPINVOKE.aiBoneVector_reserve(swigCPtr, n); } public aiBoneVector() : this(AssimpPINVOKE.new_aiBoneVector__SWIG_0(), true) { } public aiBoneVector(aiBoneVector other) : this(AssimpPINVOKE.new_aiBoneVector__SWIG_1(aiBoneVector.getCPtr(other)), true) { if (AssimpPINVOKE.SWIGPendingException.Pending) throw AssimpPINVOKE.SWIGPendingException.Retrieve(); } public aiBoneVector(int capacity) : this(AssimpPINVOKE.new_aiBoneVector__SWIG_2(capacity), true) { if (AssimpPINVOKE.SWIGPendingException.Pending) throw AssimpPINVOKE.SWIGPendingException.Retrieve(); } private aiBone getitemcopy(int index) { IntPtr cPtr = AssimpPINVOKE.aiBoneVector_getitemcopy(swigCPtr, index); aiBone ret = (cPtr == IntPtr.Zero) ? null : new aiBone(cPtr, false); if (AssimpPINVOKE.SWIGPendingException.Pending) throw AssimpPINVOKE.SWIGPendingException.Retrieve(); return ret; } private aiBone getitem(int index) { IntPtr cPtr = AssimpPINVOKE.aiBoneVector_getitem(swigCPtr, index); aiBone ret = (cPtr == IntPtr.Zero) ? 
null : new aiBone(cPtr, false); if (AssimpPINVOKE.SWIGPendingException.Pending) throw AssimpPINVOKE.SWIGPendingException.Retrieve(); return ret; } private void setitem(int index, aiBone val) { AssimpPINVOKE.aiBoneVector_setitem(swigCPtr, index, aiBone.getCPtr(val)); if (AssimpPINVOKE.SWIGPendingException.Pending) throw AssimpPINVOKE.SWIGPendingException.Retrieve(); } public void AddRange(aiBoneVector values) { AssimpPINVOKE.aiBoneVector_AddRange(swigCPtr, aiBoneVector.getCPtr(values)); if (AssimpPINVOKE.SWIGPendingException.Pending) throw AssimpPINVOKE.SWIGPendingException.Retrieve(); } public aiBoneVector GetRange(int index, int count) { IntPtr cPtr = AssimpPINVOKE.aiBoneVector_GetRange(swigCPtr, index, count); aiBoneVector ret = (cPtr == IntPtr.Zero) ? null : new aiBoneVector(cPtr, true); if (AssimpPINVOKE.SWIGPendingException.Pending) throw AssimpPINVOKE.SWIGPendingException.Retrieve(); return ret; } public void Insert(int index, aiBone x) { AssimpPINVOKE.aiBoneVector_Insert(swigCPtr, index, aiBone.getCPtr(x)); if (AssimpPINVOKE.SWIGPendingException.Pending) throw AssimpPINVOKE.SWIGPendingException.Retrieve(); } public void InsertRange(int index, aiBoneVector values) { AssimpPINVOKE.aiBoneVector_InsertRange(swigCPtr, index, aiBoneVector.getCPtr(values)); if (AssimpPINVOKE.SWIGPendingException.Pending) throw AssimpPINVOKE.SWIGPendingException.Retrieve(); } public void RemoveAt(int index) { AssimpPINVOKE.aiBoneVector_RemoveAt(swigCPtr, index); if (AssimpPINVOKE.SWIGPendingException.Pending) throw AssimpPINVOKE.SWIGPendingException.Retrieve(); } public void RemoveRange(int index, int count) { AssimpPINVOKE.aiBoneVector_RemoveRange(swigCPtr, index, count); if (AssimpPINVOKE.SWIGPendingException.Pending) throw AssimpPINVOKE.SWIGPendingException.Retrieve(); } public static aiBoneVector Repeat(aiBone value, int count) { IntPtr cPtr = AssimpPINVOKE.aiBoneVector_Repeat(aiBone.getCPtr(value), count); aiBoneVector ret = (cPtr == IntPtr.Zero) ? null : new aiBoneVector(cPtr, true); if (AssimpPINVOKE.SWIGPendingException.Pending) throw AssimpPINVOKE.SWIGPendingException.Retrieve(); return ret; } public void Reverse() { AssimpPINVOKE.aiBoneVector_Reverse__SWIG_0(swigCPtr); } public void Reverse(int index, int count) { AssimpPINVOKE.aiBoneVector_Reverse__SWIG_1(swigCPtr, index, count); if (AssimpPINVOKE.SWIGPendingException.Pending) throw AssimpPINVOKE.SWIGPendingException.Retrieve(); } public void SetRange(int index, aiBoneVector values) { AssimpPINVOKE.aiBoneVector_SetRange(swigCPtr, index, aiBoneVector.getCPtr(values)); if (AssimpPINVOKE.SWIGPendingException.Pending) throw AssimpPINVOKE.SWIGPendingException.Retrieve(); } public bool Contains(aiBone value) { bool ret = AssimpPINVOKE.aiBoneVector_Contains(swigCPtr, aiBone.getCPtr(value)); return ret; } public int IndexOf(aiBone value) { int ret = AssimpPINVOKE.aiBoneVector_IndexOf(swigCPtr, aiBone.getCPtr(value)); return ret; } public int LastIndexOf(aiBone value) { int ret = AssimpPINVOKE.aiBoneVector_LastIndexOf(swigCPtr, aiBone.getCPtr(value)); return ret; } public bool Remove(aiBone value) { bool ret = AssimpPINVOKE.aiBoneVector_Remove(swigCPtr, aiBone.getCPtr(value)); return ret; } }
gripus-team/gripus
dependencies/assimp/port/Assimp.NET/Assimp.NET_CS/aiBoneVector.cs
C#
gpl-2.0
11,132
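The SWIG-generated aiBoneVector enumerator snapshots the collection size when iteration starts and refuses to advance once that size changes, which is how it approximates the IEnumerator requirement to fail on modification. A loose sketch of the same idea in C, with invented names and a simple int vector standing in for the wrapped collection:

#include <stdbool.h>
#include <stddef.h>

struct int_vec { int *data; size_t count; };

struct vec_iter {
    const struct int_vec *vec;
    size_t snapshot;   /* size when iteration started */
    size_t index;      /* current position once started */
    bool started;
};

static struct vec_iter iter_begin(const struct int_vec *v)
{
    struct vec_iter it = { v, v->count, 0, false };
    return it;
}

static bool iter_next(struct vec_iter *it)
{
    /* Refuse to advance if the collection grew or shrank underneath us,
     * mirroring aiBoneVectorEnumerator.MoveNext(). */
    if (it->vec->count != it->snapshot)
        return false;
    size_t next = it->started ? it->index + 1 : 0;
    if (next >= it->vec->count)
        return false;
    it->index = next;
    it->started = true;
    return true;
}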
<!-- * FCKeditor - The text editor for internet * Copyright (C) 2003-2004 Frederico Caldeira Knabben * * Licensed under the terms of the GNU Lesser General Public License: * http://www.opensource.org/licenses/lgpl-license.php * * For further information visit: * http://www.fckeditor.net/ * * File Name: fckeditor.html * Main page that holds the editor. * * Version: 2.0 RC3 * Modified: 2005-03-02 10:54:21 * * File Authors: * Frederico Caldeira Knabben (fredck@fckeditor.net) --> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"> <html> <head> <title>FCKeditor</title> <meta name="robots" content="noindex, nofollow" /> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <script type="text/javascript" src="lang/fcklanguagemanager.js"></script> <meta http-equiv="Cache-Control" content="public"> <script type="text/javascript" src="js/fck_startup.js"></script> </head> <body> <table height="100%" width="100%" cellpadding="0" cellspacing="0" border="0" style="TABLE-LAYOUT: fixed"> <tr> <td unselectable="on" style="OVERFLOW: hidden"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr id="Collapsed" style="DISPLAY: none"> <td id="ExpandHandle" class="TB_Expand" unselectable="on" colspan="3" onclick="FCKToolbarSet.Expand();return false;"><img class="TB_ExpandImg" src="images/spacer.gif" width="8" height="4" unselectable="on"></td> </tr> <tr id="Expanded" style="DISPLAY: none"> <td id="CollapseHandle" style="DISPLAY: none" class="TB_Collapse" unselectable="on" valign="bottom" onclick="FCKToolbarSet.Collapse();return false;"><img class="TB_CollapseImg" src="images/spacer.gif" width="8" height="4" unselectable="on"></td> <td id="eToolbar" class="TB_ToolbarSet" unselectable="on"></td> <td width="1" style="BACKGROUND-COLOR: #696969"></td> </tr> </table> </td> </tr> <tr id="eWysiwyg"> <td id="eWysiwygCell" height="100%" valign="top"> <iframe id="eEditorArea" name="eEditorArea" height="100%" width="100%" frameborder="no" src="fckeditorarea.html"></iframe> </td> </tr> <tr id="eSource" style="DISPLAY: none"> <td class="Source" height="100%" valign="top"> <textarea id="eSourceField" dir="ltr" style="WIDTH: 100%; HEIGHT: 100%"></textarea> </td> </tr> </table> </body> </html>
jonathonfigueroa/calendarinfusion_3.0b1
public/scripts/fckeditor/editor/fckeditor.html
HTML
gpl-2.0
2,465
<?php /** * Module Name: Gravatar Hovercards * Module Description: Show a pop-up business card of your users' gravatar profiles in comments. * Sort Order: 8 * First Introduced: 1.1 * Requires Connection: No */ define( 'GROFILES__CACHE_BUSTER', gmdate( 'YM' ) . 'aa' ); // Break CDN cache, increment when gravatar.com/js/gprofiles.js changes function grofiles_hovercards_init() { add_filter( 'get_avatar', 'grofiles_get_avatar', 10, 2 ); add_action( 'wp_enqueue_scripts', 'grofiles_attach_cards' ); add_action( 'wp_footer', 'grofiles_extra_data' ); add_action( 'admin_init', 'grofiles_add_settings' ); add_action( 'load-index.php', 'grofiles_admin_cards' ); add_action( 'load-users.php', 'grofiles_admin_cards' ); add_action( 'load-edit-comments.php', 'grofiles_admin_cards' ); add_action( 'load-options-discussion.php', 'grofiles_admin_cards_forced' ); Jetpack::enable_module_configurable( __FILE__ ); Jetpack::module_configuration_load( __FILE__, 'gravatar_hovercards_configuration_load' ); } function gravatar_hovercards_configuration_load() { wp_safe_redirect( admin_url( 'options-discussion.php#gravatar-hovercard-options' ) ); exit; } add_action( 'jetpack_modules_loaded', 'grofiles_hovercards_init' ); /* Hovercard Settings */ /** * Adds Gravatar Hovercard setting * * @todo - always print HTML, hide via CSS/JS if !show_avatars */ function grofiles_add_settings() { if ( !get_option( 'show_avatars' ) ) return; add_settings_field( 'gravatar_disable_hovercards', __( 'Gravatar Hovercards', 'jetpack' ), 'grofiles_setting_callback', 'discussion', 'avatars' ); register_setting( 'discussion', 'gravatar_disable_hovercards', 'grofiles_hovercard_option_sanitize' ); } /** * HTML for Gravatar Hovercard setting */ function grofiles_setting_callback() { global $current_user; $checked = 'disabled' == get_option( 'gravatar_disable_hovercards' ) ? '' : 'checked="checked" '; echo "<label id='gravatar-hovercard-options'><input {$checked}name='gravatar_disable_hovercards' id='gravatar_disable_hovercards' type='checkbox' value='enabled' class='code' /> " . __( "View people's profiles when you mouse over their Gravatars", 'jetpack' ) . "</label>"; ?> <style type="text/css"> #grav-profile-example img { float: left; } #grav-profile-example span { padding: 0 1em; } </style> <script type="text/javascript"> // <![CDATA[ jQuery( function($) { var tr = $( '#gravatar_disable_hovercards' ).change( function() { if ( $( this ).is( ':checked' ) ) { $( '#grav-profile-example' ).slideDown( 'fast' ); } else { $( '#grav-profile-example' ).slideUp( 'fast' ); } } ).parents( 'tr' ); var ftr = tr.parents( 'table' ).find( 'tr:first' ); if ( ftr.size() && !ftr.find( '#gravatar_disable_hovercards' ).size() ) { ftr.after( tr ); } } ); // ]]> </script> <p id="grav-profile-example" class="hide-if-no-js"<?php if ( !$checked ) echo ' style="display:none"'; ?>><?php echo get_avatar( $current_user->ID, 64 ); ?> <span><?php _e( 'Put your mouse over your Gravatar to check out your profile.', 'jetpack' ); ?> <br class="clear" /></span></p> <?php } /** * Sanitation filter for Gravatar Hovercard setting */ function grofiles_hovercard_option_sanitize( $val ) { if ( 'disabled' == $val ) { return $val; } return $val ? 'enabled' : 'disabled'; } /* Hovercard Display */ /** * Stores the gravatars' users that need extra profile data attached. * * Getter/Setter * * @param int|string|null $author Setter: User ID or email address. Getter: null. * * @return mixed Setter: void. Getter: array of user IDs and email addresses. 
*/ function grofiles_gravatars_to_append( $author = null ) { static $authors = array(); // Get if ( is_null( $author ) ) { return array_keys( $authors ); } // Set if ( is_numeric( $author ) ) { $author = (int) $author; } $authors[$author] = true; } /** * Stores the user ID or email address for each gravatar generated. * * Attached to the 'get_avatar' filter. * * @param string $avatar The <img/> element of the avatar. * @param mixed $author User ID, email address, user login, comment object, user object, post object * * @return The <img/> element of the avatar. */ function grofiles_get_avatar( $avatar, $author ) { if ( is_numeric( $author ) ) { grofiles_gravatars_to_append( $author ); } else if ( is_string( $author ) ) { if ( false !== strpos( $author, '@' ) ) { grofiles_gravatars_to_append( $author ); } else { if ( $user = get_user_by( 'slug', $author ) ) grofiles_gravatars_to_append( $user->ID ); } } else if ( isset( $author->comment_type ) ) { if ( '' != $author->comment_type && 'comment' != $author->comment_type ) return $avatar; if ( $author->user_id ) grofiles_gravatars_to_append( $author->user_id ); else grofiles_gravatars_to_append( $author->comment_author_email ); } else if ( isset( $author->user_login ) ) { grofiles_gravatars_to_append( $author->ID ); } else if ( isset( $author->post_author ) ) { grofiles_gravatars_to_append( $author->post_author ); } return $avatar; } /** * Loads Gravatar Hovercard script. * * @todo is_singular() only? */ function grofiles_attach_cards() { global $blog_id; if ( 'disabled' == get_option( 'gravatar_disable_hovercards' ) ) return; wp_enqueue_script( 'grofiles-cards', ( is_ssl() ? 'https://secure' : 'http://s' ) . '.gravatar.com/js/gprofiles.js', array( 'jquery' ), GROFILES__CACHE_BUSTER, true ); wp_enqueue_script( 'wpgroho', plugins_url( 'wpgroho.js', __FILE__ ), array( 'grofiles-cards' ), false, true ); if ( is_user_logged_in() ) { $cu = wp_get_current_user(); $my_hash = md5( $cu->user_email ); } else if ( !empty( $_COOKIE['comment_author_email_' . COOKIEHASH] ) ) { $my_hash = md5( $_COOKIE['comment_author_email_' . COOKIEHASH] ); } else { $my_hash = ''; } wp_localize_script( 'wpgroho', 'WPGroHo', compact( 'my_hash' ) ); } function grofiles_attach_cards_forced() { add_filter( 'pre_option_gravatar_disable_hovercards', 'grofiles_force_gravatar_enable_hovercards' ); grofiles_attach_cards(); } function grofiles_force_gravatar_enable_hovercards() { return 'enabled'; } function grofiles_admin_cards_forced() { add_action( 'admin_footer', 'grofiles_attach_cards_forced' ); } function grofiles_admin_cards() { add_action( 'admin_footer', 'grofiles_attach_cards' ); } function grofiles_extra_data() { ?> <div style="display:none"> <?php foreach ( grofiles_gravatars_to_append() as $author ) grofiles_hovercards_data_html( $author ); ?> </div> <?php } /** * Echoes the data from grofiles_hovercards_data() as HTML elements. * * @param int|string $author User ID or email address */ function grofiles_hovercards_data_html( $author ) { $data = grofiles_hovercards_data( $author ); if ( is_numeric( $author ) ) { $user = get_userdata( $author ); $hash = md5( $user->user_email ); } else { $hash = md5( $author ); } ?> <div class="grofile-hash-map-<?php echo $hash; ?>"> <?php foreach ( $data as $key => $value ) : ?> <span class="<?php echo esc_attr( $key ); ?>"><?php echo esc_html( $value ); ?></span> <?php endforeach; ?> </div> <?php } /* API */ /** * Returns the PHP callbacks for data sources. 
* * 'grofiles_hovercards_data_callbacks' filter * * @return array( data_key => data_callback, ... ) */ function grofiles_hovercards_data_callbacks() { return apply_filters( 'grofiles_hovercards_data_callbacks', array() ); } /** * Keyed JSON object containing all profile data provided by registered callbacks * * @param int|strung $author User ID or email address * * @return array( data_key => data, ... ) */ function grofiles_hovercards_data( $author ) { $r = array(); foreach ( grofiles_hovercards_data_callbacks() as $key => $callback ) { if ( !is_callable( $callback ) ) continue; $data = call_user_func( $callback, $author, $key ); if ( !is_null( $data ) ) $r[$key] = $data; } return $r; }
laurenpenn/Website
content/plugins/jetpack/modules/gravatar-hovercards.php
PHP
gpl-2.0
7,994
<html> <head><title> Avnet Design Services -- Motorola ColdFire 5282 uCLinux Web Server </title> </head> <!-- If you are seeing this file in romfs/home/httpd, do not edit it. It is copied (!) at build time from vendors/{?}/httpd, e.g. vendors/Avnet/http/index.html. Please edit *that* or your changes will be clobbered and you will be sad. --> <body bgcolor="#ffffff" margin-top="0" margin-left="0" leftmargin="0" topmargin="0" marginwidth="0" marginheight="0"> <table width=100% border=0> <tr> <td> <img src="Avnet_logo.gif"> </td> <td> <img src="Motorola_logo.gif"> </td> </tr> </table> <center><H1><font color="#bc0505"> Motorola ColdFire 5282 uCLinux Web Server <H2> </H2> </font></H1></center> <br> <!-- <p> <center> <table width=100% border=0> <tr> <th width=50%> <center> Links </center> </th> <th width=100%> <center> Products </center> </th> </tr> <tr> <td> <center> <a href="cgi-bin/ps.sh"> <font color="#bc0505"> Process listing </font> </a> </center> </td> <td> <center> <a href="mcf5282-brief-061703.pdf"> Motorola ColdFire 5282 Evaluation Kit </a> </center> </td> </tr> <tr> <td> <center> <a href="cgi-bin/uptime.sh"> <font color="#bc0505"> Uptime and load </font> </a> </center> </td> <td> <center> <a href="multi.pdf"> Multi-product Brief </a> </center> </td> </tr> <tr> <td> <center> <a href="cgi-bin/ifconfig.sh"> <font color="#bc0505"> Network statistics </font> </a> </center> </td> <td> <center> &nbsp; </center> </td> </tr> <tr> <td> <center> <a href="cgi-bin/df.sh"> <font color="#bc0505"> Disk space </font> </a> </center> </td> <td> <center> &nbsp; </center> </td> </tr> </table> </center> --> <br> <center> <img src="mcf5282.jpg"> </center> </body> </html>
kidmaple/CoolWall
vendors/Avnet/httpd/index.html
HTML
gpl-2.0
1,784
<?php /** * @copyright Copyright (c) 2009-2017 Ryan Demmer. All rights reserved * @license GNU/GPL 2 or later - http://www.gnu.org/licenses/old-licenses/gpl-2.0.html * JCE is free software. This version may have been modified pursuant * to the GNU General Public License, and as distributed it includes or * is derivative of works licensed under the GNU General Public License or * other free or open source software licenses */ class WFFontselectPluginConfig { protected static $fonts = array('Andale Mono=andale mono,times', 'Arial=arial,helvetica,sans-serif', 'Arial Black=arial black,avant garde', 'Book Antiqua=book antiqua,palatino', 'Comic Sans MS=comic sans ms,sans-serif', 'Courier New=courier new,courier', 'Georgia=georgia,palatino', 'Helvetica=helvetica', 'Impact=impact,chicago', 'Symbol=symbol', 'Tahoma=tahoma,arial,helvetica,sans-serif', 'Terminal=terminal,monaco', 'Times New Roman=times new roman,times', 'Trebuchet MS=trebuchet ms,geneva', 'Verdana=verdana,geneva', 'Webdings=webdings', 'Wingdings=wingdings,zapf dingbats'); public static function getConfig(&$settings) { $wf = WFEditor::getInstance(); $settings['fontselect_fonts'] = self::getFonts(); } /** * Get a list of editor font families. * * @return string font family list * * @param string $add Font family to add * @param string $remove Font family to remove */ protected static function getFonts() { $wf = WFEditor::getInstance(); $fonts = $wf->getParam('fontselect.fonts'); // get fonts using legacy parameters if (empty($fonts)) { $fonts = self::$fonts; $add = $wf->getParam('editor.theme_advanced_fonts_add'); $remove = $wf->getParam('editor.theme_advanced_fonts_remove'); if (empty($remove) && empty($add)) { return ''; } $remove = preg_split('/[;,]+/', $remove); if (count($remove)) { foreach ($fonts as $key => $value) { foreach ($remove as $gone) { if ($gone && preg_match('/^'.$gone.'=/i', $value)) { // Remove family unset($fonts[$key]); } } } } foreach (explode(';', $add) as $new) { // Add new font family if (preg_match('/([^\=]+)(\=)([^\=]+)/', trim($new)) && !in_array($new, $fonts)) { $fonts[] = $new; } } natcasesort($fonts); $fonts = implode(';', $fonts); } return $fonts; } }
luffy22/aisha
components/com_jce/editor/tiny_mce/plugins/fontselect/classes/config.php
PHP
gpl-2.0
2,737
/*
 * Copyright (C) 2012 Max Kellermann <max@duempel.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the
 *   distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * FOUNDATION OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "EventPipe.hpp"

#include <stdint.h>

bool
EventPipe::Create()
{
  assert(!IsDefined());

#ifdef HAVE_EVENTFD
  return r.CreateEventFD();
#else
  if (!FileDescriptor::CreatePipe(r, w))
    return false;

  r.SetNonBlocking();
  w.SetNonBlocking();
  return true;
#endif
}

void
EventPipe::Signal()
{
#ifdef HAVE_EVENTFD
  static constexpr uint64_t value = 1;
  r.Write(&value, sizeof(value));
#else
  static constexpr char dummy = 0;
  w.Write(&dummy, 1);
#endif
}

bool
EventPipe::Read()
{
#ifdef HAVE_EVENTFD
  uint64_t value;
  return r.Read(&value, sizeof(value)) > 0;
#else
  char buffer[256];
  ssize_t nbytes = r.Read(buffer, sizeof(buffer));
  return nbytes > 0;
#endif
}
DRNadler/DRN_TopHat
src/OS/EventPipe.cpp
C++
gpl-2.0
2,056
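The EventPipe class above wraps the classic self-pipe wake-up trick behind Create()/Signal()/Read(). Below is a minimal, self-contained sketch of the same pattern using raw POSIX calls instead of the project's FileDescriptor wrapper and HAVE_EVENTFD path; the pipe()/poll() usage shown is an assumption about how such a class is typically driven, not code from this repository.

// Sketch of the wake-up pattern implemented by EventPipe.cpp, using raw
// POSIX calls.  Build with -pthread.
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

#include <cstdio>
#include <thread>

int main() {
  int fds[2];
  if (pipe(fds) != 0)                        // fds[0] = read end, fds[1] = write end
    return 1;
  fcntl(fds[0], F_SETFL, O_NONBLOCK);        // non-blocking, like SetNonBlocking()
  fcntl(fds[1], F_SETFL, O_NONBLOCK);

  std::thread signaller([&] {
    const char dummy = 0;
    (void)write(fds[1], &dummy, 1);          // Signal(): one byte wakes the loop
  });

  struct pollfd pfd = {fds[0], POLLIN, 0};
  if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
    char buffer[256];
    (void)read(fds[0], buffer, sizeof(buffer));  // Read(): drain all pending signals
    std::puts("woken up");
  }

  signaller.join();
  close(fds[0]);
  close(fds[1]);
  return 0;
}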
/* * Copyright (C) 2011 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /////////////////////////////////////////////////// // ImageUtils.cpp // $Id: ImageUtils.cpp,v 1.12 2011/06/17 13:35:48 mbansal Exp $ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include "ImageUtils.h" void ImageUtils::rgba2yvu(ImageType out, ImageType in, int width, int height) { int r,g,b, a; ImageType yimg = out; ImageType vimg = yimg + width*height; ImageType uimg = vimg + width*height; ImageType image = in; for (int ii = 0; ii < height; ii++) { for (int ij = 0; ij < width; ij++) { r = (*image++); g = (*image++); b = (*image++); a = (*image++); if (r < 0) r = 0; if (r > 255) r = 255; if (g < 0) g = 0; if (g > 255) g = 255; if (b < 0) b = 0; if (b > 255) b = 255; int val = (int) (REDY * r + GREENY * g + BLUEY * b) / 1000 + 16; if (val < 0) val = 0; if (val > 255) val = 255; *(yimg) = val; val = (int) (REDV * r - GREENV * g - BLUEV * b) / 1000 + 128; if (val < 0) val = 0; if (val > 255) val = 255; *(vimg) = val; val = (int) (-REDU * r - GREENU * g + BLUEU * b) / 1000 + 128; if (val < 0) val = 0; if (val > 255) val = 255; *(uimg) = val; yimg++; uimg++; vimg++; } } } void ImageUtils::rgb2yvu(ImageType out, ImageType in, int width, int height) { int r,g,b; ImageType yimg = out; ImageType vimg = yimg + width*height; ImageType uimg = vimg + width*height; ImageType image = in; for (int ii = 0; ii < height; ii++) { for (int ij = 0; ij < width; ij++) { r = (*image++); g = (*image++); b = (*image++); if (r < 0) r = 0; if (r > 255) r = 255; if (g < 0) g = 0; if (g > 255) g = 255; if (b < 0) b = 0; if (b > 255) b = 255; int val = (int) (REDY * r + GREENY * g + BLUEY * b) / 1000 + 16; if (val < 0) val = 0; if (val > 255) val = 255; *(yimg) = val; val = (int) (REDV * r - GREENV * g - BLUEV * b) / 1000 + 128; if (val < 0) val = 0; if (val > 255) val = 255; *(vimg) = val; val = (int) (-REDU * r - GREENU * g + BLUEU * b) / 1000 + 128; if (val < 0) val = 0; if (val > 255) val = 255; *(uimg) = val; yimg++; uimg++; vimg++; } } } ImageType ImageUtils::rgb2gray(ImageType in, int width, int height) { int r,g,b, nr, ng, nb, val; ImageType gray = NULL; ImageType image = in; ImageType out = ImageUtils::allocateImage(width, height, 1); ImageType outCopy = out; for (int ii = 0; ii < height; ii++) { for (int ij = 0; ij < width; ij++) { r = (*image++); g = (*image++); b = (*image++); if (r < 0) r = 0; if (r > 255) r = 255; if (g < 0) g = 0; if (g > 255) g = 255; if (b < 0) b = 0; if (b > 255) b = 255; (*outCopy) = ( 0.3*r + 0.59*g + 0.11*b); outCopy++; } } return out; } ImageType ImageUtils::rgb2gray(ImageType out, ImageType in, int width, int height) { int r,g,b, nr, ng, nb, val; ImageType gray = out; ImageType image = in; ImageType outCopy = out; for (int ii = 0; ii < height; ii++) { for (int ij = 0; ij < width; ij++) { r = (*image++); g = (*image++); b = (*image++); if (r < 0) r = 0; if (r > 255) r = 255; if (g < 0) g = 0; if (g > 255) g = 255; if (b < 0) b = 0; if (b > 255) b = 255; (*outCopy) = ( 0.3*r + 
0.59*g + 0.11*b); outCopy++; } } return out; } ImageType *ImageUtils::imageTypeToRowPointers(ImageType in, int width, int height) { int i; int m_h = height; int m_w = width; ImageType *m_rows = new ImageType[m_h]; for (i=0;i<m_h;i++) { m_rows[i] = &in[(m_w)*i]; } return m_rows; } void ImageUtils::yvu2rgb(ImageType out, ImageType in, int width, int height) { int y,v,u, r, g, b; unsigned char *yimg = in; unsigned char *vimg = yimg + width*height; unsigned char *uimg = vimg + width*height; unsigned char *image = out; for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { y = (*yimg); v = (*vimg); u = (*uimg); if (y < 0) y = 0; if (y > 255) y = 255; if (u < 0) u = 0; if (u > 255) u = 255; if (v < 0) v = 0; if (v > 255) v = 255; b = (int) ( 1.164*(y - 16) + 2.018*(u-128)); g = (int) ( 1.164*(y - 16) - 0.813*(v-128) - 0.391*(u-128)); r = (int) ( 1.164*(y - 16) + 1.596*(v-128)); if (r < 0) r = 0; if (r > 255) r = 255; if (g < 0) g = 0; if (g > 255) g = 255; if (b < 0) b = 0; if (b > 255) b = 255; *(image++) = r; *(image++) = g; *(image++) = b; yimg++; uimg++; vimg++; } } } void ImageUtils::yvu2bgr(ImageType out, ImageType in, int width, int height) { int y,v,u, r, g, b; unsigned char *yimg = in; unsigned char *vimg = yimg + width*height; unsigned char *uimg = vimg + width*height; unsigned char *image = out; for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { y = (*yimg); v = (*vimg); u = (*uimg); if (y < 0) y = 0; if (y > 255) y = 255; if (u < 0) u = 0; if (u > 255) u = 255; if (v < 0) v = 0; if (v > 255) v = 255; b = (int) ( 1.164*(y - 16) + 2.018*(u-128)); g = (int) ( 1.164*(y - 16) - 0.813*(v-128) - 0.391*(u-128)); r = (int) ( 1.164*(y - 16) + 1.596*(v-128)); if (r < 0) r = 0; if (r > 255) r = 255; if (g < 0) g = 0; if (g > 255) g = 255; if (b < 0) b = 0; if (b > 255) b = 255; *(image++) = b; *(image++) = g; *(image++) = r; yimg++; uimg++; vimg++; } } } ImageType ImageUtils::readBinaryPPM(const char *filename, int &width, int &height) { FILE *imgin = NULL; int mval=0, format=0, eret; ImageType ret = IMAGE_TYPE_NOIMAGE; imgin = fopen(filename, "r"); if (imgin == NULL) { fprintf(stderr, "Error: Filename %s not found\n", filename); return ret; } eret = fscanf(imgin, "P%d\n", &format); if (format != 6) { fprintf(stderr, "Error: readBinaryPPM only supports PPM format (P6)\n"); return ret; } eret = fscanf(imgin, "%d %d\n", &width, &height); eret = fscanf(imgin, "%d\n", &mval); ret = allocateImage(width, height, IMAGE_TYPE_NUM_CHANNELS); eret = fread(ret, sizeof(ImageTypeBase), IMAGE_TYPE_NUM_CHANNELS*width*height, imgin); fclose(imgin); return ret; } void ImageUtils::writeBinaryPPM(ImageType image, const char *filename, int width, int height, int numChannels) { FILE *imgout = fopen(filename, "w"); if (imgout == NULL) { fprintf(stderr, "Error: Filename %s could not be opened for writing\n", filename); return; } if (numChannels == 3) { fprintf(imgout, "P6\n%d %d\n255\n", width, height); } else if (numChannels == 1) { fprintf(imgout, "P5\n%d %d\n255\n", width, height); } else { fprintf(stderr, "Error: writeBinaryPPM: Unsupported number of channels\n"); } fwrite(image, sizeof(ImageTypeBase), numChannels*width*height, imgout); fclose(imgout); } ImageType ImageUtils::allocateImage(int width, int height, int numChannels, short int border) { int overallocation = 256; return (ImageType) calloc(width*height*numChannels+overallocation, sizeof(ImageTypeBase)); } void ImageUtils::freeImage(ImageType image) { free(image); } // allocation of one color image used for tmp buffers, etc. 
// format of contiguous memory block: // YUVInfo struct (type + BimageInfo for Y,U, and V), // Y row pointers // U row pointers // V row pointers // Y image pixels // U image pixels // V image pixels YUVinfo *YUVinfo::allocateImage(unsigned short width, unsigned short height) { unsigned short heightUV, widthUV; widthUV = width; heightUV = height; // figure out how much space to hold all pixels... int size = ((width * height * 3) + 8); unsigned char *position = 0; // VC 8 does not like calling free on yuv->Y.ptr since it is in // the middle of a block. So rearrange the memory layout so after // calling mapYUVInforToImage yuv->Y.ptr points to the begginning // of the calloc'ed block. YUVinfo *yuv = (YUVinfo *) calloc(sizeof(YUVinfo), 1); if (yuv) { yuv->Y.width = yuv->Y.pitch = width; yuv->Y.height = height; yuv->Y.border = yuv->U.border = yuv->V.border = (unsigned short) 0; yuv->U.width = yuv->U.pitch = yuv->V.width = yuv->V.pitch = widthUV; yuv->U.height = yuv->V.height = heightUV; unsigned char* block = (unsigned char*) calloc( sizeof(unsigned char *) * (height + heightUV + heightUV) + sizeof(unsigned char) * size, 1); position = block; unsigned char **y = (unsigned char **) (block + size); /* Initialize and assign row pointers */ yuv->Y.ptr = y; yuv->V.ptr = &y[height]; yuv->U.ptr = &y[height + heightUV]; } if (size) mapYUVInfoToImage(yuv, position); return yuv; } // wrap YUVInfo row pointers around 3 contiguous image (color component) planes. // position = starting pixel in image. void YUVinfo::mapYUVInfoToImage(YUVinfo *img, unsigned char *position) { int i; for (i = 0; i < img->Y.height; i++, position += img->Y.width) img->Y.ptr[i] = position; for (i = 0; i < img->V.height; i++, position += img->V.width) img->V.ptr[i] = position; for (i = 0; i < img->U.height; i++, position += img->U.width) img->U.ptr[i] = position; }
rex-xxx/mt6572_x201
packages/apps/LegacyCamera/jni/feature_mos/src/mosaic/ImageUtils.cpp
C++
gpl-2.0
10,280
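The per-mille integer coefficients used by rgb2yvu()/rgba2yvu() above (REDY, GREENY, BLUEY, ...) are defined in ImageUtils.h, which is not shown here. The single-pixel sketch below reproduces the same clamped, fixed-point conversion and the exact inverse constants from yvu2rgb(); the forward coefficient values are illustrative BT.601 studio-swing numbers, not values copied from the header.

// Single-pixel sketch of the fixed-point RGB -> Y'VU conversion in
// ImageUtils::rgb2yvu().  Forward coefficients below are assumed, per-mille
// style; the inverse uses the same constants as yvu2rgb().
#include <algorithm>
#include <cstdio>

static int clamp255(int v) { return std::min(255, std::max(0, v)); }

int main() {
  const int r = 200, g = 120, b = 40;

  // Forward transform, per-mille fixed point as in rgb2yvu().
  const int y = clamp255((257 * r + 504 * g + 98 * b) / 1000 + 16);
  const int v = clamp255((439 * r - 368 * g - 71 * b) / 1000 + 128);
  const int u = clamp255((-148 * r - 291 * g + 439 * b) / 1000 + 128);

  // Inverse transform with the constants yvu2rgb() uses.
  const int r2 = clamp255(static_cast<int>(1.164 * (y - 16) + 1.596 * (v - 128)));
  const int g2 = clamp255(static_cast<int>(1.164 * (y - 16) - 0.813 * (v - 128) - 0.391 * (u - 128)));
  const int b2 = clamp255(static_cast<int>(1.164 * (y - 16) + 2.018 * (u - 128)));

  std::printf("RGB(%d,%d,%d) -> YVU(%d,%d,%d) -> RGB(%d,%d,%d)\n",
              r, g, b, y, v, u, r2, g2, b2);
  return 0;
}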
<?php /** * Joomla! Content Management System * * @copyright Copyright (C) 2005 - 2017 Open Source Matters, Inc. All rights reserved. * @license GNU General Public License version 2 or later; see LICENSE.txt */ namespace Joomla\CMS\User; defined('JPATH_PLATFORM') or die; use Joomla\CMS\Access\Access; use Joomla\CMS\Plugin\PluginHelper; use Joomla\CMS\Table\Table; use Joomla\Registry\Registry; use Joomla\Utilities\ArrayHelper; /** * User class. Handles all application interaction with a user * * @since 11.1 */ class User extends \JObject { /** * A cached switch for if this user has root access rights. * * @var boolean * @since 11.1 */ protected $isRoot = null; /** * Unique id * * @var integer * @since 11.1 */ public $id = null; /** * The user's real name (or nickname) * * @var string * @since 11.1 */ public $name = null; /** * The login name * * @var string * @since 11.1 */ public $username = null; /** * The email * * @var string * @since 11.1 */ public $email = null; /** * MD5 encrypted password * * @var string * @since 11.1 */ public $password = null; /** * Clear password, only available when a new password is set for a user * * @var string * @since 11.1 */ public $password_clear = ''; /** * Block status * * @var integer * @since 11.1 */ public $block = null; /** * Should this user receive system email * * @var integer * @since 11.1 */ public $sendEmail = null; /** * Date the user was registered * * @var \DateTime * @since 11.1 */ public $registerDate = null; /** * Date of last visit * * @var \DateTime * @since 11.1 */ public $lastvisitDate = null; /** * Activation hash * * @var string * @since 11.1 */ public $activation = null; /** * User parameters * * @var Registry * @since 11.1 */ public $params = null; /** * Associative array of user names => group ids * * @var array * @since 11.1 */ public $groups = array(); /** * Guest status * * @var boolean * @since 11.1 */ public $guest = null; /** * Last Reset Time * * @var string * @since 12.2 */ public $lastResetTime = null; /** * Count since last Reset Time * * @var int * @since 12.2 */ public $resetCount = null; /** * Flag to require the user's password be reset * * @var int * @since 3.2 */ public $requireReset = null; /** * User parameters * * @var Registry * @since 11.1 */ protected $_params = null; /** * Authorised access groups * * @var array * @since 11.1 */ protected $_authGroups = null; /** * Authorised access levels * * @var array * @since 11.1 */ protected $_authLevels = null; /** * Authorised access actions * * @var array * @since 11.1 */ protected $_authActions = null; /** * Error message * * @var string * @since 11.1 */ protected $_errorMsg = null; /** * UserWrapper object * * @var UserWrapper * @since 3.4 * @deprecated 4.0 Use `Joomla\CMS\User\UserHelper` directly */ protected $userHelper = null; /** * @var array User instances container. * @since 11.3 */ protected static $instances = array(); /** * Constructor activating the default information of the language * * @param integer $identifier The primary key of the user to load (optional). * @param UserWrapper $userHelper The UserWrapper for the static methods. 
[@deprecated 4.0] * * @since 11.1 */ public function __construct($identifier = 0, UserWrapper $userHelper = null) { if (null === $userHelper) { $userHelper = new UserWrapper; } $this->userHelper = $userHelper; // Create the user parameters object $this->_params = new Registry; // Load the user if it exists if (!empty($identifier)) { $this->load($identifier); } else { // Initialise $this->id = 0; $this->sendEmail = 0; $this->aid = 0; $this->guest = 1; } } /** * Returns the global User object, only creating it if it doesn't already exist. * * @param integer $identifier The primary key of the user to load (optional). * @param UserWrapper $userHelper The UserWrapper for the static methods. [@deprecated 4.0] * * @return User The User object. * * @since 11.1 */ public static function getInstance($identifier = 0, UserWrapper $userHelper = null) { if (null === $userHelper) { $userHelper = new UserWrapper; } // Find the user id if (!is_numeric($identifier)) { if (!$id = $userHelper->getUserId($identifier)) { // If the $identifier doesn't match with any id, just return an empty User. return new User; } } else { $id = $identifier; } // If the $id is zero, just return an empty User. // Note: don't cache this user because it'll have a new ID on save! if ($id === 0) { return new User; } // Check if the user ID is already cached. if (empty(self::$instances[$id])) { $user = new User($id, $userHelper); self::$instances[$id] = $user; } return self::$instances[$id]; } /** * Method to get a parameter value * * @param string $key Parameter key * @param mixed $default Parameter default value * * @return mixed The value or the default if it did not exist * * @since 11.1 */ public function getParam($key, $default = null) { return $this->_params->get($key, $default); } /** * Method to set a parameter * * @param string $key Parameter key * @param mixed $value Parameter value * * @return mixed Set parameter value * * @since 11.1 */ public function setParam($key, $value) { return $this->_params->set($key, $value); } /** * Method to set a default parameter if it does not exist * * @param string $key Parameter key * @param mixed $value Parameter value * * @return mixed Set parameter value * * @since 11.1 */ public function defParam($key, $value) { return $this->_params->def($key, $value); } /** * Method to check User object authorisation against an access control * object and optionally an access extension object * * @param string $action The name of the action to check for permission. * @param string $assetname The name of the asset on which to perform the action. * * @return boolean True if authorised * * @since 11.1 */ public function authorise($action, $assetname = null) { // Make sure we only check for core.admin once during the run. if ($this->isRoot === null) { $this->isRoot = false; // Check for the configuration file failsafe. $rootUser = \JFactory::getConfig()->get('root_user'); // The root_user variable can be a numeric user ID or a username. if (is_numeric($rootUser) && $this->id > 0 && $this->id == $rootUser) { $this->isRoot = true; } elseif ($this->username && $this->username == $rootUser) { $this->isRoot = true; } elseif ($this->id > 0) { // Get all groups against which the user is mapped. $identities = $this->getAuthorisedGroups(); array_unshift($identities, $this->id * -1); if (Access::getAssetRules(1)->allow('core.admin', $identities)) { $this->isRoot = true; return true; } } } return $this->isRoot ? 
true : (bool) Access::check($this->id, $action, $assetname); } /** * Method to return a list of all categories that a user has permission for a given action * * @param string $component The component from which to retrieve the categories * @param string $action The name of the section within the component from which to retrieve the actions. * * @return array List of categories that this group can do this action to (empty array if none). Categories must be published. * * @since 11.1 */ public function getAuthorisedCategories($component, $action) { // Brute force method: get all published category rows for the component and check each one // TODO: Modify the way permissions are stored in the db to allow for faster implementation and better scaling $db = \JFactory::getDbo(); $subQuery = $db->getQuery(true) ->select('id,asset_id') ->from('#__categories') ->where('extension = ' . $db->quote($component)) ->where('published = 1'); $query = $db->getQuery(true) ->select('c.id AS id, a.name AS asset_name') ->from('(' . (string) $subQuery . ') AS c') ->join('INNER', '#__assets AS a ON c.asset_id = a.id'); $db->setQuery($query); $allCategories = $db->loadObjectList('id'); $allowedCategories = array(); foreach ($allCategories as $category) { if ($this->authorise($action, $category->asset_name)) { $allowedCategories[] = (int) $category->id; } } return $allowedCategories; } /** * Gets an array of the authorised access levels for the user * * @return array * * @since 11.1 */ public function getAuthorisedViewLevels() { if ($this->_authLevels === null) { $this->_authLevels = array(); } if (empty($this->_authLevels)) { $this->_authLevels = Access::getAuthorisedViewLevels($this->id); } return $this->_authLevels; } /** * Gets an array of the authorised user groups * * @return array * * @since 11.1 */ public function getAuthorisedGroups() { if ($this->_authGroups === null) { $this->_authGroups = array(); } if (empty($this->_authGroups)) { $this->_authGroups = Access::getGroupsByUser($this->id); } return $this->_authGroups; } /** * Clears the access rights cache of this user * * @return void * * @since 3.4.0 */ public function clearAccessRights() { $this->_authLevels = null; $this->_authGroups = null; $this->isRoot = null; Access::clearStatics(); } /** * Pass through method to the table for setting the last visit date * * @param integer $timestamp The timestamp, defaults to 'now'. * * @return boolean True on success. * * @since 11.1 */ public function setLastVisit($timestamp = null) { // Create the user table object $table = $this->getTable(); $table->load($this->id); return $table->setLastVisit($timestamp); } /** * Method to get the user parameters * * This method used to load the user parameters from a file. * * @return object The user parameters object. * * @since 11.1 * @deprecated 12.3 (Platform) & 4.0 (CMS) - Instead use User::getParam() */ public function getParameters() { // @codeCoverageIgnoreStart \JLog::add('User::getParameters() is deprecated. User::getParam().', \JLog::WARNING, 'deprecated'); return $this->_params; // @codeCoverageIgnoreEnd } /** * Method to get the user timezone. 
* * If the user didn't set a timezone, it will return the server timezone * * @return \DateTimeZone * * @since 3.7.0 */ public function getTimezone() { $timezone = $this->getParam('timezone', \JFactory::getApplication()->get('offset', 'GMT')); return new \DateTimeZone($timezone); } /** * Method to get the user parameters * * @param object $params The user parameters object * * @return void * * @since 11.1 */ public function setParameters($params) { $this->_params = $params; } /** * Method to get the user table object * * This function uses a static variable to store the table name of the user table to * instantiate. You can call this function statically to set the table name if * needed. * * @param string $type The user table name to be used * @param string $prefix The user table prefix to be used * * @return object The user table object * * @note At 4.0 this method will no longer be static * @since 11.1 */ public static function getTable($type = null, $prefix = 'JTable') { static $tabletype; // Set the default tabletype; if (!isset($tabletype)) { $tabletype['name'] = 'user'; $tabletype['prefix'] = 'JTable'; } // Set a custom table type is defined if (isset($type)) { $tabletype['name'] = $type; $tabletype['prefix'] = $prefix; } // Create the user table object return Table::getInstance($tabletype['name'], $tabletype['prefix']); } /** * Method to bind an associative array of data to a user object * * @param array &$array The associative array to bind to the object * * @return boolean True on success * * @since 11.1 */ public function bind(&$array) { // Let's check to see if the user is new or not if (empty($this->id)) { // Check the password and create the crypted password if (empty($array['password'])) { $array['password'] = $this->userHelper->genRandomPassword(); $array['password2'] = $array['password']; } // Not all controllers check the password, although they should. 
// Hence this code is required: if (isset($array['password2']) && $array['password'] != $array['password2']) { \JFactory::getApplication()->enqueueMessage(\JText::_('JLIB_USER_ERROR_PASSWORD_NOT_MATCH'), 'error'); return false; } $this->password_clear = ArrayHelper::getValue($array, 'password', '', 'string'); $array['password'] = $this->userHelper->hashPassword($array['password']); // Set the registration timestamp $this->set('registerDate', \JFactory::getDate()->toSql()); // Check that username is not greater than 150 characters $username = $this->get('username'); if (strlen($username) > 150) { $username = substr($username, 0, 150); $this->set('username', $username); } } else { // Updating an existing user if (!empty($array['password'])) { if ($array['password'] != $array['password2']) { $this->setError(\JText::_('JLIB_USER_ERROR_PASSWORD_NOT_MATCH')); return false; } $this->password_clear = ArrayHelper::getValue($array, 'password', '', 'string'); // Check if the user is reusing the current password if required to reset their password if ($this->requireReset == 1 && $this->userHelper->verifyPassword($this->password_clear, $this->password)) { $this->setError(\JText::_('JLIB_USER_ERROR_CANNOT_REUSE_PASSWORD')); return false; } $array['password'] = $this->userHelper->hashPassword($array['password']); // Reset the change password flag $array['requireReset'] = 0; } else { $array['password'] = $this->password; } } if (array_key_exists('params', $array)) { $this->_params->loadArray($array['params']); if (is_array($array['params'])) { $params = (string) $this->_params; } else { $params = $array['params']; } $this->params = $params; } // Bind the array if (!$this->setProperties($array)) { $this->setError(\JText::_('JLIB_USER_ERROR_BIND_ARRAY')); return false; } // Make sure its an integer $this->id = (int) $this->id; return true; } /** * Method to save the User object to the database * * @param boolean $updateOnly Save the object only if not a new user * Currently only used in the user reset password method. * * @return boolean True on success * * @since 11.1 * @throws \RuntimeException */ public function save($updateOnly = false) { // Create the user table object $table = $this->getTable(); $this->params = (string) $this->_params; $table->bind($this->getProperties()); // Allow an exception to be thrown. try { // Check and store the object. if (!$table->check()) { $this->setError($table->getError()); return false; } // If user is made a Super Admin group and user is NOT a Super Admin // @todo ACL - this needs to be acl checked $my = \JFactory::getUser(); // Are we creating a new user $isNew = empty($this->id); // If we aren't allowed to create new users return if ($isNew && $updateOnly) { return true; } // Get the old user $oldUser = new User($this->id); // Access Checks // The only mandatory check is that only Super Admins can operate on other Super Admin accounts. // To add additional business rules, use a user plugin and throw an Exception with onUserBeforeSave. // Check if I am a Super Admin $iAmSuperAdmin = $my->authorise('core.admin'); $iAmRehashingSuperadmin = false; if (($my->id == 0 && !$isNew) && $this->id == $oldUser->id && $oldUser->authorise('core.admin') && $oldUser->password != $this->password) { $iAmRehashingSuperadmin = true; } // We are only worried about edits to this account if I am not a Super Admin. if ($iAmSuperAdmin != true && $iAmRehashingSuperadmin != true) { // I am not a Super Admin, and this one is, so fail. 
if (!$isNew && Access::check($this->id, 'core.admin')) { throw new \RuntimeException('User not Super Administrator'); } if ($this->groups != null) { // I am not a Super Admin and I'm trying to make one. foreach ($this->groups as $groupId) { if (Access::checkGroup($groupId, 'core.admin')) { throw new \RuntimeException('User not Super Administrator'); } } } } // Fire the onUserBeforeSave event. PluginHelper::importPlugin('user'); $dispatcher = \JEventDispatcher::getInstance(); $result = $dispatcher->trigger('onUserBeforeSave', array($oldUser->getProperties(), $isNew, $this->getProperties())); if (in_array(false, $result, true)) { // Plugin will have to raise its own error or throw an exception. return false; } // Store the user data in the database $result = $table->store(); // Set the id for the User object in case we created a new user. if (empty($this->id)) { $this->id = $table->get('id'); } if ($my->id == $table->id) { $registry = new Registry($table->params); $my->setParameters($registry); } // Fire the onUserAfterSave event $dispatcher->trigger('onUserAfterSave', array($this->getProperties(), $isNew, $result, $this->getError())); } catch (\Exception $e) { $this->setError($e->getMessage()); return false; } return $result; } /** * Method to delete the User object from the database * * @return boolean True on success * * @since 11.1 */ public function delete() { PluginHelper::importPlugin('user'); // Trigger the onUserBeforeDelete event $dispatcher = \JEventDispatcher::getInstance(); $dispatcher->trigger('onUserBeforeDelete', array($this->getProperties())); // Create the user table object $table = $this->getTable(); if (!$result = $table->delete($this->id)) { $this->setError($table->getError()); } // Trigger the onUserAfterDelete event $dispatcher->trigger('onUserAfterDelete', array($this->getProperties(), $result, $this->getError())); return $result; } /** * Method to load a User object by user id number * * @param mixed $id The user id of the user to load * * @return boolean True on success * * @since 11.1 */ public function load($id) { // Create the user table object $table = $this->getTable(); // Load the UserModel object based on the user id or throw a warning. if (!$table->load($id)) { // Reset to guest user $this->guest = 1; \JLog::add(\JText::sprintf('JLIB_USER_ERROR_UNABLE_TO_LOAD_USER', $id), \JLog::WARNING, 'jerror'); return false; } /* * Set the user parameters using the default XML file. We might want to * extend this in the future to allow for the ability to have custom * user parameters, but for right now we'll leave it how it is. */ $this->_params->loadString($table->params); // Assuming all is well at this point let's bind the data $this->setProperties($table->getProperties()); // The user is no longer a guest if ($this->id != 0) { $this->guest = 0; } else { $this->guest = 1; } return true; } /** * Method to allow serialize the object with minimal properties. * * @return array The names of the properties to include in serialization. * * @since 3.6.0 */ public function __sleep() { return array('id'); } /** * Method to recover the full object on unserialize. * * @return void * * @since 3.6.0 */ public function __wakeup() { // Initialise some variables $this->userHelper = new UserWrapper; $this->_params = new Registry; // Load the user if it exists if (!empty($this->id) && $this->load($this->id)) { // Push user into cached instances. self::$instances[$this->id] = $this; } else { // Initialise $this->id = 0; $this->sendEmail = 0; $this->aid = 0; $this->guest = 1; } } }
thongredweb/joomla-cms
libraries/src/User/User.php
PHP
gpl-2.0
20,889
/*
 * Copyright (C) 2009 by Jonathan Naylor G4KLX
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "DummyRepeaterHeaderEvent.h"

CDummyRepeaterHeaderEvent::CDummyRepeaterHeaderEvent(CHeaderData* header, wxEventType type, int id) :
wxEvent(id, type),
m_header(header)
{
}

CDummyRepeaterHeaderEvent::CDummyRepeaterHeaderEvent(const CDummyRepeaterHeaderEvent& event) :
wxEvent(event),
m_header(event.m_header)
{
}

CDummyRepeaterHeaderEvent::~CDummyRepeaterHeaderEvent()
{
}

CHeaderData* CDummyRepeaterHeaderEvent::getHeaderData() const
{
	return m_header;
}

wxEvent* CDummyRepeaterHeaderEvent::Clone() const
{
	return new CDummyRepeaterHeaderEvent(*this);
}
n8ohu/DMRRepeater
DummyRepeater/DummyRepeater/DummyRepeaterHeaderEvent.cpp
C++
gpl-2.0
1,393
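Only the event implementation is shown above; the wxEventType constant and the code that posts the event live elsewhere in the application. The sketch below shows one conventional way such a custom wxEvent is delivered to a window from a worker thread; HEADER_EVENT_TYPE and postHeader() are illustrative names, not identifiers from this repository.

// Hedged sketch of posting the custom header event to a wxEvtHandler.
#include <wx/wx.h>
#include "DummyRepeaterHeaderEvent.h"

static const wxEventType HEADER_EVENT_TYPE = wxNewEventType();  // assumed; the app defines its own

void postHeader(wxEvtHandler* target, CHeaderData* header)
{
	// The event only stores the pointer, so the receiver owns the CHeaderData.
	CDummyRepeaterHeaderEvent event(header, HEADER_EVENT_TYPE, wxID_ANY);

	// AddPendingEvent() copies the event via Clone() and is safe to call
	// from a non-GUI thread.
	target->AddPendingEvent(event);
}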
/* * Copyright (C) 2010-2013 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. * * A copy of the licence is included with the program, and can also be obtained from Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "mali_kernel_common.h" #include "mali_memory.h" #include "mali_memory_block_alloc.h" #include "mali_osk.h" #include <linux/mutex.h> #define MALI_BLOCK_SIZE (256UL * 1024UL) /* 256 kB, remember to keep the ()s */ struct block_info { struct block_info *next; }; typedef struct block_info block_info; typedef struct block_allocator { struct mutex mutex; block_info *all_blocks; block_info *first_free; u32 base; u32 cpu_usage_adjust; u32 num_blocks; u32 free_blocks; } block_allocator; static block_allocator *mali_mem_block_gobal_allocator = NULL; MALI_STATIC_INLINE u32 get_phys(block_allocator *info, block_info *block) { return info->base + ((block - info->all_blocks) * MALI_BLOCK_SIZE); } mali_mem_allocator *mali_mem_block_allocator_create(u32 base_address, u32 cpu_usage_adjust, u32 size) { block_allocator *info; u32 usable_size; u32 num_blocks; usable_size = size & ~(MALI_BLOCK_SIZE - 1); MALI_DEBUG_PRINT(3, ("Mali block allocator create for region starting at 0x%08X length 0x%08X\n", base_address, size)); MALI_DEBUG_PRINT(4, ("%d usable bytes\n", usable_size)); num_blocks = usable_size / MALI_BLOCK_SIZE; MALI_DEBUG_PRINT(4, ("which becomes %d blocks\n", num_blocks)); if (usable_size == 0) { MALI_DEBUG_PRINT(1, ("Memory block of size %d is unusable\n", size)); return NULL; } info = _mali_osk_malloc(sizeof(block_allocator)); if (NULL != info) { mutex_init(&info->mutex); info->all_blocks = _mali_osk_malloc(sizeof(block_info) * num_blocks); if (NULL != info->all_blocks) { u32 i; info->first_free = NULL; info->num_blocks = num_blocks; info->free_blocks = num_blocks; info->base = base_address; info->cpu_usage_adjust = cpu_usage_adjust; for ( i = 0; i < num_blocks; i++) { info->all_blocks[i].next = info->first_free; info->first_free = &info->all_blocks[i]; } return (mali_mem_allocator *)info; } _mali_osk_free(info); } return NULL; } void mali_mem_block_allocator_destroy(mali_mem_allocator *allocator) { block_allocator *info = (block_allocator*)allocator; info = mali_mem_block_gobal_allocator; if (NULL == info) return; MALI_DEBUG_ASSERT_POINTER(info); _mali_osk_free(info->all_blocks); _mali_osk_free(info); } static void mali_mem_block_mali_map(mali_mem_allocation *descriptor, u32 phys, u32 virt, u32 size) { struct mali_page_directory *pagedir = descriptor->session->page_directory; u32 prop = descriptor->mali_mapping.properties; u32 offset = 0; while (size) { mali_mmu_pagedir_update(pagedir, virt + offset, phys + offset, MALI_MMU_PAGE_SIZE, prop); size -= MALI_MMU_PAGE_SIZE; offset += MALI_MMU_PAGE_SIZE; } } static int mali_mem_block_cpu_map(mali_mem_allocation *descriptor, struct vm_area_struct *vma, u32 mali_phys, u32 mapping_offset, u32 size, u32 cpu_usage_adjust) { u32 virt = vma->vm_start + mapping_offset; u32 cpu_phys = mali_phys + cpu_usage_adjust; u32 offset = 0; int ret; while (size) { ret = vm_insert_pfn(vma, virt + offset, __phys_to_pfn(cpu_phys + offset)); if (unlikely(ret)) { MALI_DEBUG_PRINT(1, ("Block allocator: Failed to insert pfn into vma\n")); return 1; } size -= MALI_MMU_PAGE_SIZE; offset += 
MALI_MMU_PAGE_SIZE; } return 0; } mali_mem_allocation *mali_mem_block_alloc(u32 mali_addr, u32 size, struct vm_area_struct *vma, struct mali_session_data *session) { _mali_osk_errcode_t err; mali_mem_allocation *descriptor; block_allocator *info; u32 left; block_info *last_allocated = NULL; block_allocator_allocation *ret_allocation; u32 offset = 0; size = ALIGN(size, MALI_BLOCK_SIZE); info = mali_mem_block_gobal_allocator; if (NULL == info) return NULL; left = size; MALI_DEBUG_ASSERT(0 != left); descriptor = mali_mem_descriptor_create(session, MALI_MEM_BLOCK); if (NULL == descriptor) { return NULL; } descriptor->mali_mapping.addr = mali_addr; descriptor->size = size; descriptor->cpu_mapping.addr = (void __user*)vma->vm_start; descriptor->cpu_mapping.ref = 1; if (VM_SHARED == (VM_SHARED & vma->vm_flags)) { descriptor->mali_mapping.properties = MALI_MMU_FLAGS_DEFAULT; } else { /* Cached Mali memory mapping */ descriptor->mali_mapping.properties = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE; vma->vm_flags |= VM_SHARED; } ret_allocation = &descriptor->block_mem.mem; ret_allocation->mapping_length = 0; _mali_osk_mutex_wait(session->memory_lock); mutex_lock(&info->mutex); if (left > (info->free_blocks * MALI_BLOCK_SIZE)) { MALI_DEBUG_PRINT(2, ("Mali block allocator: not enough free blocks to service allocation (%u)\n", left)); mutex_unlock(&info->mutex); _mali_osk_mutex_signal(session->memory_lock); mali_mem_descriptor_destroy(descriptor); return NULL; } err = mali_mem_mali_map_prepare(descriptor); if (_MALI_OSK_ERR_OK != err) { mutex_unlock(&info->mutex); _mali_osk_mutex_signal(session->memory_lock); mali_mem_descriptor_destroy(descriptor); return NULL; } while ((left > 0) && (info->first_free)) { block_info *block; u32 phys_addr; u32 current_mapping_size; block = info->first_free; info->first_free = info->first_free->next; block->next = last_allocated; last_allocated = block; phys_addr = get_phys(info, block); if (MALI_BLOCK_SIZE < left) { current_mapping_size = MALI_BLOCK_SIZE; } else { current_mapping_size = left; } mali_mem_block_mali_map(descriptor, phys_addr, mali_addr + offset, current_mapping_size); if (mali_mem_block_cpu_map(descriptor, vma, phys_addr, offset, current_mapping_size, info->cpu_usage_adjust)) { /* release all memory back to the pool */ while (last_allocated) { /* This relinks every block we've just allocated back into the free-list */ block = last_allocated->next; last_allocated->next = info->first_free; info->first_free = last_allocated; last_allocated = block; } mutex_unlock(&info->mutex); _mali_osk_mutex_signal(session->memory_lock); mali_mem_mali_map_free(descriptor); mali_mem_descriptor_destroy(descriptor); return NULL; } left -= current_mapping_size; offset += current_mapping_size; ret_allocation->mapping_length += current_mapping_size; --info->free_blocks; } mutex_unlock(&info->mutex); _mali_osk_mutex_signal(session->memory_lock); MALI_DEBUG_ASSERT(0 == left); /* Record all the information about this allocation */ ret_allocation->last_allocated = last_allocated; ret_allocation->info = info; return descriptor; } void mali_mem_block_release(mali_mem_allocation *descriptor) { block_allocator *info = descriptor->block_mem.mem.info; block_info *block, *next; block_allocator_allocation *allocation = &descriptor->block_mem.mem; MALI_DEBUG_ASSERT(MALI_MEM_BLOCK == descriptor->type); block = allocation->last_allocated; MALI_DEBUG_ASSERT_POINTER(block); /* unmap */ mali_mem_mali_map_free(descriptor); mutex_lock(&info->mutex); while (block) { MALI_DEBUG_ASSERT(!((block < 
info->all_blocks) || (block > (info->all_blocks + info->num_blocks)))); next = block->next; /* relink into free-list */ block->next = info->first_free; info->first_free = block; /* advance the loop */ block = next; ++info->free_blocks; } mutex_unlock(&info->mutex); } u32 mali_mem_block_allocator_stat(void) { block_allocator *info = (block_allocator *)mali_mem_block_gobal_allocator; if (NULL == info) return 0; MALI_DEBUG_ASSERT_POINTER(info); return (info->num_blocks - info->free_blocks) * MALI_BLOCK_SIZE; } _mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size) { mali_mem_allocator *allocator; /* Do the low level linux operation first */ /* Request ownership of the memory */ if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(start, size, "Dedicated Mali GPU memory")) { MALI_DEBUG_PRINT(1, ("Failed to request memory region for frame buffer (0x%08X - 0x%08X)\n", start, start + size - 1)); return _MALI_OSK_ERR_FAULT; } /* Create generic block allocator object to handle it */ allocator = mali_mem_block_allocator_create(start, 0 /* cpu_usage_adjust */, size); if (NULL == allocator) { MALI_DEBUG_PRINT(1, ("Memory bank registration failed\n")); _mali_osk_mem_unreqregion(start, size); MALI_ERROR(_MALI_OSK_ERR_FAULT); } mali_mem_block_gobal_allocator = (block_allocator*)allocator; return _MALI_OSK_ERR_OK; }
koquantam/android_kernel_oc_vivalto3gvn
drivers/gpu/mali400/r4p0/linux/mali_memory_block_alloc.c
C
gpl-2.0
9,171
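mali_mem_block_alloc() above threads fixed-size 256 kB blocks onto a singly linked free list and recovers each block's physical address from its index in the all_blocks array (get_phys()). The stand-alone sketch below isolates just that bookkeeping; the block count and base address are made-up values, and the driver's locking, Mali MMU mapping and CPU mapping are omitted.

// Stand-alone sketch of the free-list bookkeeping used by the block allocator.
#include <cstdint>
#include <cstdio>

constexpr uint32_t kBlockSize = 256 * 1024;    // MALI_BLOCK_SIZE

struct BlockInfo { BlockInfo* next; };

struct BlockAllocator {
  BlockInfo blocks[8] = {};
  BlockInfo* firstFree = nullptr;
  uint32_t base = 0x80000000u;                 // illustrative base address

  BlockAllocator() {
    // Same initialisation loop as mali_mem_block_allocator_create().
    for (auto& b : blocks) { b.next = firstFree; firstFree = &b; }
  }

  uint32_t physOf(const BlockInfo* b) const {  // get_phys()
    return base + static_cast<uint32_t>(b - blocks) * kBlockSize;
  }

  BlockInfo* alloc() {                         // pop one block off the free list
    BlockInfo* b = firstFree;
    if (b) firstFree = b->next;
    return b;
  }

  void release(BlockInfo* b) {                 // push it back, as in mali_mem_block_release()
    b->next = firstFree;
    firstFree = b;
  }
};

int main() {
  BlockAllocator a;
  BlockInfo* b = a.alloc();
  std::printf("allocated block at 0x%08x\n", a.physOf(b));
  a.release(b);
  return 0;
}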
/* //@HEADER // ************************************************************************ // // Kokkos v. 2.0 // Copyright (2014) Sandia Corporation // // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, // the U.S. Government retains certain rights in this software. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // 1. Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // 3. Neither the name of the Corporation nor the names of the // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Questions? Contact Christian R. 
Trott (crtrott@sandia.gov) // // ************************************************************************ //@HEADER */ #include <qthreads/TestQthreads.hpp> namespace Test { TEST_F( qthreads, init ) { ; } TEST_F( qthreads, md_range ) { #if 0 TestMDRange_2D< Kokkos::Qthreads >::test_for2( 100, 100 ); TestMDRange_3D< Kokkos::Qthreads >::test_for3( 100, 100, 100 ); #endif } TEST_F( qthreads, policy_construction ) { #if 0 TestRangePolicyConstruction< Kokkos::Qthreads >(); TestTeamPolicyConstruction< Kokkos::Qthreads >(); #endif } TEST_F( qthreads, range_tag ) { #if 0 TestRange< Kokkos::Qthreads, Kokkos::Schedule<Kokkos::Static> >::test_for( 0 ); TestRange< Kokkos::Qthreads, Kokkos::Schedule<Kokkos::Static> >::test_reduce( 0 ); TestRange< Kokkos::Qthreads, Kokkos::Schedule<Kokkos::Static> >::test_scan( 0 ); TestRange< Kokkos::Qthreads, Kokkos::Schedule<Kokkos::Dynamic> >::test_for( 0 ); TestRange< Kokkos::Qthreads, Kokkos::Schedule<Kokkos::Dynamic> >::test_reduce( 0 ); TestRange< Kokkos::Qthreads, Kokkos::Schedule<Kokkos::Dynamic> >::test_scan( 0 ); TestRange< Kokkos::Qthreads, Kokkos::Schedule<Kokkos::Dynamic> >::test_dynamic_policy( 0 ); TestRange< Kokkos::Qthreads, Kokkos::Schedule<Kokkos::Static> >::test_for( 2 ); TestRange< Kokkos::Qthreads, Kokkos::Schedule<Kokkos::Static> >::test_reduce( 2 ); TestRange< Kokkos::Qthreads, Kokkos::Schedule<Kokkos::Static> >::test_scan( 2 ); TestRange< Kokkos::Qthreads, Kokkos::Schedule<Kokkos::Dynamic> >::test_for( 3 ); TestRange< Kokkos::Qthreads, Kokkos::Schedule<Kokkos::Dynamic> >::test_reduce( 3 ); TestRange< Kokkos::Qthreads, Kokkos::Schedule<Kokkos::Dynamic> >::test_scan( 3 ); TestRange< Kokkos::Qthreads, Kokkos::Schedule<Kokkos::Dynamic> >::test_dynamic_policy( 3 ); TestRange< Kokkos::Qthreads, Kokkos::Schedule<Kokkos::Static> >::test_for( 1000 ); TestRange< Kokkos::Qthreads, Kokkos::Schedule<Kokkos::Static> >::test_reduce( 1000 ); TestRange< Kokkos::Qthreads, Kokkos::Schedule<Kokkos::Static> >::test_scan( 1000 ); TestRange< Kokkos::Qthreads, Kokkos::Schedule<Kokkos::Dynamic> >::test_for( 1001 ); TestRange< Kokkos::Qthreads, Kokkos::Schedule<Kokkos::Dynamic> >::test_reduce( 1001 ); TestRange< Kokkos::Qthreads, Kokkos::Schedule<Kokkos::Dynamic> >::test_scan( 1001 ); TestRange< Kokkos::Qthreads, Kokkos::Schedule<Kokkos::Dynamic> >::test_dynamic_policy( 1000 ); #endif } //---------------------------------------------------------------------------- TEST_F( qthreads, compiler_macros ) { #if 0 ASSERT_TRUE( ( TestCompilerMacros::Test< Kokkos::Qthreads >() ) ); #endif } //---------------------------------------------------------------------------- TEST_F( qthreads, memory_pool ) { #if 0 #endif } //---------------------------------------------------------------------------- #if defined( KOKKOS_ENABLE_TASKDAG ) TEST_F( qthreads, task_fib ) { #if 0 const int N = 24 ; // 25 triggers tbd bug on Cuda/Pascal for ( int i = 0; i < N; ++i ) { TestTaskScheduler::TestFib< Kokkos::Qthreads >::run( i, ( i + 1 ) * ( i + 1 ) * 10000 ); } #endif } TEST_F( qthreads, task_depend ) { #if 0 for ( int i = 0; i < 25; ++i ) { TestTaskScheduler::TestTaskDependence< Kokkos::Qthreads >::run( i ); } #endif } TEST_F( qthreads, task_team ) { #if 0 TestTaskScheduler::TestTaskTeam< Kokkos::Qthreads >::run( 1000 ); //TestTaskScheduler::TestTaskTeamValue< Kokkos::Qthreads >::run( 1000 ); // Put back after testing. 
#endif } #endif // #if defined( KOKKOS_ENABLE_TASKDAG ) //---------------------------------------------------------------------------- #if defined( KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_QTHREADS ) TEST_F( qthreads, cxx11 ) { #if 0 if ( std::is_same< Kokkos::DefaultExecutionSpace, Kokkos::Qthreads >::value ) { ASSERT_TRUE( ( TestCXX11::Test< Kokkos::Qthreads >( 1 ) ) ); ASSERT_TRUE( ( TestCXX11::Test< Kokkos::Qthreads >( 2 ) ) ); ASSERT_TRUE( ( TestCXX11::Test< Kokkos::Qthreads >( 3 ) ) ); ASSERT_TRUE( ( TestCXX11::Test< Kokkos::Qthreads >( 4 ) ) ); } #endif } #endif TEST_F( qthreads, tile_layout ) { #if 0 TestTile::test< Kokkos::Qthreads, 1, 1 >( 1, 1 ); TestTile::test< Kokkos::Qthreads, 1, 1 >( 2, 3 ); TestTile::test< Kokkos::Qthreads, 1, 1 >( 9, 10 ); TestTile::test< Kokkos::Qthreads, 2, 2 >( 1, 1 ); TestTile::test< Kokkos::Qthreads, 2, 2 >( 2, 3 ); TestTile::test< Kokkos::Qthreads, 2, 2 >( 4, 4 ); TestTile::test< Kokkos::Qthreads, 2, 2 >( 9, 9 ); TestTile::test< Kokkos::Qthreads, 2, 4 >( 9, 9 ); TestTile::test< Kokkos::Qthreads, 4, 2 >( 9, 9 ); TestTile::test< Kokkos::Qthreads, 4, 4 >( 1, 1 ); TestTile::test< Kokkos::Qthreads, 4, 4 >( 4, 4 ); TestTile::test< Kokkos::Qthreads, 4, 4 >( 9, 9 ); TestTile::test< Kokkos::Qthreads, 4, 4 >( 9, 11 ); TestTile::test< Kokkos::Qthreads, 8, 8 >( 1, 1 ); TestTile::test< Kokkos::Qthreads, 8, 8 >( 4, 4 ); TestTile::test< Kokkos::Qthreads, 8, 8 >( 9, 9 ); TestTile::test< Kokkos::Qthreads, 8, 8 >( 9, 11 ); #endif } TEST_F( qthreads, dispatch ) { #if 0 const int repeat = 100; for ( int i = 0; i < repeat; ++i ) { for ( int j = 0; j < repeat; ++j ) { Kokkos::parallel_for( Kokkos::RangePolicy< Kokkos::Qthreads >( 0, j ) , KOKKOS_LAMBDA( int ) {} ); } } #endif } } // namespace Test
pdebuyl/lammps
lib/kokkos/core/unit_test/qthreads/TestQthreads_Other.cpp
C++
gpl-2.0
7,165
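Every test body in this file is currently compiled out with #if 0, so the file only checks that the Qthreads fixture builds. The sketch below shows the kind of RangePolicy dispatch the range_tag tests would exercise; it targets Kokkos::DefaultExecutionSpace rather than Kokkos::Qthreads, since the Qthreads backend needs a dedicated Kokkos build.

// Minimal Kokkos sketch of a RangePolicy parallel_reduce.
#include <Kokkos_Core.hpp>
#include <cstdio>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    long sum = 0;
    Kokkos::parallel_reduce(
        "sum_0_to_999",
        Kokkos::RangePolicy<Kokkos::DefaultExecutionSpace>(0, 1000),
        KOKKOS_LAMBDA(const int i, long& partial) { partial += i; },
        sum);
    std::printf("sum = %ld (expected %d)\n", sum, 999 * 1000 / 2);
  }
  Kokkos::finalize();
  return 0;
}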
/* Override generic sotruss-lib.c to define actual functions for MIPS. Copyright (C) 2012-2013 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library. If not, see <http://www.gnu.org/licenses/>. */ #define HAVE_ARCH_PLTENTER #define HAVE_ARCH_PLTEXIT #include <elf/sotruss-lib.c> #if _MIPS_SIM == _ABIO32 ElfW(Addr) la_mips_o32_gnu_pltenter (ElfW(Sym) *sym __attribute__ ((unused)), unsigned int ndx __attribute__ ((unused)), uintptr_t *refcook, uintptr_t *defcook, La_mips_32_regs *regs, unsigned int *flags, const char *symname, long int *framesizep) { print_enter (refcook, defcook, symname, regs->lr_reg[0], regs->lr_reg[1], regs->lr_reg[2], *flags); /* No need to copy anything, we will not need the parameters in any case. */ *framesizep = 0; return sym->st_value; } unsigned int la_mips_o32_gnu_pltexit (ElfW(Sym) *sym, unsigned int ndx, uintptr_t *refcook, uintptr_t *defcook, const struct La_mips_32_regs *inregs, struct La_mips_32_retval *outregs, const char *symname) { print_exit (refcook, defcook, symname, outregs->lrv_v0); return 0; } #elif _MIPS_SIM == _ABIN32 ElfW(Addr) la_mips_n32_gnu_pltenter (ElfW(Sym) *sym __attribute__ ((unused)), unsigned int ndx __attribute__ ((unused)), uintptr_t *refcook, uintptr_t *defcook, La_mips_64_regs *regs, unsigned int *flags, const char *symname, long int *framesizep) { print_enter (refcook, defcook, symname, regs->lr_reg[0], regs->lr_reg[1], regs->lr_reg[2], *flags); /* No need to copy anything, we will not need the parameters in any case. */ *framesizep = 0; return sym->st_value; } unsigned int la_mips_n32_gnu_pltexit (ElfW(Sym) *sym, unsigned int ndx, uintptr_t *refcook, uintptr_t *defcook, const struct La_mips_64_regs *inregs, struct La_mips_64_retval *outregs, const char *symname) { print_exit (refcook, defcook, symname, outregs->lrv_v0); return 0; } #else ElfW(Addr) la_mips_n64_gnu_pltenter (ElfW(Sym) *sym __attribute__ ((unused)), unsigned int ndx __attribute__ ((unused)), uintptr_t *refcook, uintptr_t *defcook, La_mips_64_regs *regs, unsigned int *flags, const char *symname, long int *framesizep) { print_enter (refcook, defcook, symname, regs->lr_reg[0], regs->lr_reg[1], regs->lr_reg[2], *flags); /* No need to copy anything, we will not need the parameters in any case. */ *framesizep = 0; return sym->st_value; } unsigned int la_mips_n64_gnu_pltexit (ElfW(Sym) *sym, unsigned int ndx, uintptr_t *refcook, uintptr_t *defcook, const struct La_mips_64_regs *inregs, struct La_mips_64_retval *outregs, const char *symname) { print_exit (refcook, defcook, symname, outregs->lrv_v0); return 0; } #endif
walac/glibc
ports/sysdeps/mips/sotruss-lib.c
C
gpl-2.0
3,458
<?php namespace Drupal\Tests\node\Kernel\Config; use Drupal\field\Entity\FieldConfig; use Drupal\node\Entity\NodeType; use Drupal\KernelTests\KernelTestBase; /** * Create content types during config create method invocation. * * @group node */ class NodeImportCreateTest extends KernelTestBase { /** * Modules to enable. * * @var array */ public static $modules = ['node', 'field', 'text', 'system', 'user']; /** * Set the default field storage backend for fields created during tests. */ protected function setUp() { parent::setUp(); $this->installEntitySchema('user'); // Set default storage backend. $this->installConfig(['system', 'field']); } /** * Tests creating a content type during default config import. */ public function testImportCreateDefault() { $node_type_id = 'default'; // Check that the content type does not exist yet. $this->assertFalse(NodeType::load($node_type_id)); // Enable node_test_config module and check that the content type // shipped in the module's default config is created. $this->container->get('module_installer')->install(['node_test_config']); $node_type = NodeType::load($node_type_id); $this->assertTrue($node_type, 'The default content type was created.'); } /** * Tests creating a content type during config import. */ public function testImportCreate() { $node_type_id = 'import'; $node_type_config_name = "node.type.$node_type_id"; // Simulate config data to import. $active = $this->container->get('config.storage'); $sync = $this->container->get('config.storage.sync'); $this->copyConfig($active, $sync); // Manually add new node type. $src_dir = __DIR__ . '/../../../modules/node_test_config/sync'; $target_dir = config_get_config_directory(CONFIG_SYNC_DIRECTORY); $this->assertTrue(\Drupal::service('file_system')->copy("$src_dir/$node_type_config_name.yml", "$target_dir/$node_type_config_name.yml")); // Import the content of the sync directory. $this->configImporter()->import(); // Check that the content type was created. $node_type = NodeType::load($node_type_id); $this->assertTrue($node_type, 'Import node type from sync was created.'); $this->assertFalse(FieldConfig::loadByName('node', $node_type_id, 'body')); } }
wheelercreek/faithblog
core/modules/node/tests/src/Kernel/Config/NodeImportCreateTest.php
PHP
gpl-2.0
2,357
/*************************************************************************** qgsalgorithmsymmetricaldifference.cpp --------------------- Date : April 2018 Copyright : (C) 2018 by Martin Dobias Email : wonder dot sk at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ #include "qgsalgorithmsymmetricaldifference.h" #include "qgsoverlayutils.h" ///@cond PRIVATE QString QgsSymmetricalDifferenceAlgorithm::name() const { return QStringLiteral( "symmetricaldifference" ); } QString QgsSymmetricalDifferenceAlgorithm::displayName() const { return QObject::tr( "Symmetrical difference" ); } QStringList QgsSymmetricalDifferenceAlgorithm::tags() const { return QObject::tr( "difference,symdiff,not overlap" ).split( ',' ); } QString QgsSymmetricalDifferenceAlgorithm::group() const { return QObject::tr( "Vector overlay" ); } QString QgsSymmetricalDifferenceAlgorithm::groupId() const { return QStringLiteral( "vectoroverlay" ); } QString QgsSymmetricalDifferenceAlgorithm::shortHelpString() const { return QObject::tr( "This algorithm extracts the portions of features from both the Input and Overlay layers that do not overlap. " "Overlapping areas between the two layers are removed. The attribute table of the Symmetrical Difference layer " "contains original attributes from both the Input and Difference layers." ); } QgsProcessingAlgorithm *QgsSymmetricalDifferenceAlgorithm::createInstance() const { return new QgsSymmetricalDifferenceAlgorithm(); } void QgsSymmetricalDifferenceAlgorithm::initAlgorithm( const QVariantMap & ) { addParameter( new QgsProcessingParameterFeatureSource( QStringLiteral( "INPUT" ), QObject::tr( "Input layer" ) ) ); addParameter( new QgsProcessingParameterFeatureSource( QStringLiteral( "OVERLAY" ), QObject::tr( "Overlay layer" ) ) ); std::unique_ptr< QgsProcessingParameterString > prefix = std::make_unique< QgsProcessingParameterString >( QStringLiteral( "OVERLAY_FIELDS_PREFIX" ), QObject::tr( "Overlay fields prefix" ), QString(), false, true ); prefix->setFlags( prefix->flags() | QgsProcessingParameterDefinition::FlagAdvanced ); addParameter( prefix.release() ); addParameter( new QgsProcessingParameterFeatureSink( QStringLiteral( "OUTPUT" ), QObject::tr( "Symmetrical difference" ) ) ); } QVariantMap QgsSymmetricalDifferenceAlgorithm::processAlgorithm( const QVariantMap &parameters, QgsProcessingContext &context, QgsProcessingFeedback *feedback ) { std::unique_ptr< QgsFeatureSource > sourceA( parameterAsSource( parameters, QStringLiteral( "INPUT" ), context ) ); if ( !sourceA ) throw QgsProcessingException( invalidSourceError( parameters, QStringLiteral( "INPUT" ) ) ); std::unique_ptr< QgsFeatureSource > sourceB( parameterAsSource( parameters, QStringLiteral( "OVERLAY" ), context ) ); if ( !sourceB ) throw QgsProcessingException( invalidSourceError( parameters, QStringLiteral( "OVERLAY" ) ) ); const QgsWkbTypes::Type geomType = QgsWkbTypes::multiType( sourceA->wkbType() ); const QString overlayFieldsPrefix = parameterAsString( parameters, QStringLiteral( "OVERLAY_FIELDS_PREFIX" ), context ); const QgsFields fields = QgsProcessingUtils::combineFields( sourceA->fields(), sourceB->fields(), overlayFieldsPrefix ); 
QString dest; std::unique_ptr< QgsFeatureSink > sink( parameterAsSink( parameters, QStringLiteral( "OUTPUT" ), context, dest, fields, geomType, sourceA->sourceCrs(), QgsFeatureSink::RegeneratePrimaryKey ) ); if ( !sink ) throw QgsProcessingException( invalidSinkError( parameters, QStringLiteral( "OUTPUT" ) ) ); QVariantMap outputs; outputs.insert( QStringLiteral( "OUTPUT" ), dest ); long count = 0; const long total = sourceA->featureCount() + sourceB->featureCount(); QgsOverlayUtils::difference( *sourceA, *sourceB, *sink, context, feedback, count, total, QgsOverlayUtils::OutputAB ); if ( feedback->isCanceled() ) return outputs; QgsOverlayUtils::difference( *sourceB, *sourceA, *sink, context, feedback, count, total, QgsOverlayUtils::OutputBA ); return outputs; } ///@endcond PRIVATE
nyalldawson/QGIS
src/analysis/processing/qgsalgorithmsymmetricaldifference.cpp
C++
gpl-2.0
4,743
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
<meta name="GENERATOR" content="Mozilla/4.01 [en] (Win95; I) [Netscape]">
<title>Hewlett Packard 58503A GPS Receiver and HP Z3801A</title>
<link href="scripts/style.css" type="text/css" rel="stylesheet">
</head>
<body>
<h3>Hewlett Packard 58503A GPS Receiver and HP Z3801A</h3>
<hr>
<h4>Synopsis</h4>
<p>Address: 127.127.26.<i>u</i><br>
Reference ID: <tt>GPS</tt><br>
Driver ID: <tt>GPS_HP</tt><br>
Serial Port: <tt>/dev/hpgps<i>u</i></tt>; 9600 baud, 8-bits, no parity; 19200 baud, 7-bits, odd parity for the HP Z3801A</p>
<h4>Description</h4>
<p>This driver supports the HP 58503A Time and Frequency Reference Receiver and HP Z3801A GPS Receiver. They use HP SmartClock (TM) to implement an Enhanced GPS receiver. The receiver accuracy when locked to GPS in normal operation is better than 1 usec. The accuracy when operating in holdover is typically better than 10 us per day. The receiver should be operated with factory default settings. At startup the driver expects the receiver to be already locked to GPS, configured, and able to output timecode format 2 messages.</p>
<p>The driver uses the poll sequence <tt>:PTIME:TCODE?</tt> to get a response from the receiver. The receiver responds with a timecode string of ASCII printing characters, followed by a &lt;cr&gt;&lt;lf&gt;, followed by a prompt string issued by the receiver, in the following format:</p>
<pre>T#yyyymmddhhmmssMFLRVcc&lt;cr&gt;&lt;lf&gt;scpi &gt;</pre>
The driver processes the response at the &lt;cr&gt; and &lt;lf&gt;, so what the driver sees is the prompt from the previous poll, followed by this timecode. The prompt from the current poll is (usually) left unread until the next poll. So (except on the very first poll) the driver sees this:
<pre>scpi &gt;T#yyyymmddhhmmssMFLRVcc&lt;cr&gt;&lt;lf&gt;</pre>
<p>The T is the on-time character, at 980 msec. before the next 1PPS edge. The # is the timecode format type. We look for format 2. Without any of the CLK or PPS stuff, then, the receiver buffer timestamp at the &lt;cr&gt; is 24 characters later, which is about 25 msec. at 9600 bps, so the first approximation for fudge time1 is nominally -0.955 seconds. This number probably needs adjusting for each machine / OS type; so far: -0.955000 on an HP 9000 Model 712/80 (HP-UX 9.05), and -0.953175 on an HP 9000 Model 370 (HP-UX 9.10).</p>
This driver will probably work with the 58503B and 59551A if they are set up appropriately.<P>
To use an HP Z3801A, specify <tt>mode 1</tt> on the server config line to set up the correct line parameters (see the example configuration below).<P>
The timekeeping portion of HP's business has been sold to <a href="http://www.symmetricom.com/">Symmetricom</a>.<P>
<h4>Fudge Factors</h4>
<dl>
<dt><tt>time1 <i>time</i></tt>
<dd>Specifies the time offset calibration factor, in seconds and fraction, with default 0.0.
<dt><tt>time2 <i>time</i></tt>
<dd>Not used by this driver.
<dt><tt>stratum <i>number</i></tt>
<dd>Specifies the driver stratum, in decimal from 0 to 15, with default 0.
<dt><tt>refid <i>string</i></tt>
<dd>Specifies the driver reference identifier, an ASCII string from one to four characters, with default <tt>GPS</tt>.
<dt><tt>flag1 0 | 1</tt>
<dd>Not used by this driver.
<dt><tt>flag2 0 | 1</tt>
<dd>Not used by this driver.
<dt><tt>flag3 0 | 1</tt>
<dd>Not used by this driver.
<dt><tt>flag4 0 | 1</tt>
<dd>Not used by this driver.
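<h4>Example Configuration</h4>
<p>The fragment below is a minimal sketch of an <tt>ntp.conf</tt> entry for this driver. The unit number (0) and the -0.955 value are only illustrative; time1 should start from the nominal estimate discussed above and then be calibrated for the local machine and OS.</p>
<pre>server 127.127.26.0 mode 1        # unit 0 on /dev/hpgps0; mode 1 selects HP Z3801A line settings
fudge  127.127.26.0 time1 -0.955  # initial offset estimate; adjust per host
</pre>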
</dl> <hr> <script type="text/javascript" language="javascript" src="scripts/footer.txt"></script> </body> </html>
ZHAW-INES/rioxo-uClinux-dist
user/ntp/ntp-4.2.4p5/html/drivers/driver26.html
HTML
gpl-2.0
3,998
<?php /* * Copyright 2014 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ class Google_Service_AndroidManagement_StatusReportingSettings extends Google_Model { protected $applicationReportingSettingsType = 'Google_Service_AndroidManagement_ApplicationReportingSettings'; protected $applicationReportingSettingsDataType = ''; public $applicationReportsEnabled; public $deviceSettingsEnabled; public $displayInfoEnabled; public $hardwareStatusEnabled; public $memoryInfoEnabled; public $networkInfoEnabled; public $powerManagementEventsEnabled; public $softwareInfoEnabled; public $systemPropertiesEnabled; /** * @param Google_Service_AndroidManagement_ApplicationReportingSettings */ public function setApplicationReportingSettings(Google_Service_AndroidManagement_ApplicationReportingSettings $applicationReportingSettings) { $this->applicationReportingSettings = $applicationReportingSettings; } /** * @return Google_Service_AndroidManagement_ApplicationReportingSettings */ public function getApplicationReportingSettings() { return $this->applicationReportingSettings; } public function setApplicationReportsEnabled($applicationReportsEnabled) { $this->applicationReportsEnabled = $applicationReportsEnabled; } public function getApplicationReportsEnabled() { return $this->applicationReportsEnabled; } public function setDeviceSettingsEnabled($deviceSettingsEnabled) { $this->deviceSettingsEnabled = $deviceSettingsEnabled; } public function getDeviceSettingsEnabled() { return $this->deviceSettingsEnabled; } public function setDisplayInfoEnabled($displayInfoEnabled) { $this->displayInfoEnabled = $displayInfoEnabled; } public function getDisplayInfoEnabled() { return $this->displayInfoEnabled; } public function setHardwareStatusEnabled($hardwareStatusEnabled) { $this->hardwareStatusEnabled = $hardwareStatusEnabled; } public function getHardwareStatusEnabled() { return $this->hardwareStatusEnabled; } public function setMemoryInfoEnabled($memoryInfoEnabled) { $this->memoryInfoEnabled = $memoryInfoEnabled; } public function getMemoryInfoEnabled() { return $this->memoryInfoEnabled; } public function setNetworkInfoEnabled($networkInfoEnabled) { $this->networkInfoEnabled = $networkInfoEnabled; } public function getNetworkInfoEnabled() { return $this->networkInfoEnabled; } public function setPowerManagementEventsEnabled($powerManagementEventsEnabled) { $this->powerManagementEventsEnabled = $powerManagementEventsEnabled; } public function getPowerManagementEventsEnabled() { return $this->powerManagementEventsEnabled; } public function setSoftwareInfoEnabled($softwareInfoEnabled) { $this->softwareInfoEnabled = $softwareInfoEnabled; } public function getSoftwareInfoEnabled() { return $this->softwareInfoEnabled; } public function setSystemPropertiesEnabled($systemPropertiesEnabled) { $this->systemPropertiesEnabled = $systemPropertiesEnabled; } public function getSystemPropertiesEnabled() { return $this->systemPropertiesEnabled; } }
skenow/formulize
libraries/googleapiclient/vendor/google/apiclient-services/src/Google/Service/AndroidManagement/StatusReportingSettings.php
PHP
gpl-2.0
3,717
<?php /** * @file * Contains \Drupal\Console\Command\Generate\PluginBlockCommand. */ namespace Drupal\Console\Command\Generate; use Symfony\Component\Console\Input\InputInterface; use Symfony\Component\Console\Input\InputOption; use Symfony\Component\Console\Output\OutputInterface; use Symfony\Component\Console\Command\Command; use Drupal\Console\Generator\PluginBlockGenerator; use Drupal\Console\Command\Shared\ServicesTrait; use Drupal\Console\Command\Shared\ModuleTrait; use Drupal\Console\Command\Shared\FormTrait; use Drupal\Console\Command\Shared\ConfirmationTrait; use Drupal\Console\Command\Shared\ContainerAwareCommandTrait; use Drupal\Console\Extension\Manager; use Drupal\Console\Utils\Validator; use Drupal\Console\Utils\StringConverter; use Drupal\Console\Style\DrupalStyle; use Drupal\Console\Utils\ChainQueue; use Drupal\Core\Config\ConfigFactory; use Drupal\Core\Entity\EntityTypeManagerInterface; use Drupal\Core\Render\ElementInfoManagerInterface; class PluginBlockCommand extends Command { use ServicesTrait; use ModuleTrait; use FormTrait; use ConfirmationTrait; use ContainerAwareCommandTrait; /** * @var ConfigFactory */ protected $configFactory; /** * @var ChainQueue */ protected $chainQueue; /** * @var PluginBlockGenerator */ protected $generator; /** * @var EntityTypeManagerInterface */ protected $entityTypeManager; /** * @var Manager */ protected $extensionManager; /** * @var Validator */ protected $validator; /** * @var StringConverter */ protected $stringConverter; /** * @var ElementInfoManagerInterface */ protected $elementInfoManager; /** * PluginBlockCommand constructor. * @param ConfigFactory $configFactory * @param ChainQueue $chainQueue * @param PluginBlockGenerator $generator * @param EntityTypeManagerInterface $entityTypeManager * @param Manager $extensionManager * @param Validator $validator * @param StringConverter $stringConverter * @param ElementInfoManagerInterface $elementInfoManager */ public function __construct( ConfigFactory $configFactory, ChainQueue $chainQueue, PluginBlockGenerator $generator, EntityTypeManagerInterface $entityTypeManager, Manager $extensionManager, Validator $validator, StringConverter $stringConverter, ElementInfoManagerInterface $elementInfoManager ) { $this->configFactory = $configFactory; $this->chainQueue = $chainQueue; $this->generator = $generator; $this->entityTypeManager = $entityTypeManager; $this->extensionManager = $extensionManager; $this->validator = $validator; $this->stringConverter = $stringConverter; $this->elementInfoManager = $elementInfoManager; parent::__construct(); } protected function configure() { $this ->setName('generate:plugin:block') ->setDescription($this->trans('commands.generate.plugin.block.description')) ->setHelp($this->trans('commands.generate.plugin.block.help')) ->addOption('module', '', InputOption::VALUE_REQUIRED, $this->trans('commands.common.options.module')) ->addOption( 'class', '', InputOption::VALUE_OPTIONAL, $this->trans('commands.generate.plugin.block.options.class') ) ->addOption( 'label', '', InputOption::VALUE_OPTIONAL, $this->trans('commands.generate.plugin.block.options.label') ) ->addOption( 'plugin-id', '', InputOption::VALUE_OPTIONAL, $this->trans('commands.generate.plugin.block.options.plugin-id') ) ->addOption( 'theme-region', '', InputOption::VALUE_OPTIONAL, $this->trans('commands.generate.plugin.block.options.theme-region') ) ->addOption( 'inputs', '', InputOption::VALUE_OPTIONAL | InputOption::VALUE_IS_ARRAY, $this->trans('commands.common.options.inputs') ) ->addOption( 
'services', '', InputOption::VALUE_OPTIONAL | InputOption::VALUE_IS_ARRAY, $this->trans('commands.common.options.services') ); } /** * {@inheritdoc} */ protected function execute(InputInterface $input, OutputInterface $output) { $io = new DrupalStyle($input, $output); // @see use Drupal\Console\Command\Shared\ConfirmationTrait::confirmGeneration if (!$this->confirmGeneration($io)) { return 1; } $module = $input->getOption('module'); $class_name = $input->getOption('class'); $label = $input->getOption('label'); $plugin_id = $input->getOption('plugin-id'); $services = $input->getOption('services'); $theme_region = $input->getOption('theme-region'); $inputs = $input->getOption('inputs'); $theme = $this->configFactory->get('system.theme')->get('default'); $themeRegions = \system_region_list($theme, REGIONS_VISIBLE); if (!empty($theme_region) && !isset($themeRegions[$theme_region])) { $io->error( sprintf( $this->trans('commands.generate.plugin.block.messages.invalid-theme-region'), $theme_region ) ); return 1; } // @see use Drupal\Console\Command\Shared\ServicesTrait::buildServices $build_services = $this->buildServices($services); $this->generator ->generate( $module, $class_name, $label, $plugin_id, $build_services, $inputs ); $this->chainQueue->addCommand('cache:rebuild', ['cache' => 'discovery']); if ($theme_region) { $block = $this->entityTypeManager ->getStorage('block') ->create( [ 'id'=> $plugin_id, 'plugin' => $plugin_id, 'theme' => $theme ] ); $block->setRegion($theme_region); $block->save(); } } protected function interact(InputInterface $input, OutputInterface $output) { $io = new DrupalStyle($input, $output); $theme = $this->configFactory->get('system.theme')->get('default'); $themeRegions = \system_region_list($theme, REGIONS_VISIBLE); // --module option $module = $input->getOption('module'); if (!$module) { // @see Drupal\Console\Command\Shared\ModuleTrait::moduleQuestion $module = $this->moduleQuestion($io); $input->setOption('module', $module); } // --class option $class = $input->getOption('class'); if (!$class) { $class = $io->ask( $this->trans('commands.generate.plugin.block.options.class'), 'DefaultBlock', function ($class) { return $this->validator->validateClassName($class); } ); $input->setOption('class', $class); } // --label option $label = $input->getOption('label'); if (!$label) { $label = $io->ask( $this->trans('commands.generate.plugin.block.options.label'), $this->stringConverter->camelCaseToHuman($class) ); $input->setOption('label', $label); } // --plugin-id option $pluginId = $input->getOption('plugin-id'); if (!$pluginId) { $pluginId = $io->ask( $this->trans('commands.generate.plugin.block.options.plugin-id'), $this->stringConverter->camelCaseToUnderscore($class) ); $input->setOption('plugin-id', $pluginId); } // --theme-region option $themeRegion = $input->getOption('theme-region'); if (!$themeRegion) { $themeRegion = $io->choiceNoList( $this->trans('commands.generate.plugin.block.options.theme-region'), array_values($themeRegions), null, true ); $themeRegion = array_search($themeRegion, $themeRegions); $input->setOption('theme-region', $themeRegion); } // --services option // @see Drupal\Console\Command\Shared\ServicesTrait::servicesQuestion $services = $this->servicesQuestion($io); $input->setOption('services', $services); $output->writeln($this->trans('commands.generate.plugin.block.messages.inputs')); // @see Drupal\Console\Command\Shared\FormTrait::formQuestion $inputs = $this->formQuestion($io); $input->setOption('inputs', $inputs); } }
kbielawiec/intern_supersport
vendor/drupal/console/src/Command/Generate/PluginBlockCommand.php
PHP
gpl-2.0
9,468
<?php /** * Magento * * NOTICE OF LICENSE * * This source file is subject to the Open Software License (OSL 3.0) * that is bundled with this package in the file LICENSE.txt. * It is also available through the world-wide-web at this URL: * http://opensource.org/licenses/osl-3.0.php * If you did not receive a copy of the license and are unable to * obtain it through the world-wide-web, please send an email * to license@magentocommerce.com so we can send you a copy immediately. * * DISCLAIMER * * Do not edit or add to this file if you wish to upgrade Magento to newer * versions in the future. If you wish to customize Magento for your * needs please refer to http://www.magentocommerce.com for more information. * * @category Mage * @package Mage_XmlConnect * @copyright Copyright (c) 2012 Magento Inc. (http://www.magentocommerce.com) * @license http://opensource.org/licenses/osl-3.0.php Open Software License (OSL 3.0) */ /** * XML Connect Setup Resource Model * * @category Mage * @package Mage_XmlConnect * @author Magento Core Team <core@magentocommerce.com> */ class Mage_XmlConnect_Model_Mysql4_Setup extends Mage_XmlConnect_Model_Resource_Setup { }
donostudio/cw88
app/code/core/Mage/XmlConnect/Model/Mysql4/Setup.php
PHP
gpl-2.0
1,219
define("ace/mode/doc_comment_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/text_highlight_rules"], function(require, exports, module) { "use strict"; var oop = require("../lib/oop"); var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules; var DocCommentHighlightRules = function() { this.$rules = { "start" : [ { token : "comment.doc.tag", regex : "@[\\w\\d_]+" // TODO: fix email addresses }, DocCommentHighlightRules.getTagRule(), { defaultToken : "comment.doc", caseInsensitive: true }] }; }; oop.inherits(DocCommentHighlightRules, TextHighlightRules); DocCommentHighlightRules.getTagRule = function(start) { return { token : "comment.doc.tag.storage.type", regex : "\\b(?:TODO|FIXME|XXX|HACK)\\b" }; } DocCommentHighlightRules.getStartRule = function(start) { return { token : "comment.doc", // doc comment regex : "\\/\\*(?=\\*)", next : start }; }; DocCommentHighlightRules.getEndRule = function (start) { return { token : "comment.doc", // closing comment regex : "\\*\\/", next : start }; }; exports.DocCommentHighlightRules = DocCommentHighlightRules; }); define("ace/mode/javascript_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/doc_comment_highlight_rules","ace/mode/text_highlight_rules"], function(require, exports, module) { "use strict"; var oop = require("../lib/oop"); var DocCommentHighlightRules = require("./doc_comment_highlight_rules").DocCommentHighlightRules; var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules; var identifierRe = "[a-zA-Z\\$_\u00a1-\uffff][a-zA-Z\\d\\$_\u00a1-\uffff]*"; var JavaScriptHighlightRules = function(options) { var keywordMapper = this.createKeywordMapper({ "variable.language": "Array|Boolean|Date|Function|Iterator|Number|Object|RegExp|String|Proxy|" + // Constructors "Namespace|QName|XML|XMLList|" + // E4X "ArrayBuffer|Float32Array|Float64Array|Int16Array|Int32Array|Int8Array|" + "Uint16Array|Uint32Array|Uint8Array|Uint8ClampedArray|" + "Error|EvalError|InternalError|RangeError|ReferenceError|StopIteration|" + // Errors "SyntaxError|TypeError|URIError|" + "decodeURI|decodeURIComponent|encodeURI|encodeURIComponent|eval|isFinite|" + // Non-constructor functions "isNaN|parseFloat|parseInt|" + "JSON|Math|" + // Other "this|arguments|prototype|window|document" , // Pseudo "keyword": "const|yield|import|get|set|async|await|" + "break|case|catch|continue|default|delete|do|else|finally|for|function|" + "if|in|of|instanceof|new|return|switch|throw|try|typeof|let|var|while|with|debugger|" + "__parent__|__count__|escape|unescape|with|__proto__|" + "class|enum|extends|super|export|implements|private|public|interface|package|protected|static", "storage.type": "const|let|var|function", "constant.language": "null|Infinity|NaN|undefined", "support.function": "alert", "constant.language.boolean": "true|false" }, "identifier"); var kwBeforeRe = "case|do|else|finally|in|instanceof|return|throw|try|typeof|yield|void"; var escapedRe = "\\\\(?:x[0-9a-fA-F]{2}|" + // hex "u[0-9a-fA-F]{4}|" + // unicode "u{[0-9a-fA-F]{1,6}}|" + // es6 unicode "[0-2][0-7]{0,2}|" + // oct "3[0-7][0-7]?|" + // oct "[4-7][0-7]?|" + //oct ".)"; this.$rules = { "no_regex" : [ DocCommentHighlightRules.getStartRule("doc-start"), comments("no_regex"), { token : "string", regex : "'(?=.)", next : "qstring" }, { token : "string", regex : '"(?=.)', next : "qqstring" }, { token : "constant.numeric", // hexadecimal, octal and binary regex : /0(?:[xX][0-9a-fA-F]+|[oO][0-7]+|[bB][01]+)\b/ }, { token : 
"constant.numeric", // decimal integers and floats regex : /(?:\d\d*(?:\.\d*)?|\.\d+)(?:[eE][+-]?\d+\b)?/ }, { token : [ "storage.type", "punctuation.operator", "support.function", "punctuation.operator", "entity.name.function", "text","keyword.operator" ], regex : "(" + identifierRe + ")(\\.)(prototype)(\\.)(" + identifierRe +")(\\s*)(=)", next: "function_arguments" }, { token : [ "storage.type", "punctuation.operator", "entity.name.function", "text", "keyword.operator", "text", "storage.type", "text", "paren.lparen" ], regex : "(" + identifierRe + ")(\\.)(" + identifierRe +")(\\s*)(=)(\\s*)(function)(\\s*)(\\()", next: "function_arguments" }, { token : [ "entity.name.function", "text", "keyword.operator", "text", "storage.type", "text", "paren.lparen" ], regex : "(" + identifierRe +")(\\s*)(=)(\\s*)(function)(\\s*)(\\()", next: "function_arguments" }, { token : [ "storage.type", "punctuation.operator", "entity.name.function", "text", "keyword.operator", "text", "storage.type", "text", "entity.name.function", "text", "paren.lparen" ], regex : "(" + identifierRe + ")(\\.)(" + identifierRe +")(\\s*)(=)(\\s*)(function)(\\s+)(\\w+)(\\s*)(\\()", next: "function_arguments" }, { token : [ "storage.type", "text", "entity.name.function", "text", "paren.lparen" ], regex : "(function)(\\s+)(" + identifierRe + ")(\\s*)(\\()", next: "function_arguments" }, { token : [ "entity.name.function", "text", "punctuation.operator", "text", "storage.type", "text", "paren.lparen" ], regex : "(" + identifierRe + ")(\\s*)(:)(\\s*)(function)(\\s*)(\\()", next: "function_arguments" }, { token : [ "text", "text", "storage.type", "text", "paren.lparen" ], regex : "(:)(\\s*)(function)(\\s*)(\\()", next: "function_arguments" }, { token : "keyword", regex : "from(?=\\s*('|\"))" }, { token : "keyword", regex : "(?:" + kwBeforeRe + ")\\b", next : "start" }, { token : ["support.constant"], regex : /that\b/ }, { token : ["storage.type", "punctuation.operator", "support.function.firebug"], regex : /(console)(\.)(warn|info|log|error|time|trace|timeEnd|assert)\b/ }, { token : keywordMapper, regex : identifierRe }, { token : "punctuation.operator", regex : /[.](?![.])/, next : "property" }, { token : "storage.type", regex : /=>/ }, { token : "keyword.operator", regex : /--|\+\+|\.{3}|===|==|=|!=|!==|<+=?|>+=?|!|&&|\|\||\?:|[!$%&*+\-~\/^]=?/, next : "start" }, { token : "punctuation.operator", regex : /[?:,;.]/, next : "start" }, { token : "paren.lparen", regex : /[\[({]/, next : "start" }, { token : "paren.rparen", regex : /[\])}]/ }, { token: "comment", regex: /^#!.*$/ } ], property: [{ token : "text", regex : "\\s+" }, { token : [ "storage.type", "punctuation.operator", "entity.name.function", "text", "keyword.operator", "text", "storage.type", "text", "entity.name.function", "text", "paren.lparen" ], regex : "(" + identifierRe + ")(\\.)(" + identifierRe +")(\\s*)(=)(\\s*)(function)(?:(\\s+)(\\w+))?(\\s*)(\\()", next: "function_arguments" }, { token : "punctuation.operator", regex : /[.](?![.])/ }, { token : "support.function", regex : 
/(s(?:h(?:ift|ow(?:Mod(?:elessDialog|alDialog)|Help))|croll(?:X|By(?:Pages|Lines)?|Y|To)?|t(?:op|rike)|i(?:n|zeToContent|debar|gnText)|ort|u(?:p|b(?:str(?:ing)?)?)|pli(?:ce|t)|e(?:nd|t(?:Re(?:sizable|questHeader)|M(?:i(?:nutes|lliseconds)|onth)|Seconds|Ho(?:tKeys|urs)|Year|Cursor|Time(?:out)?|Interval|ZOptions|Date|UTC(?:M(?:i(?:nutes|lliseconds)|onth)|Seconds|Hours|Date|FullYear)|FullYear|Active)|arch)|qrt|lice|avePreferences|mall)|h(?:ome|andleEvent)|navigate|c(?:har(?:CodeAt|At)|o(?:s|n(?:cat|textual|firm)|mpile)|eil|lear(?:Timeout|Interval)?|a(?:ptureEvents|ll)|reate(?:StyleSheet|Popup|EventObject))|t(?:o(?:GMTString|S(?:tring|ource)|U(?:TCString|pperCase)|Lo(?:caleString|werCase))|est|a(?:n|int(?:Enabled)?))|i(?:s(?:NaN|Finite)|ndexOf|talics)|d(?:isableExternalCapture|ump|etachEvent)|u(?:n(?:shift|taint|escape|watch)|pdateCommands)|j(?:oin|avaEnabled)|p(?:o(?:p|w)|ush|lugins.refresh|a(?:ddings|rse(?:Int|Float)?)|r(?:int|ompt|eference))|e(?:scape|nableExternalCapture|val|lementFromPoint|x(?:p|ec(?:Script|Command)?))|valueOf|UTC|queryCommand(?:State|Indeterm|Enabled|Value)|f(?:i(?:nd|le(?:ModifiedDate|Size|CreatedDate|UpdatedDate)|xed)|o(?:nt(?:size|color)|rward)|loor|romCharCode)|watch|l(?:ink|o(?:ad|g)|astIndexOf)|a(?:sin|nchor|cos|t(?:tachEvent|ob|an(?:2)?)|pply|lert|b(?:s|ort))|r(?:ou(?:nd|teEvents)|e(?:size(?:By|To)|calc|turnValue|place|verse|l(?:oad|ease(?:Capture|Events)))|andom)|g(?:o|et(?:ResponseHeader|M(?:i(?:nutes|lliseconds)|onth)|Se(?:conds|lection)|Hours|Year|Time(?:zoneOffset)?|Da(?:y|te)|UTC(?:M(?:i(?:nutes|lliseconds)|onth)|Seconds|Hours|Da(?:y|te)|FullYear)|FullYear|A(?:ttention|llResponseHeaders)))|m(?:in|ove(?:B(?:y|elow)|To(?:Absolute)?|Above)|ergeAttributes|a(?:tch|rgins|x))|b(?:toa|ig|o(?:ld|rderWidths)|link|ack))\b(?=\()/ }, { token : "support.function.dom", regex : /(s(?:ub(?:stringData|mit)|plitText|e(?:t(?:NamedItem|Attribute(?:Node)?)|lect))|has(?:ChildNodes|Feature)|namedItem|c(?:l(?:ick|o(?:se|neNode))|reate(?:C(?:omment|DATASection|aption)|T(?:Head|extNode|Foot)|DocumentFragment|ProcessingInstruction|E(?:ntityReference|lement)|Attribute))|tabIndex|i(?:nsert(?:Row|Before|Cell|Data)|tem)|open|delete(?:Row|C(?:ell|aption)|T(?:Head|Foot)|Data)|focus|write(?:ln)?|a(?:dd|ppend(?:Child|Data))|re(?:set|place(?:Child|Data)|move(?:NamedItem|Child|Attribute(?:Node)?)?)|get(?:NamedItem|Element(?:sBy(?:Name|TagName|ClassName)|ById)|Attribute(?:Node)?)|blur)\b(?=\()/ }, { token : "support.constant", regex : 
/(s(?:ystemLanguage|cr(?:ipts|ollbars|een(?:X|Y|Top|Left))|t(?:yle(?:Sheets)?|atus(?:Text|bar)?)|ibling(?:Below|Above)|ource|uffixes|e(?:curity(?:Policy)?|l(?:ection|f)))|h(?:istory|ost(?:name)?|as(?:h|Focus))|y|X(?:MLDocument|SLDocument)|n(?:ext|ame(?:space(?:s|URI)|Prop))|M(?:IN_VALUE|AX_VALUE)|c(?:haracterSet|o(?:n(?:structor|trollers)|okieEnabled|lorDepth|mp(?:onents|lete))|urrent|puClass|l(?:i(?:p(?:boardData)?|entInformation)|osed|asses)|alle(?:e|r)|rypto)|t(?:o(?:olbar|p)|ext(?:Transform|Indent|Decoration|Align)|ags)|SQRT(?:1_2|2)|i(?:n(?:ner(?:Height|Width)|put)|ds|gnoreCase)|zIndex|o(?:scpu|n(?:readystatechange|Line)|uter(?:Height|Width)|p(?:sProfile|ener)|ffscreenBuffering)|NEGATIVE_INFINITY|d(?:i(?:splay|alog(?:Height|Top|Width|Left|Arguments)|rectories)|e(?:scription|fault(?:Status|Ch(?:ecked|arset)|View)))|u(?:ser(?:Profile|Language|Agent)|n(?:iqueID|defined)|pdateInterval)|_content|p(?:ixelDepth|ort|ersonalbar|kcs11|l(?:ugins|atform)|a(?:thname|dding(?:Right|Bottom|Top|Left)|rent(?:Window|Layer)?|ge(?:X(?:Offset)?|Y(?:Offset)?))|r(?:o(?:to(?:col|type)|duct(?:Sub)?|mpter)|e(?:vious|fix)))|e(?:n(?:coding|abledPlugin)|x(?:ternal|pando)|mbeds)|v(?:isibility|endor(?:Sub)?|Linkcolor)|URLUnencoded|P(?:I|OSITIVE_INFINITY)|f(?:ilename|o(?:nt(?:Size|Family|Weight)|rmName)|rame(?:s|Element)|gColor)|E|whiteSpace|l(?:i(?:stStyleType|n(?:eHeight|kColor))|o(?:ca(?:tion(?:bar)?|lName)|wsrc)|e(?:ngth|ft(?:Context)?)|a(?:st(?:M(?:odified|atch)|Index|Paren)|yer(?:s|X)|nguage))|a(?:pp(?:MinorVersion|Name|Co(?:deName|re)|Version)|vail(?:Height|Top|Width|Left)|ll|r(?:ity|guments)|Linkcolor|bove)|r(?:ight(?:Context)?|e(?:sponse(?:XML|Text)|adyState))|global|x|m(?:imeTypes|ultiline|enubar|argin(?:Right|Bottom|Top|Left))|L(?:N(?:10|2)|OG(?:10E|2E))|b(?:o(?:ttom|rder(?:Width|RightWidth|BottomWidth|Style|Color|TopWidth|LeftWidth))|ufferDepth|elow|ackground(?:Color|Image)))\b/ }, { token : "identifier", regex : identifierRe }, { regex: "", token: "empty", next: "no_regex" } ], "start": [ DocCommentHighlightRules.getStartRule("doc-start"), comments("start"), { token: "string.regexp", regex: "\\/", next: "regex" }, { token : "text", regex : "\\s+|^$", next : "start" }, { token: "empty", regex: "", next: "no_regex" } ], "regex": [ { token: "regexp.keyword.operator", regex: "\\\\(?:u[\\da-fA-F]{4}|x[\\da-fA-F]{2}|.)" }, { token: "string.regexp", regex: "/[sxngimy]*", next: "no_regex" }, { token : "invalid", regex: /\{\d+\b,?\d*\}[+*]|[+*$^?][+*]|[$^][?]|\?{3,}/ }, { token : "constant.language.escape", regex: /\(\?[:=!]|\)|\{\d+\b,?\d*\}|[+*]\?|[()$^+*?.]/ }, { token : "constant.language.delimiter", regex: /\|/ }, { token: "constant.language.escape", regex: /\[\^?/, next: "regex_character_class" }, { token: "empty", regex: "$", next: "no_regex" }, { defaultToken: "string.regexp" } ], "regex_character_class": [ { token: "regexp.charclass.keyword.operator", regex: "\\\\(?:u[\\da-fA-F]{4}|x[\\da-fA-F]{2}|.)" }, { token: "constant.language.escape", regex: "]", next: "regex" }, { token: "constant.language.escape", regex: "-" }, { token: "empty", regex: "$", next: "no_regex" }, { defaultToken: "string.regexp.charachterclass" } ], "function_arguments": [ { token: "variable.parameter", regex: identifierRe }, { token: "punctuation.operator", regex: "[, ]+" }, { token: "punctuation.operator", regex: "$" }, { token: "empty", regex: "", next: "no_regex" } ], "qqstring" : [ { token : "constant.language.escape", regex : escapedRe }, { token : "string", regex : "\\\\$", consumeLineEnd : true }, { token : "string", regex : 
'"|$', next : "no_regex" }, { defaultToken: "string" } ], "qstring" : [ { token : "constant.language.escape", regex : escapedRe }, { token : "string", regex : "\\\\$", consumeLineEnd : true }, { token : "string", regex : "'|$", next : "no_regex" }, { defaultToken: "string" } ] }; if (!options || !options.noES6) { this.$rules.no_regex.unshift({ regex: "[{}]", onMatch: function(val, state, stack) { this.next = val == "{" ? this.nextState : ""; if (val == "{" && stack.length) { stack.unshift("start", state); } else if (val == "}" && stack.length) { stack.shift(); this.next = stack.shift(); if (this.next.indexOf("string") != -1 || this.next.indexOf("jsx") != -1) return "paren.quasi.end"; } return val == "{" ? "paren.lparen" : "paren.rparen"; }, nextState: "start" }, { token : "string.quasi.start", regex : /`/, push : [{ token : "constant.language.escape", regex : escapedRe }, { token : "paren.quasi.start", regex : /\${/, push : "start" }, { token : "string.quasi.end", regex : /`/, next : "pop" }, { defaultToken: "string.quasi" }] }); if (!options || options.jsx != false) JSX.call(this); } this.embedRules(DocCommentHighlightRules, "doc-", [ DocCommentHighlightRules.getEndRule("no_regex") ]); this.normalizeRules(); }; oop.inherits(JavaScriptHighlightRules, TextHighlightRules); function JSX() { var tagRegex = identifierRe.replace("\\d", "\\d\\-"); var jsxTag = { onMatch : function(val, state, stack) { var offset = val.charAt(1) == "/" ? 2 : 1; if (offset == 1) { if (state != this.nextState) stack.unshift(this.next, this.nextState, 0); else stack.unshift(this.next); stack[2]++; } else if (offset == 2) { if (state == this.nextState) { stack[1]--; if (!stack[1] || stack[1] < 0) { stack.shift(); stack.shift(); } } } return [{ type: "meta.tag.punctuation." + (offset == 1 ? "" : "end-") + "tag-open.xml", value: val.slice(0, offset) }, { type: "meta.tag.tag-name.xml", value: val.substr(offset) }]; }, regex : "</?" 
+ tagRegex + "", next: "jsxAttributes", nextState: "jsx" }; this.$rules.start.unshift(jsxTag); var jsxJsRule = { regex: "{", token: "paren.quasi.start", push: "start" }; this.$rules.jsx = [ jsxJsRule, jsxTag, {include : "reference"}, {defaultToken: "string"} ]; this.$rules.jsxAttributes = [{ token : "meta.tag.punctuation.tag-close.xml", regex : "/?>", onMatch : function(value, currentState, stack) { if (currentState == stack[0]) stack.shift(); if (value.length == 2) { if (stack[0] == this.nextState) stack[1]--; if (!stack[1] || stack[1] < 0) { stack.splice(0, 2); } } this.next = stack[0] || "start"; return [{type: this.token, value: value}]; }, nextState: "jsx" }, jsxJsRule, comments("jsxAttributes"), { token : "entity.other.attribute-name.xml", regex : tagRegex }, { token : "keyword.operator.attribute-equals.xml", regex : "=" }, { token : "text.tag-whitespace.xml", regex : "\\s+" }, { token : "string.attribute-value.xml", regex : "'", stateName : "jsx_attr_q", push : [ {token : "string.attribute-value.xml", regex: "'", next: "pop"}, {include : "reference"}, {defaultToken : "string.attribute-value.xml"} ] }, { token : "string.attribute-value.xml", regex : '"', stateName : "jsx_attr_qq", push : [ {token : "string.attribute-value.xml", regex: '"', next: "pop"}, {include : "reference"}, {defaultToken : "string.attribute-value.xml"} ] }, jsxTag ]; this.$rules.reference = [{ token : "constant.language.escape.reference.xml", regex : "(?:&#[0-9]+;)|(?:&#x[0-9a-fA-F]+;)|(?:&[a-zA-Z0-9_:\\.-]+;)" }]; } function comments(next) { return [ { token : "comment", // multi line comment regex : /\/\*/, next: [ DocCommentHighlightRules.getTagRule(), {token : "comment", regex : "\\*\\/", next : next || "pop"}, {defaultToken : "comment", caseInsensitive: true} ] }, { token : "comment", regex : "\\/\\/", next: [ DocCommentHighlightRules.getTagRule(), {token : "comment", regex : "$|^", next : next || "pop"}, {defaultToken : "comment", caseInsensitive: true} ] } ]; } exports.JavaScriptHighlightRules = JavaScriptHighlightRules; }); define("ace/mode/matching_brace_outdent",["require","exports","module","ace/range"], function(require, exports, module) { "use strict"; var Range = require("../range").Range; var MatchingBraceOutdent = function() {}; (function() { this.checkOutdent = function(line, input) { if (! 
/^\s+$/.test(line)) return false; return /^\s*\}/.test(input); }; this.autoOutdent = function(doc, row) { var line = doc.getLine(row); var match = line.match(/^(\s*\})/); if (!match) return 0; var column = match[1].length; var openBracePos = doc.findMatchingBracket({row: row, column: column}); if (!openBracePos || openBracePos.row == row) return 0; var indent = this.$getIndent(doc.getLine(openBracePos.row)); doc.replace(new Range(row, 0, row, column-1), indent); }; this.$getIndent = function(line) { return line.match(/^\s*/)[0]; }; }).call(MatchingBraceOutdent.prototype); exports.MatchingBraceOutdent = MatchingBraceOutdent; }); define("ace/mode/folding/cstyle",["require","exports","module","ace/lib/oop","ace/range","ace/mode/folding/fold_mode"], function(require, exports, module) { "use strict"; var oop = require("../../lib/oop"); var Range = require("../../range").Range; var BaseFoldMode = require("./fold_mode").FoldMode; var FoldMode = exports.FoldMode = function(commentRegex) { if (commentRegex) { this.foldingStartMarker = new RegExp( this.foldingStartMarker.source.replace(/\|[^|]*?$/, "|" + commentRegex.start) ); this.foldingStopMarker = new RegExp( this.foldingStopMarker.source.replace(/\|[^|]*?$/, "|" + commentRegex.end) ); } }; oop.inherits(FoldMode, BaseFoldMode); (function() { this.foldingStartMarker = /(\{|\[)[^\}\]]*$|^\s*(\/\*)/; this.foldingStopMarker = /^[^\[\{]*(\}|\])|^[\s\*]*(\*\/)/; this.singleLineBlockCommentRe= /^\s*(\/\*).*\*\/\s*$/; this.tripleStarBlockCommentRe = /^\s*(\/\*\*\*).*\*\/\s*$/; this.startRegionRe = /^\s*(\/\*|\/\/)#?region\b/; this._getFoldWidgetBase = this.getFoldWidget; this.getFoldWidget = function(session, foldStyle, row) { var line = session.getLine(row); if (this.singleLineBlockCommentRe.test(line)) { if (!this.startRegionRe.test(line) && !this.tripleStarBlockCommentRe.test(line)) return ""; } var fw = this._getFoldWidgetBase(session, foldStyle, row); if (!fw && this.startRegionRe.test(line)) return "start"; // lineCommentRegionStart return fw; }; this.getFoldWidgetRange = function(session, foldStyle, row, forceMultiline) { var line = session.getLine(row); if (this.startRegionRe.test(line)) return this.getCommentRegionBlock(session, line, row); var match = line.match(this.foldingStartMarker); if (match) { var i = match.index; if (match[1]) return this.openingBracketBlock(session, match[1], row, i); var range = session.getCommentFoldRange(row, i + match[0].length, 1); if (range && !range.isMultiLine()) { if (forceMultiline) { range = this.getSectionRange(session, row); } else if (foldStyle != "all") range = null; } return range; } if (foldStyle === "markbegin") return; var match = line.match(this.foldingStopMarker); if (match) { var i = match.index + match[0].length; if (match[1]) return this.closingBracketBlock(session, match[1], row, i); return session.getCommentFoldRange(row, i, -1); } }; this.getSectionRange = function(session, row) { var line = session.getLine(row); var startIndent = line.search(/\S/); var startRow = row; var startColumn = line.length; row = row + 1; var endRow = row; var maxRow = session.getLength(); while (++row < maxRow) { line = session.getLine(row); var indent = line.search(/\S/); if (indent === -1) continue; if (startIndent > indent) break; var subRange = this.getFoldWidgetRange(session, "all", row); if (subRange) { if (subRange.start.row <= startRow) { break; } else if (subRange.isMultiLine()) { row = subRange.end.row; } else if (startIndent == indent) { break; } } endRow = row; } return new Range(startRow, startColumn, 
endRow, session.getLine(endRow).length); }; this.getCommentRegionBlock = function(session, line, row) { var startColumn = line.search(/\s*$/); var maxRow = session.getLength(); var startRow = row; var re = /^\s*(?:\/\*|\/\/|--)#?(end)?region\b/; var depth = 1; while (++row < maxRow) { line = session.getLine(row); var m = re.exec(line); if (!m) continue; if (m[1]) depth--; else depth++; if (!depth) break; } var endRow = row; if (endRow > startRow) { return new Range(startRow, startColumn, endRow, line.length); } }; }).call(FoldMode.prototype); }); define("ace/mode/javascript",["require","exports","module","ace/lib/oop","ace/mode/text","ace/mode/javascript_highlight_rules","ace/mode/matching_brace_outdent","ace/worker/worker_client","ace/mode/behaviour/cstyle","ace/mode/folding/cstyle"], function(require, exports, module) { "use strict"; var oop = require("../lib/oop"); var TextMode = require("./text").Mode; var JavaScriptHighlightRules = require("./javascript_highlight_rules").JavaScriptHighlightRules; var MatchingBraceOutdent = require("./matching_brace_outdent").MatchingBraceOutdent; var WorkerClient = require("../worker/worker_client").WorkerClient; var CstyleBehaviour = require("./behaviour/cstyle").CstyleBehaviour; var CStyleFoldMode = require("./folding/cstyle").FoldMode; var Mode = function() { this.HighlightRules = JavaScriptHighlightRules; this.$outdent = new MatchingBraceOutdent(); this.$behaviour = new CstyleBehaviour(); this.foldingRules = new CStyleFoldMode(); }; oop.inherits(Mode, TextMode); (function() { this.lineCommentStart = "//"; this.blockComment = {start: "/*", end: "*/"}; this.$quotes = {'"': '"', "'": "'", "`": "`"}; this.getNextLineIndent = function(state, line, tab) { var indent = this.$getIndent(line); var tokenizedLine = this.getTokenizer().getLineTokens(line, state); var tokens = tokenizedLine.tokens; var endState = tokenizedLine.state; if (tokens.length && tokens[tokens.length-1].type == "comment") { return indent; } if (state == "start" || state == "no_regex") { var match = line.match(/^.*(?:\bcase\b.*:|[\{\(\[])\s*$/); if (match) { indent += tab; } } else if (state == "doc-start") { if (endState == "start" || endState == "no_regex") { return ""; } var match = line.match(/^\s*(\/?)\*/); if (match) { if (match[1]) { indent += " "; } indent += "* "; } } return indent; }; this.checkOutdent = function(state, line, input) { return this.$outdent.checkOutdent(line, input); }; this.autoOutdent = function(state, doc, row) { this.$outdent.autoOutdent(doc, row); }; this.createWorker = function(session) { var worker = new WorkerClient(["ace"], "ace/mode/javascript_worker", "JavaScriptWorker"); worker.attachToDocument(session.getDocument()); worker.on("annotate", function(results) { session.setAnnotations(results.data); }); worker.on("terminate", function() { session.clearAnnotations(); }); return worker; }; this.$id = "ace/mode/javascript"; }).call(Mode.prototype); exports.Mode = Mode; }); define("ace/mode/css_highlight_rules",["require","exports","module","ace/lib/oop","ace/lib/lang","ace/mode/text_highlight_rules"], function(require, exports, module) { "use strict"; var oop = require("../lib/oop"); var lang = require("../lib/lang"); var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules; var supportType = exports.supportType = 
"align-content|align-items|align-self|all|animation|animation-delay|animation-direction|animation-duration|animation-fill-mode|animation-iteration-count|animation-name|animation-play-state|animation-timing-function|backface-visibility|background|background-attachment|background-blend-mode|background-clip|background-color|background-image|background-origin|background-position|background-repeat|background-size|border|border-bottom|border-bottom-color|border-bottom-left-radius|border-bottom-right-radius|border-bottom-style|border-bottom-width|border-collapse|border-color|border-image|border-image-outset|border-image-repeat|border-image-slice|border-image-source|border-image-width|border-left|border-left-color|border-left-style|border-left-width|border-radius|border-right|border-right-color|border-right-style|border-right-width|border-spacing|border-style|border-top|border-top-color|border-top-left-radius|border-top-right-radius|border-top-style|border-top-width|border-width|bottom|box-shadow|box-sizing|caption-side|clear|clip|color|column-count|column-fill|column-gap|column-rule|column-rule-color|column-rule-style|column-rule-width|column-span|column-width|columns|content|counter-increment|counter-reset|cursor|direction|display|empty-cells|filter|flex|flex-basis|flex-direction|flex-flow|flex-grow|flex-shrink|flex-wrap|float|font|font-family|font-size|font-size-adjust|font-stretch|font-style|font-variant|font-weight|hanging-punctuation|height|justify-content|left|letter-spacing|line-height|list-style|list-style-image|list-style-position|list-style-type|margin|margin-bottom|margin-left|margin-right|margin-top|max-height|max-width|min-height|min-width|nav-down|nav-index|nav-left|nav-right|nav-up|opacity|order|outline|outline-color|outline-offset|outline-style|outline-width|overflow|overflow-x|overflow-y|padding|padding-bottom|padding-left|padding-right|padding-top|page-break-after|page-break-before|page-break-inside|perspective|perspective-origin|position|quotes|resize|right|tab-size|table-layout|text-align|text-align-last|text-decoration|text-decoration-color|text-decoration-line|text-decoration-style|text-indent|text-justify|text-overflow|text-shadow|text-transform|top|transform|transform-origin|transform-style|transition|transition-delay|transition-duration|transition-property|transition-timing-function|unicode-bidi|vertical-align|visibility|white-space|width|word-break|word-spacing|word-wrap|z-index"; var supportFunction = exports.supportFunction = "rgb|rgba|url|attr|counter|counters"; var supportConstant = exports.supportConstant = 
"absolute|after-edge|after|all-scroll|all|alphabetic|always|antialiased|armenian|auto|avoid-column|avoid-page|avoid|balance|baseline|before-edge|before|below|bidi-override|block-line-height|block|bold|bolder|border-box|both|bottom|box|break-all|break-word|capitalize|caps-height|caption|center|central|char|circle|cjk-ideographic|clone|close-quote|col-resize|collapse|column|consider-shifts|contain|content-box|cover|crosshair|cubic-bezier|dashed|decimal-leading-zero|decimal|default|disabled|disc|disregard-shifts|distribute-all-lines|distribute-letter|distribute-space|distribute|dotted|double|e-resize|ease-in|ease-in-out|ease-out|ease|ellipsis|end|exclude-ruby|fill|fixed|georgian|glyphs|grid-height|groove|hand|hanging|hebrew|help|hidden|hiragana-iroha|hiragana|horizontal|icon|ideograph-alpha|ideograph-numeric|ideograph-parenthesis|ideograph-space|ideographic|inactive|include-ruby|inherit|initial|inline-block|inline-box|inline-line-height|inline-table|inline|inset|inside|inter-ideograph|inter-word|invert|italic|justify|katakana-iroha|katakana|keep-all|last|left|lighter|line-edge|line-through|line|linear|list-item|local|loose|lower-alpha|lower-greek|lower-latin|lower-roman|lowercase|lr-tb|ltr|mathematical|max-height|max-size|medium|menu|message-box|middle|move|n-resize|ne-resize|newspaper|no-change|no-close-quote|no-drop|no-open-quote|no-repeat|none|normal|not-allowed|nowrap|nw-resize|oblique|open-quote|outset|outside|overline|padding-box|page|pointer|pre-line|pre-wrap|pre|preserve-3d|progress|relative|repeat-x|repeat-y|repeat|replaced|reset-size|ridge|right|round|row-resize|rtl|s-resize|scroll|se-resize|separate|slice|small-caps|small-caption|solid|space|square|start|static|status-bar|step-end|step-start|steps|stretch|strict|sub|super|sw-resize|table-caption|table-cell|table-column-group|table-column|table-footer-group|table-header-group|table-row-group|table-row|table|tb-rl|text-after-edge|text-before-edge|text-bottom|text-size|text-top|text|thick|thin|transparent|underline|upper-alpha|upper-latin|upper-roman|uppercase|use-script|vertical-ideographic|vertical-text|visible|w-resize|wait|whitespace|z-index|zero"; var supportConstantColor = exports.supportConstantColor = 
"aliceblue|antiquewhite|aqua|aquamarine|azure|beige|bisque|black|blanchedalmond|blue|blueviolet|brown|burlywood|cadetblue|chartreuse|chocolate|coral|cornflowerblue|cornsilk|crimson|cyan|darkblue|darkcyan|darkgoldenrod|darkgray|darkgreen|darkgrey|darkkhaki|darkmagenta|darkolivegreen|darkorange|darkorchid|darkred|darksalmon|darkseagreen|darkslateblue|darkslategray|darkslategrey|darkturquoise|darkviolet|deeppink|deepskyblue|dimgray|dimgrey|dodgerblue|firebrick|floralwhite|forestgreen|fuchsia|gainsboro|ghostwhite|gold|goldenrod|gray|green|greenyellow|grey|honeydew|hotpink|indianred|indigo|ivory|khaki|lavender|lavenderblush|lawngreen|lemonchiffon|lightblue|lightcoral|lightcyan|lightgoldenrodyellow|lightgray|lightgreen|lightgrey|lightpink|lightsalmon|lightseagreen|lightskyblue|lightslategray|lightslategrey|lightsteelblue|lightyellow|lime|limegreen|linen|magenta|maroon|mediumaquamarine|mediumblue|mediumorchid|mediumpurple|mediumseagreen|mediumslateblue|mediumspringgreen|mediumturquoise|mediumvioletred|midnightblue|mintcream|mistyrose|moccasin|navajowhite|navy|oldlace|olive|olivedrab|orange|orangered|orchid|palegoldenrod|palegreen|paleturquoise|palevioletred|papayawhip|peachpuff|peru|pink|plum|powderblue|purple|rebeccapurple|red|rosybrown|royalblue|saddlebrown|salmon|sandybrown|seagreen|seashell|sienna|silver|skyblue|slateblue|slategray|slategrey|snow|springgreen|steelblue|tan|teal|thistle|tomato|turquoise|violet|wheat|white|whitesmoke|yellow|yellowgreen"; var supportConstantFonts = exports.supportConstantFonts = "arial|century|comic|courier|cursive|fantasy|garamond|georgia|helvetica|impact|lucida|symbol|system|tahoma|times|trebuchet|utopia|verdana|webdings|sans-serif|serif|monospace"; var numRe = exports.numRe = "\\-?(?:(?:[0-9]+(?:\\.[0-9]+)?)|(?:\\.[0-9]+))"; var pseudoElements = exports.pseudoElements = "(\\:+)\\b(after|before|first-letter|first-line|moz-selection|selection)\\b"; var pseudoClasses = exports.pseudoClasses = "(:)\\b(active|checked|disabled|empty|enabled|first-child|first-of-type|focus|hover|indeterminate|invalid|last-child|last-of-type|link|not|nth-child|nth-last-child|nth-last-of-type|nth-of-type|only-child|only-of-type|required|root|target|valid|visited)\\b"; var CssHighlightRules = function() { var keywordMapper = this.createKeywordMapper({ "support.function": supportFunction, "support.constant": supportConstant, "support.type": supportType, "support.constant.color": supportConstantColor, "support.constant.fonts": supportConstantFonts }, "text", true); this.$rules = { "start" : [{ include : ["strings", "url", "comments"] }, { token: "paren.lparen", regex: "\\{", next: "ruleset" }, { token: "paren.rparen", regex: "\\}" }, { token: "string", regex: "@", next: "media" }, { token: "keyword", regex: "#[a-z0-9-_]+" }, { token: "keyword", regex: "%" }, { token: "variable", regex: "\\.[a-z0-9-_]+" }, { token: "string", regex: ":[a-z0-9-_]+" }, { token : "constant.numeric", regex : numRe }, { token: "constant", regex: "[a-z0-9-_]+" }, { caseInsensitive: true }], "media": [{ include : ["strings", "url", "comments"] }, { token: "paren.lparen", regex: "\\{", next: "start" }, { token: "paren.rparen", regex: "\\}", next: "start" }, { token: "string", regex: ";", next: "start" }, { token: "keyword", regex: "(?:media|supports|document|charset|import|namespace|media|supports|document" + "|page|font|keyframes|viewport|counter-style|font-feature-values" + "|swash|ornaments|annotation|stylistic|styleset|character-variant)" }], "comments" : [{ token: "comment", // multi line comment regex: 
"\\/\\*", push: [{ token : "comment", regex : "\\*\\/", next : "pop" }, { defaultToken : "comment" }] }], "ruleset" : [{ regex : "-(webkit|ms|moz|o)-", token : "text" }, { token : "paren.rparen", regex : "\\}", next : "start" }, { include : ["strings", "url", "comments"] }, { token : ["constant.numeric", "keyword"], regex : "(" + numRe + ")(ch|cm|deg|em|ex|fr|gd|grad|Hz|in|kHz|mm|ms|pc|pt|px|rad|rem|s|turn|vh|vm|vw|%)" }, { token : "constant.numeric", regex : numRe }, { token : "constant.numeric", // hex6 color regex : "#[a-f0-9]{6}" }, { token : "constant.numeric", // hex3 color regex : "#[a-f0-9]{3}" }, { token : ["punctuation", "entity.other.attribute-name.pseudo-element.css"], regex : pseudoElements }, { token : ["punctuation", "entity.other.attribute-name.pseudo-class.css"], regex : pseudoClasses }, { include: "url" }, { token : keywordMapper, regex : "\\-?[a-zA-Z_][a-zA-Z0-9_\\-]*" }, { caseInsensitive: true }], url: [{ token : "support.function", regex : "(?:url(:?-prefix)?|domain|regexp)\\(", push: [{ token : "support.function", regex : "\\)", next : "pop" }, { defaultToken: "string" }] }], strings: [{ token : "string.start", regex : "'", push : [{ token : "string.end", regex : "'|$", next: "pop" }, { include : "escapes" }, { token : "constant.language.escape", regex : /\\$/, consumeLineEnd: true }, { defaultToken: "string" }] }, { token : "string.start", regex : '"', push : [{ token : "string.end", regex : '"|$', next: "pop" }, { include : "escapes" }, { token : "constant.language.escape", regex : /\\$/, consumeLineEnd: true }, { defaultToken: "string" }] }], escapes: [{ token : "constant.language.escape", regex : /\\([a-fA-F\d]{1,6}|[^a-fA-F\d])/ }] }; this.normalizeRules(); }; oop.inherits(CssHighlightRules, TextHighlightRules); exports.CssHighlightRules = CssHighlightRules; }); define("ace/mode/css_completions",["require","exports","module"], function(require, exports, module) { "use strict"; var propertyMap = { "background": {"#$0": 1}, "background-color": {"#$0": 1, "transparent": 1, "fixed": 1}, "background-image": {"url('/$0')": 1}, "background-repeat": {"repeat": 1, "repeat-x": 1, "repeat-y": 1, "no-repeat": 1, "inherit": 1}, "background-position": {"bottom":2, "center":2, "left":2, "right":2, "top":2, "inherit":2}, "background-attachment": {"scroll": 1, "fixed": 1}, "background-size": {"cover": 1, "contain": 1}, "background-clip": {"border-box": 1, "padding-box": 1, "content-box": 1}, "background-origin": {"border-box": 1, "padding-box": 1, "content-box": 1}, "border": {"solid $0": 1, "dashed $0": 1, "dotted $0": 1, "#$0": 1}, "border-color": {"#$0": 1}, "border-style": {"solid":2, "dashed":2, "dotted":2, "double":2, "groove":2, "hidden":2, "inherit":2, "inset":2, "none":2, "outset":2, "ridged":2}, "border-collapse": {"collapse": 1, "separate": 1}, "bottom": {"px": 1, "em": 1, "%": 1}, "clear": {"left": 1, "right": 1, "both": 1, "none": 1}, "color": {"#$0": 1, "rgb(#$00,0,0)": 1}, "cursor": {"default": 1, "pointer": 1, "move": 1, "text": 1, "wait": 1, "help": 1, "progress": 1, "n-resize": 1, "ne-resize": 1, "e-resize": 1, "se-resize": 1, "s-resize": 1, "sw-resize": 1, "w-resize": 1, "nw-resize": 1}, "display": {"none": 1, "block": 1, "inline": 1, "inline-block": 1, "table-cell": 1}, "empty-cells": {"show": 1, "hide": 1}, "float": {"left": 1, "right": 1, "none": 1}, "font-family": {"Arial":2,"Comic Sans MS":2,"Consolas":2,"Courier New":2,"Courier":2,"Georgia":2,"Monospace":2,"Sans-Serif":2, "Segoe UI":2,"Tahoma":2,"Times New Roman":2,"Trebuchet MS":2,"Verdana": 1}, 
"font-size": {"px": 1, "em": 1, "%": 1}, "font-weight": {"bold": 1, "normal": 1}, "font-style": {"italic": 1, "normal": 1}, "font-variant": {"normal": 1, "small-caps": 1}, "height": {"px": 1, "em": 1, "%": 1}, "left": {"px": 1, "em": 1, "%": 1}, "letter-spacing": {"normal": 1}, "line-height": {"normal": 1}, "list-style-type": {"none": 1, "disc": 1, "circle": 1, "square": 1, "decimal": 1, "decimal-leading-zero": 1, "lower-roman": 1, "upper-roman": 1, "lower-greek": 1, "lower-latin": 1, "upper-latin": 1, "georgian": 1, "lower-alpha": 1, "upper-alpha": 1}, "margin": {"px": 1, "em": 1, "%": 1}, "margin-right": {"px": 1, "em": 1, "%": 1}, "margin-left": {"px": 1, "em": 1, "%": 1}, "margin-top": {"px": 1, "em": 1, "%": 1}, "margin-bottom": {"px": 1, "em": 1, "%": 1}, "max-height": {"px": 1, "em": 1, "%": 1}, "max-width": {"px": 1, "em": 1, "%": 1}, "min-height": {"px": 1, "em": 1, "%": 1}, "min-width": {"px": 1, "em": 1, "%": 1}, "overflow": {"hidden": 1, "visible": 1, "auto": 1, "scroll": 1}, "overflow-x": {"hidden": 1, "visible": 1, "auto": 1, "scroll": 1}, "overflow-y": {"hidden": 1, "visible": 1, "auto": 1, "scroll": 1}, "padding": {"px": 1, "em": 1, "%": 1}, "padding-top": {"px": 1, "em": 1, "%": 1}, "padding-right": {"px": 1, "em": 1, "%": 1}, "padding-bottom": {"px": 1, "em": 1, "%": 1}, "padding-left": {"px": 1, "em": 1, "%": 1}, "page-break-after": {"auto": 1, "always": 1, "avoid": 1, "left": 1, "right": 1}, "page-break-before": {"auto": 1, "always": 1, "avoid": 1, "left": 1, "right": 1}, "position": {"absolute": 1, "relative": 1, "fixed": 1, "static": 1}, "right": {"px": 1, "em": 1, "%": 1}, "table-layout": {"fixed": 1, "auto": 1}, "text-decoration": {"none": 1, "underline": 1, "line-through": 1, "blink": 1}, "text-align": {"left": 1, "right": 1, "center": 1, "justify": 1}, "text-transform": {"capitalize": 1, "uppercase": 1, "lowercase": 1, "none": 1}, "top": {"px": 1, "em": 1, "%": 1}, "vertical-align": {"top": 1, "bottom": 1}, "visibility": {"hidden": 1, "visible": 1}, "white-space": {"nowrap": 1, "normal": 1, "pre": 1, "pre-line": 1, "pre-wrap": 1}, "width": {"px": 1, "em": 1, "%": 1}, "word-spacing": {"normal": 1}, "filter": {"alpha(opacity=$0100)": 1}, "text-shadow": {"$02px 2px 2px #777": 1}, "text-overflow": {"ellipsis-word": 1, "clip": 1, "ellipsis": 1}, "-moz-border-radius": 1, "-moz-border-radius-topright": 1, "-moz-border-radius-bottomright": 1, "-moz-border-radius-topleft": 1, "-moz-border-radius-bottomleft": 1, "-webkit-border-radius": 1, "-webkit-border-top-right-radius": 1, "-webkit-border-top-left-radius": 1, "-webkit-border-bottom-right-radius": 1, "-webkit-border-bottom-left-radius": 1, "-moz-box-shadow": 1, "-webkit-box-shadow": 1, "transform": {"rotate($00deg)": 1, "skew($00deg)": 1}, "-moz-transform": {"rotate($00deg)": 1, "skew($00deg)": 1}, "-webkit-transform": {"rotate($00deg)": 1, "skew($00deg)": 1 } }; var CssCompletions = function() { }; (function() { this.completionsDefined = false; this.defineCompletions = function() { if (document) { var style = document.createElement('c').style; for (var i in style) { if (typeof style[i] !== 'string') continue; var name = i.replace(/[A-Z]/g, function(x) { return '-' + x.toLowerCase(); }); if (!propertyMap.hasOwnProperty(name)) propertyMap[name] = 1; } } this.completionsDefined = true; } this.getCompletions = function(state, session, pos, prefix) { if (!this.completionsDefined) { this.defineCompletions(); } var token = session.getTokenAt(pos.row, pos.column); if (!token) return []; if (state==='ruleset'){ var line = 
session.getLine(pos.row).substr(0, pos.column); if (/:[^;]+$/.test(line)) { /([\w\-]+):[^:]*$/.test(line); return this.getPropertyValueCompletions(state, session, pos, prefix); } else { return this.getPropertyCompletions(state, session, pos, prefix); } } return []; }; this.getPropertyCompletions = function(state, session, pos, prefix) { var properties = Object.keys(propertyMap); return properties.map(function(property){ return { caption: property, snippet: property + ': $0', meta: "property", score: Number.MAX_VALUE }; }); }; this.getPropertyValueCompletions = function(state, session, pos, prefix) { var line = session.getLine(pos.row).substr(0, pos.column); var property = (/([\w\-]+):[^:]*$/.exec(line) || {})[1]; if (!property) return []; var values = []; if (property in propertyMap && typeof propertyMap[property] === "object") { values = Object.keys(propertyMap[property]); } return values.map(function(value){ return { caption: value, snippet: value, meta: "property value", score: Number.MAX_VALUE }; }); }; }).call(CssCompletions.prototype); exports.CssCompletions = CssCompletions; }); define("ace/mode/behaviour/css",["require","exports","module","ace/lib/oop","ace/mode/behaviour","ace/mode/behaviour/cstyle","ace/token_iterator"], function(require, exports, module) { "use strict"; var oop = require("../../lib/oop"); var Behaviour = require("../behaviour").Behaviour; var CstyleBehaviour = require("./cstyle").CstyleBehaviour; var TokenIterator = require("../../token_iterator").TokenIterator; var CssBehaviour = function () { this.inherit(CstyleBehaviour); this.add("colon", "insertion", function (state, action, editor, session, text) { if (text === ':') { var cursor = editor.getCursorPosition(); var iterator = new TokenIterator(session, cursor.row, cursor.column); var token = iterator.getCurrentToken(); if (token && token.value.match(/\s+/)) { token = iterator.stepBackward(); } if (token && token.type === 'support.type') { var line = session.doc.getLine(cursor.row); var rightChar = line.substring(cursor.column, cursor.column + 1); if (rightChar === ':') { return { text: '', selection: [1, 1] } } if (!line.substring(cursor.column).match(/^\s*;/)) { return { text: ':;', selection: [1, 1] } } } } }); this.add("colon", "deletion", function (state, action, editor, session, range) { var selected = session.doc.getTextRange(range); if (!range.isMultiLine() && selected === ':') { var cursor = editor.getCursorPosition(); var iterator = new TokenIterator(session, cursor.row, cursor.column); var token = iterator.getCurrentToken(); if (token && token.value.match(/\s+/)) { token = iterator.stepBackward(); } if (token && token.type === 'support.type') { var line = session.doc.getLine(range.start.row); var rightChar = line.substring(range.end.column, range.end.column + 1); if (rightChar === ';') { range.end.column ++; return range; } } } }); this.add("semicolon", "insertion", function (state, action, editor, session, text) { if (text === ';') { var cursor = editor.getCursorPosition(); var line = session.doc.getLine(cursor.row); var rightChar = line.substring(cursor.column, cursor.column + 1); if (rightChar === ';') { return { text: '', selection: [1, 1] } } } }); } oop.inherits(CssBehaviour, CstyleBehaviour); exports.CssBehaviour = CssBehaviour; }); define("ace/mode/css",["require","exports","module","ace/lib/oop","ace/mode/text","ace/mode/css_highlight_rules","ace/mode/matching_brace_outdent","ace/worker/worker_client","ace/mode/css_completions","ace/mode/behaviour/css","ace/mode/folding/cstyle"], 
function(require, exports, module) { "use strict"; var oop = require("../lib/oop"); var TextMode = require("./text").Mode; var CssHighlightRules = require("./css_highlight_rules").CssHighlightRules; var MatchingBraceOutdent = require("./matching_brace_outdent").MatchingBraceOutdent; var WorkerClient = require("../worker/worker_client").WorkerClient; var CssCompletions = require("./css_completions").CssCompletions; var CssBehaviour = require("./behaviour/css").CssBehaviour; var CStyleFoldMode = require("./folding/cstyle").FoldMode; var Mode = function() { this.HighlightRules = CssHighlightRules; this.$outdent = new MatchingBraceOutdent(); this.$behaviour = new CssBehaviour(); this.$completer = new CssCompletions(); this.foldingRules = new CStyleFoldMode(); }; oop.inherits(Mode, TextMode); (function() { this.foldingRules = "cStyle"; this.blockComment = {start: "/*", end: "*/"}; this.getNextLineIndent = function(state, line, tab) { var indent = this.$getIndent(line); var tokens = this.getTokenizer().getLineTokens(line, state).tokens; if (tokens.length && tokens[tokens.length-1].type == "comment") { return indent; } var match = line.match(/^.*\{\s*$/); if (match) { indent += tab; } return indent; }; this.checkOutdent = function(state, line, input) { return this.$outdent.checkOutdent(line, input); }; this.autoOutdent = function(state, doc, row) { this.$outdent.autoOutdent(doc, row); }; this.getCompletions = function(state, session, pos, prefix) { return this.$completer.getCompletions(state, session, pos, prefix); }; this.createWorker = function(session) { var worker = new WorkerClient(["ace"], "ace/mode/css_worker", "Worker"); worker.attachToDocument(session.getDocument()); worker.on("annotate", function(e) { session.setAnnotations(e.data); }); worker.on("terminate", function() { session.clearAnnotations(); }); return worker; }; this.$id = "ace/mode/css"; }).call(Mode.prototype); exports.Mode = Mode; }); define("ace/mode/xml_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/text_highlight_rules"], function(require, exports, module) { "use strict"; var oop = require("../lib/oop"); var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules; var XmlHighlightRules = function(normalize) { var tagRegex = "[_:a-zA-Z\xc0-\uffff][-_:.a-zA-Z0-9\xc0-\uffff]*"; this.$rules = { start : [ {token : "string.cdata.xml", regex : "<\\!\\[CDATA\\[", next : "cdata"}, { token : ["punctuation.instruction.xml", "keyword.instruction.xml"], regex : "(<\\?)(" + tagRegex + ")", next : "processing_instruction" }, {token : "comment.start.xml", regex : "<\\!--", next : "comment"}, { token : ["xml-pe.doctype.xml", "xml-pe.doctype.xml"], regex : "(<\\!)(DOCTYPE)(?=[\\s])", next : "doctype", caseInsensitive: true }, {include : "tag"}, {token : "text.end-tag-open.xml", regex: "</"}, {token : "text.tag-open.xml", regex: "<"}, {include : "reference"}, {defaultToken : "text.xml"} ], processing_instruction : [{ token : "entity.other.attribute-name.decl-attribute-name.xml", regex : tagRegex }, { token : "keyword.operator.decl-attribute-equals.xml", regex : "=" }, { include: "whitespace" }, { include: "string" }, { token : "punctuation.xml-decl.xml", regex : "\\?>", next : "start" }], doctype : [ {include : "whitespace"}, {include : "string"}, {token : "xml-pe.doctype.xml", regex : ">", next : "start"}, {token : "xml-pe.xml", regex : "[-_a-zA-Z0-9:]+"}, {token : "punctuation.int-subset", regex : "\\[", push : "int_subset"} ], int_subset : [{ token : "text.xml", regex : "\\s+" }, { token: 
"punctuation.int-subset.xml", regex: "]", next: "pop" }, { token : ["punctuation.markup-decl.xml", "keyword.markup-decl.xml"], regex : "(<\\!)(" + tagRegex + ")", push : [{ token : "text", regex : "\\s+" }, { token : "punctuation.markup-decl.xml", regex : ">", next : "pop" }, {include : "string"}] }], cdata : [ {token : "string.cdata.xml", regex : "\\]\\]>", next : "start"}, {token : "text.xml", regex : "\\s+"}, {token : "text.xml", regex : "(?:[^\\]]|\\](?!\\]>))+"} ], comment : [ {token : "comment.end.xml", regex : "-->", next : "start"}, {defaultToken : "comment.xml"} ], reference : [{ token : "constant.language.escape.reference.xml", regex : "(?:&#[0-9]+;)|(?:&#x[0-9a-fA-F]+;)|(?:&[a-zA-Z0-9_:\\.-]+;)" }], attr_reference : [{ token : "constant.language.escape.reference.attribute-value.xml", regex : "(?:&#[0-9]+;)|(?:&#x[0-9a-fA-F]+;)|(?:&[a-zA-Z0-9_:\\.-]+;)" }], tag : [{ token : ["meta.tag.punctuation.tag-open.xml", "meta.tag.punctuation.end-tag-open.xml", "meta.tag.tag-name.xml"], regex : "(?:(<)|(</))((?:" + tagRegex + ":)?" + tagRegex + ")", next: [ {include : "attributes"}, {token : "meta.tag.punctuation.tag-close.xml", regex : "/?>", next : "start"} ] }], tag_whitespace : [ {token : "text.tag-whitespace.xml", regex : "\\s+"} ], whitespace : [ {token : "text.whitespace.xml", regex : "\\s+"} ], string: [{ token : "string.xml", regex : "'", push : [ {token : "string.xml", regex: "'", next: "pop"}, {defaultToken : "string.xml"} ] }, { token : "string.xml", regex : '"', push : [ {token : "string.xml", regex: '"', next: "pop"}, {defaultToken : "string.xml"} ] }], attributes: [{ token : "entity.other.attribute-name.xml", regex : tagRegex }, { token : "keyword.operator.attribute-equals.xml", regex : "=" }, { include: "tag_whitespace" }, { include: "attribute_value" }], attribute_value: [{ token : "string.attribute-value.xml", regex : "'", push : [ {token : "string.attribute-value.xml", regex: "'", next: "pop"}, {include : "attr_reference"}, {defaultToken : "string.attribute-value.xml"} ] }, { token : "string.attribute-value.xml", regex : '"', push : [ {token : "string.attribute-value.xml", regex: '"', next: "pop"}, {include : "attr_reference"}, {defaultToken : "string.attribute-value.xml"} ] }] }; if (this.constructor === XmlHighlightRules) this.normalizeRules(); }; (function() { this.embedTagRules = function(HighlightRules, prefix, tag){ this.$rules.tag.unshift({ token : ["meta.tag.punctuation.tag-open.xml", "meta.tag." + tag + ".tag-name.xml"], regex : "(<)(" + tag + "(?=\\s|>|$))", next: [ {include : "attributes"}, {token : "meta.tag.punctuation.tag-close.xml", regex : "/?>", next : prefix + "start"} ] }); this.$rules[tag + "-end"] = [ {include : "attributes"}, {token : "meta.tag.punctuation.tag-close.xml", regex : "/?>", next: "start", onMatch : function(value, currentState, stack) { stack.splice(0); return this.token; }} ] this.embedRules(HighlightRules, prefix, [{ token: ["meta.tag.punctuation.end-tag-open.xml", "meta.tag." 
+ tag + ".tag-name.xml"], regex : "(</)(" + tag + "(?=\\s|>|$))", next: tag + "-end" }, { token: "string.cdata.xml", regex : "<\\!\\[CDATA\\[" }, { token: "string.cdata.xml", regex : "\\]\\]>" }]); }; }).call(TextHighlightRules.prototype); oop.inherits(XmlHighlightRules, TextHighlightRules); exports.XmlHighlightRules = XmlHighlightRules; }); define("ace/mode/html_highlight_rules",["require","exports","module","ace/lib/oop","ace/lib/lang","ace/mode/css_highlight_rules","ace/mode/javascript_highlight_rules","ace/mode/xml_highlight_rules"], function(require, exports, module) { "use strict"; var oop = require("../lib/oop"); var lang = require("../lib/lang"); var CssHighlightRules = require("./css_highlight_rules").CssHighlightRules; var JavaScriptHighlightRules = require("./javascript_highlight_rules").JavaScriptHighlightRules; var XmlHighlightRules = require("./xml_highlight_rules").XmlHighlightRules; var tagMap = lang.createMap({ a : 'anchor', button : 'form', form : 'form', img : 'image', input : 'form', label : 'form', option : 'form', script : 'script', select : 'form', textarea : 'form', style : 'style', table : 'table', tbody : 'table', td : 'table', tfoot : 'table', th : 'table', tr : 'table' }); var HtmlHighlightRules = function() { XmlHighlightRules.call(this); this.addRules({ attributes: [{ include : "tag_whitespace" }, { token : "entity.other.attribute-name.xml", regex : "[-_a-zA-Z0-9:.]+" }, { token : "keyword.operator.attribute-equals.xml", regex : "=", push : [{ include: "tag_whitespace" }, { token : "string.unquoted.attribute-value.html", regex : "[^<>='\"`\\s]+", next : "pop" }, { token : "empty", regex : "", next : "pop" }] }, { include : "attribute_value" }], tag: [{ token : function(start, tag) { var group = tagMap[tag]; return ["meta.tag.punctuation." + (start == "<" ? "" : "end-") + "tag-open.xml", "meta.tag" + (group ? "." 
+ group : "") + ".tag-name.xml"]; }, regex : "(</?)([-_a-zA-Z0-9:.]+)", next: "tag_stuff" }], tag_stuff: [ {include : "attributes"}, {token : "meta.tag.punctuation.tag-close.xml", regex : "/?>", next : "start"} ] }); this.embedTagRules(CssHighlightRules, "css-", "style"); this.embedTagRules(new JavaScriptHighlightRules({jsx: false}).getRules(), "js-", "script"); if (this.constructor === HtmlHighlightRules) this.normalizeRules(); }; oop.inherits(HtmlHighlightRules, XmlHighlightRules); exports.HtmlHighlightRules = HtmlHighlightRules; }); define("ace/mode/behaviour/xml",["require","exports","module","ace/lib/oop","ace/mode/behaviour","ace/token_iterator","ace/lib/lang"], function(require, exports, module) { "use strict"; var oop = require("../../lib/oop"); var Behaviour = require("../behaviour").Behaviour; var TokenIterator = require("../../token_iterator").TokenIterator; var lang = require("../../lib/lang"); function is(token, type) { return token.type.lastIndexOf(type + ".xml") > -1; } var XmlBehaviour = function () { this.add("string_dquotes", "insertion", function (state, action, editor, session, text) { if (text == '"' || text == "'") { var quote = text; var selected = session.doc.getTextRange(editor.getSelectionRange()); if (selected !== "" && selected !== "'" && selected != '"' && editor.getWrapBehavioursEnabled()) { return { text: quote + selected + quote, selection: false }; } var cursor = editor.getCursorPosition(); var line = session.doc.getLine(cursor.row); var rightChar = line.substring(cursor.column, cursor.column + 1); var iterator = new TokenIterator(session, cursor.row, cursor.column); var token = iterator.getCurrentToken(); if (rightChar == quote && (is(token, "attribute-value") || is(token, "string"))) { return { text: "", selection: [1, 1] }; } if (!token) token = iterator.stepBackward(); if (!token) return; while (is(token, "tag-whitespace") || is(token, "whitespace")) { token = iterator.stepBackward(); } var rightSpace = !rightChar || rightChar.match(/\s/); if (is(token, "attribute-equals") && (rightSpace || rightChar == '>') || (is(token, "decl-attribute-equals") && (rightSpace || rightChar == '?'))) { return { text: quote + quote, selection: [1, 1] }; } } }); this.add("string_dquotes", "deletion", function(state, action, editor, session, range) { var selected = session.doc.getTextRange(range); if (!range.isMultiLine() && (selected == '"' || selected == "'")) { var line = session.doc.getLine(range.start.row); var rightChar = line.substring(range.start.column + 1, range.start.column + 2); if (rightChar == selected) { range.end.column++; return range; } } }); this.add("autoclosing", "insertion", function (state, action, editor, session, text) { if (text == '>') { var position = editor.getSelectionRange().start; var iterator = new TokenIterator(session, position.row, position.column); var token = iterator.getCurrentToken() || iterator.stepBackward(); if (!token || !(is(token, "tag-name") || is(token, "tag-whitespace") || is(token, "attribute-name") || is(token, "attribute-equals") || is(token, "attribute-value"))) return; if (is(token, "reference.attribute-value")) return; if (is(token, "attribute-value")) { var firstChar = token.value.charAt(0); if (firstChar == '"' || firstChar == "'") { var lastChar = token.value.charAt(token.value.length - 1); var tokenEnd = iterator.getCurrentTokenColumn() + token.value.length; if (tokenEnd > position.column || tokenEnd == position.column && firstChar != lastChar) return; } } while (!is(token, "tag-name")) { token = 
iterator.stepBackward(); if (token.value == "<") { token = iterator.stepForward(); break; } } var tokenRow = iterator.getCurrentTokenRow(); var tokenColumn = iterator.getCurrentTokenColumn(); if (is(iterator.stepBackward(), "end-tag-open")) return; var element = token.value; if (tokenRow == position.row) element = element.substring(0, position.column - tokenColumn); if (this.voidElements.hasOwnProperty(element.toLowerCase())) return; return { text: ">" + "</" + element + ">", selection: [1, 1] }; } }); this.add("autoindent", "insertion", function (state, action, editor, session, text) { if (text == "\n") { var cursor = editor.getCursorPosition(); var line = session.getLine(cursor.row); var iterator = new TokenIterator(session, cursor.row, cursor.column); var token = iterator.getCurrentToken(); if (token && token.type.indexOf("tag-close") !== -1) { if (token.value == "/>") return; while (token && token.type.indexOf("tag-name") === -1) { token = iterator.stepBackward(); } if (!token) { return; } var tag = token.value; var row = iterator.getCurrentTokenRow(); token = iterator.stepBackward(); if (!token || token.type.indexOf("end-tag") !== -1) { return; } if (this.voidElements && !this.voidElements[tag]) { var nextToken = session.getTokenAt(cursor.row, cursor.column+1); var line = session.getLine(row); var nextIndent = this.$getIndent(line); var indent = nextIndent + session.getTabString(); if (nextToken && nextToken.value === "</") { return { text: "\n" + indent + "\n" + nextIndent, selection: [1, indent.length, 1, indent.length] }; } else { return { text: "\n" + indent }; } } } } }); }; oop.inherits(XmlBehaviour, Behaviour); exports.XmlBehaviour = XmlBehaviour; }); define("ace/mode/folding/mixed",["require","exports","module","ace/lib/oop","ace/mode/folding/fold_mode"], function(require, exports, module) { "use strict"; var oop = require("../../lib/oop"); var BaseFoldMode = require("./fold_mode").FoldMode; var FoldMode = exports.FoldMode = function(defaultMode, subModes) { this.defaultMode = defaultMode; this.subModes = subModes; }; oop.inherits(FoldMode, BaseFoldMode); (function() { this.$getMode = function(state) { if (typeof state != "string") state = state[0]; for (var key in this.subModes) { if (state.indexOf(key) === 0) return this.subModes[key]; } return null; }; this.$tryMode = function(state, session, foldStyle, row) { var mode = this.$getMode(state); return (mode ? 
mode.getFoldWidget(session, foldStyle, row) : ""); }; this.getFoldWidget = function(session, foldStyle, row) { return ( this.$tryMode(session.getState(row-1), session, foldStyle, row) || this.$tryMode(session.getState(row), session, foldStyle, row) || this.defaultMode.getFoldWidget(session, foldStyle, row) ); }; this.getFoldWidgetRange = function(session, foldStyle, row) { var mode = this.$getMode(session.getState(row-1)); if (!mode || !mode.getFoldWidget(session, foldStyle, row)) mode = this.$getMode(session.getState(row)); if (!mode || !mode.getFoldWidget(session, foldStyle, row)) mode = this.defaultMode; return mode.getFoldWidgetRange(session, foldStyle, row); }; }).call(FoldMode.prototype); }); define("ace/mode/folding/xml",["require","exports","module","ace/lib/oop","ace/lib/lang","ace/range","ace/mode/folding/fold_mode","ace/token_iterator"], function(require, exports, module) { "use strict"; var oop = require("../../lib/oop"); var lang = require("../../lib/lang"); var Range = require("../../range").Range; var BaseFoldMode = require("./fold_mode").FoldMode; var TokenIterator = require("../../token_iterator").TokenIterator; var FoldMode = exports.FoldMode = function(voidElements, optionalEndTags) { BaseFoldMode.call(this); this.voidElements = voidElements || {}; this.optionalEndTags = oop.mixin({}, this.voidElements); if (optionalEndTags) oop.mixin(this.optionalEndTags, optionalEndTags); }; oop.inherits(FoldMode, BaseFoldMode); var Tag = function() { this.tagName = ""; this.closing = false; this.selfClosing = false; this.start = {row: 0, column: 0}; this.end = {row: 0, column: 0}; }; function is(token, type) { return token.type.lastIndexOf(type + ".xml") > -1; } (function() { this.getFoldWidget = function(session, foldStyle, row) { var tag = this._getFirstTagInLine(session, row); if (!tag) return this.getCommentFoldWidget(session, row); if (tag.closing || (!tag.tagName && tag.selfClosing)) return foldStyle == "markbeginend" ? 
"end" : ""; if (!tag.tagName || tag.selfClosing || this.voidElements.hasOwnProperty(tag.tagName.toLowerCase())) return ""; if (this._findEndTagInLine(session, row, tag.tagName, tag.end.column)) return ""; return "start"; }; this.getCommentFoldWidget = function(session, row) { if (/comment/.test(session.getState(row)) && /<!-/.test(session.getLine(row))) return "start"; return ""; } this._getFirstTagInLine = function(session, row) { var tokens = session.getTokens(row); var tag = new Tag(); for (var i = 0; i < tokens.length; i++) { var token = tokens[i]; if (is(token, "tag-open")) { tag.end.column = tag.start.column + token.value.length; tag.closing = is(token, "end-tag-open"); token = tokens[++i]; if (!token) return null; tag.tagName = token.value; tag.end.column += token.value.length; for (i++; i < tokens.length; i++) { token = tokens[i]; tag.end.column += token.value.length; if (is(token, "tag-close")) { tag.selfClosing = token.value == '/>'; break; } } return tag; } else if (is(token, "tag-close")) { tag.selfClosing = token.value == '/>'; return tag; } tag.start.column += token.value.length; } return null; }; this._findEndTagInLine = function(session, row, tagName, startColumn) { var tokens = session.getTokens(row); var column = 0; for (var i = 0; i < tokens.length; i++) { var token = tokens[i]; column += token.value.length; if (column < startColumn) continue; if (is(token, "end-tag-open")) { token = tokens[i + 1]; if (token && token.value == tagName) return true; } } return false; }; this._readTagForward = function(iterator) { var token = iterator.getCurrentToken(); if (!token) return null; var tag = new Tag(); do { if (is(token, "tag-open")) { tag.closing = is(token, "end-tag-open"); tag.start.row = iterator.getCurrentTokenRow(); tag.start.column = iterator.getCurrentTokenColumn(); } else if (is(token, "tag-name")) { tag.tagName = token.value; } else if (is(token, "tag-close")) { tag.selfClosing = token.value == "/>"; tag.end.row = iterator.getCurrentTokenRow(); tag.end.column = iterator.getCurrentTokenColumn() + token.value.length; iterator.stepForward(); return tag; } } while(token = iterator.stepForward()); return null; }; this._readTagBackward = function(iterator) { var token = iterator.getCurrentToken(); if (!token) return null; var tag = new Tag(); do { if (is(token, "tag-open")) { tag.closing = is(token, "end-tag-open"); tag.start.row = iterator.getCurrentTokenRow(); tag.start.column = iterator.getCurrentTokenColumn(); iterator.stepBackward(); return tag; } else if (is(token, "tag-name")) { tag.tagName = token.value; } else if (is(token, "tag-close")) { tag.selfClosing = token.value == "/>"; tag.end.row = iterator.getCurrentTokenRow(); tag.end.column = iterator.getCurrentTokenColumn() + token.value.length; } } while(token = iterator.stepBackward()); return null; }; this._pop = function(stack, tag) { while (stack.length) { var top = stack[stack.length-1]; if (!tag || top.tagName == tag.tagName) { return stack.pop(); } else if (this.optionalEndTags.hasOwnProperty(top.tagName)) { stack.pop(); continue; } else { return null; } } }; this.getFoldWidgetRange = function(session, foldStyle, row) { var firstTag = this._getFirstTagInLine(session, row); if (!firstTag) { return this.getCommentFoldWidget(session, row) && session.getCommentFoldRange(row, session.getLine(row).length); } var isBackward = firstTag.closing || firstTag.selfClosing; var stack = []; var tag; if (!isBackward) { var iterator = new TokenIterator(session, row, firstTag.start.column); var start = { row: row, column: 
firstTag.start.column + firstTag.tagName.length + 2 }; if (firstTag.start.row == firstTag.end.row) start.column = firstTag.end.column; while (tag = this._readTagForward(iterator)) { if (tag.selfClosing) { if (!stack.length) { tag.start.column += tag.tagName.length + 2; tag.end.column -= 2; return Range.fromPoints(tag.start, tag.end); } else continue; } if (tag.closing) { this._pop(stack, tag); if (stack.length == 0) return Range.fromPoints(start, tag.start); } else { stack.push(tag); } } } else { var iterator = new TokenIterator(session, row, firstTag.end.column); var end = { row: row, column: firstTag.start.column }; while (tag = this._readTagBackward(iterator)) { if (tag.selfClosing) { if (!stack.length) { tag.start.column += tag.tagName.length + 2; tag.end.column -= 2; return Range.fromPoints(tag.start, tag.end); } else continue; } if (!tag.closing) { this._pop(stack, tag); if (stack.length == 0) { tag.start.column += tag.tagName.length + 2; if (tag.start.row == tag.end.row && tag.start.column < tag.end.column) tag.start.column = tag.end.column; return Range.fromPoints(tag.start, end); } } else { stack.push(tag); } } } }; }).call(FoldMode.prototype); }); define("ace/mode/folding/html",["require","exports","module","ace/lib/oop","ace/mode/folding/mixed","ace/mode/folding/xml","ace/mode/folding/cstyle"], function(require, exports, module) { "use strict"; var oop = require("../../lib/oop"); var MixedFoldMode = require("./mixed").FoldMode; var XmlFoldMode = require("./xml").FoldMode; var CStyleFoldMode = require("./cstyle").FoldMode; var FoldMode = exports.FoldMode = function(voidElements, optionalTags) { MixedFoldMode.call(this, new XmlFoldMode(voidElements, optionalTags), { "js-": new CStyleFoldMode(), "css-": new CStyleFoldMode() }); }; oop.inherits(FoldMode, MixedFoldMode); }); define("ace/mode/html_completions",["require","exports","module","ace/token_iterator"], function(require, exports, module) { "use strict"; var TokenIterator = require("../token_iterator").TokenIterator; var commonAttributes = [ "accesskey", "class", "contenteditable", "contextmenu", "dir", "draggable", "dropzone", "hidden", "id", "inert", "itemid", "itemprop", "itemref", "itemscope", "itemtype", "lang", "spellcheck", "style", "tabindex", "title", "translate" ]; var eventAttributes = [ "onabort", "onblur", "oncancel", "oncanplay", "oncanplaythrough", "onchange", "onclick", "onclose", "oncontextmenu", "oncuechange", "ondblclick", "ondrag", "ondragend", "ondragenter", "ondragleave", "ondragover", "ondragstart", "ondrop", "ondurationchange", "onemptied", "onended", "onerror", "onfocus", "oninput", "oninvalid", "onkeydown", "onkeypress", "onkeyup", "onload", "onloadeddata", "onloadedmetadata", "onloadstart", "onmousedown", "onmousemove", "onmouseout", "onmouseover", "onmouseup", "onmousewheel", "onpause", "onplay", "onplaying", "onprogress", "onratechange", "onreset", "onscroll", "onseeked", "onseeking", "onselect", "onshow", "onstalled", "onsubmit", "onsuspend", "ontimeupdate", "onvolumechange", "onwaiting" ]; var globalAttributes = commonAttributes.concat(eventAttributes); var attributeMap = { "html": {"manifest": 1}, "head": {}, "title": {}, "base": {"href": 1, "target": 1}, "link": {"href": 1, "hreflang": 1, "rel": {"stylesheet": 1, "icon": 1}, "media": {"all": 1, "screen": 1, "print": 1}, "type": {"text/css": 1, "image/png": 1, "image/jpeg": 1, "image/gif": 1}, "sizes": 1}, "meta": {"http-equiv": {"content-type": 1}, "name": {"description": 1, "keywords": 1}, "content": {"text/html; charset=UTF-8": 1}, "charset": 
1}, "style": {"type": 1, "media": {"all": 1, "screen": 1, "print": 1}, "scoped": 1}, "script": {"charset": 1, "type": {"text/javascript": 1}, "src": 1, "defer": 1, "async": 1}, "noscript": {"href": 1}, "body": {"onafterprint": 1, "onbeforeprint": 1, "onbeforeunload": 1, "onhashchange": 1, "onmessage": 1, "onoffline": 1, "onpopstate": 1, "onredo": 1, "onresize": 1, "onstorage": 1, "onundo": 1, "onunload": 1}, "section": {}, "nav": {}, "article": {"pubdate": 1}, "aside": {}, "h1": {}, "h2": {}, "h3": {}, "h4": {}, "h5": {}, "h6": {}, "header": {}, "footer": {}, "address": {}, "main": {}, "p": {}, "hr": {}, "pre": {}, "blockquote": {"cite": 1}, "ol": {"start": 1, "reversed": 1}, "ul": {}, "li": {"value": 1}, "dl": {}, "dt": {}, "dd": {}, "figure": {}, "figcaption": {}, "div": {}, "a": {"href": 1, "target": {"_blank": 1, "top": 1}, "ping": 1, "rel": {"nofollow": 1, "alternate": 1, "author": 1, "bookmark": 1, "help": 1, "license": 1, "next": 1, "noreferrer": 1, "prefetch": 1, "prev": 1, "search": 1, "tag": 1}, "media": 1, "hreflang": 1, "type": 1}, "em": {}, "strong": {}, "small": {}, "s": {}, "cite": {}, "q": {"cite": 1}, "dfn": {}, "abbr": {}, "data": {}, "time": {"datetime": 1}, "code": {}, "var": {}, "samp": {}, "kbd": {}, "sub": {}, "sup": {}, "i": {}, "b": {}, "u": {}, "mark": {}, "ruby": {}, "rt": {}, "rp": {}, "bdi": {}, "bdo": {}, "span": {}, "br": {}, "wbr": {}, "ins": {"cite": 1, "datetime": 1}, "del": {"cite": 1, "datetime": 1}, "img": {"alt": 1, "src": 1, "height": 1, "width": 1, "usemap": 1, "ismap": 1}, "iframe": {"name": 1, "src": 1, "height": 1, "width": 1, "sandbox": {"allow-same-origin": 1, "allow-top-navigation": 1, "allow-forms": 1, "allow-scripts": 1}, "seamless": {"seamless": 1}}, "embed": {"src": 1, "height": 1, "width": 1, "type": 1}, "object": {"param": 1, "data": 1, "type": 1, "height" : 1, "width": 1, "usemap": 1, "name": 1, "form": 1, "classid": 1}, "param": {"name": 1, "value": 1}, "video": {"src": 1, "autobuffer": 1, "autoplay": {"autoplay": 1}, "loop": {"loop": 1}, "controls": {"controls": 1}, "width": 1, "height": 1, "poster": 1, "muted": {"muted": 1}, "preload": {"auto": 1, "metadata": 1, "none": 1}}, "audio": {"src": 1, "autobuffer": 1, "autoplay": {"autoplay": 1}, "loop": {"loop": 1}, "controls": {"controls": 1}, "muted": {"muted": 1}, "preload": {"auto": 1, "metadata": 1, "none": 1 }}, "source": {"src": 1, "type": 1, "media": 1}, "track": {"kind": 1, "src": 1, "srclang": 1, "label": 1, "default": 1}, "canvas": {"width": 1, "height": 1}, "map": {"name": 1}, "area": {"shape": 1, "coords": 1, "href": 1, "hreflang": 1, "alt": 1, "target": 1, "media": 1, "rel": 1, "ping": 1, "type": 1}, "svg": {}, "math": {}, "table": {"summary": 1}, "caption": {}, "colgroup": {"span": 1}, "col": {"span": 1}, "tbody": {}, "thead": {}, "tfoot": {}, "tr": {}, "td": {"headers": 1, "rowspan": 1, "colspan": 1}, "th": {"headers": 1, "rowspan": 1, "colspan": 1, "scope": 1}, "form": {"accept-charset": 1, "action": 1, "autocomplete": 1, "enctype": {"multipart/form-data": 1, "application/x-www-form-urlencoded": 1}, "method": {"get": 1, "post": 1}, "name": 1, "novalidate": 1, "target": {"_blank": 1, "top": 1}}, "fieldset": {"disabled": 1, "form": 1, "name": 1}, "legend": {}, "label": {"form": 1, "for": 1}, "input": { "type": {"text": 1, "password": 1, "hidden": 1, "checkbox": 1, "submit": 1, "radio": 1, "file": 1, "button": 1, "reset": 1, "image": 31, "color": 1, "date": 1, "datetime": 1, "datetime-local": 1, "email": 1, "month": 1, "number": 1, "range": 1, "search": 1, "tel": 1, "time": 1, 
"url": 1, "week": 1}, "accept": 1, "alt": 1, "autocomplete": {"on": 1, "off": 1}, "autofocus": {"autofocus": 1}, "checked": {"checked": 1}, "disabled": {"disabled": 1}, "form": 1, "formaction": 1, "formenctype": {"application/x-www-form-urlencoded": 1, "multipart/form-data": 1, "text/plain": 1}, "formmethod": {"get": 1, "post": 1}, "formnovalidate": {"formnovalidate": 1}, "formtarget": {"_blank": 1, "_self": 1, "_parent": 1, "_top": 1}, "height": 1, "list": 1, "max": 1, "maxlength": 1, "min": 1, "multiple": {"multiple": 1}, "name": 1, "pattern": 1, "placeholder": 1, "readonly": {"readonly": 1}, "required": {"required": 1}, "size": 1, "src": 1, "step": 1, "width": 1, "files": 1, "value": 1}, "button": {"autofocus": 1, "disabled": {"disabled": 1}, "form": 1, "formaction": 1, "formenctype": 1, "formmethod": 1, "formnovalidate": 1, "formtarget": 1, "name": 1, "value": 1, "type": {"button": 1, "submit": 1}}, "select": {"autofocus": 1, "disabled": 1, "form": 1, "multiple": {"multiple": 1}, "name": 1, "size": 1, "readonly":{"readonly": 1}}, "datalist": {}, "optgroup": {"disabled": 1, "label": 1}, "option": {"disabled": 1, "selected": 1, "label": 1, "value": 1}, "textarea": {"autofocus": {"autofocus": 1}, "disabled": {"disabled": 1}, "form": 1, "maxlength": 1, "name": 1, "placeholder": 1, "readonly": {"readonly": 1}, "required": {"required": 1}, "rows": 1, "cols": 1, "wrap": {"on": 1, "off": 1, "hard": 1, "soft": 1}}, "keygen": {"autofocus": 1, "challenge": {"challenge": 1}, "disabled": {"disabled": 1}, "form": 1, "keytype": {"rsa": 1, "dsa": 1, "ec": 1}, "name": 1}, "output": {"for": 1, "form": 1, "name": 1}, "progress": {"value": 1, "max": 1}, "meter": {"value": 1, "min": 1, "max": 1, "low": 1, "high": 1, "optimum": 1}, "details": {"open": 1}, "summary": {}, "command": {"type": 1, "label": 1, "icon": 1, "disabled": 1, "checked": 1, "radiogroup": 1, "command": 1}, "menu": {"type": 1, "label": 1}, "dialog": {"open": 1} }; var elements = Object.keys(attributeMap); function is(token, type) { return token.type.lastIndexOf(type + ".xml") > -1; } function findTagName(session, pos) { var iterator = new TokenIterator(session, pos.row, pos.column); var token = iterator.getCurrentToken(); while (token && !is(token, "tag-name")){ token = iterator.stepBackward(); } if (token) return token.value; } function findAttributeName(session, pos) { var iterator = new TokenIterator(session, pos.row, pos.column); var token = iterator.getCurrentToken(); while (token && !is(token, "attribute-name")){ token = iterator.stepBackward(); } if (token) return token.value; } var HtmlCompletions = function() { }; (function() { this.getCompletions = function(state, session, pos, prefix) { var token = session.getTokenAt(pos.row, pos.column); if (!token) return []; if (is(token, "tag-name") || is(token, "tag-open") || is(token, "end-tag-open")) return this.getTagCompletions(state, session, pos, prefix); if (is(token, "tag-whitespace") || is(token, "attribute-name")) return this.getAttributeCompletions(state, session, pos, prefix); if (is(token, "attribute-value")) return this.getAttributeValueCompletions(state, session, pos, prefix); var line = session.getLine(pos.row).substr(0, pos.column); if (/&[a-z]*$/i.test(line)) return this.getHTMLEntityCompletions(state, session, pos, prefix); return []; }; this.getTagCompletions = function(state, session, pos, prefix) { return elements.map(function(element){ return { value: element, meta: "tag", score: Number.MAX_VALUE }; }); }; this.getAttributeCompletions = function(state, session, pos, 
prefix) { var tagName = findTagName(session, pos); if (!tagName) return []; var attributes = globalAttributes; if (tagName in attributeMap) { attributes = attributes.concat(Object.keys(attributeMap[tagName])); } return attributes.map(function(attribute){ return { caption: attribute, snippet: attribute + '="$0"', meta: "attribute", score: Number.MAX_VALUE }; }); }; this.getAttributeValueCompletions = function(state, session, pos, prefix) { var tagName = findTagName(session, pos); var attributeName = findAttributeName(session, pos); if (!tagName) return []; var values = []; if (tagName in attributeMap && attributeName in attributeMap[tagName] && typeof attributeMap[tagName][attributeName] === "object") { values = Object.keys(attributeMap[tagName][attributeName]); } return values.map(function(value){ return { caption: value, snippet: value, meta: "attribute value", score: Number.MAX_VALUE }; }); }; this.getHTMLEntityCompletions = function(state, session, pos, prefix) { var values = ['Aacute;', 'aacute;', 'Acirc;', 'acirc;', 'acute;', 'AElig;', 'aelig;', 'Agrave;', 'agrave;', 'alefsym;', 'Alpha;', 'alpha;', 'amp;', 'and;', 'ang;', 'Aring;', 'aring;', 'asymp;', 'Atilde;', 'atilde;', 'Auml;', 'auml;', 'bdquo;', 'Beta;', 'beta;', 'brvbar;', 'bull;', 'cap;', 'Ccedil;', 'ccedil;', 'cedil;', 'cent;', 'Chi;', 'chi;', 'circ;', 'clubs;', 'cong;', 'copy;', 'crarr;', 'cup;', 'curren;', 'Dagger;', 'dagger;', 'dArr;', 'darr;', 'deg;', 'Delta;', 'delta;', 'diams;', 'divide;', 'Eacute;', 'eacute;', 'Ecirc;', 'ecirc;', 'Egrave;', 'egrave;', 'empty;', 'emsp;', 'ensp;', 'Epsilon;', 'epsilon;', 'equiv;', 'Eta;', 'eta;', 'ETH;', 'eth;', 'Euml;', 'euml;', 'euro;', 'exist;', 'fnof;', 'forall;', 'frac12;', 'frac14;', 'frac34;', 'frasl;', 'Gamma;', 'gamma;', 'ge;', 'gt;', 'hArr;', 'harr;', 'hearts;', 'hellip;', 'Iacute;', 'iacute;', 'Icirc;', 'icirc;', 'iexcl;', 'Igrave;', 'igrave;', 'image;', 'infin;', 'int;', 'Iota;', 'iota;', 'iquest;', 'isin;', 'Iuml;', 'iuml;', 'Kappa;', 'kappa;', 'Lambda;', 'lambda;', 'lang;', 'laquo;', 'lArr;', 'larr;', 'lceil;', 'ldquo;', 'le;', 'lfloor;', 'lowast;', 'loz;', 'lrm;', 'lsaquo;', 'lsquo;', 'lt;', 'macr;', 'mdash;', 'micro;', 'middot;', 'minus;', 'Mu;', 'mu;', 'nabla;', 'nbsp;', 'ndash;', 'ne;', 'ni;', 'not;', 'notin;', 'nsub;', 'Ntilde;', 'ntilde;', 'Nu;', 'nu;', 'Oacute;', 'oacute;', 'Ocirc;', 'ocirc;', 'OElig;', 'oelig;', 'Ograve;', 'ograve;', 'oline;', 'Omega;', 'omega;', 'Omicron;', 'omicron;', 'oplus;', 'or;', 'ordf;', 'ordm;', 'Oslash;', 'oslash;', 'Otilde;', 'otilde;', 'otimes;', 'Ouml;', 'ouml;', 'para;', 'part;', 'permil;', 'perp;', 'Phi;', 'phi;', 'Pi;', 'pi;', 'piv;', 'plusmn;', 'pound;', 'Prime;', 'prime;', 'prod;', 'prop;', 'Psi;', 'psi;', 'quot;', 'radic;', 'rang;', 'raquo;', 'rArr;', 'rarr;', 'rceil;', 'rdquo;', 'real;', 'reg;', 'rfloor;', 'Rho;', 'rho;', 'rlm;', 'rsaquo;', 'rsquo;', 'sbquo;', 'Scaron;', 'scaron;', 'sdot;', 'sect;', 'shy;', 'Sigma;', 'sigma;', 'sigmaf;', 'sim;', 'spades;', 'sub;', 'sube;', 'sum;', 'sup;', 'sup1;', 'sup2;', 'sup3;', 'supe;', 'szlig;', 'Tau;', 'tau;', 'there4;', 'Theta;', 'theta;', 'thetasym;', 'thinsp;', 'THORN;', 'thorn;', 'tilde;', 'times;', 'trade;', 'Uacute;', 'uacute;', 'uArr;', 'uarr;', 'Ucirc;', 'ucirc;', 'Ugrave;', 'ugrave;', 'uml;', 'upsih;', 'Upsilon;', 'upsilon;', 'Uuml;', 'uuml;', 'weierp;', 'Xi;', 'xi;', 'Yacute;', 'yacute;', 'yen;', 'Yuml;', 'yuml;', 'Zeta;', 'zeta;', 'zwj;', 'zwnj;']; return values.map(function(value){ return { caption: value, snippet: value, meta: "html entity", score: Number.MAX_VALUE }; }); }; 
}).call(HtmlCompletions.prototype); exports.HtmlCompletions = HtmlCompletions; }); define("ace/mode/html",["require","exports","module","ace/lib/oop","ace/lib/lang","ace/mode/text","ace/mode/javascript","ace/mode/css","ace/mode/html_highlight_rules","ace/mode/behaviour/xml","ace/mode/folding/html","ace/mode/html_completions","ace/worker/worker_client"], function(require, exports, module) { "use strict"; var oop = require("../lib/oop"); var lang = require("../lib/lang"); var TextMode = require("./text").Mode; var JavaScriptMode = require("./javascript").Mode; var CssMode = require("./css").Mode; var HtmlHighlightRules = require("./html_highlight_rules").HtmlHighlightRules; var XmlBehaviour = require("./behaviour/xml").XmlBehaviour; var HtmlFoldMode = require("./folding/html").FoldMode; var HtmlCompletions = require("./html_completions").HtmlCompletions; var WorkerClient = require("../worker/worker_client").WorkerClient; var voidElements = ["area", "base", "br", "col", "embed", "hr", "img", "input", "keygen", "link", "meta", "menuitem", "param", "source", "track", "wbr"]; var optionalEndTags = ["li", "dt", "dd", "p", "rt", "rp", "optgroup", "option", "colgroup", "td", "th"]; var Mode = function(options) { this.fragmentContext = options && options.fragmentContext; this.HighlightRules = HtmlHighlightRules; this.$behaviour = new XmlBehaviour(); this.$completer = new HtmlCompletions(); this.createModeDelegates({ "js-": JavaScriptMode, "css-": CssMode }); this.foldingRules = new HtmlFoldMode(this.voidElements, lang.arrayToMap(optionalEndTags)); }; oop.inherits(Mode, TextMode); (function() { this.blockComment = {start: "<!--", end: "-->"}; this.voidElements = lang.arrayToMap(voidElements); this.getNextLineIndent = function(state, line, tab) { return this.$getIndent(line); }; this.checkOutdent = function(state, line, input) { return false; }; this.getCompletions = function(state, session, pos, prefix) { return this.$completer.getCompletions(state, session, pos, prefix); }; this.createWorker = function(session) { if (this.constructor != Mode) return; var worker = new WorkerClient(["ace"], "ace/mode/html_worker", "Worker"); worker.attachToDocument(session.getDocument()); if (this.fragmentContext) worker.call("setOptions", [{context: this.fragmentContext}]); worker.on("error", function(e) { session.setAnnotations(e.data); }); worker.on("terminate", function() { session.clearAnnotations(); }); return worker; }; this.$id = "ace/mode/html"; }).call(Mode.prototype); exports.Mode = Mode; }); define("ace/mode/curly_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/html_highlight_rules"], function(require, exports, module) { "use strict"; var oop = require("../lib/oop"); var HtmlHighlightRules = require("./html_highlight_rules").HtmlHighlightRules; var CurlyHighlightRules = function() { HtmlHighlightRules.call(this); this.$rules["start"].unshift({ token: "variable", regex: "{{", push: "curly-start" }); this.$rules["curly-start"] = [{ token: "variable", regex: "}}", next: "pop" }]; this.normalizeRules(); }; oop.inherits(CurlyHighlightRules, HtmlHighlightRules); exports.CurlyHighlightRules = CurlyHighlightRules; }); define("ace/mode/curly",["require","exports","module","ace/lib/oop","ace/mode/html","ace/mode/matching_brace_outdent","ace/mode/folding/html","ace/mode/curly_highlight_rules"], function(require, exports, module) { "use strict"; var oop = require("../lib/oop"); var HtmlMode = require("./html").Mode; var MatchingBraceOutdent = 
require("./matching_brace_outdent").MatchingBraceOutdent; var HtmlFoldMode = require("./folding/html").FoldMode; var CurlyHighlightRules = require("./curly_highlight_rules").CurlyHighlightRules; var Mode = function() { HtmlMode.call(this); this.HighlightRules = CurlyHighlightRules; this.$outdent = new MatchingBraceOutdent(); this.foldingRules = new HtmlFoldMode(); }; oop.inherits(Mode, HtmlMode); (function() { this.$id = "ace/mode/curly"; }).call(Mode.prototype); exports.Mode = Mode; });
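// Illustrative sketch (not part of the bundled mode-curly.js above; element id and host
// page are assumed): once the Ace loader has registered this module, the mode is
// activated on an editor session via its id, e.g.
//   var editor = ace.edit("editor");            // assumes a container with id="editor"
//   editor.session.setMode("ace/mode/curly");   // picks up CurlyHighlightRules + HTML folding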
jtrachtenberg/vhscollector
libraries/ace-builds-master/src/mode-curly.js
JavaScript
gpl-2.0
102,131
/*
 *      Copyright (C) 2005-2013 Team XBMC
 *      http://xbmc.org
 *
 *  This Program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This Program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with XBMC; see the file COPYING. If not, see
 *  <http://www.gnu.org/licenses/>.
 *
 */

#include "OggCallback.h"
#include "FileItem.h"

COggCallback::COggCallback(XFILE::CFile& file) : m_file(file)
{
}

ov_callbacks COggCallback::Get(const CStdString& strFile)
{
  // libvorbis requires that a non-seekable stream would always return -1 from seek actions.
  // so for network streams - tweak the seek method to a static one that always return -1.
  CFileItem item(strFile, false);

  ov_callbacks oggIOCallbacks;
  oggIOCallbacks.read_func=ReadCallback;
  oggIOCallbacks.seek_func=item.IsInternetStream()?NoSeekCallback:SeekCallback;
  oggIOCallbacks.tell_func=TellCallback;
  oggIOCallbacks.close_func=CloseCallback;

  return oggIOCallbacks;
}

size_t COggCallback::ReadCallback(void *ptr, size_t size, size_t nmemb, void *datasource)
{
  COggCallback* pCallback=(COggCallback*)datasource;
  if (!pCallback)
    return 0;

  return pCallback->m_file.Read(ptr, size*nmemb);
}

int COggCallback::SeekCallback(void *datasource, ogg_int64_t offset, int whence)
{
  COggCallback* pCallback=(COggCallback*)datasource;
  if (!pCallback)
    return 0;

  return (int)pCallback->m_file.Seek(offset, whence);
}

int COggCallback::NoSeekCallback(void *datasource, ogg_int64_t offset, int whence)
{
  return -1;
}

int COggCallback::CloseCallback(void *datasource)
{
  COggCallback* pCallback=(COggCallback*)datasource;
  if (!pCallback)
    return 0;

  pCallback->m_file.Close();
  return 1;
}

long COggCallback::TellCallback(void *datasource)
{
  COggCallback* pCallback=(COggCallback*)datasource;
  if (!pCallback)
    return 0;

  return (long)pCallback->m_file.GetPosition();
}
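// Illustrative sketch (not part of OggCallback.cpp above; caller names are assumed):
// the ov_callbacks table built by COggCallback::Get() is meant to be handed to
// libvorbisfile's ov_open_callbacks(), roughly like this:
//   COggCallback callback(file);                 // wraps an already-open XFILE::CFile
//   OggVorbis_File vorbisFile;
//   if (ov_open_callbacks(&callback, &vorbisFile, NULL, 0,
//                         COggCallback::Get(strFile)) < 0)
//     ; // not a valid Ogg Vorbis stream - handle the error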
kingvuplus/xbmc
xbmc/cores/paplayer/OggCallback.cpp
C++
gpl-2.0
2,326
<?php

/*
 * This file is part of the Symfony package.
 *
 * (c) Fabien Potencier <fabien@symfony.com>
 *
 * For the full copyright and license information, please view the LICENSE
 * file that was distributed with this source code.
 */

namespace Symfony\Component\Validator\Constraints;

use Symfony\Component\Intl\Intl;
use Symfony\Component\Validator\Context\ExecutionContextInterface;
use Symfony\Component\Validator\Constraint;
use Symfony\Component\Validator\ConstraintValidator;
use Symfony\Component\Validator\Exception\UnexpectedTypeException;

/**
 * Validates whether a value is a valid locale code.
 *
 * @author Bernhard Schussek <bschussek@gmail.com>
 */
class LocaleValidator extends ConstraintValidator
{
    /**
     * {@inheritdoc}
     */
    public function validate($value, Constraint $constraint)
    {
        if (!$constraint instanceof Locale) {
            throw new UnexpectedTypeException($constraint, __NAMESPACE__.'\Locale');
        }

        if (null === $value || '' === $value) {
            return;
        }

        if (!is_scalar($value) && !(is_object($value) && method_exists($value, '__toString'))) {
            throw new UnexpectedTypeException($value, 'string');
        }

        $value = (string) $value;
        $locales = Intl::getLocaleBundle()->getLocaleNames();
        $aliases = Intl::getLocaleBundle()->getAliases();

        if (!isset($locales[$value]) && !in_array($value, $aliases)) {
            if ($this->context instanceof ExecutionContextInterface) {
                $this->context->buildViolation($constraint->message)
                    ->setParameter('{{ value }}', $this->formatValue($value))
                    ->setCode(Locale::NO_SUCH_LOCALE_ERROR)
                    ->addViolation();
            } else {
                $this->buildViolation($constraint->message)
                    ->setParameter('{{ value }}', $this->formatValue($value))
                    ->setCode(Locale::NO_SUCH_LOCALE_ERROR)
                    ->addViolation();
            }
        }
    }
}
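// Illustrative usage sketch (not part of LocaleValidator.php above; the standalone
// validator setup is assumed). The class is normally reached through the Locale
// constraint rather than called directly:
//   use Symfony\Component\Validator\Validation;
//   use Symfony\Component\Validator\Constraints\Locale;
//
//   $validator = Validation::createValidator();
//   $violations = $validator->validate('fr_FR', new Locale());   // empty list => valid locale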
evasmidt/allsafe-drupal
vendor/symfony/validator/Constraints/LocaleValidator.php
PHP
gpl-2.0
2,051
/* =========================================================== # bootstrap-tour - v0.8.0 # http://bootstraptour.com # ============================================================== # Copyright 2012-2013 Ulrich Sossou # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. */ .tour-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1030;background-color:#000;opacity:.8}.tour-step-backdrop{position:relative;z-index:1031;background:inherit}.tour-step-background{position:absolute;z-index:1030;background:inherit;border-radius:6px}.popover[class*=tour-]{z-index:1030}.popover[class*=tour-] .popover-navigation{padding:9px 14px}.popover[class*=tour-] .popover-navigation [data-role=end]{float:right}.popover[class*=tour-] .popover-navigation [data-role=prev],.popover[class*=tour-] .popover-navigation [data-role=next],.popover[class*=tour-] .popover-navigation [data-role=end]{cursor:pointer}.popover[class*=tour-] .popover-navigation [data-role=prev].disabled,.popover[class*=tour-] .popover-navigation [data-role=next].disabled,.popover[class*=tour-] .popover-navigation [data-role=end].disabled{cursor:default}.popover[class*=tour-].orphan{position:fixed;margin-top:0}.popover[class*=tour-].orphan .arrow{display:none}
mowema/mmusica
wp-content/plugins/chronoforms/admin/chronoforms/assets/bootstrap-tour/bootstrap-tour.min.css
CSS
gpl-2.0
1,722
/* * Copyright 1990 - 1995, Julianne Frances Haugh * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of Julianne F. Haugh nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY JULIE HAUGH AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL JULIE HAUGH OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "t_defines.h" #ifdef _WIN32 #include <windows.h> #include <io.h> #endif /* _WIN32 */ #ifdef HAVE_UNISTD_H #include <unistd.h> #endif /* HAVE_UNISTD_H */ #include <signal.h> #include <stdio.h> static int sig_caught; #ifdef HAVE_SIGACTION static struct sigaction sigact; #endif /*ARGSUSED*/ static RETSIGTYPE sig_catch (sig) int sig; { sig_caught = 1; } _TYPE( int ) t_getpass (buf, maxlen, prompt) char *buf; unsigned maxlen; const char *prompt; { char *cp; #ifdef _WIN32 HANDLE handle = (HANDLE) _get_osfhandle(_fileno(stdin)); DWORD mode; GetConsoleMode( handle, &mode ); SetConsoleMode( handle, mode & ~ENABLE_ECHO_INPUT ); if(fputs(prompt, stdout) == EOF || fgets(buf, maxlen, stdin) == NULL) { SetConsoleMode(handle,mode); return -1; } cp = buf + strlen(buf) - 1; if ( *cp == 0x0a ) *cp = '\0'; printf("\n"); SetConsoleMode(handle,mode); #else FILE *fp; int tty_opened = 0; #ifdef HAVE_SIGACTION struct sigaction old_sigact; #else RETSIGTYPE (*old_signal)(); #endif TERMIO new_modes; TERMIO old_modes; /* * set a flag so the SIGINT signal can be re-sent if it * is caught */ sig_caught = 0; /* * if /dev/tty can't be opened, getpass() needs to read * from stdin instead. */ if ((fp = fopen ("/dev/tty", "r")) == 0) { fp = stdin; setbuf (fp, (char *) 0); } else { tty_opened = 1; } /* * the current tty modes must be saved so they can be * restored later on. 
echo will be turned off, except * for the newline character (BSD has to punt on this) */ if (GTTY (fileno (fp), &new_modes)) return -1; old_modes = new_modes; #ifdef HAVE_SIGACTION sigact.sa_handler = sig_catch; (void) sigaction (SIGINT, &sigact, &old_sigact); #else old_signal = signal (SIGINT, sig_catch); #endif #ifdef USE_SGTTY new_modes.sg_flags &= ~ECHO; #else new_modes.c_iflag &= ~IGNCR; new_modes.c_iflag |= ICRNL; new_modes.c_oflag |= OPOST|ONLCR; new_modes.c_lflag &= ~(ECHO|ECHOE|ECHOK); new_modes.c_lflag |= ICANON|ECHONL; #endif if (STTY (fileno (fp), &new_modes)) goto out; /* * the prompt is output, and the response read without * echoing. the trailing newline must be removed. if * the fgets() returns an error, a NULL pointer is * returned. */ if (fputs (prompt, stdout) == EOF) goto out; (void) fflush (stdout); if (fgets (buf, maxlen, fp) == buf) { if ((cp = strchr (buf, '\n'))) *cp = '\0'; else buf[maxlen - 1] = '\0'; #ifdef USE_SGTTY putc ('\n', stdout); #endif } else buf[0] = '\0'; out: /* * the old SIGINT handler is restored after the tty * modes. then /dev/tty is closed if it was opened in * the beginning. finally, if a signal was caught it * is sent to this process for normal processing. */ if (STTY (fileno (fp), &old_modes)) { memset (buf, 0, maxlen); return -1; } #ifdef HAVE_SIGACTION (void) sigaction (SIGINT, &old_sigact, NULL); #else (void) signal (SIGINT, old_signal); #endif if (tty_opened) (void) fclose (fp); if (sig_caught) { kill (getpid (), SIGINT); memset (buf, 0, maxlen); return -1; } #endif return 0; }
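/*
 * Illustrative only (not part of t_getpass.c above; caller names are assumed):
 * a caller reads a password into a local buffer and checks the 0 / -1 return
 * convention used by this function:
 *
 *   char passbuf[128];
 *   if (t_getpass(passbuf, sizeof(passbuf), "Password: ") < 0)
 *     return -1;   // read failed or was interrupted
 */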
ollie27/openwrt
package/network/services/ead/src/tinysrp/t_getpass.c
C
gpl-2.0
4,721
/* * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC * * PowerPC version * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * Copyright (C) 2001 IBM * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) * * Derived from "arch/i386/kernel/signal.c" * Copyright (C) 1991, 1992 Linus Torvalds * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/elf.h> #include <linux/ptrace.h> #ifdef CONFIG_PPC64 #include <linux/syscalls.h> #include <linux/compat.h> #else #include <linux/wait.h> #include <linux/unistd.h> #include <linux/stddef.h> #include <linux/tty.h> #include <linux/binfmts.h> #include <linux/freezer.h> #endif #include <asm/uaccess.h> #include <asm/cacheflush.h> #include <asm/syscalls.h> #include <asm/sigcontext.h> #include <asm/vdso.h> #ifdef CONFIG_PPC64 #include "ppc32.h" #include <asm/unistd.h> #else #include <asm/ucontext.h> #include <asm/pgtable.h> #endif #include "signal.h" #undef DEBUG_SIG #ifdef CONFIG_PPC64 #define sys_sigsuspend compat_sys_sigsuspend #define sys_rt_sigsuspend compat_sys_rt_sigsuspend #define sys_rt_sigreturn compat_sys_rt_sigreturn #define sys_sigaction compat_sys_sigaction #define sys_swapcontext compat_sys_swapcontext #define sys_sigreturn compat_sys_sigreturn #define old_sigaction old_sigaction32 #define sigcontext sigcontext32 #define mcontext mcontext32 #define ucontext ucontext32 /* * Userspace code may pass a ucontext which doesn't include VSX added * at the end. We need to check for this case. */ #define UCONTEXTSIZEWITHOUTVSX \ (sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32)) /* * Returning 0 means we return to userspace via * ret_from_except and thus restore all user * registers from *regs. This is what we need * to do when a signal has been delivered. */ #define GP_REGS_SIZE min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32)) #undef __SIGNAL_FRAMESIZE #define __SIGNAL_FRAMESIZE __SIGNAL_FRAMESIZE32 #undef ELF_NVRREG #define ELF_NVRREG ELF_NVRREG32 /* * Functions for flipping sigsets (thanks to brain dead generic * implementation that makes things simple for little endian only) */ static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set) { compat_sigset_t cset; switch (_NSIG_WORDS) { case 4: cset.sig[5] = set->sig[3] & 0xffffffffull; cset.sig[7] = set->sig[3] >> 32; case 3: cset.sig[4] = set->sig[2] & 0xffffffffull; cset.sig[5] = set->sig[2] >> 32; case 2: cset.sig[2] = set->sig[1] & 0xffffffffull; cset.sig[3] = set->sig[1] >> 32; case 1: cset.sig[0] = set->sig[0] & 0xffffffffull; cset.sig[1] = set->sig[0] >> 32; } return copy_to_user(uset, &cset, sizeof(*uset)); } static inline int get_sigset_t(sigset_t *set, const compat_sigset_t __user *uset) { compat_sigset_t s32; if (copy_from_user(&s32, uset, sizeof(*uset))) return -EFAULT; /* * Swap the 2 words of the 64-bit sigset_t (they are stored * in the "wrong" endian in 32-bit user storage). 
*/ switch (_NSIG_WORDS) { case 4: set->sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32); case 3: set->sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32); case 2: set->sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32); case 1: set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32); } return 0; } static inline int get_old_sigaction(struct k_sigaction *new_ka, struct old_sigaction __user *act) { compat_old_sigset_t mask; compat_uptr_t handler, restorer; if (get_user(handler, &act->sa_handler) || __get_user(restorer, &act->sa_restorer) || __get_user(new_ka->sa.sa_flags, &act->sa_flags) || __get_user(mask, &act->sa_mask)) return -EFAULT; new_ka->sa.sa_handler = compat_ptr(handler); new_ka->sa.sa_restorer = compat_ptr(restorer); siginitset(&new_ka->sa.sa_mask, mask); return 0; } #define to_user_ptr(p) ptr_to_compat(p) #define from_user_ptr(p) compat_ptr(p) static inline int save_general_regs(struct pt_regs *regs, struct mcontext __user *frame) { elf_greg_t64 *gregs = (elf_greg_t64 *)regs; int i; WARN_ON(!FULL_REGS(regs)); for (i = 0; i <= PT_RESULT; i ++) { if (i == 14 && !FULL_REGS(regs)) i = 32; if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i])) return -EFAULT; } return 0; } static inline int restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr) { elf_greg_t64 *gregs = (elf_greg_t64 *)regs; int i; for (i = 0; i <= PT_RESULT; i++) { if ((i == PT_MSR) || (i == PT_SOFTE)) continue; if (__get_user(gregs[i], &sr->mc_gregs[i])) return -EFAULT; } return 0; } #else /* CONFIG_PPC64 */ #define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs)) static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set) { return copy_to_user(uset, set, sizeof(*uset)); } static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset) { return copy_from_user(set, uset, sizeof(*uset)); } static inline int get_old_sigaction(struct k_sigaction *new_ka, struct old_sigaction __user *act) { old_sigset_t mask; if (!access_ok(VERIFY_READ, act, sizeof(*act)) || __get_user(new_ka->sa.sa_handler, &act->sa_handler) || __get_user(new_ka->sa.sa_restorer, &act->sa_restorer)) return -EFAULT; __get_user(new_ka->sa.sa_flags, &act->sa_flags); __get_user(mask, &act->sa_mask); siginitset(&new_ka->sa.sa_mask, mask); return 0; } #define to_user_ptr(p) ((unsigned long)(p)) #define from_user_ptr(p) ((void __user *)(p)) static inline int save_general_regs(struct pt_regs *regs, struct mcontext __user *frame) { WARN_ON(!FULL_REGS(regs)); return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE); } static inline int restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr) { /* copy up to but not including MSR */ if (__copy_from_user(regs, &sr->mc_gregs, PT_MSR * sizeof(elf_greg_t))) return -EFAULT; /* copy from orig_r3 (the word after the MSR) up to the end */ if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3], GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t))) return -EFAULT; return 0; } #endif /* CONFIG_PPC64 */ /* * Atomically swap in the new signal mask, and wait for a signal. 
*/ long sys_sigsuspend(old_sigset_t mask) { mask &= _BLOCKABLE; spin_lock_irq(&current->sighand->siglock); current->saved_sigmask = current->blocked; siginitset(&current->blocked, mask); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); current->state = TASK_INTERRUPTIBLE; schedule(); set_restore_sigmask(); return -ERESTARTNOHAND; } long sys_sigaction(int sig, struct old_sigaction __user *act, struct old_sigaction __user *oact) { struct k_sigaction new_ka, old_ka; int ret; #ifdef CONFIG_PPC64 if (sig < 0) sig = -sig; #endif if (act) { if (get_old_sigaction(&new_ka, act)) return -EFAULT; } ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); if (!ret && oact) { if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || __put_user(to_user_ptr(old_ka.sa.sa_handler), &oact->sa_handler) || __put_user(to_user_ptr(old_ka.sa.sa_restorer), &oact->sa_restorer) || __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) return -EFAULT; } return ret; } /* * When we have signals to deliver, we set up on the * user stack, going down from the original stack pointer: * an ABI gap of 56 words * an mcontext struct * a sigcontext struct * a gap of __SIGNAL_FRAMESIZE bytes * * Each of these things must be a multiple of 16 bytes in size. The following * structure represent all of this except the __SIGNAL_FRAMESIZE gap * */ struct sigframe { struct sigcontext sctx; /* the sigcontext */ struct mcontext mctx; /* all the register values */ /* * Programs using the rs6000/xcoff abi can save up to 19 gp * regs and 18 fp regs below sp before decrementing it. */ int abigap[56]; }; /* We use the mc_pad field for the signal return trampoline. */ #define tramp mc_pad /* * When we have rt signals to deliver, we set up on the * user stack, going down from the original stack pointer: * one rt_sigframe struct (siginfo + ucontext + ABI gap) * a gap of __SIGNAL_FRAMESIZE+16 bytes * (the +16 is to get the siginfo and ucontext in the same * positions as in older kernels). * * Each of these things must be a multiple of 16 bytes in size. * */ struct rt_sigframe { #ifdef CONFIG_PPC64 compat_siginfo_t info; #else struct siginfo info; #endif struct ucontext uc; /* * Programs using the rs6000/xcoff abi can save up to 19 gp * regs and 18 fp regs below sp before decrementing it. 
*/ int abigap[56]; }; #ifdef CONFIG_VSX unsigned long copy_fpr_to_user(void __user *to, struct task_struct *task) { double buf[ELF_NFPREG]; int i; /* save FPR copy to local buffer then write to the thread_struct */ for (i = 0; i < (ELF_NFPREG - 1) ; i++) buf[i] = task->thread.TS_FPR(i); memcpy(&buf[i], &task->thread.fpscr, sizeof(double)); return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double)); } unsigned long copy_fpr_from_user(struct task_struct *task, void __user *from) { double buf[ELF_NFPREG]; int i; if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double))) return 1; for (i = 0; i < (ELF_NFPREG - 1) ; i++) task->thread.TS_FPR(i) = buf[i]; memcpy(&task->thread.fpscr, &buf[i], sizeof(double)); return 0; } unsigned long copy_vsx_to_user(void __user *to, struct task_struct *task) { double buf[ELF_NVSRHALFREG]; int i; /* save FPR copy to local buffer then write to the thread_struct */ for (i = 0; i < ELF_NVSRHALFREG; i++) buf[i] = task->thread.fpr[i][TS_VSRLOWOFFSET]; return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double)); } unsigned long copy_vsx_from_user(struct task_struct *task, void __user *from) { double buf[ELF_NVSRHALFREG]; int i; if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double))) return 1; for (i = 0; i < ELF_NVSRHALFREG ; i++) task->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i]; return 0; } #else inline unsigned long copy_fpr_to_user(void __user *to, struct task_struct *task) { return __copy_to_user(to, task->thread.fpr, ELF_NFPREG * sizeof(double)); } inline unsigned long copy_fpr_from_user(struct task_struct *task, void __user *from) { return __copy_from_user(task->thread.fpr, from, ELF_NFPREG * sizeof(double)); } #endif /* * Save the current user registers on the user stack. * We only save the altivec/spe registers if the process has used * altivec/spe instructions at some point. */ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, int sigret, int ctx_has_vsx_region) { unsigned long msr = regs->msr; /* Make sure floating point registers are stored in regs */ flush_fp_to_thread(current); /* save general registers */ if (save_general_regs(regs, frame)) return 1; #ifdef CONFIG_ALTIVEC /* save altivec registers */ if (current->thread.used_vr) { flush_altivec_to_thread(current); if (__copy_to_user(&frame->mc_vregs, current->thread.vr, ELF_NVRREG * sizeof(vector128))) return 1; /* set MSR_VEC in the saved MSR value to indicate that frame->mc_vregs contains valid data */ msr |= MSR_VEC; } /* else assert((regs->msr & MSR_VEC) == 0) */ /* We always copy to/from vrsave, it's 0 if we don't have or don't * use altivec. Since VSCR only contains 32 bits saved in the least * significant bits of a vector, we "cheat" and stuff VRSAVE in the * most significant bits of that same vector. --BenH */ if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32])) return 1; #endif /* CONFIG_ALTIVEC */ if (copy_fpr_to_user(&frame->mc_fregs, current)) return 1; #ifdef CONFIG_VSX /* * Copy VSR 0-31 upper half from thread_struct to local * buffer, then write that to userspace. 
Also set MSR_VSX in * the saved MSR value to indicate that frame->mc_vregs * contains valid data */ if (current->thread.used_vsr && ctx_has_vsx_region) { __giveup_vsx(current); if (copy_vsx_to_user(&frame->mc_vsregs, current)) return 1; msr |= MSR_VSX; } #endif /* CONFIG_VSX */ #ifdef CONFIG_SPE /* save spe registers */ if (current->thread.used_spe) { flush_spe_to_thread(current); if (__copy_to_user(&frame->mc_vregs, current->thread.evr, ELF_NEVRREG * sizeof(u32))) return 1; /* set MSR_SPE in the saved MSR value to indicate that frame->mc_vregs contains valid data */ msr |= MSR_SPE; } /* else assert((regs->msr & MSR_SPE) == 0) */ /* We always copy to/from spefscr */ if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG)) return 1; #endif /* CONFIG_SPE */ if (__put_user(msr, &frame->mc_gregs[PT_MSR])) return 1; if (sigret) { /* Set up the sigreturn trampoline: li r0,sigret; sc */ if (__put_user(0x38000000UL + sigret, &frame->tramp[0]) || __put_user(0x44000002UL, &frame->tramp[1])) return 1; flush_icache_range((unsigned long) &frame->tramp[0], (unsigned long) &frame->tramp[2]); } return 0; } /* * Restore the current user register values from the user stack, * (except for MSR). */ static long restore_user_regs(struct pt_regs *regs, struct mcontext __user *sr, int sig) { long err; unsigned int save_r2 = 0; unsigned long msr; #ifdef CONFIG_VSX int i; #endif /* * restore general registers but not including MSR or SOFTE. Also * take care of keeping r2 (TLS) intact if not a signal */ if (!sig) save_r2 = (unsigned int)regs->gpr[2]; err = restore_general_regs(regs, sr); err |= __get_user(msr, &sr->mc_gregs[PT_MSR]); if (!sig) regs->gpr[2] = (unsigned long) save_r2; if (err) return 1; /* if doing signal return, restore the previous little-endian mode */ if (sig) regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); /* * Do this before updating the thread state in * current->thread.fpr/vr/evr. That way, if we get preempted * and another task grabs the FPU/Altivec/SPE, it won't be * tempted to save the current CPU state into the thread_struct * and corrupt what we are writing there. */ discard_lazy_cpu_state(); #ifdef CONFIG_ALTIVEC /* * Force the process to reload the altivec registers from * current->thread when it next does altivec instructions */ regs->msr &= ~MSR_VEC; if (msr & MSR_VEC) { /* restore altivec registers from the stack */ if (__copy_from_user(current->thread.vr, &sr->mc_vregs, sizeof(sr->mc_vregs))) return 1; } else if (current->thread.used_vr) memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128)); /* Always get VRSAVE back */ if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32])) return 1; #endif /* CONFIG_ALTIVEC */ if (copy_fpr_from_user(current, &sr->mc_fregs)) return 1; #ifdef CONFIG_VSX /* * Force the process to reload the VSX registers from * current->thread when it next does VSX instruction. 
*/ regs->msr &= ~MSR_VSX; if (msr & MSR_VSX) { /* * Restore altivec registers from the stack to a local * buffer, then write this out to the thread_struct */ if (copy_vsx_from_user(current, &sr->mc_vsregs)) return 1; } else if (current->thread.used_vsr) for (i = 0; i < 32 ; i++) current->thread.fpr[i][TS_VSRLOWOFFSET] = 0; #endif /* CONFIG_VSX */ /* * force the process to reload the FP registers from * current->thread when it next does FP instructions */ regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1); #ifdef CONFIG_SPE /* force the process to reload the spe registers from current->thread when it next does spe instructions */ regs->msr &= ~MSR_SPE; if (msr & MSR_SPE) { /* restore spe registers from the stack */ if (__copy_from_user(current->thread.evr, &sr->mc_vregs, ELF_NEVRREG * sizeof(u32))) return 1; } else if (current->thread.used_spe) memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32)); /* Always get SPEFSCR back */ if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG)) return 1; #endif /* CONFIG_SPE */ return 0; } #ifdef CONFIG_PPC64 long compat_sys_rt_sigaction(int sig, const struct sigaction32 __user *act, struct sigaction32 __user *oact, size_t sigsetsize) { struct k_sigaction new_ka, old_ka; int ret; /* XXX: Don't preclude handling different sized sigset_t's. */ if (sigsetsize != sizeof(compat_sigset_t)) return -EINVAL; if (act) { compat_uptr_t handler; ret = get_user(handler, &act->sa_handler); new_ka.sa.sa_handler = compat_ptr(handler); ret |= get_sigset_t(&new_ka.sa.sa_mask, &act->sa_mask); ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); if (ret) return -EFAULT; } ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); if (!ret && oact) { ret = put_user(to_user_ptr(old_ka.sa.sa_handler), &oact->sa_handler); ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask); ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); } return ret; } /* * Note: it is necessary to treat how as an unsigned int, with the * corresponding cast to a signed int to insure that the proper * conversion (sign extension) between the register representation * of a signed int (msr in 32-bit mode) and the register representation * of a signed int (msr in 64-bit mode) is performed. */ long compat_sys_rt_sigprocmask(u32 how, compat_sigset_t __user *set, compat_sigset_t __user *oset, size_t sigsetsize) { sigset_t s; sigset_t __user *up; int ret; mm_segment_t old_fs = get_fs(); if (set) { if (get_sigset_t(&s, set)) return -EFAULT; } set_fs(KERNEL_DS); /* This is valid because of the set_fs() */ up = (sigset_t __user *) &s; ret = sys_rt_sigprocmask((int)how, set ? up : NULL, oset ? up : NULL, sigsetsize); set_fs(old_fs); if (ret) return ret; if (oset) { if (put_sigset_t(oset, &s)) return -EFAULT; } return 0; } long compat_sys_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize) { sigset_t s; int ret; mm_segment_t old_fs = get_fs(); set_fs(KERNEL_DS); /* The __user pointer cast is valid because of the set_fs() */ ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize); set_fs(old_fs); if (!ret) { if (put_sigset_t(set, &s)) return -EFAULT; } return ret; } int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s) { int err; if (!access_ok (VERIFY_WRITE, d, sizeof(*d))) return -EFAULT; /* If you change siginfo_t structure, please be sure * this code is fixed accordingly. * It should never copy any pad contained in the structure * to avoid security leaks, but must copy the generic * 3 ints plus the relevant union member. 
* This routine must convert siginfo from 64bit to 32bit as well * at the same time. */ err = __put_user(s->si_signo, &d->si_signo); err |= __put_user(s->si_errno, &d->si_errno); err |= __put_user((short)s->si_code, &d->si_code); if (s->si_code < 0) err |= __copy_to_user(&d->_sifields._pad, &s->_sifields._pad, SI_PAD_SIZE32); else switch(s->si_code >> 16) { case __SI_CHLD >> 16: err |= __put_user(s->si_pid, &d->si_pid); err |= __put_user(s->si_uid, &d->si_uid); err |= __put_user(s->si_utime, &d->si_utime); err |= __put_user(s->si_stime, &d->si_stime); err |= __put_user(s->si_status, &d->si_status); break; case __SI_FAULT >> 16: err |= __put_user((unsigned int)(unsigned long)s->si_addr, &d->si_addr); break; case __SI_POLL >> 16: err |= __put_user(s->si_band, &d->si_band); err |= __put_user(s->si_fd, &d->si_fd); break; case __SI_TIMER >> 16: err |= __put_user(s->si_tid, &d->si_tid); err |= __put_user(s->si_overrun, &d->si_overrun); err |= __put_user(s->si_int, &d->si_int); break; case __SI_RT >> 16: /* This is not generated by the kernel as of now. */ case __SI_MESGQ >> 16: err |= __put_user(s->si_int, &d->si_int); /* fallthrough */ case __SI_KILL >> 16: default: err |= __put_user(s->si_pid, &d->si_pid); err |= __put_user(s->si_uid, &d->si_uid); break; } return err; } #define copy_siginfo_to_user copy_siginfo_to_user32 int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from) { memset(to, 0, sizeof *to); if (copy_from_user(to, from, 3*sizeof(int)) || copy_from_user(to->_sifields._pad, from->_sifields._pad, SI_PAD_SIZE32)) return -EFAULT; return 0; } /* * Note: it is necessary to treat pid and sig as unsigned ints, with the * corresponding cast to a signed int to insure that the proper conversion * (sign extension) between the register representation of a signed int * (msr in 32-bit mode) and the register representation of a signed int * (msr in 64-bit mode) is performed. */ long compat_sys_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo) { siginfo_t info; int ret; mm_segment_t old_fs = get_fs(); ret = copy_siginfo_from_user32(&info, uinfo); if (unlikely(ret)) return ret; set_fs (KERNEL_DS); /* The __user pointer cast is valid because of the set_fs() */ ret = sys_rt_sigqueueinfo((int)pid, (int)sig, (siginfo_t __user *) &info); set_fs (old_fs); return ret; } /* * Start Alternate signal stack support * * System Calls * sigaltstack compat_sys_sigaltstack */ int compat_sys_sigaltstack(u32 __new, u32 __old, int r5, int r6, int r7, int r8, struct pt_regs *regs) { stack_32_t __user * newstack = compat_ptr(__new); stack_32_t __user * oldstack = compat_ptr(__old); stack_t uss, uoss; int ret; mm_segment_t old_fs; unsigned long sp; compat_uptr_t ss_sp; /* * set sp to the user stack on entry to the system call * the system call router sets R9 to the saved registers */ sp = regs->gpr[1]; /* Put new stack info in local 64 bit stack struct */ if (newstack) { if (get_user(ss_sp, &newstack->ss_sp) || __get_user(uss.ss_flags, &newstack->ss_flags) || __get_user(uss.ss_size, &newstack->ss_size)) return -EFAULT; uss.ss_sp = compat_ptr(ss_sp); } old_fs = get_fs(); set_fs(KERNEL_DS); /* The __user pointer casts are valid because of the set_fs() */ ret = do_sigaltstack( newstack ? (stack_t __user *) &uss : NULL, oldstack ?
(stack_t __user *) &uoss : NULL, sp); set_fs(old_fs); /* Copy the stack information to the user output buffer */ if (!ret && oldstack && (put_user(ptr_to_compat(uoss.ss_sp), &oldstack->ss_sp) || __put_user(uoss.ss_flags, &oldstack->ss_flags) || __put_user(uoss.ss_size, &oldstack->ss_size))) return -EFAULT; return ret; } #endif /* CONFIG_PPC64 */ /* * Set up a signal frame for a "real-time" signal handler * (one which gets siginfo). */ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) { struct rt_sigframe __user *rt_sf; struct mcontext __user *frame; void __user *addr; unsigned long newsp = 0; /* Set up Signal Frame */ /* Put a Real Time Context onto stack */ rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf), 1); addr = rt_sf; if (unlikely(rt_sf == NULL)) goto badframe; /* Put the siginfo & fill in most of the ucontext */ if (copy_siginfo_to_user(&rt_sf->info, info) || __put_user(0, &rt_sf->uc.uc_flags) || __put_user(0, &rt_sf->uc.uc_link) || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp) || __put_user(sas_ss_flags(regs->gpr[1]), &rt_sf->uc.uc_stack.ss_flags) || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size) || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext), &rt_sf->uc.uc_regs) || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset)) goto badframe; /* Save user registers on the stack */ frame = &rt_sf->uc.uc_mcontext; addr = frame; if (vdso32_rt_sigtramp && current->mm->context.vdso_base) { if (save_user_regs(regs, frame, 0, 1)) goto badframe; regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp; } else { if (save_user_regs(regs, frame, __NR_rt_sigreturn, 1)) goto badframe; regs->link = (unsigned long) frame->tramp; } current->thread.fpscr.val = 0; /* turn off all fp exceptions */ /* create a stack frame for the caller of the handler */ newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16); addr = (void __user *)regs->gpr[1]; if (put_user(regs->gpr[1], (u32 __user *)newsp)) goto badframe; /* Fill registers for signal handler */ regs->gpr[1] = newsp; regs->gpr[3] = sig; regs->gpr[4] = (unsigned long) &rt_sf->info; regs->gpr[5] = (unsigned long) &rt_sf->uc; regs->gpr[6] = (unsigned long) rt_sf; regs->nip = (unsigned long) ka->sa.sa_handler; /* enter the signal handler in big-endian mode */ regs->msr &= ~MSR_LE; regs->trap = 0; return 1; badframe: #ifdef DEBUG_SIG printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n", regs, frame, newsp); #endif if (show_unhandled_signals && printk_ratelimit()) printk(KERN_INFO "%s[%d]: bad frame in handle_rt_signal32: " "%p nip %08lx lr %08lx\n", current->comm, current->pid, addr, regs->nip, regs->link); force_sigsegv(sig, current); return 0; } static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig) { sigset_t set; struct mcontext __user *mcp; if (get_sigset_t(&set, &ucp->uc_sigmask)) return -EFAULT; #ifdef CONFIG_PPC64 { u32 cmcp; if (__get_user(cmcp, &ucp->uc_regs)) return -EFAULT; mcp = (struct mcontext __user *)(u64)cmcp; /* no need to check access_ok(mcp), since mcp < 4GB */ } #else if (__get_user(mcp, &ucp->uc_regs)) return -EFAULT; if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp))) return -EFAULT; #endif restore_sigmask(&set); if (restore_user_regs(regs, mcp, sig)) return -EFAULT; return 0; } long sys_swapcontext(struct ucontext __user *old_ctx, struct ucontext __user *new_ctx, int ctx_size, int r6, int r7, int r8, struct pt_regs *regs) { unsigned char tmp; int ctx_has_vsx_region = 0; #ifdef CONFIG_PPC64 
unsigned long new_msr = 0; if (new_ctx) { struct mcontext __user *mcp; u32 cmcp; /* * Get pointer to the real mcontext. No need for * access_ok since we are dealing with compat * pointers. */ if (__get_user(cmcp, &new_ctx->uc_regs)) return -EFAULT; mcp = (struct mcontext __user *)(u64)cmcp; if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR])) return -EFAULT; } /* * Check that the context is not smaller than the original * size (with VMX but without VSX) */ if (ctx_size < UCONTEXTSIZEWITHOUTVSX) return -EINVAL; /* * If the new context state sets the MSR VSX bits but * it doesn't provide VSX state. */ if ((ctx_size < sizeof(struct ucontext)) && (new_msr & MSR_VSX)) return -EINVAL; /* Does the context have enough room to store VSX data? */ if (ctx_size >= sizeof(struct ucontext)) ctx_has_vsx_region = 1; #else /* Context size is for future use. Right now, we only make sure * we are passed something we understand */ if (ctx_size < sizeof(struct ucontext)) return -EINVAL; #endif if (old_ctx != NULL) { struct mcontext __user *mctx; /* * old_ctx might not be 16-byte aligned, in which * case old_ctx->uc_mcontext won't be either. * Because we have the old_ctx->uc_pad2 field * before old_ctx->uc_mcontext, we need to round down * from &old_ctx->uc_mcontext to a 16-byte boundary. */ mctx = (struct mcontext __user *) ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL); if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size) || save_user_regs(regs, mctx, 0, ctx_has_vsx_region) || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked) || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs)) return -EFAULT; } if (new_ctx == NULL) return 0; if (!access_ok(VERIFY_READ, new_ctx, ctx_size) || __get_user(tmp, (u8 __user *) new_ctx) || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1)) return -EFAULT; /* * If we get a fault copying the context into the kernel's * image of the user's registers, we can't just return -EFAULT * because the user's registers will be corrupted. For instance * the NIP value may have been updated but not some of the * other registers. Given that we have done the access_ok * and successfully read the first and last bytes of the region * above, this should only happen in an out-of-memory situation * or if another thread unmaps the region containing the context. * We kill the task with a SIGSEGV in this situation. */ if (do_setcontext(new_ctx, regs, 0)) do_exit(SIGSEGV); set_thread_flag(TIF_RESTOREALL); return 0; } long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, struct pt_regs *regs) { struct rt_sigframe __user *rt_sf; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; rt_sf = (struct rt_sigframe __user *) (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16); if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf))) goto bad; if (do_setcontext(&rt_sf->uc, regs, 1)) goto bad; /* * It's not clear whether or why it is desirable to save the * sigaltstack setting on signal delivery and restore it on * signal return. But other architectures do this and we have * always done it up until now so it is probably better not to * change it. -- paulus */ #ifdef CONFIG_PPC64 /* * We use the compat_sys_ version that does the 32/64 bits conversion * and takes userland pointer directly. What about error checking ? * nobody does any... 
*/ compat_sys_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs); #else do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]); #endif set_thread_flag(TIF_RESTOREALL); return 0; bad: if (show_unhandled_signals && printk_ratelimit()) printk(KERN_INFO "%s[%d]: bad frame in sys_rt_sigreturn: " "%p nip %08lx lr %08lx\n", current->comm, current->pid, rt_sf, regs->nip, regs->link); force_sig(SIGSEGV, current); return 0; } #ifdef CONFIG_PPC32 int sys_debug_setcontext(struct ucontext __user *ctx, int ndbg, struct sig_dbg_op __user *dbg, int r6, int r7, int r8, struct pt_regs *regs) { struct sig_dbg_op op; int i; unsigned char tmp; unsigned long new_msr = regs->msr; #ifdef CONFIG_PPC_ADV_DEBUG_REGS unsigned long new_dbcr0 = current->thread.dbcr0; #endif for (i=0; i<ndbg; i++) { if (copy_from_user(&op, dbg + i, sizeof(op))) return -EFAULT; switch (op.dbg_type) { case SIG_DBG_SINGLE_STEPPING: #ifdef CONFIG_PPC_ADV_DEBUG_REGS if (op.dbg_value) { new_msr |= MSR_DE; new_dbcr0 |= (DBCR0_IDM | DBCR0_IC); } else { new_dbcr0 &= ~DBCR0_IC; if (!DBCR_ACTIVE_EVENTS(new_dbcr0, current->thread.dbcr1)) { new_msr &= ~MSR_DE; new_dbcr0 &= ~DBCR0_IDM; } } #else if (op.dbg_value) new_msr |= MSR_SE; else new_msr &= ~MSR_SE; #endif break; case SIG_DBG_BRANCH_TRACING: #ifdef CONFIG_PPC_ADV_DEBUG_REGS return -EINVAL; #else if (op.dbg_value) new_msr |= MSR_BE; else new_msr &= ~MSR_BE; #endif break; default: return -EINVAL; } } /* We wait until here to actually install the values in the registers so if we fail in the above loop, it will not affect the contents of these registers. After this point, failure is a problem, anyway, and it's very unlikely unless the user is really doing something wrong. */ regs->msr = new_msr; #ifdef CONFIG_PPC_ADV_DEBUG_REGS current->thread.dbcr0 = new_dbcr0; #endif if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx)) || __get_user(tmp, (u8 __user *) ctx) || __get_user(tmp, (u8 __user *) (ctx + 1) - 1)) return -EFAULT; /* * If we get a fault copying the context into the kernel's * image of the user's registers, we can't just return -EFAULT * because the user's registers will be corrupted. For instance * the NIP value may have been updated but not some of the * other registers. Given that we have done the access_ok * and successfully read the first and last bytes of the region * above, this should only happen in an out-of-memory situation * or if another thread unmaps the region containing the context. * We kill the task with a SIGSEGV in this situation. */ if (do_setcontext(ctx, regs, 1)) { if (show_unhandled_signals && printk_ratelimit()) printk(KERN_INFO "%s[%d]: bad frame in " "sys_debug_setcontext: %p nip %08lx " "lr %08lx\n", current->comm, current->pid, ctx, regs->nip, regs->link); force_sig(SIGSEGV, current); goto out; } /* * It's not clear whether or why it is desirable to save the * sigaltstack setting on signal delivery and restore it on * signal return. But other architectures do this and we have * always done it up until now so it is probably better not to * change it. 
-- paulus */ do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]); set_thread_flag(TIF_RESTOREALL); out: return 0; } #endif /* * OK, we're invoking a handler */ int handle_signal32(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) { struct sigcontext __user *sc; struct sigframe __user *frame; unsigned long newsp = 0; /* Set up Signal Frame */ frame = get_sigframe(ka, regs, sizeof(*frame), 1); if (unlikely(frame == NULL)) goto badframe; sc = (struct sigcontext __user *) &frame->sctx; #if _NSIG != 64 #error "Please adjust handle_signal()" #endif if (__put_user(to_user_ptr(ka->sa.sa_handler), &sc->handler) || __put_user(oldset->sig[0], &sc->oldmask) #ifdef CONFIG_PPC64 || __put_user((oldset->sig[0] >> 32), &sc->_unused[3]) #else || __put_user(oldset->sig[1], &sc->_unused[3]) #endif || __put_user(to_user_ptr(&frame->mctx), &sc->regs) || __put_user(sig, &sc->signal)) goto badframe; if (vdso32_sigtramp && current->mm->context.vdso_base) { if (save_user_regs(regs, &frame->mctx, 0, 1)) goto badframe; regs->link = current->mm->context.vdso_base + vdso32_sigtramp; } else { if (save_user_regs(regs, &frame->mctx, __NR_sigreturn, 1)) goto badframe; regs->link = (unsigned long) frame->mctx.tramp; } current->thread.fpscr.val = 0; /* turn off all fp exceptions */ /* create a stack frame for the caller of the handler */ newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE; if (put_user(regs->gpr[1], (u32 __user *)newsp)) goto badframe; regs->gpr[1] = newsp; regs->gpr[3] = sig; regs->gpr[4] = (unsigned long) sc; regs->nip = (unsigned long) ka->sa.sa_handler; /* enter the signal handler in big-endian mode */ regs->msr &= ~MSR_LE; regs->trap = 0; return 1; badframe: #ifdef DEBUG_SIG printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n", regs, frame, newsp); #endif if (show_unhandled_signals && printk_ratelimit()) printk(KERN_INFO "%s[%d]: bad frame in handle_signal32: " "%p nip %08lx lr %08lx\n", current->comm, current->pid, frame, regs->nip, regs->link); force_sigsegv(sig, current); return 0; } /* * Do a signal return; undo the signal stack. */ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, struct pt_regs *regs) { struct sigcontext __user *sc; struct sigcontext sigctx; struct mcontext __user *sr; void __user *addr; sigset_t set; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE); addr = sc; if (copy_from_user(&sigctx, sc, sizeof(sigctx))) goto badframe; #ifdef CONFIG_PPC64 /* * Note that PPC32 puts the upper 32 bits of the sigmask in the * unused part of the signal stackframe */ set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32); #else set.sig[0] = sigctx.oldmask; set.sig[1] = sigctx._unused[3]; #endif restore_sigmask(&set); sr = (struct mcontext __user *)from_user_ptr(sigctx.regs); addr = sr; if (!access_ok(VERIFY_READ, sr, sizeof(*sr)) || restore_user_regs(regs, sr, 1)) goto badframe; set_thread_flag(TIF_RESTOREALL); return 0; badframe: if (show_unhandled_signals && printk_ratelimit()) printk(KERN_INFO "%s[%d]: bad frame in sys_sigreturn: " "%p nip %08lx lr %08lx\n", current->comm, current->pid, addr, regs->nip, regs->link); force_sig(SIGSEGV, current); return 0; }
FreeOptimusProject/android_kernel_lge_p970
arch/powerpc/kernel/signal_32.c
C
gpl-2.0
36,850
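The signal_32.c entry above implements the kernel side of 32-bit signal delivery and return (sigaction, the rt_sigframe layout, rt_sigreturn, swapcontext and the compat sigaltstack path). As an illustration only, here is a minimal userspace C sketch, not taken from that file, that exercises those paths through the standard POSIX API; every identifier in it is standard libc/POSIX, and the program itself is an assumption added for demonstration.

#define _XOPEN_SOURCE 700
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void on_sigusr1(int sig, siginfo_t *info, void *ucontext)
{
	/* Only async-signal-safe calls here: write(2) rather than printf(3). */
	const char msg[] = "caught SIGUSR1 via SA_SIGINFO handler\n";
	(void)sig;
	(void)info;
	(void)ucontext;
	(void)write(STDOUT_FILENO, msg, sizeof(msg) - 1);
}

int main(void)
{
	stack_t ss;
	struct sigaction sa;

	/* Alternate signal stack; with SA_ONSTACK the frame is built on it. */
	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) < 0) {
		perror("sigaltstack");
		return 1;
	}

	/* SA_SIGINFO requests the "real-time" frame (siginfo + ucontext). */
	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_sigusr1;
	sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGUSR1, &sa, NULL) < 0) {
		perror("sigaction");
		return 1;
	}

	raise(SIGUSR1);	/* delivery builds the signal frame; handler return unwinds it */
	return 0;
}

Delivering SIGUSR1 makes the kernel lay out a frame of the kind described above (siginfo plus ucontext, on the alternate stack), and returning from the handler goes back through the sigreturn path.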
/* * linux/arch/m68k/mac/config.c * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ /* * Miscellaneous linux stuff */ #include <linux/module.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/tty.h> #include <linux/console.h> #include <linux/interrupt.h> /* keyb */ #include <linux/random.h> #include <linux/delay.h> /* keyb */ #include <linux/init.h> #include <linux/vt_kern.h> #include <linux/platform_device.h> #include <linux/adb.h> #include <linux/cuda.h> #include <asm/setup.h> #include <asm/bootinfo.h> #include <asm/bootinfo-mac.h> #include <asm/byteorder.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/pgtable.h> #include <asm/rtc.h> #include <asm/machdep.h> #include <asm/macintosh.h> #include <asm/macints.h> #include <asm/machw.h> #include <asm/mac_iop.h> #include <asm/mac_via.h> #include <asm/mac_oss.h> #include <asm/mac_psc.h> /* Mac bootinfo struct */ struct mac_booter_data mac_bi_data; /* The phys. video addr. - might be bogus on some machines */ static unsigned long mac_orig_videoaddr; /* Mac specific timer functions */ extern u32 mac_gettimeoffset(void); extern int mac_hwclk(int, struct rtc_time *); extern int mac_set_clock_mmss(unsigned long); extern void iop_preinit(void); extern void iop_init(void); extern void via_init(void); extern void via_init_clock(irq_handler_t func); extern void via_flush_cache(void); extern void oss_init(void); extern void psc_init(void); extern void baboon_init(void); extern void mac_mksound(unsigned int, unsigned int); static void mac_get_model(char *str); static void mac_identify(void); static void mac_report_hardware(void); static void __init mac_sched_init(irq_handler_t vector) { via_init_clock(vector); } /* * Parse a Macintosh-specific record in the bootinfo */ int __init mac_parse_bootinfo(const struct bi_record *record) { int unknown = 0; const void *data = record->data; switch (be16_to_cpu(record->tag)) { case BI_MAC_MODEL: mac_bi_data.id = be32_to_cpup(data); break; case BI_MAC_VADDR: mac_bi_data.videoaddr = be32_to_cpup(data); break; case BI_MAC_VDEPTH: mac_bi_data.videodepth = be32_to_cpup(data); break; case BI_MAC_VROW: mac_bi_data.videorow = be32_to_cpup(data); break; case BI_MAC_VDIM: mac_bi_data.dimensions = be32_to_cpup(data); break; case BI_MAC_VLOGICAL: mac_orig_videoaddr = be32_to_cpup(data); mac_bi_data.videological = VIDEOMEMBASE + (mac_orig_videoaddr & ~VIDEOMEMMASK); break; case BI_MAC_SCCBASE: mac_bi_data.sccbase = be32_to_cpup(data); break; case BI_MAC_BTIME: mac_bi_data.boottime = be32_to_cpup(data); break; case BI_MAC_GMTBIAS: mac_bi_data.gmtbias = be32_to_cpup(data); break; case BI_MAC_MEMSIZE: mac_bi_data.memsize = be32_to_cpup(data); break; case BI_MAC_CPUID: mac_bi_data.cpuid = be32_to_cpup(data); break; case BI_MAC_ROMBASE: mac_bi_data.rombase = be32_to_cpup(data); break; default: unknown = 1; break; } return unknown; } /* * Flip into 24bit mode for an instant - flushes the L2 cache card. We * have to disable interrupts for this. Our IRQ handlers will crap * themselves if they take an IRQ in 24bit mode! 
*/ static void mac_cache_card_flush(int writeback) { unsigned long flags; local_irq_save(flags); via_flush_cache(); local_irq_restore(flags); } void __init config_mac(void) { if (!MACH_IS_MAC) printk(KERN_ERR "ERROR: no Mac, but config_mac() called!!\n"); mach_sched_init = mac_sched_init; mach_init_IRQ = mac_init_IRQ; mach_get_model = mac_get_model; arch_gettimeoffset = mac_gettimeoffset; mach_hwclk = mac_hwclk; mach_set_clock_mmss = mac_set_clock_mmss; mach_reset = mac_reset; mach_halt = mac_poweroff; mach_power_off = mac_poweroff; mach_max_dma_address = 0xffffffff; #if defined(CONFIG_INPUT_M68K_BEEP) || defined(CONFIG_INPUT_M68K_BEEP_MODULE) mach_beep = mac_mksound; #endif /* * Determine hardware present */ mac_identify(); mac_report_hardware(); /* * AFAIK only the IIci takes a cache card. The IIfx has onboard * cache ... someone needs to figure out how to tell if it's on or * not. */ if (macintosh_config->ident == MAC_MODEL_IICI || macintosh_config->ident == MAC_MODEL_IIFX) mach_l2_flush = mac_cache_card_flush; } /* * Macintosh Table: hardcoded model configuration data. * * Much of this was defined by Alan, based on who knows what docs. * I've added a lot more, and some of that was pure guesswork based * on hardware pages present on the Mac web site. Possibly wildly * inaccurate, so look here if a new Mac model won't run. Example: if * a Mac crashes immediately after the VIA1 registers have been dumped * to the screen, it probably died attempting to read DirB on a RBV. * Meaning it should have MAC_VIA_IICI here :-) */ struct mac_model *macintosh_config; EXPORT_SYMBOL(macintosh_config); static struct mac_model mac_data_table[] = { /* * We'll pretend to be a Macintosh II, that's pretty safe. */ { .ident = MAC_MODEL_II, .name = "Unknown", .adb_type = MAC_ADB_II, .via_type = MAC_VIA_II, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_IWM, }, /* * Original Mac II hardware */ { .ident = MAC_MODEL_II, .name = "II", .adb_type = MAC_ADB_II, .via_type = MAC_VIA_II, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_IWM, }, { .ident = MAC_MODEL_IIX, .name = "IIx", .adb_type = MAC_ADB_II, .via_type = MAC_VIA_II, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_IICX, .name = "IIcx", .adb_type = MAC_ADB_II, .via_type = MAC_VIA_II, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_SE30, .name = "SE/30", .adb_type = MAC_ADB_II, .via_type = MAC_VIA_II, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, /* * Weirdified Mac II hardware - all subtly different. Gee thanks * Apple. 
All these boxes seem to have VIA2 in a different place to * the Mac II (+1A000 rather than +4000) * CSA: see http://developer.apple.com/technotes/hw/hw_09.html */ { .ident = MAC_MODEL_IICI, .name = "IIci", .adb_type = MAC_ADB_II, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_IIFX, .name = "IIfx", .adb_type = MAC_ADB_IOP, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_IIFX, .scc_type = MAC_SCC_IOP, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_IOP, }, { .ident = MAC_MODEL_IISI, .name = "IIsi", .adb_type = MAC_ADB_IISI, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_IIVI, .name = "IIvi", .adb_type = MAC_ADB_IISI, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_LC, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_IIVX, .name = "IIvx", .adb_type = MAC_ADB_IISI, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_LC, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, /* * Classic models (guessing: similar to SE/30? Nope, similar to LC...) */ { .ident = MAC_MODEL_CLII, .name = "Classic II", .adb_type = MAC_ADB_IISI, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_LC, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_CCL, .name = "Color Classic", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_LC, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_CCLII, .name = "Color Classic II", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_LC, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, /* * Some Mac LC machines. Basically the same as the IIci, ADB like IIsi */ { .ident = MAC_MODEL_LC, .name = "LC", .adb_type = MAC_ADB_IISI, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_LC, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_LCII, .name = "LC II", .adb_type = MAC_ADB_IISI, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_LC, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_LCIII, .name = "LC III", .adb_type = MAC_ADB_IISI, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_LC, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, /* * Quadra. Video is at 0xF9000000, via is like a MacII. We label it * differently as some of the stuff connected to VIA2 seems different. * Better SCSI chip and onboard ethernet using a NatSemi SONIC except * the 660AV and 840AV which use an AMD 79C940 (MACE). * The 700, 900 and 950 have some I/O chips in the wrong place to * confuse us. The 840AV has a SCSI location of its own (same as * the 660AV). 
*/ { .ident = MAC_MODEL_Q605, .name = "Quadra 605", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR1, }, { .ident = MAC_MODEL_Q605_ACC, .name = "Quadra 605", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR1, }, { .ident = MAC_MODEL_Q610, .name = "Quadra 610", .adb_type = MAC_ADB_II, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_QUADRA, .ether_type = MAC_ETHER_SONIC, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR1, }, { .ident = MAC_MODEL_Q630, .name = "Quadra 630", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .ide_type = MAC_IDE_QUADRA, .scc_type = MAC_SCC_QUADRA, .ether_type = MAC_ETHER_SONIC, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR1, }, { .ident = MAC_MODEL_Q650, .name = "Quadra 650", .adb_type = MAC_ADB_II, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_QUADRA, .ether_type = MAC_ETHER_SONIC, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR1, }, /* The Q700 does have a NS Sonic */ { .ident = MAC_MODEL_Q700, .name = "Quadra 700", .adb_type = MAC_ADB_II, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA2, .scc_type = MAC_SCC_QUADRA, .ether_type = MAC_ETHER_SONIC, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR1, }, { .ident = MAC_MODEL_Q800, .name = "Quadra 800", .adb_type = MAC_ADB_II, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_QUADRA, .ether_type = MAC_ETHER_SONIC, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR1, }, { .ident = MAC_MODEL_Q840, .name = "Quadra 840AV", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA3, .scc_type = MAC_SCC_PSC, .ether_type = MAC_ETHER_MACE, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_AV, }, { .ident = MAC_MODEL_Q900, .name = "Quadra 900", .adb_type = MAC_ADB_IOP, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA2, .scc_type = MAC_SCC_IOP, .ether_type = MAC_ETHER_SONIC, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_IOP, }, { .ident = MAC_MODEL_Q950, .name = "Quadra 950", .adb_type = MAC_ADB_IOP, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA2, .scc_type = MAC_SCC_IOP, .ether_type = MAC_ETHER_SONIC, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_IOP, }, /* * Performa - more LC type machines */ { .ident = MAC_MODEL_P460, .name = "Performa 460", .adb_type = MAC_ADB_IISI, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_LC, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_P475, .name = "Performa 475", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR1, }, { .ident = MAC_MODEL_P475F, .name = "Performa 475", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR1, }, { .ident = MAC_MODEL_P520, .name = "Performa 520", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_LC, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_P550, .name = "Performa 550", .adb_type 
= MAC_ADB_CUDA, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_LC, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, /* These have the comm slot, and therefore possibly SONIC ethernet */ { .ident = MAC_MODEL_P575, .name = "Performa 575", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_II, .ether_type = MAC_ETHER_SONIC, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR1, }, { .ident = MAC_MODEL_P588, .name = "Performa 588", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .ide_type = MAC_IDE_QUADRA, .scc_type = MAC_SCC_II, .ether_type = MAC_ETHER_SONIC, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR1, }, { .ident = MAC_MODEL_TV, .name = "TV", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_LC, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_P600, .name = "Performa 600", .adb_type = MAC_ADB_IISI, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_LC, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, /* * Centris - just guessing again; maybe like Quadra. * The C610 may or may not have SONIC. We probe to make sure. */ { .ident = MAC_MODEL_C610, .name = "Centris 610", .adb_type = MAC_ADB_II, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_QUADRA, .ether_type = MAC_ETHER_SONIC, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR1, }, { .ident = MAC_MODEL_C650, .name = "Centris 650", .adb_type = MAC_ADB_II, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_QUADRA, .ether_type = MAC_ETHER_SONIC, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR1, }, { .ident = MAC_MODEL_C660, .name = "Centris 660AV", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA3, .scc_type = MAC_SCC_PSC, .ether_type = MAC_ETHER_MACE, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_AV, }, /* * The PowerBooks all the same "Combo" custom IC for SCSI and SCC * and a PMU (in two variations?) for ADB. Most of them use the * Quadra-style VIAs. A few models also have IDE from hell. 
*/ { .ident = MAC_MODEL_PB140, .name = "PowerBook 140", .adb_type = MAC_ADB_PB1, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB145, .name = "PowerBook 145", .adb_type = MAC_ADB_PB1, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB150, .name = "PowerBook 150", .adb_type = MAC_ADB_PB2, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_OLD, .ide_type = MAC_IDE_PB, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB160, .name = "PowerBook 160", .adb_type = MAC_ADB_PB1, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB165, .name = "PowerBook 165", .adb_type = MAC_ADB_PB1, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB165C, .name = "PowerBook 165c", .adb_type = MAC_ADB_PB1, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB170, .name = "PowerBook 170", .adb_type = MAC_ADB_PB1, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB180, .name = "PowerBook 180", .adb_type = MAC_ADB_PB1, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB180C, .name = "PowerBook 180c", .adb_type = MAC_ADB_PB1, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB190, .name = "PowerBook 190", .adb_type = MAC_ADB_PB2, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_LATE, .ide_type = MAC_IDE_BABOON, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB520, .name = "PowerBook 520", .adb_type = MAC_ADB_PB2, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_LATE, .scc_type = MAC_SCC_QUADRA, .ether_type = MAC_ETHER_SONIC, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, /* * PowerBook Duos are pretty much like normal PowerBooks * All of these probably have onboard SONIC in the Dock which * means we'll have to probe for it eventually. 
*/ { .ident = MAC_MODEL_PB210, .name = "PowerBook Duo 210", .adb_type = MAC_ADB_PB2, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_DUO, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB230, .name = "PowerBook Duo 230", .adb_type = MAC_ADB_PB2, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_DUO, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB250, .name = "PowerBook Duo 250", .adb_type = MAC_ADB_PB2, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_DUO, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB270C, .name = "PowerBook Duo 270c", .adb_type = MAC_ADB_PB2, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_DUO, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB280, .name = "PowerBook Duo 280", .adb_type = MAC_ADB_PB2, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_DUO, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB280C, .name = "PowerBook Duo 280c", .adb_type = MAC_ADB_PB2, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_DUO, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, /* * Other stuff? */ { .ident = -1 } }; static struct resource scc_a_rsrcs[] = { { .flags = IORESOURCE_MEM }, { .flags = IORESOURCE_IRQ }, }; static struct resource scc_b_rsrcs[] = { { .flags = IORESOURCE_MEM }, { .flags = IORESOURCE_IRQ }, }; struct platform_device scc_a_pdev = { .name = "scc", .id = 0, .num_resources = ARRAY_SIZE(scc_a_rsrcs), .resource = scc_a_rsrcs, }; EXPORT_SYMBOL(scc_a_pdev); struct platform_device scc_b_pdev = { .name = "scc", .id = 1, .num_resources = ARRAY_SIZE(scc_b_rsrcs), .resource = scc_b_rsrcs, }; EXPORT_SYMBOL(scc_b_pdev); static void __init mac_identify(void) { struct mac_model *m; /* Penguin data useful? */ int model = mac_bi_data.id; if (!model) { /* no bootinfo model id -> NetBSD booter was used! */ /* XXX FIXME: breaks for model > 31 */ model = (mac_bi_data.cpuid >> 2) & 63; printk(KERN_WARNING "No bootinfo model ID, using cpuid instead " "(obsolete bootloader?)\n"); } macintosh_config = mac_data_table; for (m = macintosh_config; m->ident != -1; m++) { if (m->ident == model) { macintosh_config = m; break; } } /* Set up serial port resources for the console initcall. */ scc_a_rsrcs[0].start = (resource_size_t) mac_bi_data.sccbase + 2; scc_a_rsrcs[0].end = scc_a_rsrcs[0].start; scc_b_rsrcs[0].start = (resource_size_t) mac_bi_data.sccbase; scc_b_rsrcs[0].end = scc_b_rsrcs[0].start; switch (macintosh_config->scc_type) { case MAC_SCC_PSC: scc_a_rsrcs[1].start = scc_a_rsrcs[1].end = IRQ_MAC_SCC_A; scc_b_rsrcs[1].start = scc_b_rsrcs[1].end = IRQ_MAC_SCC_B; break; default: /* On non-PSC machines, the serial ports share an IRQ. */ if (macintosh_config->ident == MAC_MODEL_IIFX) { scc_a_rsrcs[1].start = scc_a_rsrcs[1].end = IRQ_MAC_SCC; scc_b_rsrcs[1].start = scc_b_rsrcs[1].end = IRQ_MAC_SCC; } else { scc_a_rsrcs[1].start = scc_a_rsrcs[1].end = IRQ_AUTO_4; scc_b_rsrcs[1].start = scc_b_rsrcs[1].end = IRQ_AUTO_4; } break; } /* * We need to pre-init the IOPs, if any. Otherwise * the serial console won't work if the user had * the serial ports set to "Faster" mode in MacOS. 
*/ iop_preinit(); printk(KERN_INFO "Detected Macintosh model: %d\n", model); /* * Report booter data: */ printk(KERN_DEBUG " Penguin bootinfo data:\n"); printk(KERN_DEBUG " Video: addr 0x%lx " "row 0x%lx depth %lx dimensions %ld x %ld\n", mac_bi_data.videoaddr, mac_bi_data.videorow, mac_bi_data.videodepth, mac_bi_data.dimensions & 0xFFFF, mac_bi_data.dimensions >> 16); printk(KERN_DEBUG " Videological 0x%lx phys. 0x%lx, SCC at 0x%lx\n", mac_bi_data.videological, mac_orig_videoaddr, mac_bi_data.sccbase); printk(KERN_DEBUG " Boottime: 0x%lx GMTBias: 0x%lx\n", mac_bi_data.boottime, mac_bi_data.gmtbias); printk(KERN_DEBUG " Machine ID: %ld CPUid: 0x%lx memory size: 0x%lx\n", mac_bi_data.id, mac_bi_data.cpuid, mac_bi_data.memsize); iop_init(); via_init(); oss_init(); psc_init(); baboon_init(); #ifdef CONFIG_ADB_CUDA find_via_cuda(); #endif } static void __init mac_report_hardware(void) { printk(KERN_INFO "Apple Macintosh %s\n", macintosh_config->name); } static void mac_get_model(char *str) { strcpy(str, "Macintosh "); strcat(str, macintosh_config->name); } static struct resource swim_rsrc = { .flags = IORESOURCE_MEM }; static struct platform_device swim_pdev = { .name = "swim", .id = -1, .num_resources = 1, .resource = &swim_rsrc, }; static const struct resource mac_scsi_iifx_rsrc[] __initconst = { { .flags = IORESOURCE_IRQ, .start = IRQ_MAC_SCSI, .end = IRQ_MAC_SCSI, }, { .flags = IORESOURCE_MEM, .start = 0x50008000, .end = 0x50009FFF, }, }; static const struct resource mac_scsi_duo_rsrc[] __initconst = { { .flags = IORESOURCE_MEM, .start = 0xFEE02000, .end = 0xFEE03FFF, }, }; static const struct resource mac_scsi_old_rsrc[] __initconst = { { .flags = IORESOURCE_IRQ, .start = IRQ_MAC_SCSI, .end = IRQ_MAC_SCSI, }, { .flags = IORESOURCE_MEM, .start = 0x50010000, .end = 0x50011FFF, }, { .flags = IORESOURCE_MEM, .start = 0x50006000, .end = 0x50007FFF, }, }; static const struct resource mac_scsi_late_rsrc[] __initconst = { { .flags = IORESOURCE_IRQ, .start = IRQ_MAC_SCSI, .end = IRQ_MAC_SCSI, }, { .flags = IORESOURCE_MEM, .start = 0x50010000, .end = 0x50011FFF, }, }; static const struct resource mac_scsi_ccl_rsrc[] __initconst = { { .flags = IORESOURCE_IRQ, .start = IRQ_MAC_SCSI, .end = IRQ_MAC_SCSI, }, { .flags = IORESOURCE_MEM, .start = 0x50F10000, .end = 0x50F11FFF, }, { .flags = IORESOURCE_MEM, .start = 0x50F06000, .end = 0x50F07FFF, }, }; static struct platform_device esp_0_pdev = { .name = "mac_esp", .id = 0, }; static struct platform_device esp_1_pdev = { .name = "mac_esp", .id = 1, }; static struct platform_device sonic_pdev = { .name = "macsonic", .id = -1, }; static struct platform_device mace_pdev = { .name = "macmace", .id = -1, }; int __init mac_platform_init(void) { u8 *swim_base; if (!MACH_IS_MAC) return -ENODEV; /* * Serial devices */ platform_device_register(&scc_a_pdev); platform_device_register(&scc_b_pdev); /* * Floppy device */ switch (macintosh_config->floppy_type) { case MAC_FLOPPY_SWIM_ADDR1: swim_base = (u8 *)(VIA1_BASE + 0x1E000); break; case MAC_FLOPPY_SWIM_ADDR2: swim_base = (u8 *)(VIA1_BASE + 0x16000); break; default: swim_base = NULL; break; } if (swim_base) { swim_rsrc.start = (resource_size_t) swim_base, swim_rsrc.end = (resource_size_t) swim_base + 0x2000, platform_device_register(&swim_pdev); } /* * SCSI device(s) */ switch (macintosh_config->scsi_type) { case MAC_SCSI_QUADRA: case MAC_SCSI_QUADRA3: platform_device_register(&esp_0_pdev); break; case MAC_SCSI_QUADRA2: platform_device_register(&esp_0_pdev); if ((macintosh_config->ident == MAC_MODEL_Q900) || 
(macintosh_config->ident == MAC_MODEL_Q950)) platform_device_register(&esp_1_pdev); break; case MAC_SCSI_IIFX: /* Addresses from The Guide to Mac Family Hardware. * $5000 8000 - $5000 9FFF: SCSI DMA * $5000 C000 - $5000 DFFF: Alternate SCSI (DMA) * $5000 E000 - $5000 FFFF: Alternate SCSI (Hsk) * The SCSI DMA custom IC embeds the 53C80 core. mac_scsi does * not make use of its DMA or hardware handshaking logic. */ platform_device_register_simple("mac_scsi", 0, mac_scsi_iifx_rsrc, ARRAY_SIZE(mac_scsi_iifx_rsrc)); break; case MAC_SCSI_DUO: /* Addresses from the Duo Dock II Developer Note. * $FEE0 2000 - $FEE0 3FFF: normal mode * $FEE0 4000 - $FEE0 5FFF: pseudo DMA without /DRQ * $FEE0 6000 - $FEE0 7FFF: pseudo DMA with /DRQ * The NetBSD code indicates that both 5380 chips share * an IRQ (?) which would need careful handling (see mac_esp). */ platform_device_register_simple("mac_scsi", 1, mac_scsi_duo_rsrc, ARRAY_SIZE(mac_scsi_duo_rsrc)); /* fall through */ case MAC_SCSI_OLD: /* Addresses from Developer Notes for Duo System, * PowerBook 180 & 160, 140 & 170, Macintosh IIsi * and also from The Guide to Mac Family Hardware for * SE/30, II, IIx, IIcx, IIci. * $5000 6000 - $5000 7FFF: pseudo-DMA with /DRQ * $5001 0000 - $5001 1FFF: normal mode * $5001 2000 - $5001 3FFF: pseudo-DMA without /DRQ * GMFH says that $5000 0000 - $50FF FFFF "wraps * $5000 0000 - $5001 FFFF eight times" (!) * mess.org says IIci and Color Classic do not alias * I/O address space. */ platform_device_register_simple("mac_scsi", 0, mac_scsi_old_rsrc, ARRAY_SIZE(mac_scsi_old_rsrc)); break; case MAC_SCSI_LATE: /* PDMA logic in 68040 PowerBooks is somehow different to * '030 models. It's probably more like Quadras (see mac_esp). */ platform_device_register_simple("mac_scsi", 0, mac_scsi_late_rsrc, ARRAY_SIZE(mac_scsi_late_rsrc)); break; case MAC_SCSI_LC: /* Addresses from Mac LC data in Designing Cards & Drivers 3ed. * Also from the Developer Notes for Classic II, LC III, * Color Classic and IIvx. * $50F0 6000 - $50F0 7FFF: SCSI handshake * $50F1 0000 - $50F1 1FFF: SCSI * $50F1 2000 - $50F1 3FFF: SCSI DMA */ platform_device_register_simple("mac_scsi", 0, mac_scsi_ccl_rsrc, ARRAY_SIZE(mac_scsi_ccl_rsrc)); break; } /* * Ethernet device */ switch (macintosh_config->ether_type) { case MAC_ETHER_SONIC: platform_device_register(&sonic_pdev); break; case MAC_ETHER_MACE: platform_device_register(&mace_pdev); break; } return 0; } arch_initcall(mac_platform_init);
lucaspcamargo/litmus-rt
arch/m68k/mac/config.c
C
gpl-2.0
29,061
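The config.c entry above drives hardware setup from a hard-coded, sentinel-terminated model table: mac_identify() walks mac_data_table looking for the bootinfo model id and otherwise stays on the first ("Unknown") entry as a safe default. The following stand-alone C sketch, with hypothetical names not taken from that file, shows the same lookup-with-default pattern in isolation.

#include <stdio.h>

struct board_model {
	int ident;		/* -1 terminates the table */
	const char *name;
};

static const struct board_model model_table[] = {
	{ 10, "Unknown (safe default)" },	/* default entry, like the fake "Mac II" */
	{ 11, "Example IIx" },
	{ 12, "Example IIcx" },
	{ -1, NULL }
};

static const struct board_model *identify(int ident)
{
	const struct board_model *m = &model_table[0];	/* fall back to the default */
	const struct board_model *p;

	for (p = model_table; p->ident != -1; p++) {
		if (p->ident == ident) {
			m = p;
			break;
		}
	}
	return m;
}

int main(void)
{
	printf("ident 11 -> %s\n", identify(11)->name);	/* Example IIx */
	printf("ident 99 -> %s\n", identify(99)->name);	/* falls back to the default */
	return 0;
}

Letting the first entry double as the fallback keeps the "unknown hardware" case on a conservative configuration instead of a NULL pointer.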
/* * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch> * Copyright (C) 2004 Microtronix Datacom Ltd * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/types.h> #include <linux/string.h> void *memmove(void *d, const void *s, size_t count) { unsigned long dst, src; if (!count) return d; if (d < s) { dst = (unsigned long) d; src = (unsigned long) s; if ((count < 8) || ((dst ^ src) & 3)) goto restup; if (dst & 1) { *(char *)dst++ = *(char *)src++; count--; } if (dst & 2) { *(short *)dst = *(short *)src; src += 2; dst += 2; count -= 2; } while (count > 3) { *(long *)dst = *(long *)src; src += 4; dst += 4; count -= 4; } restup: while (count--) *(char *)dst++ = *(char *)src++; } else { dst = (unsigned long) d + count; src = (unsigned long) s + count; if ((count < 8) || ((dst ^ src) & 3)) goto restdown; if (dst & 1) { src--; dst--; count--; *(char *)dst = *(char *)src; } if (dst & 2) { src -= 2; dst -= 2; count -= 2; *(short *)dst = *(short *)src; } while (count > 3) { src -= 4; dst -= 4; count -= 4; *(long *)dst = *(long *)src; } restdown: while (count--) { src--; dst--; *(char *)dst = *(char *)src; } } return d; }
jarodwilson/linux-muck
arch/nios2/lib/memmove.c
C
gpl-2.0
1,422
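The memmove.c entry above copies forwards when the destination lies below the source and backwards otherwise, switching to word-sized copies once both pointers share alignment; that direction choice is what makes overlapping copies safe. A small demonstration of the semantics the arch code has to preserve, using only the standard library memmove:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char fwd[16] = "abcdef";
	char bwd[16] = "abcdef";

	/* dst > src and the ranges overlap: the copy must run backwards. */
	memmove(bwd + 2, bwd, 6);
	bwd[8] = '\0';
	printf("%s\n", bwd);	/* prints "ababcdef" */

	/* dst < src: a simple forward copy is safe (the d < s branch above). */
	memmove(fwd, fwd + 2, 4);
	printf("%s\n", fwd);	/* prints "cdefef" */

	return 0;
}

memcpy gives no such guarantee for overlapping ranges, which is exactly why the implementation checks d < s before picking a copy direction.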
/* see http://davidnewton.ca/the-current-state-of-hyphenation-on-the-web
   http://davidnewton.ca/demos/hyphenation/test.html

   There are three tests:
   1. csshyphens      - tests hyphens:auto actually adds hyphens to text
   2. softhyphens     - tests that &shy; does its job
   3. softhyphensfind - tests that in-browser Find functionality still works correctly with &shy;

   These tests currently require document.body to be present

   Hyphenation is language specific, sometimes.
   See for more details: http://code.google.com/p/hyphenator/source/diff?spec=svn975&r=975&format=side&path=/trunk/Hyphenator.js#sc_svn975_313

   If loading Hyphenator.js via Modernizr.load, be cautious of issue 158: http://code.google.com/p/hyphenator/issues/detail?id=158

   More details at https://github.com/Modernizr/Modernizr/issues/312
*/
(function() {

  if (!document.body){
    window.console && console.warn('document.body doesn\'t exist. Modernizr hyphens test needs it.');
    return;
  }

  // functional test of adding hyphens:auto
  function test_hyphens_css() {
    try {
      /* create a div container and a span within that
       * these have to be appended to document.body, otherwise some browsers can give false negative */
      var div = document.createElement('div'),
          span = document.createElement('span'),
          divStyle = div.style,
          spanHeight = 0,
          spanWidth = 0,
          result = false,
          firstChild = document.body.firstElementChild || document.body.firstChild;
      div.appendChild(span);
      span.innerHTML = 'Bacon ipsum dolor sit amet jerky velit in culpa hamburger et. Laborum dolor proident, enim dolore duis commodo et strip steak. Salami anim et, veniam consectetur dolore qui tenderloin jowl velit sirloin. Et ad culpa, fatback cillum jowl ball tip ham hock nulla short ribs pariatur aute. Pig pancetta ham bresaola, ut boudin nostrud commodo flank esse cow tongue culpa. Pork belly bresaola enim pig, ea consectetur nisi. Fugiat officia turkey, ea cow jowl pariatur ullamco proident do laborum velit sausage. Magna biltong sint tri-tip commodo sed bacon, esse proident aliquip. Ullamco ham sint fugiat, velit in enim sed mollit nulla cow ut adipisicing nostrud consectetur. Proident dolore beef ribs, laborum nostrud meatball ea laboris rump cupidatat labore culpa. Shankle minim beef, velit sint cupidatat fugiat tenderloin pig et ball tip. Ut cow fatback salami, bacon ball tip et in shank strip steak bresaola. In ut pork belly sed mollit tri-tip magna culpa veniam, short ribs qui in andouille ham consequat. Dolore bacon t-bone, velit short ribs enim strip steak nulla. Voluptate labore ut, biltong swine irure jerky. Cupidatat excepteur aliquip salami dolore. Ball tip strip steak in pork dolor. Ad in esse biltong. Dolore tenderloin exercitation ad pork loin t-bone, dolore in chicken ball tip qui pig. Ut culpa tongue, sint ribeye dolore ex shank voluptate hamburger. Jowl et tempor, boudin pork chop labore ham hock drumstick consectetur tri-tip elit swine meatball chicken ground round. Proident shankle mollit dolore. Shoulder ut duis t-bone quis reprehenderit. Meatloaf dolore minim strip steak, laboris ea aute bacon beef ribs elit shank in veniam drumstick qui. Ex laboris meatball cow tongue pork belly. Ea ball tip reprehenderit pig, sed fatback boudin dolore flank aliquip laboris eu quis. Beef ribs duis beef, cow corned beef adipisicing commodo nisi deserunt exercitation. Cillum dolor t-bone spare ribs, ham hock est sirloin. Brisket irure meatloaf in, boudin pork belly sirloin ball tip. Sirloin sint irure nisi nostrud aliqua. Nostrud nulla aute, enim officia culpa ham hock. Aliqua reprehenderit dolore sunt nostrud sausage, ea boudin pork loin ut t-bone ham tempor. Tri-tip et pancetta drumstick laborum. Ham hock magna do nostrud in proident. Ex ground round fatback, venison non ribeye in.';
      document.body.insertBefore(div, firstChild);

      /* get size of unhyphenated text */
      divStyle.cssText = 'position:absolute;top:0;left:0;width:5em;text-align:justify;text-justification:newspaper;';
      spanHeight = span.offsetHeight;
      spanWidth = span.offsetWidth;

      /* compare size with hyphenated text */
      divStyle.cssText = 'position:absolute;top:0;left:0;width:5em;text-align:justify;'+
                         'text-justification:newspaper;'+
                         Modernizr._prefixes.join('hyphens:auto; ');
      result = (span.offsetHeight != spanHeight || span.offsetWidth != spanWidth);

      /* results and cleanup */
      document.body.removeChild(div);
      div.removeChild(span);

      return result;
    } catch(e) {
      return false;
    }
  }

  // for the softhyphens test
  function test_hyphens(delimiter, testWidth) {
    try {
      /* create a div container and a span within that
       * these have to be appended to document.body, otherwise some browsers can give false negative */
      var div = document.createElement('div'),
          span = document.createElement('span'),
          divStyle = div.style,
          spanSize = 0,
          result = false,
          result1 = false,
          result2 = false,
          firstChild = document.body.firstElementChild || document.body.firstChild;

      divStyle.cssText = 'position:absolute;top:0;left:0;overflow:visible;width:1.25em;';
      div.appendChild(span);
      document.body.insertBefore(div, firstChild);

      /* get height of unwrapped text */
      span.innerHTML = 'mm';
      spanSize = span.offsetHeight;

      /* compare height w/ delimiter, to see if it wraps to new line */
      span.innerHTML = 'm' + delimiter + 'm';
      result1 = (span.offsetHeight > spanSize);

      /* if we're testing the width too (i.e. for soft-hyphen, not zws),
       * this is because tested Blackberry devices will wrap the text but not display the hyphen */
      if (testWidth) {
        /* get width of wrapped, non-hyphenated text */
        span.innerHTML = 'm<br />m';
        spanSize = span.offsetWidth;

        /* compare width w/ wrapped w/ delimiter to see if hyphen is present */
        span.innerHTML = 'm' + delimiter + 'm';
        result2 = (span.offsetWidth > spanSize);
      } else {
        result2 = true;
      }

      /* results and cleanup */
      if (result1 === true && result2 === true) { result = true; }
      document.body.removeChild(div);
      div.removeChild(span);

      return result;
    } catch(e) {
      return false;
    }
  }

  // testing if in-browser Find functionality will work on hyphenated text
  function test_hyphens_find(delimiter) {
    try {
      /* create a dummy input for resetting selection location, and a div container
       * these have to be appended to document.body, otherwise some browsers can give false negative
       * div container gets the doubled testword, separated by the delimiter
       * Note: giving a width to div gives false positive in iOS Safari */
      var dummy = document.createElement('input'),
          div = document.createElement('div'),
          testword = 'lebowski',
          result = false,
          textrange,
          firstChild = document.body.firstElementChild || document.body.firstChild;

      div.innerHTML = testword + delimiter + testword;
      document.body.insertBefore(div, firstChild);
      document.body.insertBefore(dummy, div);

      /* reset the selection to the dummy input element, i.e. BEFORE the div container
       * stackoverflow.com/questions/499126/jquery-set-cursor-position-in-text-area */
      if (dummy.setSelectionRange) {
        dummy.focus();
        dummy.setSelectionRange(0,0);
      } else if (dummy.createTextRange) {
        textrange = dummy.createTextRange();
        textrange.collapse(true);
        textrange.moveEnd('character', 0);
        textrange.moveStart('character', 0);
        textrange.select();
      }

      /* try to find the doubled testword, without the delimiter */
      if (window.find) {
        result = window.find(testword + testword);
      } else {
        try {
          textrange = window.self.document.body.createTextRange();
          result = textrange.findText(testword + testword);
        } catch(e) {
          result = false;
        }
      }
      document.body.removeChild(div);
      document.body.removeChild(dummy);

      return result;
    } catch(e) {
      return false;
    }
  }

  Modernizr.addTest("csshyphens", function() {

    if (!Modernizr.testAllProps('hyphens')) return false;

    /* Chrome lies about its hyphens support so we need a more robust test
       crbug.com/107111 */
    try {
      return test_hyphens_css();
    } catch(e) {
      return false;
    }
  });

  Modernizr.addTest("softhyphens", function() {
    try {
      // use numeric entity instead of &shy; in case it's XHTML
      return test_hyphens('&#173;', true) && test_hyphens('&#8203;', false);
    } catch(e) {
      return false;
    }
  });

  Modernizr.addTest("softhyphensfind", function() {
    try {
      return test_hyphens_find('&#173;') && test_hyphens_find('&#8203;');
    } catch(e) {
      return false;
    }
  });

})();
Socratacom/socrata-europe
wp-content/themes/socrata/bower_components/modernizr/feature-detects/css-hyphens.js
JavaScript
gpl-2.0
8,658
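Taken together, the three detects in the file above cover native CSS hyphenation, soft-hyphen rendering, and in-page Find across soft hyphens. The snippet below is an illustrative consumption sketch, not part of the Socrata theme: it assumes a standard Modernizr build (each detect exposed as a boolean property on the Modernizr object), the optional Modernizr.load plugin mentioned in the file's own header comment, and a hypothetical js/hyphenator.js fallback path.

// Usage sketch (assumptions: Modernizr.load plugin is bundled; 'js/hyphenator.js' is a hypothetical path)
if (Modernizr.csshyphens) {
  // Native hyphens:auto works; CSS alone is enough, no script fallback needed.
  document.documentElement.className += ' use-css-hyphens';
} else if (Modernizr.load) {
  Modernizr.load({
    // A JS hyphenator only helps if soft hyphens render and in-page Find still works across them.
    test: Modernizr.softhyphens && Modernizr.softhyphensfind,
    yep: 'js/hyphenator.js',
    complete: function () {
      // Guarded because complete() also fires when the test was false and nothing was loaded.
      if (window.Hyphenator) { Hyphenator.run(); }
    }
  });
}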
#undef BLOCKMOVE #define Z_WAKE #undef Z_EXT_CHARS_IN_BUFFER /* * This file contains the driver for the Cyclades async multiport * serial boards. * * Initially written by Randolph Bentson <bentson@grieg.seaslug.org>. * Modified and maintained by Marcio Saito <marcio@cyclades.com>. * * Copyright (C) 2007-2009 Jiri Slaby <jirislaby@gmail.com> * * Much of the design and some of the code came from serial.c * which was copyright (C) 1991, 1992 Linus Torvalds. It was * extensively rewritten by Theodore Ts'o, 8/16/92 -- 9/14/92, * and then fixed as suggested by Michael K. Johnson 12/12/92. * Converted to pci probing and cleaned up by Jiri Slaby. * */ #define CY_VERSION "2.6" /* If you need to install more boards than NR_CARDS, change the constant in the definition below. No other change is necessary to support up to eight boards. Beyond that you'll have to extend cy_isa_addresses. */ #define NR_CARDS 4 /* If the total number of ports is larger than NR_PORTS, change this constant in the definition below. No other change is necessary to support more boards/ports. */ #define NR_PORTS 256 #define ZO_V1 0 #define ZO_V2 1 #define ZE_V1 2 #define SERIAL_PARANOIA_CHECK #undef CY_DEBUG_OPEN #undef CY_DEBUG_THROTTLE #undef CY_DEBUG_OTHER #undef CY_DEBUG_IO #undef CY_DEBUG_COUNT #undef CY_DEBUG_DTR #undef CY_DEBUG_INTERRUPTS #undef CY_16Y_HACK #undef CY_ENABLE_MONITORING #undef CY_PCI_DEBUG /* * Include section */ #include <linux/module.h> #include <linux/errno.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial.h> #include <linux/major.h> #include <linux/string.h> #include <linux/fcntl.h> #include <linux/ptrace.h> #include <linux/cyclades.h> #include <linux/mm.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/bitops.h> #include <linux/firmware.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/uaccess.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/stat.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> static void cy_send_xchar(struct tty_struct *tty, char ch); #ifndef SERIAL_XMIT_SIZE #define SERIAL_XMIT_SIZE (min(PAGE_SIZE, 4096)) #endif #define STD_COM_FLAGS (0) /* firmware stuff */ #define ZL_MAX_BLOCKS 16 #define DRIVER_VERSION 0x02010203 #define RAM_SIZE 0x80000 enum zblock_type { ZBLOCK_PRG = 0, ZBLOCK_FPGA = 1 }; struct zfile_header { char name[64]; char date[32]; char aux[32]; u32 n_config; u32 config_offset; u32 n_blocks; u32 block_offset; u32 reserved[9]; } __attribute__ ((packed)); struct zfile_config { char name[64]; u32 mailbox; u32 function; u32 n_blocks; u32 block_list[ZL_MAX_BLOCKS]; } __attribute__ ((packed)); struct zfile_block { u32 type; u32 file_offset; u32 ram_offset; u32 size; } __attribute__ ((packed)); static struct tty_driver *cy_serial_driver; #ifdef CONFIG_ISA /* This is the address lookup table. The driver will probe for Cyclom-Y/ISA boards at all addresses in here. If you want the driver to probe addresses at a different address, add it to this table. If the driver is probing some other board and causing problems, remove the offending address from this table. 
*/ static unsigned int cy_isa_addresses[] = { 0xD0000, 0xD2000, 0xD4000, 0xD6000, 0xD8000, 0xDA000, 0xDC000, 0xDE000, 0, 0, 0, 0, 0, 0, 0, 0 }; #define NR_ISA_ADDRS ARRAY_SIZE(cy_isa_addresses) static long maddr[NR_CARDS]; static int irq[NR_CARDS]; module_param_array(maddr, long, NULL, 0); module_param_array(irq, int, NULL, 0); #endif /* CONFIG_ISA */ /* This is the per-card data structure containing address, irq, number of channels, etc. This driver supports a maximum of NR_CARDS cards. */ static struct cyclades_card cy_card[NR_CARDS]; static int cy_next_channel; /* next minor available */ /* * This is used to look up the divisor speeds and the timeouts * We're normally limited to 15 distinct baud rates. The extra * are accessed via settings in info->port.flags. * 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, * 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, * HI VHI * 20 */ static const int baud_table[] = { 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800, 9600, 19200, 38400, 57600, 76800, 115200, 150000, 230400, 0 }; static const char baud_co_25[] = { /* 25 MHz clock option table */ /* value => 00 01 02 03 04 */ /* divide by 8 32 128 512 2048 */ 0x00, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, 0x02, 0x02, 0x02, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const char baud_bpr_25[] = { /* 25 MHz baud rate period table */ 0x00, 0xf5, 0xa3, 0x6f, 0x5c, 0x51, 0xf5, 0xa3, 0x51, 0xa3, 0x6d, 0x51, 0xa3, 0x51, 0xa3, 0x51, 0x36, 0x29, 0x1b, 0x15 }; static const char baud_co_60[] = { /* 60 MHz clock option table (CD1400 J) */ /* value => 00 01 02 03 04 */ /* divide by 8 32 128 512 2048 */ 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, 0x02, 0x02, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const char baud_bpr_60[] = { /* 60 MHz baud rate period table (CD1400 J) */ 0x00, 0x82, 0x21, 0xff, 0xdb, 0xc3, 0x92, 0x62, 0xc3, 0x62, 0x41, 0xc3, 0x62, 0xc3, 0x62, 0xc3, 0x82, 0x62, 0x41, 0x32, 0x21 }; static const char baud_cor3[] = { /* receive threshold */ 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x09, 0x09, 0x08, 0x08, 0x08, 0x08, 0x07, 0x07 }; /* * The Cyclades driver implements HW flow control as any serial driver. * The cyclades_port structure member rflow and the vector rflow_thr * allows us to take advantage of a special feature in the CD1400 to avoid * data loss even when the system interrupt latency is too high. These flags * are to be used only with very special applications. Setting these flags * requires the use of a special cable (DTR and RTS reversed). In the new * CD1400-based boards (rev. 6.00 or later), there is no need for special * cables. */ static const char rflow_thr[] = { /* rflow threshold */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a }; /* The Cyclom-Ye has placed the sequential chips in non-sequential * address order. This look-up table overcomes that problem. 
*/ static const unsigned int cy_chip_offset[] = { 0x0000, 0x0400, 0x0800, 0x0C00, 0x0200, 0x0600, 0x0A00, 0x0E00 }; /* PCI related definitions */ #ifdef CONFIG_PCI static const struct pci_device_id cy_pci_dev_id[] = { /* PCI < 1Mb */ { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Y_Lo) }, /* PCI > 1Mb */ { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Y_Hi) }, /* 4Y PCI < 1Mb */ { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_4Y_Lo) }, /* 4Y PCI > 1Mb */ { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_4Y_Hi) }, /* 8Y PCI < 1Mb */ { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_8Y_Lo) }, /* 8Y PCI > 1Mb */ { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_8Y_Hi) }, /* Z PCI < 1Mb */ { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Z_Lo) }, /* Z PCI > 1Mb */ { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Z_Hi) }, { } /* end of table */ }; MODULE_DEVICE_TABLE(pci, cy_pci_dev_id); #endif static void cy_start(struct tty_struct *); static void cy_set_line_char(struct cyclades_port *, struct tty_struct *); static int cyz_issue_cmd(struct cyclades_card *, __u32, __u8, __u32); #ifdef CONFIG_ISA static unsigned detect_isa_irq(void __iomem *); #endif /* CONFIG_ISA */ #ifndef CONFIG_CYZ_INTR static void cyz_poll(unsigned long); /* The Cyclades-Z polling cycle is defined by this variable */ static long cyz_polling_cycle = CZ_DEF_POLL; static DEFINE_TIMER(cyz_timerlist, cyz_poll, 0, 0); #else /* CONFIG_CYZ_INTR */ static void cyz_rx_restart(unsigned long); static struct timer_list cyz_rx_full_timer[NR_PORTS]; #endif /* CONFIG_CYZ_INTR */ static inline void cyy_writeb(struct cyclades_port *port, u32 reg, u8 val) { struct cyclades_card *card = port->card; cy_writeb(port->u.cyy.base_addr + (reg << card->bus_index), val); } static inline u8 cyy_readb(struct cyclades_port *port, u32 reg) { struct cyclades_card *card = port->card; return readb(port->u.cyy.base_addr + (reg << card->bus_index)); } static inline bool cy_is_Z(struct cyclades_card *card) { return card->num_chips == (unsigned int)-1; } static inline bool __cyz_fpga_loaded(struct RUNTIME_9060 __iomem *ctl_addr) { return readl(&ctl_addr->init_ctrl) & (1 << 17); } static inline bool cyz_fpga_loaded(struct cyclades_card *card) { return __cyz_fpga_loaded(card->ctl_addr.p9060); } static inline bool cyz_is_loaded(struct cyclades_card *card) { struct FIRM_ID __iomem *fw_id = card->base_addr + ID_ADDRESS; return (card->hw_ver == ZO_V1 || cyz_fpga_loaded(card)) && readl(&fw_id->signature) == ZFIRM_ID; } static inline int serial_paranoia_check(struct cyclades_port *info, const char *name, const char *routine) { #ifdef SERIAL_PARANOIA_CHECK if (!info) { printk(KERN_WARNING "cyc Warning: null cyclades_port for (%s) " "in %s\n", name, routine); return 1; } if (info->magic != CYCLADES_MAGIC) { printk(KERN_WARNING "cyc Warning: bad magic number for serial " "struct (%s) in %s\n", name, routine); return 1; } #endif return 0; } /***********************************************************/ /********* Start of block of Cyclom-Y specific code ********/ /* This routine waits up to 1000 micro-seconds for the previous command to the Cirrus chip to complete and then issues the new command. An error is returned if the previous command didn't finish within the time limit. This function is only called from inside spinlock-protected code. 
*/ static int __cyy_issue_cmd(void __iomem *base_addr, u8 cmd, int index) { void __iomem *ccr = base_addr + (CyCCR << index); unsigned int i; /* Check to see that the previous command has completed */ for (i = 0; i < 100; i++) { if (readb(ccr) == 0) break; udelay(10L); } /* if the CCR never cleared, the previous command didn't finish within the "reasonable time" */ if (i == 100) return -1; /* Issue the new command */ cy_writeb(ccr, cmd); return 0; } static inline int cyy_issue_cmd(struct cyclades_port *port, u8 cmd) { return __cyy_issue_cmd(port->u.cyy.base_addr, cmd, port->card->bus_index); } #ifdef CONFIG_ISA /* ISA interrupt detection code */ static unsigned detect_isa_irq(void __iomem *address) { int irq; unsigned long irqs, flags; int save_xir, save_car; int index = 0; /* IRQ probing is only for ISA */ /* forget possible initially masked and pending IRQ */ irq = probe_irq_off(probe_irq_on()); /* Clear interrupts on the board first */ cy_writeb(address + (Cy_ClrIntr << index), 0); /* Cy_ClrIntr is 0x1800 */ irqs = probe_irq_on(); /* Wait ... */ msleep(5); /* Enable the Tx interrupts on the CD1400 */ local_irq_save(flags); cy_writeb(address + (CyCAR << index), 0); __cyy_issue_cmd(address, CyCHAN_CTL | CyENB_XMTR, index); cy_writeb(address + (CyCAR << index), 0); cy_writeb(address + (CySRER << index), readb(address + (CySRER << index)) | CyTxRdy); local_irq_restore(flags); /* Wait ... */ msleep(5); /* Check which interrupt is in use */ irq = probe_irq_off(irqs); /* Clean up */ save_xir = (u_char) readb(address + (CyTIR << index)); save_car = readb(address + (CyCAR << index)); cy_writeb(address + (CyCAR << index), (save_xir & 0x3)); cy_writeb(address + (CySRER << index), readb(address + (CySRER << index)) & ~CyTxRdy); cy_writeb(address + (CyTIR << index), (save_xir & 0x3f)); cy_writeb(address + (CyCAR << index), (save_car)); cy_writeb(address + (Cy_ClrIntr << index), 0); /* Cy_ClrIntr is 0x1800 */ return (irq > 0) ? irq : 0; } #endif /* CONFIG_ISA */ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip, void __iomem *base_addr) { struct cyclades_port *info; struct tty_port *port; int len, index = cinfo->bus_index; u8 ivr, save_xir, channel, save_car, data, char_count; #ifdef CY_DEBUG_INTERRUPTS printk(KERN_DEBUG "cyy_interrupt: rcvd intr, chip %d\n", chip); #endif /* determine the channel & change to that context */ save_xir = readb(base_addr + (CyRIR << index)); channel = save_xir & CyIRChannel; info = &cinfo->ports[channel + chip * 4]; port = &info->port; save_car = cyy_readb(info, CyCAR); cyy_writeb(info, CyCAR, save_xir); ivr = cyy_readb(info, CyRIVR) & CyIVRMask; /* there is an open port for this data */ if (ivr == CyIVRRxEx) { /* exception */ data = cyy_readb(info, CyRDSR); /* For statistics only */ if (data & CyBREAK) info->icount.brk++; else if (data & CyFRAME) info->icount.frame++; else if (data & CyPARITY) info->icount.parity++; else if (data & CyOVERRUN) info->icount.overrun++; if (data & info->ignore_status_mask) { info->icount.rx++; return; } if (tty_buffer_request_room(port, 1)) { if (data & info->read_status_mask) { if (data & CyBREAK) { tty_insert_flip_char(port, cyy_readb(info, CyRDSR), TTY_BREAK); info->icount.rx++; if (port->flags & ASYNC_SAK) { struct tty_struct *tty = tty_port_tty_get(port); if (tty) { do_SAK(tty); tty_kref_put(tty); } } } else if (data & CyFRAME) { tty_insert_flip_char(port, cyy_readb(info, CyRDSR), TTY_FRAME); info->icount.rx++; info->idle_stats.frame_errs++; } else if (data & CyPARITY) { /* Pieces of seven... 
*/ tty_insert_flip_char(port, cyy_readb(info, CyRDSR), TTY_PARITY); info->icount.rx++; info->idle_stats.parity_errs++; } else if (data & CyOVERRUN) { tty_insert_flip_char(port, 0, TTY_OVERRUN); info->icount.rx++; /* If the flip buffer itself is overflowing, we still lose the next incoming character. */ tty_insert_flip_char(port, cyy_readb(info, CyRDSR), TTY_FRAME); info->icount.rx++; info->idle_stats.overruns++; /* These two conditions may imply */ /* a normal read should be done. */ /* } else if(data & CyTIMEOUT) { */ /* } else if(data & CySPECHAR) { */ } else { tty_insert_flip_char(port, 0, TTY_NORMAL); info->icount.rx++; } } else { tty_insert_flip_char(port, 0, TTY_NORMAL); info->icount.rx++; } } else { /* there was a software buffer overrun and nothing * could be done about it!!! */ info->icount.buf_overrun++; info->idle_stats.overruns++; } } else { /* normal character reception */ /* load # chars available from the chip */ char_count = cyy_readb(info, CyRDCR); #ifdef CY_ENABLE_MONITORING ++info->mon.int_count; info->mon.char_count += char_count; if (char_count > info->mon.char_max) info->mon.char_max = char_count; info->mon.char_last = char_count; #endif len = tty_buffer_request_room(port, char_count); while (len--) { data = cyy_readb(info, CyRDSR); tty_insert_flip_char(port, data, TTY_NORMAL); info->idle_stats.recv_bytes++; info->icount.rx++; #ifdef CY_16Y_HACK udelay(10L); #endif } info->idle_stats.recv_idle = jiffies; } tty_schedule_flip(port); /* end of service */ cyy_writeb(info, CyRIR, save_xir & 0x3f); cyy_writeb(info, CyCAR, save_car); } static void cyy_chip_tx(struct cyclades_card *cinfo, unsigned int chip, void __iomem *base_addr) { struct cyclades_port *info; struct tty_struct *tty; int char_count, index = cinfo->bus_index; u8 save_xir, channel, save_car, outch; /* Since we only get here when the transmit buffer is empty, we know we can always stuff a dozen characters. */ #ifdef CY_DEBUG_INTERRUPTS printk(KERN_DEBUG "cyy_interrupt: xmit intr, chip %d\n", chip); #endif /* determine the channel & change to that context */ save_xir = readb(base_addr + (CyTIR << index)); channel = save_xir & CyIRChannel; save_car = readb(base_addr + (CyCAR << index)); cy_writeb(base_addr + (CyCAR << index), save_xir); info = &cinfo->ports[channel + chip * 4]; tty = tty_port_tty_get(&info->port); if (tty == NULL) { cyy_writeb(info, CySRER, cyy_readb(info, CySRER) & ~CyTxRdy); goto end; } /* load the on-chip space for outbound data */ char_count = info->xmit_fifo_size; if (info->x_char) { /* send special char */ outch = info->x_char; cyy_writeb(info, CyTDR, outch); char_count--; info->icount.tx++; info->x_char = 0; } if (info->breakon || info->breakoff) { if (info->breakon) { cyy_writeb(info, CyTDR, 0); cyy_writeb(info, CyTDR, 0x81); info->breakon = 0; char_count -= 2; } if (info->breakoff) { cyy_writeb(info, CyTDR, 0); cyy_writeb(info, CyTDR, 0x83); info->breakoff = 0; char_count -= 2; } } while (char_count-- > 0) { if (!info->xmit_cnt) { if (cyy_readb(info, CySRER) & CyTxMpty) { cyy_writeb(info, CySRER, cyy_readb(info, CySRER) & ~CyTxMpty); } else { cyy_writeb(info, CySRER, CyTxMpty | (cyy_readb(info, CySRER) & ~CyTxRdy)); } goto done; } if (info->port.xmit_buf == NULL) { cyy_writeb(info, CySRER, cyy_readb(info, CySRER) & ~CyTxRdy); goto done; } if (tty->stopped || tty->hw_stopped) { cyy_writeb(info, CySRER, cyy_readb(info, CySRER) & ~CyTxRdy); goto done; } /* Because the Embedded Transmit Commands have been enabled, * we must check to see if the escape character, NULL, is being * sent. 
If it is, we must ensure that there is room for it to * be doubled in the output stream. Therefore we no longer * advance the pointer when the character is fetched, but * rather wait until after the check for a NULL output * character. This is necessary because there may not be room * for the two chars needed to send a NULL.) */ outch = info->port.xmit_buf[info->xmit_tail]; if (outch) { info->xmit_cnt--; info->xmit_tail = (info->xmit_tail + 1) & (SERIAL_XMIT_SIZE - 1); cyy_writeb(info, CyTDR, outch); info->icount.tx++; } else { if (char_count > 1) { info->xmit_cnt--; info->xmit_tail = (info->xmit_tail + 1) & (SERIAL_XMIT_SIZE - 1); cyy_writeb(info, CyTDR, outch); cyy_writeb(info, CyTDR, 0); info->icount.tx++; char_count--; } } } done: tty_wakeup(tty); tty_kref_put(tty); end: /* end of service */ cyy_writeb(info, CyTIR, save_xir & 0x3f); cyy_writeb(info, CyCAR, save_car); } static void cyy_chip_modem(struct cyclades_card *cinfo, int chip, void __iomem *base_addr) { struct cyclades_port *info; struct tty_struct *tty; int index = cinfo->bus_index; u8 save_xir, channel, save_car, mdm_change, mdm_status; /* determine the channel & change to that context */ save_xir = readb(base_addr + (CyMIR << index)); channel = save_xir & CyIRChannel; info = &cinfo->ports[channel + chip * 4]; save_car = cyy_readb(info, CyCAR); cyy_writeb(info, CyCAR, save_xir); mdm_change = cyy_readb(info, CyMISR); mdm_status = cyy_readb(info, CyMSVR1); tty = tty_port_tty_get(&info->port); if (!tty) goto end; if (mdm_change & CyANY_DELTA) { /* For statistics only */ if (mdm_change & CyDCD) info->icount.dcd++; if (mdm_change & CyCTS) info->icount.cts++; if (mdm_change & CyDSR) info->icount.dsr++; if (mdm_change & CyRI) info->icount.rng++; wake_up_interruptible(&info->port.delta_msr_wait); } if ((mdm_change & CyDCD) && (info->port.flags & ASYNC_CHECK_CD)) { if (mdm_status & CyDCD) wake_up_interruptible(&info->port.open_wait); else tty_hangup(tty); } if ((mdm_change & CyCTS) && tty_port_cts_enabled(&info->port)) { if (tty->hw_stopped) { if (mdm_status & CyCTS) { /* cy_start isn't used because... !!! */ tty->hw_stopped = 0; cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyTxRdy); tty_wakeup(tty); } } else { if (!(mdm_status & CyCTS)) { /* cy_stop isn't used because ... !!! */ tty->hw_stopped = 1; cyy_writeb(info, CySRER, cyy_readb(info, CySRER) & ~CyTxRdy); } } } /* if (mdm_change & CyDSR) { } if (mdm_change & CyRI) { }*/ tty_kref_put(tty); end: /* end of service */ cyy_writeb(info, CyMIR, save_xir & 0x3f); cyy_writeb(info, CyCAR, save_car); } /* The real interrupt service routine is called whenever the card wants its hand held--chars received, out buffer empty, modem change, etc. */ static irqreturn_t cyy_interrupt(int irq, void *dev_id) { int status; struct cyclades_card *cinfo = dev_id; void __iomem *base_addr, *card_base_addr; unsigned int chip, too_many, had_work; int index; if (unlikely(cinfo == NULL)) { #ifdef CY_DEBUG_INTERRUPTS printk(KERN_DEBUG "cyy_interrupt: spurious interrupt %d\n", irq); #endif return IRQ_NONE; /* spurious interrupt */ } card_base_addr = cinfo->base_addr; index = cinfo->bus_index; /* card was not initialized yet (e.g. DEBUG_SHIRQ) */ if (unlikely(card_base_addr == NULL)) return IRQ_HANDLED; /* This loop checks all chips in the card. Make a note whenever _any_ chip had some work to do, as this is considered an indication that there will be more to do. Only when no chip has any work does this outermost loop exit. 
*/ do { had_work = 0; for (chip = 0; chip < cinfo->num_chips; chip++) { base_addr = cinfo->base_addr + (cy_chip_offset[chip] << index); too_many = 0; while ((status = readb(base_addr + (CySVRR << index))) != 0x00) { had_work++; /* The purpose of the following test is to ensure that no chip can monopolize the driver. This forces the chips to be checked in a round-robin fashion (after draining each of a bunch (1000) of characters). */ if (1000 < too_many++) break; spin_lock(&cinfo->card_lock); if (status & CySRReceive) /* rx intr */ cyy_chip_rx(cinfo, chip, base_addr); if (status & CySRTransmit) /* tx intr */ cyy_chip_tx(cinfo, chip, base_addr); if (status & CySRModem) /* modem intr */ cyy_chip_modem(cinfo, chip, base_addr); spin_unlock(&cinfo->card_lock); } } } while (had_work); /* clear interrupts */ spin_lock(&cinfo->card_lock); cy_writeb(card_base_addr + (Cy_ClrIntr << index), 0); /* Cy_ClrIntr is 0x1800 */ spin_unlock(&cinfo->card_lock); return IRQ_HANDLED; } /* cyy_interrupt */ static void cyy_change_rts_dtr(struct cyclades_port *info, unsigned int set, unsigned int clear) { struct cyclades_card *card = info->card; int channel = info->line - card->first_line; u32 rts, dtr, msvrr, msvrd; channel &= 0x03; if (info->rtsdtr_inv) { msvrr = CyMSVR2; msvrd = CyMSVR1; rts = CyDTR; dtr = CyRTS; } else { msvrr = CyMSVR1; msvrd = CyMSVR2; rts = CyRTS; dtr = CyDTR; } if (set & TIOCM_RTS) { cyy_writeb(info, CyCAR, channel); cyy_writeb(info, msvrr, rts); } if (clear & TIOCM_RTS) { cyy_writeb(info, CyCAR, channel); cyy_writeb(info, msvrr, ~rts); } if (set & TIOCM_DTR) { cyy_writeb(info, CyCAR, channel); cyy_writeb(info, msvrd, dtr); #ifdef CY_DEBUG_DTR printk(KERN_DEBUG "cyc:set_modem_info raising DTR\n"); printk(KERN_DEBUG " status: 0x%x, 0x%x\n", cyy_readb(info, CyMSVR1), cyy_readb(info, CyMSVR2)); #endif } if (clear & TIOCM_DTR) { cyy_writeb(info, CyCAR, channel); cyy_writeb(info, msvrd, ~dtr); #ifdef CY_DEBUG_DTR printk(KERN_DEBUG "cyc:set_modem_info dropping DTR\n"); printk(KERN_DEBUG " status: 0x%x, 0x%x\n", cyy_readb(info, CyMSVR1), cyy_readb(info, CyMSVR2)); #endif } } /***********************************************************/ /********* End of block of Cyclom-Y specific code **********/ /******** Start of block of Cyclades-Z specific code *******/ /***********************************************************/ static int cyz_fetch_msg(struct cyclades_card *cinfo, __u32 *channel, __u8 *cmd, __u32 *param) { struct BOARD_CTRL __iomem *board_ctrl = cinfo->board_ctrl; unsigned long loc_doorbell; loc_doorbell = readl(&cinfo->ctl_addr.p9060->loc_doorbell); if (loc_doorbell) { *cmd = (char)(0xff & loc_doorbell); *channel = readl(&board_ctrl->fwcmd_channel); *param = (__u32) readl(&board_ctrl->fwcmd_param); cy_writel(&cinfo->ctl_addr.p9060->loc_doorbell, 0xffffffff); return 1; } return 0; } /* cyz_fetch_msg */ static int cyz_issue_cmd(struct cyclades_card *cinfo, __u32 channel, __u8 cmd, __u32 param) { struct BOARD_CTRL __iomem *board_ctrl = cinfo->board_ctrl; __u32 __iomem *pci_doorbell; unsigned int index; if (!cyz_is_loaded(cinfo)) return -1; index = 0; pci_doorbell = &cinfo->ctl_addr.p9060->pci_doorbell; while ((readl(pci_doorbell) & 0xff) != 0) { if (index++ == 1000) return (int)(readl(pci_doorbell) & 0xff); udelay(50L); } cy_writel(&board_ctrl->hcmd_channel, channel); cy_writel(&board_ctrl->hcmd_param, param); cy_writel(pci_doorbell, (long)cmd); return 0; } /* cyz_issue_cmd */ static void cyz_handle_rx(struct cyclades_port *info) { struct BUF_CTRL __iomem *buf_ctrl = info->u.cyz.buf_ctrl; 
struct cyclades_card *cinfo = info->card; struct tty_port *port = &info->port; unsigned int char_count; int len; #ifdef BLOCKMOVE unsigned char *buf; #else char data; #endif __u32 rx_put, rx_get, new_rx_get, rx_bufsize, rx_bufaddr; rx_get = new_rx_get = readl(&buf_ctrl->rx_get); rx_put = readl(&buf_ctrl->rx_put); rx_bufsize = readl(&buf_ctrl->rx_bufsize); rx_bufaddr = readl(&buf_ctrl->rx_bufaddr); if (rx_put >= rx_get) char_count = rx_put - rx_get; else char_count = rx_put - rx_get + rx_bufsize; if (!char_count) return; #ifdef CY_ENABLE_MONITORING info->mon.int_count++; info->mon.char_count += char_count; if (char_count > info->mon.char_max) info->mon.char_max = char_count; info->mon.char_last = char_count; #endif #ifdef BLOCKMOVE /* we'd like to use memcpy(t, f, n) and memset(s, c, count) for performance, but because of buffer boundaries, there may be several steps to the operation */ while (1) { len = tty_prepare_flip_string(port, &buf, char_count); if (!len) break; len = min_t(unsigned int, min(len, char_count), rx_bufsize - new_rx_get); memcpy_fromio(buf, cinfo->base_addr + rx_bufaddr + new_rx_get, len); new_rx_get = (new_rx_get + len) & (rx_bufsize - 1); char_count -= len; info->icount.rx += len; info->idle_stats.recv_bytes += len; } #else len = tty_buffer_request_room(port, char_count); while (len--) { data = readb(cinfo->base_addr + rx_bufaddr + new_rx_get); new_rx_get = (new_rx_get + 1) & (rx_bufsize - 1); tty_insert_flip_char(port, data, TTY_NORMAL); info->idle_stats.recv_bytes++; info->icount.rx++; } #endif #ifdef CONFIG_CYZ_INTR /* Recalculate the number of chars in the RX buffer and issue a cmd in case it's higher than the RX high water mark */ rx_put = readl(&buf_ctrl->rx_put); if (rx_put >= rx_get) char_count = rx_put - rx_get; else char_count = rx_put - rx_get + rx_bufsize; if (char_count >= readl(&buf_ctrl->rx_threshold) && !timer_pending(&cyz_rx_full_timer[ info->line])) mod_timer(&cyz_rx_full_timer[info->line], jiffies + 1); #endif info->idle_stats.recv_idle = jiffies; tty_schedule_flip(&info->port); /* Update rx_get */ cy_writel(&buf_ctrl->rx_get, new_rx_get); } static void cyz_handle_tx(struct cyclades_port *info) { struct BUF_CTRL __iomem *buf_ctrl = info->u.cyz.buf_ctrl; struct cyclades_card *cinfo = info->card; struct tty_struct *tty; u8 data; unsigned int char_count; #ifdef BLOCKMOVE int small_count; #endif __u32 tx_put, tx_get, tx_bufsize, tx_bufaddr; if (info->xmit_cnt <= 0) /* Nothing to transmit */ return; tx_get = readl(&buf_ctrl->tx_get); tx_put = readl(&buf_ctrl->tx_put); tx_bufsize = readl(&buf_ctrl->tx_bufsize); tx_bufaddr = readl(&buf_ctrl->tx_bufaddr); if (tx_put >= tx_get) char_count = tx_get - tx_put - 1 + tx_bufsize; else char_count = tx_get - tx_put - 1; if (!char_count) return; tty = tty_port_tty_get(&info->port); if (tty == NULL) goto ztxdone; if (info->x_char) { /* send special char */ data = info->x_char; cy_writeb(cinfo->base_addr + tx_bufaddr + tx_put, data); tx_put = (tx_put + 1) & (tx_bufsize - 1); info->x_char = 0; char_count--; info->icount.tx++; } #ifdef BLOCKMOVE while (0 < (small_count = min_t(unsigned int, tx_bufsize - tx_put, min_t(unsigned int, (SERIAL_XMIT_SIZE - info->xmit_tail), min_t(unsigned int, info->xmit_cnt, char_count))))) { memcpy_toio((char *)(cinfo->base_addr + tx_bufaddr + tx_put), &info->port.xmit_buf[info->xmit_tail], small_count); tx_put = (tx_put + small_count) & (tx_bufsize - 1); char_count -= small_count; info->icount.tx += small_count; info->xmit_cnt -= small_count; info->xmit_tail = (info->xmit_tail + small_count) 
& (SERIAL_XMIT_SIZE - 1); } #else while (info->xmit_cnt && char_count) { data = info->port.xmit_buf[info->xmit_tail]; info->xmit_cnt--; info->xmit_tail = (info->xmit_tail + 1) & (SERIAL_XMIT_SIZE - 1); cy_writeb(cinfo->base_addr + tx_bufaddr + tx_put, data); tx_put = (tx_put + 1) & (tx_bufsize - 1); char_count--; info->icount.tx++; } #endif tty_wakeup(tty); tty_kref_put(tty); ztxdone: /* Update tx_put */ cy_writel(&buf_ctrl->tx_put, tx_put); } static void cyz_handle_cmd(struct cyclades_card *cinfo) { struct BOARD_CTRL __iomem *board_ctrl = cinfo->board_ctrl; struct cyclades_port *info; __u32 channel, param, fw_ver; __u8 cmd; int special_count; int delta_count; fw_ver = readl(&board_ctrl->fw_version); while (cyz_fetch_msg(cinfo, &channel, &cmd, &param) == 1) { special_count = 0; delta_count = 0; info = &cinfo->ports[channel]; switch (cmd) { case C_CM_PR_ERROR: tty_insert_flip_char(&info->port, 0, TTY_PARITY); info->icount.rx++; special_count++; break; case C_CM_FR_ERROR: tty_insert_flip_char(&info->port, 0, TTY_FRAME); info->icount.rx++; special_count++; break; case C_CM_RXBRK: tty_insert_flip_char(&info->port, 0, TTY_BREAK); info->icount.rx++; special_count++; break; case C_CM_MDCD: info->icount.dcd++; delta_count++; if (info->port.flags & ASYNC_CHECK_CD) { u32 dcd = fw_ver > 241 ? param : readl(&info->u.cyz.ch_ctrl->rs_status); if (dcd & C_RS_DCD) wake_up_interruptible(&info->port.open_wait); else tty_port_tty_hangup(&info->port, false); } break; case C_CM_MCTS: info->icount.cts++; delta_count++; break; case C_CM_MRI: info->icount.rng++; delta_count++; break; case C_CM_MDSR: info->icount.dsr++; delta_count++; break; #ifdef Z_WAKE case C_CM_IOCTLW: complete(&info->shutdown_wait); break; #endif #ifdef CONFIG_CYZ_INTR case C_CM_RXHIWM: case C_CM_RXNNDT: case C_CM_INTBACK2: /* Reception Interrupt */ #ifdef CY_DEBUG_INTERRUPTS printk(KERN_DEBUG "cyz_interrupt: rcvd intr, card %d, " "port %ld\n", info->card, channel); #endif cyz_handle_rx(info); break; case C_CM_TXBEMPTY: case C_CM_TXLOWWM: case C_CM_INTBACK: /* Transmission Interrupt */ #ifdef CY_DEBUG_INTERRUPTS printk(KERN_DEBUG "cyz_interrupt: xmit intr, card %d, " "port %ld\n", info->card, channel); #endif cyz_handle_tx(info); break; #endif /* CONFIG_CYZ_INTR */ case C_CM_FATAL: /* should do something with this !!! 
*/ break; default: break; } if (delta_count) wake_up_interruptible(&info->port.delta_msr_wait); if (special_count) tty_schedule_flip(&info->port); } } #ifdef CONFIG_CYZ_INTR static irqreturn_t cyz_interrupt(int irq, void *dev_id) { struct cyclades_card *cinfo = dev_id; if (unlikely(!cyz_is_loaded(cinfo))) { #ifdef CY_DEBUG_INTERRUPTS printk(KERN_DEBUG "cyz_interrupt: board not yet loaded " "(IRQ%d).\n", irq); #endif return IRQ_NONE; } /* Handle the interrupts */ cyz_handle_cmd(cinfo); return IRQ_HANDLED; } /* cyz_interrupt */ static void cyz_rx_restart(unsigned long arg) { struct cyclades_port *info = (struct cyclades_port *)arg; struct cyclades_card *card = info->card; int retval; __u32 channel = info->line - card->first_line; unsigned long flags; spin_lock_irqsave(&card->card_lock, flags); retval = cyz_issue_cmd(card, channel, C_CM_INTBACK2, 0L); if (retval != 0) { printk(KERN_ERR "cyc:cyz_rx_restart retval on ttyC%d was %x\n", info->line, retval); } spin_unlock_irqrestore(&card->card_lock, flags); } #else /* CONFIG_CYZ_INTR */ static void cyz_poll(unsigned long arg) { struct cyclades_card *cinfo; struct cyclades_port *info; unsigned long expires = jiffies + HZ; unsigned int port, card; for (card = 0; card < NR_CARDS; card++) { cinfo = &cy_card[card]; if (!cy_is_Z(cinfo)) continue; if (!cyz_is_loaded(cinfo)) continue; /* Skip first polling cycle to avoid racing conditions with the FW */ if (!cinfo->intr_enabled) { cinfo->intr_enabled = 1; continue; } cyz_handle_cmd(cinfo); for (port = 0; port < cinfo->nports; port++) { info = &cinfo->ports[port]; if (!info->throttle) cyz_handle_rx(info); cyz_handle_tx(info); } /* poll every 'cyz_polling_cycle' period */ expires = jiffies + cyz_polling_cycle; } mod_timer(&cyz_timerlist, expires); } /* cyz_poll */ #endif /* CONFIG_CYZ_INTR */ /********** End of block of Cyclades-Z specific code *********/ /***********************************************************/ /* This is called whenever a port becomes active; interrupts are enabled and DTR & RTS are turned on. */ static int cy_startup(struct cyclades_port *info, struct tty_struct *tty) { struct cyclades_card *card; unsigned long flags; int retval = 0; int channel; unsigned long page; card = info->card; channel = info->line - card->first_line; page = get_zeroed_page(GFP_KERNEL); if (!page) return -ENOMEM; spin_lock_irqsave(&card->card_lock, flags); if (info->port.flags & ASYNC_INITIALIZED) goto errout; if (!info->type) { set_bit(TTY_IO_ERROR, &tty->flags); goto errout; } if (info->port.xmit_buf) free_page(page); else info->port.xmit_buf = (unsigned char *)page; spin_unlock_irqrestore(&card->card_lock, flags); cy_set_line_char(info, tty); if (!cy_is_Z(card)) { channel &= 0x03; spin_lock_irqsave(&card->card_lock, flags); cyy_writeb(info, CyCAR, channel); cyy_writeb(info, CyRTPR, (info->default_timeout ? 
info->default_timeout : 0x02)); /* 10ms rx timeout */ cyy_issue_cmd(info, CyCHAN_CTL | CyENB_RCVR | CyENB_XMTR); cyy_change_rts_dtr(info, TIOCM_RTS | TIOCM_DTR, 0); cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyRxData); } else { struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl; if (!cyz_is_loaded(card)) return -ENODEV; #ifdef CY_DEBUG_OPEN printk(KERN_DEBUG "cyc startup Z card %d, channel %d, " "base_addr %p\n", card, channel, card->base_addr); #endif spin_lock_irqsave(&card->card_lock, flags); cy_writel(&ch_ctrl->op_mode, C_CH_ENABLE); #ifdef Z_WAKE #ifdef CONFIG_CYZ_INTR cy_writel(&ch_ctrl->intr_enable, C_IN_TXBEMPTY | C_IN_TXLOWWM | C_IN_RXHIWM | C_IN_RXNNDT | C_IN_IOCTLW | C_IN_MDCD); #else cy_writel(&ch_ctrl->intr_enable, C_IN_IOCTLW | C_IN_MDCD); #endif /* CONFIG_CYZ_INTR */ #else #ifdef CONFIG_CYZ_INTR cy_writel(&ch_ctrl->intr_enable, C_IN_TXBEMPTY | C_IN_TXLOWWM | C_IN_RXHIWM | C_IN_RXNNDT | C_IN_MDCD); #else cy_writel(&ch_ctrl->intr_enable, C_IN_MDCD); #endif /* CONFIG_CYZ_INTR */ #endif /* Z_WAKE */ retval = cyz_issue_cmd(card, channel, C_CM_IOCTL, 0L); if (retval != 0) { printk(KERN_ERR "cyc:startup(1) retval on ttyC%d was " "%x\n", info->line, retval); } /* Flush RX buffers before raising DTR and RTS */ retval = cyz_issue_cmd(card, channel, C_CM_FLUSH_RX, 0L); if (retval != 0) { printk(KERN_ERR "cyc:startup(2) retval on ttyC%d was " "%x\n", info->line, retval); } /* set timeout !!! */ /* set RTS and DTR !!! */ tty_port_raise_dtr_rts(&info->port); /* enable send, recv, modem !!! */ } info->port.flags |= ASYNC_INITIALIZED; clear_bit(TTY_IO_ERROR, &tty->flags); info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; info->breakon = info->breakoff = 0; memset((char *)&info->idle_stats, 0, sizeof(info->idle_stats)); info->idle_stats.in_use = info->idle_stats.recv_idle = info->idle_stats.xmit_idle = jiffies; spin_unlock_irqrestore(&card->card_lock, flags); #ifdef CY_DEBUG_OPEN printk(KERN_DEBUG "cyc startup done\n"); #endif return 0; errout: spin_unlock_irqrestore(&card->card_lock, flags); free_page(page); return retval; } /* startup */ static void start_xmit(struct cyclades_port *info) { struct cyclades_card *card = info->card; unsigned long flags; int channel = info->line - card->first_line; if (!cy_is_Z(card)) { spin_lock_irqsave(&card->card_lock, flags); cyy_writeb(info, CyCAR, channel & 0x03); cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyTxRdy); spin_unlock_irqrestore(&card->card_lock, flags); } else { #ifdef CONFIG_CYZ_INTR int retval; spin_lock_irqsave(&card->card_lock, flags); retval = cyz_issue_cmd(card, channel, C_CM_INTBACK, 0L); if (retval != 0) { printk(KERN_ERR "cyc:start_xmit retval on ttyC%d was " "%x\n", info->line, retval); } spin_unlock_irqrestore(&card->card_lock, flags); #else /* CONFIG_CYZ_INTR */ /* Don't have to do anything at this time */ #endif /* CONFIG_CYZ_INTR */ } } /* start_xmit */ /* * This routine shuts down a serial port; interrupts are disabled, * and DTR is dropped if the hangup on close termio flag is on. */ static void cy_shutdown(struct cyclades_port *info, struct tty_struct *tty) { struct cyclades_card *card; unsigned long flags; if (!(info->port.flags & ASYNC_INITIALIZED)) return; card = info->card; if (!cy_is_Z(card)) { spin_lock_irqsave(&card->card_lock, flags); /* Clear delta_msr_wait queue to avoid mem leaks. 
*/ wake_up_interruptible(&info->port.delta_msr_wait); if (info->port.xmit_buf) { unsigned char *temp; temp = info->port.xmit_buf; info->port.xmit_buf = NULL; free_page((unsigned long)temp); } if (tty->termios.c_cflag & HUPCL) cyy_change_rts_dtr(info, 0, TIOCM_RTS | TIOCM_DTR); cyy_issue_cmd(info, CyCHAN_CTL | CyDIS_RCVR); /* it may be appropriate to clear _XMIT at some later date (after testing)!!! */ set_bit(TTY_IO_ERROR, &tty->flags); info->port.flags &= ~ASYNC_INITIALIZED; spin_unlock_irqrestore(&card->card_lock, flags); } else { #ifdef CY_DEBUG_OPEN int channel = info->line - card->first_line; printk(KERN_DEBUG "cyc shutdown Z card %d, channel %d, " "base_addr %p\n", card, channel, card->base_addr); #endif if (!cyz_is_loaded(card)) return; spin_lock_irqsave(&card->card_lock, flags); if (info->port.xmit_buf) { unsigned char *temp; temp = info->port.xmit_buf; info->port.xmit_buf = NULL; free_page((unsigned long)temp); } if (tty->termios.c_cflag & HUPCL) tty_port_lower_dtr_rts(&info->port); set_bit(TTY_IO_ERROR, &tty->flags); info->port.flags &= ~ASYNC_INITIALIZED; spin_unlock_irqrestore(&card->card_lock, flags); } #ifdef CY_DEBUG_OPEN printk(KERN_DEBUG "cyc shutdown done\n"); #endif } /* shutdown */ /* * ------------------------------------------------------------ * cy_open() and friends * ------------------------------------------------------------ */ /* * This routine is called whenever a serial port is opened. It * performs the serial-specific initialization for the tty structure. */ static int cy_open(struct tty_struct *tty, struct file *filp) { struct cyclades_port *info; unsigned int i, line = tty->index; int retval; for (i = 0; i < NR_CARDS; i++) if (line < cy_card[i].first_line + cy_card[i].nports && line >= cy_card[i].first_line) break; if (i >= NR_CARDS) return -ENODEV; info = &cy_card[i].ports[line - cy_card[i].first_line]; if (info->line < 0) return -ENODEV; /* If the card's firmware hasn't been loaded, treat it as absent from the system. This will make the user pay attention. */ if (cy_is_Z(info->card)) { struct cyclades_card *cinfo = info->card; struct FIRM_ID __iomem *firm_id = cinfo->base_addr + ID_ADDRESS; if (!cyz_is_loaded(cinfo)) { if (cinfo->hw_ver == ZE_V1 && cyz_fpga_loaded(cinfo) && readl(&firm_id->signature) == ZFIRM_HLT) { printk(KERN_ERR "cyc:Cyclades-Z Error: you " "need an external power supply for " "this number of ports.\nFirmware " "halted.\n"); } else { printk(KERN_ERR "cyc:Cyclades-Z firmware not " "yet loaded\n"); } return -ENODEV; } #ifdef CONFIG_CYZ_INTR else { /* In case this Z board is operating in interrupt mode, its interrupts should be enabled as soon as the first open happens to one of its ports. 
*/ if (!cinfo->intr_enabled) { u16 intr; /* Enable interrupts on the PLX chip */ intr = readw(&cinfo->ctl_addr.p9060-> intr_ctrl_stat) | 0x0900; cy_writew(&cinfo->ctl_addr.p9060-> intr_ctrl_stat, intr); /* Enable interrupts on the FW */ retval = cyz_issue_cmd(cinfo, 0, C_CM_IRQ_ENBL, 0L); if (retval != 0) { printk(KERN_ERR "cyc:IRQ enable retval " "was %x\n", retval); } cinfo->intr_enabled = 1; } } #endif /* CONFIG_CYZ_INTR */ /* Make sure this Z port really exists in hardware */ if (info->line > (cinfo->first_line + cinfo->nports - 1)) return -ENODEV; } #ifdef CY_DEBUG_OTHER printk(KERN_DEBUG "cyc:cy_open ttyC%d\n", info->line); #endif tty->driver_data = info; if (serial_paranoia_check(info, tty->name, "cy_open")) return -ENODEV; #ifdef CY_DEBUG_OPEN printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line, info->port.count); #endif info->port.count++; #ifdef CY_DEBUG_COUNT printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n", current->pid, info->port.count); #endif /* * If the port is the middle of closing, bail out now */ if (tty_hung_up_p(filp) || (info->port.flags & ASYNC_CLOSING)) { wait_event_interruptible_tty(tty, info->port.close_wait, !(info->port.flags & ASYNC_CLOSING)); return (info->port.flags & ASYNC_HUP_NOTIFY) ? -EAGAIN: -ERESTARTSYS; } /* * Start up serial port */ retval = cy_startup(info, tty); if (retval) return retval; retval = tty_port_block_til_ready(&info->port, tty, filp); if (retval) { #ifdef CY_DEBUG_OPEN printk(KERN_DEBUG "cyc:cy_open returning after block_til_ready " "with %d\n", retval); #endif return retval; } info->throttle = 0; tty_port_tty_set(&info->port, tty); #ifdef CY_DEBUG_OPEN printk(KERN_DEBUG "cyc:cy_open done\n"); #endif return 0; } /* cy_open */ /* * cy_wait_until_sent() --- wait until the transmitter is empty */ static void cy_wait_until_sent(struct tty_struct *tty, int timeout) { struct cyclades_card *card; struct cyclades_port *info = tty->driver_data; unsigned long orig_jiffies; int char_time; if (serial_paranoia_check(info, tty->name, "cy_wait_until_sent")) return; if (info->xmit_fifo_size == 0) return; /* Just in case.... */ orig_jiffies = jiffies; /* * Set the check interval to be 1/5 of the estimated time to * send a single character, and make it at least 1. The check * interval should also be less than the timeout. * * Note: we have to use pretty tight timings here to satisfy * the NIST-PCTS. */ char_time = (info->timeout - HZ / 50) / info->xmit_fifo_size; char_time = char_time / 5; if (char_time <= 0) char_time = 1; if (timeout < 0) timeout = 0; if (timeout) char_time = min(char_time, timeout); /* * If the transmitter hasn't cleared in twice the approximate * amount of time to send the entire FIFO, it probably won't * ever clear. This assumes the UART isn't doing flow * control, which is currently the case. Hence, if it ever * takes longer than info->timeout, this is probably due to a * UART bug of some kind. So, we clamp the timeout parameter at * 2*info->timeout. 
*/ if (!timeout || timeout > 2 * info->timeout) timeout = 2 * info->timeout; card = info->card; if (!cy_is_Z(card)) { while (cyy_readb(info, CySRER) & CyTxRdy) { if (msleep_interruptible(jiffies_to_msecs(char_time))) break; if (timeout && time_after(jiffies, orig_jiffies + timeout)) break; } } /* Run one more char cycle */ msleep_interruptible(jiffies_to_msecs(char_time * 5)); } static void cy_flush_buffer(struct tty_struct *tty) { struct cyclades_port *info = tty->driver_data; struct cyclades_card *card; int channel, retval; unsigned long flags; #ifdef CY_DEBUG_IO printk(KERN_DEBUG "cyc:cy_flush_buffer ttyC%d\n", info->line); #endif if (serial_paranoia_check(info, tty->name, "cy_flush_buffer")) return; card = info->card; channel = info->line - card->first_line; spin_lock_irqsave(&card->card_lock, flags); info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; spin_unlock_irqrestore(&card->card_lock, flags); if (cy_is_Z(card)) { /* If it is a Z card, flush the on-board buffers as well */ spin_lock_irqsave(&card->card_lock, flags); retval = cyz_issue_cmd(card, channel, C_CM_FLUSH_TX, 0L); if (retval != 0) { printk(KERN_ERR "cyc: flush_buffer retval on ttyC%d " "was %x\n", info->line, retval); } spin_unlock_irqrestore(&card->card_lock, flags); } tty_wakeup(tty); } /* cy_flush_buffer */ static void cy_do_close(struct tty_port *port) { struct cyclades_port *info = container_of(port, struct cyclades_port, port); struct cyclades_card *card; unsigned long flags; int channel; card = info->card; channel = info->line - card->first_line; spin_lock_irqsave(&card->card_lock, flags); if (!cy_is_Z(card)) { /* Stop accepting input */ cyy_writeb(info, CyCAR, channel & 0x03); cyy_writeb(info, CySRER, cyy_readb(info, CySRER) & ~CyRxData); if (info->port.flags & ASYNC_INITIALIZED) { /* Waiting for on-board buffers to be empty before closing the port */ spin_unlock_irqrestore(&card->card_lock, flags); cy_wait_until_sent(port->tty, info->timeout); spin_lock_irqsave(&card->card_lock, flags); } } else { #ifdef Z_WAKE /* Waiting for on-board buffers to be empty before closing the port */ struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl; int retval; if (readl(&ch_ctrl->flow_status) != C_FS_TXIDLE) { retval = cyz_issue_cmd(card, channel, C_CM_IOCTLW, 0L); if (retval != 0) { printk(KERN_DEBUG "cyc:cy_close retval on " "ttyC%d was %x\n", info->line, retval); } spin_unlock_irqrestore(&card->card_lock, flags); wait_for_completion_interruptible(&info->shutdown_wait); spin_lock_irqsave(&card->card_lock, flags); } #endif } spin_unlock_irqrestore(&card->card_lock, flags); cy_shutdown(info, port->tty); } /* * This routine is called when a particular tty device is closed. */ static void cy_close(struct tty_struct *tty, struct file *filp) { struct cyclades_port *info = tty->driver_data; if (!info || serial_paranoia_check(info, tty->name, "cy_close")) return; tty_port_close(&info->port, tty, filp); } /* cy_close */ /* This routine gets called when tty_write has put something into * the write_queue. The characters may come from user space or * kernel space. * * This routine will return the number of characters actually * accepted for writing. * * If the port is not already transmitting stuff, start it off by * enabling interrupts. The interrupt service routine will then * ensure that the characters are sent. * If the port is already active, there is no need to kick it. 
* */ static int cy_write(struct tty_struct *tty, const unsigned char *buf, int count) { struct cyclades_port *info = tty->driver_data; unsigned long flags; int c, ret = 0; #ifdef CY_DEBUG_IO printk(KERN_DEBUG "cyc:cy_write ttyC%d\n", info->line); #endif if (serial_paranoia_check(info, tty->name, "cy_write")) return 0; if (!info->port.xmit_buf) return 0; spin_lock_irqsave(&info->card->card_lock, flags); while (1) { c = min(count, (int)(SERIAL_XMIT_SIZE - info->xmit_cnt - 1)); c = min(c, (int)(SERIAL_XMIT_SIZE - info->xmit_head)); if (c <= 0) break; memcpy(info->port.xmit_buf + info->xmit_head, buf, c); info->xmit_head = (info->xmit_head + c) & (SERIAL_XMIT_SIZE - 1); info->xmit_cnt += c; buf += c; count -= c; ret += c; } spin_unlock_irqrestore(&info->card->card_lock, flags); info->idle_stats.xmit_bytes += ret; info->idle_stats.xmit_idle = jiffies; if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) start_xmit(info); return ret; } /* cy_write */ /* * This routine is called by the kernel to write a single * character to the tty device. If the kernel uses this routine, * it must call the flush_chars() routine (if defined) when it is * done stuffing characters into the driver. If there is no room * in the queue, the character is ignored. */ static int cy_put_char(struct tty_struct *tty, unsigned char ch) { struct cyclades_port *info = tty->driver_data; unsigned long flags; #ifdef CY_DEBUG_IO printk(KERN_DEBUG "cyc:cy_put_char ttyC%d\n", info->line); #endif if (serial_paranoia_check(info, tty->name, "cy_put_char")) return 0; if (!info->port.xmit_buf) return 0; spin_lock_irqsave(&info->card->card_lock, flags); if (info->xmit_cnt >= (int)(SERIAL_XMIT_SIZE - 1)) { spin_unlock_irqrestore(&info->card->card_lock, flags); return 0; } info->port.xmit_buf[info->xmit_head++] = ch; info->xmit_head &= SERIAL_XMIT_SIZE - 1; info->xmit_cnt++; info->idle_stats.xmit_bytes++; info->idle_stats.xmit_idle = jiffies; spin_unlock_irqrestore(&info->card->card_lock, flags); return 1; } /* cy_put_char */ /* * This routine is called by the kernel after it has written a * series of characters to the tty device using put_char(). */ static void cy_flush_chars(struct tty_struct *tty) { struct cyclades_port *info = tty->driver_data; #ifdef CY_DEBUG_IO printk(KERN_DEBUG "cyc:cy_flush_chars ttyC%d\n", info->line); #endif if (serial_paranoia_check(info, tty->name, "cy_flush_chars")) return; if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped || !info->port.xmit_buf) return; start_xmit(info); } /* cy_flush_chars */ /* * This routine returns the numbers of characters the tty driver * will accept for queuing to be written. This number is subject * to change as output buffers get emptied, or if the output flow * control is activated. 
*/ static int cy_write_room(struct tty_struct *tty) { struct cyclades_port *info = tty->driver_data; int ret; #ifdef CY_DEBUG_IO printk(KERN_DEBUG "cyc:cy_write_room ttyC%d\n", info->line); #endif if (serial_paranoia_check(info, tty->name, "cy_write_room")) return 0; ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1; if (ret < 0) ret = 0; return ret; } /* cy_write_room */ static int cy_chars_in_buffer(struct tty_struct *tty) { struct cyclades_port *info = tty->driver_data; if (serial_paranoia_check(info, tty->name, "cy_chars_in_buffer")) return 0; #ifdef Z_EXT_CHARS_IN_BUFFER if (!cy_is_Z(info->card)) { #endif /* Z_EXT_CHARS_IN_BUFFER */ #ifdef CY_DEBUG_IO printk(KERN_DEBUG "cyc:cy_chars_in_buffer ttyC%d %d\n", info->line, info->xmit_cnt); #endif return info->xmit_cnt; #ifdef Z_EXT_CHARS_IN_BUFFER } else { struct BUF_CTRL __iomem *buf_ctrl = info->u.cyz.buf_ctrl; int char_count; __u32 tx_put, tx_get, tx_bufsize; tx_get = readl(&buf_ctrl->tx_get); tx_put = readl(&buf_ctrl->tx_put); tx_bufsize = readl(&buf_ctrl->tx_bufsize); if (tx_put >= tx_get) char_count = tx_put - tx_get; else char_count = tx_put - tx_get + tx_bufsize; #ifdef CY_DEBUG_IO printk(KERN_DEBUG "cyc:cy_chars_in_buffer ttyC%d %d\n", info->line, info->xmit_cnt + char_count); #endif return info->xmit_cnt + char_count; } #endif /* Z_EXT_CHARS_IN_BUFFER */ } /* cy_chars_in_buffer */ /* * ------------------------------------------------------------ * cy_ioctl() and friends * ------------------------------------------------------------ */ static void cyy_baud_calc(struct cyclades_port *info, __u32 baud) { int co, co_val, bpr; __u32 cy_clock = ((info->chip_rev >= CD1400_REV_J) ? 60000000 : 25000000); if (baud == 0) { info->tbpr = info->tco = info->rbpr = info->rco = 0; return; } /* determine which prescaler to use */ for (co = 4, co_val = 2048; co; co--, co_val >>= 2) { if (cy_clock / co_val / baud > 63) break; } bpr = (cy_clock / co_val * 2 / baud + 1) / 2; if (bpr > 255) bpr = 255; info->tbpr = info->rbpr = bpr; info->tco = info->rco = co; } /* * This routine finds or computes the various line characteristics. * It used to be called config_setup */ static void cy_set_line_char(struct cyclades_port *info, struct tty_struct *tty) { struct cyclades_card *card; unsigned long flags; int channel; unsigned cflag, iflag; int baud, baud_rate = 0; int i; if (info->line == -1) return; cflag = tty->termios.c_cflag; iflag = tty->termios.c_iflag; /* * Set up the tty->alt_speed kludge */ if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) tty->alt_speed = 57600; if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI) tty->alt_speed = 115200; if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI) tty->alt_speed = 230400; if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP) tty->alt_speed = 460800; card = info->card; channel = info->line - card->first_line; if (!cy_is_Z(card)) { u32 cflags; /* baud rate */ baud = tty_get_baud_rate(tty); if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) { if (info->custom_divisor) baud_rate = info->baud / info->custom_divisor; else baud_rate = info->baud; } else if (baud > CD1400_MAX_SPEED) { baud = CD1400_MAX_SPEED; } /* find the baud index */ for (i = 0; i < 20; i++) { if (baud == baud_table[i]) break; } if (i == 20) i = 19; /* CD1400_MAX_SPEED */ if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) { cyy_baud_calc(info, baud_rate); } else { if (info->chip_rev >= CD1400_REV_J) { /* It is a CD1400 rev. 
J or later */ info->tbpr = baud_bpr_60[i]; /* Tx BPR */ info->tco = baud_co_60[i]; /* Tx CO */ info->rbpr = baud_bpr_60[i]; /* Rx BPR */ info->rco = baud_co_60[i]; /* Rx CO */ } else { info->tbpr = baud_bpr_25[i]; /* Tx BPR */ info->tco = baud_co_25[i]; /* Tx CO */ info->rbpr = baud_bpr_25[i]; /* Rx BPR */ info->rco = baud_co_25[i]; /* Rx CO */ } } if (baud_table[i] == 134) { /* get it right for 134.5 baud */ info->timeout = (info->xmit_fifo_size * HZ * 30 / 269) + 2; } else if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) { info->timeout = (info->xmit_fifo_size * HZ * 15 / baud_rate) + 2; } else if (baud_table[i]) { info->timeout = (info->xmit_fifo_size * HZ * 15 / baud_table[i]) + 2; /* this needs to be propagated into the card info */ } else { info->timeout = 0; } /* By tradition (is it a standard?) a baud rate of zero implies the line should be/has been closed. A bit later in this routine such a test is performed. */ /* byte size and parity */ info->cor5 = 0; info->cor4 = 0; /* receive threshold */ info->cor3 = (info->default_threshold ? info->default_threshold : baud_cor3[i]); info->cor2 = CyETC; switch (cflag & CSIZE) { case CS5: info->cor1 = Cy_5_BITS; break; case CS6: info->cor1 = Cy_6_BITS; break; case CS7: info->cor1 = Cy_7_BITS; break; case CS8: info->cor1 = Cy_8_BITS; break; } if (cflag & CSTOPB) info->cor1 |= Cy_2_STOP; if (cflag & PARENB) { if (cflag & PARODD) info->cor1 |= CyPARITY_O; else info->cor1 |= CyPARITY_E; } else info->cor1 |= CyPARITY_NONE; /* CTS flow control flag */ if (cflag & CRTSCTS) { info->port.flags |= ASYNC_CTS_FLOW; info->cor2 |= CyCtsAE; } else { info->port.flags &= ~ASYNC_CTS_FLOW; info->cor2 &= ~CyCtsAE; } if (cflag & CLOCAL) info->port.flags &= ~ASYNC_CHECK_CD; else info->port.flags |= ASYNC_CHECK_CD; /*********************************************** The hardware option, CyRtsAO, presents RTS when the chip has characters to send. Since most modems use RTS as reverse (inbound) flow control, this option is not used. If inbound flow control is necessary, DTR can be programmed to provide the appropriate signals for use with a non-standard cable. Contact Marcio Saito for details. ***********************************************/ channel &= 0x03; spin_lock_irqsave(&card->card_lock, flags); cyy_writeb(info, CyCAR, channel); /* tx and rx baud rate */ cyy_writeb(info, CyTCOR, info->tco); cyy_writeb(info, CyTBPR, info->tbpr); cyy_writeb(info, CyRCOR, info->rco); cyy_writeb(info, CyRBPR, info->rbpr); /* set line characteristics according configuration */ cyy_writeb(info, CySCHR1, START_CHAR(tty)); cyy_writeb(info, CySCHR2, STOP_CHAR(tty)); cyy_writeb(info, CyCOR1, info->cor1); cyy_writeb(info, CyCOR2, info->cor2); cyy_writeb(info, CyCOR3, info->cor3); cyy_writeb(info, CyCOR4, info->cor4); cyy_writeb(info, CyCOR5, info->cor5); cyy_issue_cmd(info, CyCOR_CHANGE | CyCOR1ch | CyCOR2ch | CyCOR3ch); /* !!! Is this needed? */ cyy_writeb(info, CyCAR, channel); cyy_writeb(info, CyRTPR, (info->default_timeout ? 
info->default_timeout : 0x02)); /* 10ms rx timeout */ cflags = CyCTS; if (!C_CLOCAL(tty)) cflags |= CyDSR | CyRI | CyDCD; /* without modem intr */ cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyMdmCh); /* act on 1->0 modem transitions */ if ((cflag & CRTSCTS) && info->rflow) cyy_writeb(info, CyMCOR1, cflags | rflow_thr[i]); else cyy_writeb(info, CyMCOR1, cflags); /* act on 0->1 modem transitions */ cyy_writeb(info, CyMCOR2, cflags); if (i == 0) /* baud rate is zero, turn off line */ cyy_change_rts_dtr(info, 0, TIOCM_DTR); else cyy_change_rts_dtr(info, TIOCM_DTR, 0); clear_bit(TTY_IO_ERROR, &tty->flags); spin_unlock_irqrestore(&card->card_lock, flags); } else { struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl; __u32 sw_flow; int retval; if (!cyz_is_loaded(card)) return; /* baud rate */ baud = tty_get_baud_rate(tty); if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) { if (info->custom_divisor) baud_rate = info->baud / info->custom_divisor; else baud_rate = info->baud; } else if (baud > CYZ_MAX_SPEED) { baud = CYZ_MAX_SPEED; } cy_writel(&ch_ctrl->comm_baud, baud); if (baud == 134) { /* get it right for 134.5 baud */ info->timeout = (info->xmit_fifo_size * HZ * 30 / 269) + 2; } else if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) { info->timeout = (info->xmit_fifo_size * HZ * 15 / baud_rate) + 2; } else if (baud) { info->timeout = (info->xmit_fifo_size * HZ * 15 / baud) + 2; /* this needs to be propagated into the card info */ } else { info->timeout = 0; } /* byte size and parity */ switch (cflag & CSIZE) { case CS5: cy_writel(&ch_ctrl->comm_data_l, C_DL_CS5); break; case CS6: cy_writel(&ch_ctrl->comm_data_l, C_DL_CS6); break; case CS7: cy_writel(&ch_ctrl->comm_data_l, C_DL_CS7); break; case CS8: cy_writel(&ch_ctrl->comm_data_l, C_DL_CS8); break; } if (cflag & CSTOPB) { cy_writel(&ch_ctrl->comm_data_l, readl(&ch_ctrl->comm_data_l) | C_DL_2STOP); } else { cy_writel(&ch_ctrl->comm_data_l, readl(&ch_ctrl->comm_data_l) | C_DL_1STOP); } if (cflag & PARENB) { if (cflag & PARODD) cy_writel(&ch_ctrl->comm_parity, C_PR_ODD); else cy_writel(&ch_ctrl->comm_parity, C_PR_EVEN); } else cy_writel(&ch_ctrl->comm_parity, C_PR_NONE); /* CTS flow control flag */ if (cflag & CRTSCTS) { cy_writel(&ch_ctrl->hw_flow, readl(&ch_ctrl->hw_flow) | C_RS_CTS | C_RS_RTS); } else { cy_writel(&ch_ctrl->hw_flow, readl(&ch_ctrl->hw_flow) & ~(C_RS_CTS | C_RS_RTS)); } /* As the HW flow control is done in firmware, the driver doesn't need to care about it */ info->port.flags &= ~ASYNC_CTS_FLOW; /* XON/XOFF/XANY flow control flags */ sw_flow = 0; if (iflag & IXON) { sw_flow |= C_FL_OXX; if (iflag & IXANY) sw_flow |= C_FL_OIXANY; } cy_writel(&ch_ctrl->sw_flow, sw_flow); retval = cyz_issue_cmd(card, channel, C_CM_IOCTL, 0L); if (retval != 0) { printk(KERN_ERR "cyc:set_line_char retval on ttyC%d " "was %x\n", info->line, retval); } /* CD sensitivity */ if (cflag & CLOCAL) info->port.flags &= ~ASYNC_CHECK_CD; else info->port.flags |= ASYNC_CHECK_CD; if (baud == 0) { /* baud rate is zero, turn off line */ cy_writel(&ch_ctrl->rs_control, readl(&ch_ctrl->rs_control) & ~C_RS_DTR); #ifdef CY_DEBUG_DTR printk(KERN_DEBUG "cyc:set_line_char dropping Z DTR\n"); #endif } else { cy_writel(&ch_ctrl->rs_control, readl(&ch_ctrl->rs_control) | C_RS_DTR); #ifdef CY_DEBUG_DTR printk(KERN_DEBUG "cyc:set_line_char raising Z DTR\n"); #endif } retval = cyz_issue_cmd(card, channel, C_CM_IOCTLM, 0L); if (retval != 0) { printk(KERN_ERR "cyc:set_line_char(2) retval on ttyC%d " "was %x\n", 
info->line, retval); } clear_bit(TTY_IO_ERROR, &tty->flags); } } /* set_line_char */ static int cy_get_serial_info(struct cyclades_port *info, struct serial_struct __user *retinfo) { struct cyclades_card *cinfo = info->card; struct serial_struct tmp = { .type = info->type, .line = info->line, .port = (info->card - cy_card) * 0x100 + info->line - cinfo->first_line, .irq = cinfo->irq, .flags = info->port.flags, .close_delay = info->port.close_delay, .closing_wait = info->port.closing_wait, .baud_base = info->baud, .custom_divisor = info->custom_divisor, .hub6 = 0, /*!!! */ }; return copy_to_user(retinfo, &tmp, sizeof(*retinfo)) ? -EFAULT : 0; } static int cy_set_serial_info(struct cyclades_port *info, struct tty_struct *tty, struct serial_struct __user *new_info) { struct serial_struct new_serial; int ret; if (copy_from_user(&new_serial, new_info, sizeof(new_serial))) return -EFAULT; mutex_lock(&info->port.mutex); if (!capable(CAP_SYS_ADMIN)) { if (new_serial.close_delay != info->port.close_delay || new_serial.baud_base != info->baud || (new_serial.flags & ASYNC_FLAGS & ~ASYNC_USR_MASK) != (info->port.flags & ASYNC_FLAGS & ~ASYNC_USR_MASK)) { mutex_unlock(&info->port.mutex); return -EPERM; } info->port.flags = (info->port.flags & ~ASYNC_USR_MASK) | (new_serial.flags & ASYNC_USR_MASK); info->baud = new_serial.baud_base; info->custom_divisor = new_serial.custom_divisor; goto check_and_exit; } /* * OK, past this point, all the error checking has been done. * At this point, we start making changes..... */ info->baud = new_serial.baud_base; info->custom_divisor = new_serial.custom_divisor; info->port.flags = (info->port.flags & ~ASYNC_FLAGS) | (new_serial.flags & ASYNC_FLAGS); info->port.close_delay = new_serial.close_delay * HZ / 100; info->port.closing_wait = new_serial.closing_wait * HZ / 100; check_and_exit: if (info->port.flags & ASYNC_INITIALIZED) { cy_set_line_char(info, tty); ret = 0; } else { ret = cy_startup(info, tty); } mutex_unlock(&info->port.mutex); return ret; } /* set_serial_info */ /* * get_lsr_info - get line status register info * * Purpose: Let user call ioctl() to get info when the UART physically * is emptied. On bus types like RS485, the transmitter must * release the bus after transmitting. This must be done when * the transmit shift register is empty, not be done when the * transmit holding register is empty. This functionality * allows an RS485 driver to be written in user space. */ static int get_lsr_info(struct cyclades_port *info, unsigned int __user *value) { struct cyclades_card *card = info->card; unsigned int result; unsigned long flags; u8 status; if (!cy_is_Z(card)) { spin_lock_irqsave(&card->card_lock, flags); status = cyy_readb(info, CySRER) & (CyTxRdy | CyTxMpty); spin_unlock_irqrestore(&card->card_lock, flags); result = (status ? 0 : TIOCSER_TEMT); } else { /* Not supported yet */ return -EINVAL; } return put_user(result, value); } static int cy_tiocmget(struct tty_struct *tty) { struct cyclades_port *info = tty->driver_data; struct cyclades_card *card; int result; if (serial_paranoia_check(info, tty->name, __func__)) return -ENODEV; card = info->card; if (!cy_is_Z(card)) { unsigned long flags; int channel = info->line - card->first_line; u8 status; spin_lock_irqsave(&card->card_lock, flags); cyy_writeb(info, CyCAR, channel & 0x03); status = cyy_readb(info, CyMSVR1); status |= cyy_readb(info, CyMSVR2); spin_unlock_irqrestore(&card->card_lock, flags); if (info->rtsdtr_inv) { result = ((status & CyRTS) ? TIOCM_DTR : 0) | ((status & CyDTR) ? 
TIOCM_RTS : 0); } else { result = ((status & CyRTS) ? TIOCM_RTS : 0) | ((status & CyDTR) ? TIOCM_DTR : 0); } result |= ((status & CyDCD) ? TIOCM_CAR : 0) | ((status & CyRI) ? TIOCM_RNG : 0) | ((status & CyDSR) ? TIOCM_DSR : 0) | ((status & CyCTS) ? TIOCM_CTS : 0); } else { u32 lstatus; if (!cyz_is_loaded(card)) { result = -ENODEV; goto end; } lstatus = readl(&info->u.cyz.ch_ctrl->rs_status); result = ((lstatus & C_RS_RTS) ? TIOCM_RTS : 0) | ((lstatus & C_RS_DTR) ? TIOCM_DTR : 0) | ((lstatus & C_RS_DCD) ? TIOCM_CAR : 0) | ((lstatus & C_RS_RI) ? TIOCM_RNG : 0) | ((lstatus & C_RS_DSR) ? TIOCM_DSR : 0) | ((lstatus & C_RS_CTS) ? TIOCM_CTS : 0); } end: return result; } /* cy_tiomget */ static int cy_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct cyclades_port *info = tty->driver_data; struct cyclades_card *card; unsigned long flags; if (serial_paranoia_check(info, tty->name, __func__)) return -ENODEV; card = info->card; if (!cy_is_Z(card)) { spin_lock_irqsave(&card->card_lock, flags); cyy_change_rts_dtr(info, set, clear); spin_unlock_irqrestore(&card->card_lock, flags); } else { struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl; int retval, channel = info->line - card->first_line; u32 rs; if (!cyz_is_loaded(card)) return -ENODEV; spin_lock_irqsave(&card->card_lock, flags); rs = readl(&ch_ctrl->rs_control); if (set & TIOCM_RTS) rs |= C_RS_RTS; if (clear & TIOCM_RTS) rs &= ~C_RS_RTS; if (set & TIOCM_DTR) { rs |= C_RS_DTR; #ifdef CY_DEBUG_DTR printk(KERN_DEBUG "cyc:set_modem_info raising Z DTR\n"); #endif } if (clear & TIOCM_DTR) { rs &= ~C_RS_DTR; #ifdef CY_DEBUG_DTR printk(KERN_DEBUG "cyc:set_modem_info clearing " "Z DTR\n"); #endif } cy_writel(&ch_ctrl->rs_control, rs); retval = cyz_issue_cmd(card, channel, C_CM_IOCTLM, 0L); spin_unlock_irqrestore(&card->card_lock, flags); if (retval != 0) { printk(KERN_ERR "cyc:set_modem_info retval on ttyC%d " "was %x\n", info->line, retval); } } return 0; } /* * cy_break() --- routine which turns the break handling on or off */ static int cy_break(struct tty_struct *tty, int break_state) { struct cyclades_port *info = tty->driver_data; struct cyclades_card *card; unsigned long flags; int retval = 0; if (serial_paranoia_check(info, tty->name, "cy_break")) return -EINVAL; card = info->card; spin_lock_irqsave(&card->card_lock, flags); if (!cy_is_Z(card)) { /* Let the transmit ISR take care of this (since it requires stuffing characters into the output stream). 
*/ if (break_state == -1) { if (!info->breakon) { info->breakon = 1; if (!info->xmit_cnt) { spin_unlock_irqrestore(&card->card_lock, flags); start_xmit(info); spin_lock_irqsave(&card->card_lock, flags); } } } else { if (!info->breakoff) { info->breakoff = 1; if (!info->xmit_cnt) { spin_unlock_irqrestore(&card->card_lock, flags); start_xmit(info); spin_lock_irqsave(&card->card_lock, flags); } } } } else { if (break_state == -1) { retval = cyz_issue_cmd(card, info->line - card->first_line, C_CM_SET_BREAK, 0L); if (retval != 0) { printk(KERN_ERR "cyc:cy_break (set) retval on " "ttyC%d was %x\n", info->line, retval); } } else { retval = cyz_issue_cmd(card, info->line - card->first_line, C_CM_CLR_BREAK, 0L); if (retval != 0) { printk(KERN_DEBUG "cyc:cy_break (clr) retval " "on ttyC%d was %x\n", info->line, retval); } } } spin_unlock_irqrestore(&card->card_lock, flags); return retval; } /* cy_break */ static int set_threshold(struct cyclades_port *info, unsigned long value) { struct cyclades_card *card = info->card; unsigned long flags; if (!cy_is_Z(card)) { info->cor3 &= ~CyREC_FIFO; info->cor3 |= value & CyREC_FIFO; spin_lock_irqsave(&card->card_lock, flags); cyy_writeb(info, CyCOR3, info->cor3); cyy_issue_cmd(info, CyCOR_CHANGE | CyCOR3ch); spin_unlock_irqrestore(&card->card_lock, flags); } return 0; } /* set_threshold */ static int get_threshold(struct cyclades_port *info, unsigned long __user *value) { struct cyclades_card *card = info->card; if (!cy_is_Z(card)) { u8 tmp = cyy_readb(info, CyCOR3) & CyREC_FIFO; return put_user(tmp, value); } return 0; } /* get_threshold */ static int set_timeout(struct cyclades_port *info, unsigned long value) { struct cyclades_card *card = info->card; unsigned long flags; if (!cy_is_Z(card)) { spin_lock_irqsave(&card->card_lock, flags); cyy_writeb(info, CyRTPR, value & 0xff); spin_unlock_irqrestore(&card->card_lock, flags); } return 0; } /* set_timeout */ static int get_timeout(struct cyclades_port *info, unsigned long __user *value) { struct cyclades_card *card = info->card; if (!cy_is_Z(card)) { u8 tmp = cyy_readb(info, CyRTPR); return put_user(tmp, value); } return 0; } /* get_timeout */ static int cy_cflags_changed(struct cyclades_port *info, unsigned long arg, struct cyclades_icount *cprev) { struct cyclades_icount cnow; unsigned long flags; int ret; spin_lock_irqsave(&info->card->card_lock, flags); cnow = info->icount; /* atomic copy */ spin_unlock_irqrestore(&info->card->card_lock, flags); ret = ((arg & TIOCM_RNG) && (cnow.rng != cprev->rng)) || ((arg & TIOCM_DSR) && (cnow.dsr != cprev->dsr)) || ((arg & TIOCM_CD) && (cnow.dcd != cprev->dcd)) || ((arg & TIOCM_CTS) && (cnow.cts != cprev->cts)); *cprev = cnow; return ret; } /* * This routine allows the tty driver to implement device- * specific ioctl's. If the ioctl number passed in cmd is * not recognized by the driver, it should return ENOIOCTLCMD. 
*/ static int cy_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct cyclades_port *info = tty->driver_data; struct cyclades_icount cnow; /* kernel counter temps */ int ret_val = 0; unsigned long flags; void __user *argp = (void __user *)arg; if (serial_paranoia_check(info, tty->name, "cy_ioctl")) return -ENODEV; #ifdef CY_DEBUG_OTHER printk(KERN_DEBUG "cyc:cy_ioctl ttyC%d, cmd = %x arg = %lx\n", info->line, cmd, arg); #endif switch (cmd) { case CYGETMON: if (copy_to_user(argp, &info->mon, sizeof(info->mon))) { ret_val = -EFAULT; break; } memset(&info->mon, 0, sizeof(info->mon)); break; case CYGETTHRESH: ret_val = get_threshold(info, argp); break; case CYSETTHRESH: ret_val = set_threshold(info, arg); break; case CYGETDEFTHRESH: ret_val = put_user(info->default_threshold, (unsigned long __user *)argp); break; case CYSETDEFTHRESH: info->default_threshold = arg & 0x0f; break; case CYGETTIMEOUT: ret_val = get_timeout(info, argp); break; case CYSETTIMEOUT: ret_val = set_timeout(info, arg); break; case CYGETDEFTIMEOUT: ret_val = put_user(info->default_timeout, (unsigned long __user *)argp); break; case CYSETDEFTIMEOUT: info->default_timeout = arg & 0xff; break; case CYSETRFLOW: info->rflow = (int)arg; break; case CYGETRFLOW: ret_val = info->rflow; break; case CYSETRTSDTR_INV: info->rtsdtr_inv = (int)arg; break; case CYGETRTSDTR_INV: ret_val = info->rtsdtr_inv; break; case CYGETCD1400VER: ret_val = info->chip_rev; break; #ifndef CONFIG_CYZ_INTR case CYZSETPOLLCYCLE: cyz_polling_cycle = (arg * HZ) / 1000; break; case CYZGETPOLLCYCLE: ret_val = (cyz_polling_cycle * 1000) / HZ; break; #endif /* CONFIG_CYZ_INTR */ case CYSETWAIT: info->port.closing_wait = (unsigned short)arg * HZ / 100; break; case CYGETWAIT: ret_val = info->port.closing_wait / (HZ / 100); break; case TIOCGSERIAL: ret_val = cy_get_serial_info(info, argp); break; case TIOCSSERIAL: ret_val = cy_set_serial_info(info, tty, argp); break; case TIOCSERGETLSR: /* Get line status register */ ret_val = get_lsr_info(info, argp); break; /* * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change * - mask passed in arg for lines of interest * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking) * Caller should use TIOCGICOUNT to see which one it was */ case TIOCMIWAIT: spin_lock_irqsave(&info->card->card_lock, flags); /* note the counters on entry */ cnow = info->icount; spin_unlock_irqrestore(&info->card->card_lock, flags); ret_val = wait_event_interruptible(info->port.delta_msr_wait, cy_cflags_changed(info, arg, &cnow)); break; /* * Get counter of input serial line interrupts (DCD,RI,DSR,CTS) * Return: write counters to the user passed counter struct * NB: both 1->0 and 0->1 transitions are counted except for * RI where only 0->1 is counted. 
*/ default: ret_val = -ENOIOCTLCMD; } #ifdef CY_DEBUG_OTHER printk(KERN_DEBUG "cyc:cy_ioctl done\n"); #endif return ret_val; } /* cy_ioctl */ static int cy_get_icount(struct tty_struct *tty, struct serial_icounter_struct *sic) { struct cyclades_port *info = tty->driver_data; struct cyclades_icount cnow; /* Used to snapshot */ unsigned long flags; spin_lock_irqsave(&info->card->card_lock, flags); cnow = info->icount; spin_unlock_irqrestore(&info->card->card_lock, flags); sic->cts = cnow.cts; sic->dsr = cnow.dsr; sic->rng = cnow.rng; sic->dcd = cnow.dcd; sic->rx = cnow.rx; sic->tx = cnow.tx; sic->frame = cnow.frame; sic->overrun = cnow.overrun; sic->parity = cnow.parity; sic->brk = cnow.brk; sic->buf_overrun = cnow.buf_overrun; return 0; } /* * This routine allows the tty driver to be notified when * device's termios settings have changed. Note that a * well-designed tty driver should be prepared to accept the case * where old == NULL, and try to do something rational. */ static void cy_set_termios(struct tty_struct *tty, struct ktermios *old_termios) { struct cyclades_port *info = tty->driver_data; #ifdef CY_DEBUG_OTHER printk(KERN_DEBUG "cyc:cy_set_termios ttyC%d\n", info->line); #endif cy_set_line_char(info, tty); if ((old_termios->c_cflag & CRTSCTS) && !(tty->termios.c_cflag & CRTSCTS)) { tty->hw_stopped = 0; cy_start(tty); } #if 0 /* * No need to wake up processes in open wait, since they * sample the CLOCAL flag once, and don't recheck it. * XXX It's not clear whether the current behavior is correct * or not. Hence, this may change..... */ if (!(old_termios->c_cflag & CLOCAL) && (tty->termios.c_cflag & CLOCAL)) wake_up_interruptible(&info->port.open_wait); #endif } /* cy_set_termios */ /* This function is used to send a high-priority XON/XOFF character to the device. */ static void cy_send_xchar(struct tty_struct *tty, char ch) { struct cyclades_port *info = tty->driver_data; struct cyclades_card *card; int channel; if (serial_paranoia_check(info, tty->name, "cy_send_xchar")) return; info->x_char = ch; if (ch) cy_start(tty); card = info->card; channel = info->line - card->first_line; if (cy_is_Z(card)) { if (ch == STOP_CHAR(tty)) cyz_issue_cmd(card, channel, C_CM_SENDXOFF, 0L); else if (ch == START_CHAR(tty)) cyz_issue_cmd(card, channel, C_CM_SENDXON, 0L); } } /* This routine is called by the upper-layer tty layer to signal that incoming characters should be throttled because the input buffers are close to full. */ static void cy_throttle(struct tty_struct *tty) { struct cyclades_port *info = tty->driver_data; struct cyclades_card *card; unsigned long flags; #ifdef CY_DEBUG_THROTTLE char buf[64]; printk(KERN_DEBUG "cyc:throttle %s: %ld...ttyC%d\n", tty_name(tty, buf), tty->ldisc.chars_in_buffer(tty), info->line); #endif if (serial_paranoia_check(info, tty->name, "cy_throttle")) return; card = info->card; if (I_IXOFF(tty)) { if (!cy_is_Z(card)) cy_send_xchar(tty, STOP_CHAR(tty)); else info->throttle = 1; } if (tty->termios.c_cflag & CRTSCTS) { if (!cy_is_Z(card)) { spin_lock_irqsave(&card->card_lock, flags); cyy_change_rts_dtr(info, 0, TIOCM_RTS); spin_unlock_irqrestore(&card->card_lock, flags); } else { info->throttle = 1; } } } /* cy_throttle */ /* * This routine notifies the tty driver that it should signal * that characters can now be sent to the tty without fear of * overrunning the input buffers of the line disciplines. 
*/ static void cy_unthrottle(struct tty_struct *tty) { struct cyclades_port *info = tty->driver_data; struct cyclades_card *card; unsigned long flags; #ifdef CY_DEBUG_THROTTLE char buf[64]; printk(KERN_DEBUG "cyc:unthrottle %s: %ld...ttyC%d\n", tty_name(tty, buf), tty_chars_in_buffer(tty), info->line); #endif if (serial_paranoia_check(info, tty->name, "cy_unthrottle")) return; if (I_IXOFF(tty)) { if (info->x_char) info->x_char = 0; else cy_send_xchar(tty, START_CHAR(tty)); } if (tty->termios.c_cflag & CRTSCTS) { card = info->card; if (!cy_is_Z(card)) { spin_lock_irqsave(&card->card_lock, flags); cyy_change_rts_dtr(info, TIOCM_RTS, 0); spin_unlock_irqrestore(&card->card_lock, flags); } else { info->throttle = 0; } } } /* cy_unthrottle */ /* cy_start and cy_stop provide software output flow control as a function of XON/XOFF, software CTS, and other such stuff. */ static void cy_stop(struct tty_struct *tty) { struct cyclades_card *cinfo; struct cyclades_port *info = tty->driver_data; int channel; unsigned long flags; #ifdef CY_DEBUG_OTHER printk(KERN_DEBUG "cyc:cy_stop ttyC%d\n", info->line); #endif if (serial_paranoia_check(info, tty->name, "cy_stop")) return; cinfo = info->card; channel = info->line - cinfo->first_line; if (!cy_is_Z(cinfo)) { spin_lock_irqsave(&cinfo->card_lock, flags); cyy_writeb(info, CyCAR, channel & 0x03); cyy_writeb(info, CySRER, cyy_readb(info, CySRER) & ~CyTxRdy); spin_unlock_irqrestore(&cinfo->card_lock, flags); } } /* cy_stop */ static void cy_start(struct tty_struct *tty) { struct cyclades_card *cinfo; struct cyclades_port *info = tty->driver_data; int channel; unsigned long flags; #ifdef CY_DEBUG_OTHER printk(KERN_DEBUG "cyc:cy_start ttyC%d\n", info->line); #endif if (serial_paranoia_check(info, tty->name, "cy_start")) return; cinfo = info->card; channel = info->line - cinfo->first_line; if (!cy_is_Z(cinfo)) { spin_lock_irqsave(&cinfo->card_lock, flags); cyy_writeb(info, CyCAR, channel & 0x03); cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyTxRdy); spin_unlock_irqrestore(&cinfo->card_lock, flags); } } /* cy_start */ /* * cy_hangup() --- called by tty_hangup() when a hangup is signaled. */ static void cy_hangup(struct tty_struct *tty) { struct cyclades_port *info = tty->driver_data; #ifdef CY_DEBUG_OTHER printk(KERN_DEBUG "cyc:cy_hangup ttyC%d\n", info->line); #endif if (serial_paranoia_check(info, tty->name, "cy_hangup")) return; cy_flush_buffer(tty); cy_shutdown(info, tty); tty_port_hangup(&info->port); } /* cy_hangup */ static int cyy_carrier_raised(struct tty_port *port) { struct cyclades_port *info = container_of(port, struct cyclades_port, port); struct cyclades_card *cinfo = info->card; unsigned long flags; int channel = info->line - cinfo->first_line; u32 cd; spin_lock_irqsave(&cinfo->card_lock, flags); cyy_writeb(info, CyCAR, channel & 0x03); cd = cyy_readb(info, CyMSVR1) & CyDCD; spin_unlock_irqrestore(&cinfo->card_lock, flags); return cd; } static void cyy_dtr_rts(struct tty_port *port, int raise) { struct cyclades_port *info = container_of(port, struct cyclades_port, port); struct cyclades_card *cinfo = info->card; unsigned long flags; spin_lock_irqsave(&cinfo->card_lock, flags); cyy_change_rts_dtr(info, raise ? TIOCM_RTS | TIOCM_DTR : 0, raise ? 
0 : TIOCM_RTS | TIOCM_DTR); spin_unlock_irqrestore(&cinfo->card_lock, flags); } static int cyz_carrier_raised(struct tty_port *port) { struct cyclades_port *info = container_of(port, struct cyclades_port, port); return readl(&info->u.cyz.ch_ctrl->rs_status) & C_RS_DCD; } static void cyz_dtr_rts(struct tty_port *port, int raise) { struct cyclades_port *info = container_of(port, struct cyclades_port, port); struct cyclades_card *cinfo = info->card; struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl; int ret, channel = info->line - cinfo->first_line; u32 rs; rs = readl(&ch_ctrl->rs_control); if (raise) rs |= C_RS_RTS | C_RS_DTR; else rs &= ~(C_RS_RTS | C_RS_DTR); cy_writel(&ch_ctrl->rs_control, rs); ret = cyz_issue_cmd(cinfo, channel, C_CM_IOCTLM, 0L); if (ret != 0) printk(KERN_ERR "%s: retval on ttyC%d was %x\n", __func__, info->line, ret); #ifdef CY_DEBUG_DTR printk(KERN_DEBUG "%s: raising Z DTR\n", __func__); #endif } static const struct tty_port_operations cyy_port_ops = { .carrier_raised = cyy_carrier_raised, .dtr_rts = cyy_dtr_rts, .shutdown = cy_do_close, }; static const struct tty_port_operations cyz_port_ops = { .carrier_raised = cyz_carrier_raised, .dtr_rts = cyz_dtr_rts, .shutdown = cy_do_close, }; /* * --------------------------------------------------------------------- * cy_init() and friends * * cy_init() is called at boot-time to initialize the serial driver. * --------------------------------------------------------------------- */ static int cy_init_card(struct cyclades_card *cinfo) { struct cyclades_port *info; unsigned int channel, port; spin_lock_init(&cinfo->card_lock); cinfo->intr_enabled = 0; cinfo->ports = kcalloc(cinfo->nports, sizeof(*cinfo->ports), GFP_KERNEL); if (cinfo->ports == NULL) { printk(KERN_ERR "Cyclades: cannot allocate ports\n"); return -ENOMEM; } for (channel = 0, port = cinfo->first_line; channel < cinfo->nports; channel++, port++) { info = &cinfo->ports[channel]; tty_port_init(&info->port); info->magic = CYCLADES_MAGIC; info->card = cinfo; info->line = port; info->port.closing_wait = CLOSING_WAIT_DELAY; info->port.close_delay = 5 * HZ / 10; info->port.flags = STD_COM_FLAGS; init_completion(&info->shutdown_wait); if (cy_is_Z(cinfo)) { struct FIRM_ID *firm_id = cinfo->base_addr + ID_ADDRESS; struct ZFW_CTRL *zfw_ctrl; info->port.ops = &cyz_port_ops; info->type = PORT_STARTECH; zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff); info->u.cyz.ch_ctrl = &zfw_ctrl->ch_ctrl[channel]; info->u.cyz.buf_ctrl = &zfw_ctrl->buf_ctrl[channel]; if (cinfo->hw_ver == ZO_V1) info->xmit_fifo_size = CYZ_FIFO_SIZE; else info->xmit_fifo_size = 4 * CYZ_FIFO_SIZE; #ifdef CONFIG_CYZ_INTR setup_timer(&cyz_rx_full_timer[port], cyz_rx_restart, (unsigned long)info); #endif } else { unsigned short chip_number; int index = cinfo->bus_index; info->port.ops = &cyy_port_ops; info->type = PORT_CIRRUS; info->xmit_fifo_size = CyMAX_CHAR_FIFO; info->cor1 = CyPARITY_NONE | Cy_1_STOP | Cy_8_BITS; info->cor2 = CyETC; info->cor3 = 0x08; /* _very_ small rcv threshold */ chip_number = channel / CyPORTS_PER_CHIP; info->u.cyy.base_addr = cinfo->base_addr + (cy_chip_offset[chip_number] << index); info->chip_rev = cyy_readb(info, CyGFRCR); if (info->chip_rev >= CD1400_REV_J) { /* It is a CD1400 rev. 
J or later */ info->tbpr = baud_bpr_60[13]; /* Tx BPR */ info->tco = baud_co_60[13]; /* Tx CO */ info->rbpr = baud_bpr_60[13]; /* Rx BPR */ info->rco = baud_co_60[13]; /* Rx CO */ info->rtsdtr_inv = 1; } else { info->tbpr = baud_bpr_25[13]; /* Tx BPR */ info->tco = baud_co_25[13]; /* Tx CO */ info->rbpr = baud_bpr_25[13]; /* Rx BPR */ info->rco = baud_co_25[13]; /* Rx CO */ info->rtsdtr_inv = 0; } info->read_status_mask = CyTIMEOUT | CySPECHAR | CyBREAK | CyPARITY | CyFRAME | CyOVERRUN; } } #ifndef CONFIG_CYZ_INTR if (cy_is_Z(cinfo) && !timer_pending(&cyz_timerlist)) { mod_timer(&cyz_timerlist, jiffies + 1); #ifdef CY_PCI_DEBUG printk(KERN_DEBUG "Cyclades-Z polling initialized\n"); #endif } #endif return 0; } /* initialize chips on Cyclom-Y card -- return number of valid chips (which is number of ports/4) */ static unsigned short cyy_init_card(void __iomem *true_base_addr, int index) { unsigned int chip_number; void __iomem *base_addr; cy_writeb(true_base_addr + (Cy_HwReset << index), 0); /* Cy_HwReset is 0x1400 */ cy_writeb(true_base_addr + (Cy_ClrIntr << index), 0); /* Cy_ClrIntr is 0x1800 */ udelay(500L); for (chip_number = 0; chip_number < CyMAX_CHIPS_PER_CARD; chip_number++) { base_addr = true_base_addr + (cy_chip_offset[chip_number] << index); mdelay(1); if (readb(base_addr + (CyCCR << index)) != 0x00) { /************* printk(" chip #%d at %#6lx is never idle (CCR != 0)\n", chip_number, (unsigned long)base_addr); *************/ return chip_number; } cy_writeb(base_addr + (CyGFRCR << index), 0); udelay(10L); /* The Cyclom-16Y does not decode address bit 9 and therefore cannot distinguish between references to chip 0 and a non- existent chip 4. If the preceding clearing of the supposed chip 4 GFRCR register appears at chip 0, there is no chip 4 and this must be a Cyclom-16Y, not a Cyclom-32Ye. */ if (chip_number == 4 && readb(true_base_addr + (cy_chip_offset[0] << index) + (CyGFRCR << index)) == 0) { return chip_number; } cy_writeb(base_addr + (CyCCR << index), CyCHIP_RESET); mdelay(1); if (readb(base_addr + (CyGFRCR << index)) == 0x00) { /* printk(" chip #%d at %#6lx is not responding ", chip_number, (unsigned long)base_addr); printk("(GFRCR stayed 0)\n", */ return chip_number; } if ((0xf0 & (readb(base_addr + (CyGFRCR << index)))) != 0x40) { /* printk(" chip #%d at %#6lx is not valid (GFRCR == " "%#2x)\n", chip_number, (unsigned long)base_addr, base_addr[CyGFRCR<<index]); */ return chip_number; } cy_writeb(base_addr + (CyGCR << index), CyCH0_SERIAL); if (readb(base_addr + (CyGFRCR << index)) >= CD1400_REV_J) { /* It is a CD1400 rev. J or later */ /* Impossible to reach 5ms with this chip. Changed to 2ms instead (f = 500 Hz). */ cy_writeb(base_addr + (CyPPR << index), CyCLOCK_60_2MS); } else { /* f = 200 Hz */ cy_writeb(base_addr + (CyPPR << index), CyCLOCK_25_5MS); } /* printk(" chip #%d at %#6lx is rev 0x%2x\n", chip_number, (unsigned long)base_addr, readb(base_addr+(CyGFRCR<<index))); */ } return chip_number; } /* cyy_init_card */ /* * --------------------------------------------------------------------- * cy_detect_isa() - Probe for Cyclom-Y/ISA boards. * sets global variables and return the number of ISA boards found. 
* --------------------------------------------------------------------- */ static int __init cy_detect_isa(void) { #ifdef CONFIG_ISA struct cyclades_card *card; unsigned short cy_isa_irq, nboard; void __iomem *cy_isa_address; unsigned short i, j, k, cy_isa_nchan; int isparam = 0; nboard = 0; /* Check for module parameters */ for (i = 0; i < NR_CARDS; i++) { if (maddr[i] || i) { isparam = 1; cy_isa_addresses[i] = maddr[i]; } if (!maddr[i]) break; } /* scan the address table probing for Cyclom-Y/ISA boards */ for (i = 0; i < NR_ISA_ADDRS; i++) { unsigned int isa_address = cy_isa_addresses[i]; if (isa_address == 0x0000) return nboard; /* probe for CD1400... */ cy_isa_address = ioremap_nocache(isa_address, CyISA_Ywin); if (cy_isa_address == NULL) { printk(KERN_ERR "Cyclom-Y/ISA: can't remap base " "address\n"); continue; } cy_isa_nchan = CyPORTS_PER_CHIP * cyy_init_card(cy_isa_address, 0); if (cy_isa_nchan == 0) { iounmap(cy_isa_address); continue; } if (isparam && i < NR_CARDS && irq[i]) cy_isa_irq = irq[i]; else /* find out the board's irq by probing */ cy_isa_irq = detect_isa_irq(cy_isa_address); if (cy_isa_irq == 0) { printk(KERN_ERR "Cyclom-Y/ISA found at 0x%lx, but the " "IRQ could not be detected.\n", (unsigned long)cy_isa_address); iounmap(cy_isa_address); continue; } if ((cy_next_channel + cy_isa_nchan) > NR_PORTS) { printk(KERN_ERR "Cyclom-Y/ISA found at 0x%lx, but no " "more channels are available. Change NR_PORTS " "in cyclades.c and recompile kernel.\n", (unsigned long)cy_isa_address); iounmap(cy_isa_address); return nboard; } /* fill the next cy_card structure available */ for (j = 0; j < NR_CARDS; j++) { card = &cy_card[j]; if (card->base_addr == NULL) break; } if (j == NR_CARDS) { /* no more cy_cards available */ printk(KERN_ERR "Cyclom-Y/ISA found at 0x%lx, but no " "more cards can be used. 
Change NR_CARDS in " "cyclades.c and recompile kernel.\n", (unsigned long)cy_isa_address); iounmap(cy_isa_address); return nboard; } /* allocate IRQ */ if (request_irq(cy_isa_irq, cyy_interrupt, 0, "Cyclom-Y", card)) { printk(KERN_ERR "Cyclom-Y/ISA found at 0x%lx, but " "could not allocate IRQ#%d.\n", (unsigned long)cy_isa_address, cy_isa_irq); iounmap(cy_isa_address); return nboard; } /* set cy_card */ card->base_addr = cy_isa_address; card->ctl_addr.p9050 = NULL; card->irq = (int)cy_isa_irq; card->bus_index = 0; card->first_line = cy_next_channel; card->num_chips = cy_isa_nchan / CyPORTS_PER_CHIP; card->nports = cy_isa_nchan; if (cy_init_card(card)) { card->base_addr = NULL; free_irq(cy_isa_irq, card); iounmap(cy_isa_address); continue; } nboard++; printk(KERN_INFO "Cyclom-Y/ISA #%d: 0x%lx-0x%lx, IRQ%d found: " "%d channels starting from port %d\n", j + 1, (unsigned long)cy_isa_address, (unsigned long)(cy_isa_address + (CyISA_Ywin - 1)), cy_isa_irq, cy_isa_nchan, cy_next_channel); for (k = 0, j = cy_next_channel; j < cy_next_channel + cy_isa_nchan; j++, k++) tty_port_register_device(&card->ports[k].port, cy_serial_driver, j, NULL); cy_next_channel += cy_isa_nchan; } return nboard; #else return 0; #endif /* CONFIG_ISA */ } /* cy_detect_isa */ #ifdef CONFIG_PCI static inline int cyc_isfwstr(const char *str, unsigned int size) { unsigned int a; for (a = 0; a < size && *str; a++, str++) if (*str & 0x80) return -EINVAL; for (; a < size; a++, str++) if (*str) return -EINVAL; return 0; } static inline void cyz_fpga_copy(void __iomem *fpga, const u8 *data, unsigned int size) { for (; size > 0; size--) { cy_writel(fpga, *data++); udelay(10); } } static void plx_init(struct pci_dev *pdev, int irq, struct RUNTIME_9060 __iomem *addr) { /* Reset PLX */ cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) | 0x40000000); udelay(100L); cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) & ~0x40000000); /* Reload Config. Registers from EEPROM */ cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) | 0x20000000); udelay(100L); cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) & ~0x20000000); /* For some yet unknown reason, once the PLX9060 reloads the EEPROM, * the IRQ is lost and, thus, we have to re-write it to the PCI config. * registers. This will remain here until we find a permanent fix. 
*/ pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, irq); } static int __cyz_load_fw(const struct firmware *fw, const char *name, const u32 mailbox, void __iomem *base, void __iomem *fpga) { const void *ptr = fw->data; const struct zfile_header *h = ptr; const struct zfile_config *c, *cs; const struct zfile_block *b, *bs; unsigned int a, tmp, len = fw->size; #define BAD_FW KERN_ERR "Bad firmware: " if (len < sizeof(*h)) { printk(BAD_FW "too short: %u<%zu\n", len, sizeof(*h)); return -EINVAL; } cs = ptr + h->config_offset; bs = ptr + h->block_offset; if ((void *)(cs + h->n_config) > ptr + len || (void *)(bs + h->n_blocks) > ptr + len) { printk(BAD_FW "too short"); return -EINVAL; } if (cyc_isfwstr(h->name, sizeof(h->name)) || cyc_isfwstr(h->date, sizeof(h->date))) { printk(BAD_FW "bad formatted header string\n"); return -EINVAL; } if (strncmp(name, h->name, sizeof(h->name))) { printk(BAD_FW "bad name '%s' (expected '%s')\n", h->name, name); return -EINVAL; } tmp = 0; for (c = cs; c < cs + h->n_config; c++) { for (a = 0; a < c->n_blocks; a++) if (c->block_list[a] > h->n_blocks) { printk(BAD_FW "bad block ref number in cfgs\n"); return -EINVAL; } if (c->mailbox == mailbox && c->function == 0) /* 0 is normal */ tmp++; } if (!tmp) { printk(BAD_FW "nothing appropriate\n"); return -EINVAL; } for (b = bs; b < bs + h->n_blocks; b++) if (b->file_offset + b->size > len) { printk(BAD_FW "bad block data offset\n"); return -EINVAL; } /* everything is OK, let's seek'n'load it */ for (c = cs; c < cs + h->n_config; c++) if (c->mailbox == mailbox && c->function == 0) break; for (a = 0; a < c->n_blocks; a++) { b = &bs[c->block_list[a]]; if (b->type == ZBLOCK_FPGA) { if (fpga != NULL) cyz_fpga_copy(fpga, ptr + b->file_offset, b->size); } else { if (base != NULL) memcpy_toio(base + b->ram_offset, ptr + b->file_offset, b->size); } } #undef BAD_FW return 0; } static int cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr, struct RUNTIME_9060 __iomem *ctl_addr, int irq) { const struct firmware *fw; struct FIRM_ID __iomem *fid = base_addr + ID_ADDRESS; struct CUSTOM_REG __iomem *cust = base_addr; struct ZFW_CTRL __iomem *pt_zfwctrl; void __iomem *tmp; u32 mailbox, status, nchan; unsigned int i; int retval; retval = request_firmware(&fw, "cyzfirm.bin", &pdev->dev); if (retval) { dev_err(&pdev->dev, "can't get firmware\n"); goto err; } /* Check whether the firmware is already loaded and running. If positive, skip this board */ if (__cyz_fpga_loaded(ctl_addr) && readl(&fid->signature) == ZFIRM_ID) { u32 cntval = readl(base_addr + 0x190); udelay(100); if (cntval != readl(base_addr + 0x190)) { /* FW counter is working, FW is running */ dev_dbg(&pdev->dev, "Cyclades-Z FW already loaded. 
" "Skipping board.\n"); retval = 0; goto err_rel; } } /* start boot */ cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) & ~0x00030800UL); mailbox = readl(&ctl_addr->mail_box_0); if (mailbox == 0 || __cyz_fpga_loaded(ctl_addr)) { /* stops CPU and set window to beginning of RAM */ cy_writel(&ctl_addr->loc_addr_base, WIN_CREG); cy_writel(&cust->cpu_stop, 0); cy_writel(&ctl_addr->loc_addr_base, WIN_RAM); udelay(100); } plx_init(pdev, irq, ctl_addr); if (mailbox != 0) { /* load FPGA */ retval = __cyz_load_fw(fw, "Cyclom-Z", mailbox, NULL, base_addr); if (retval) goto err_rel; if (!__cyz_fpga_loaded(ctl_addr)) { dev_err(&pdev->dev, "fw upload successful, but fw is " "not loaded\n"); goto err_rel; } } /* stops CPU and set window to beginning of RAM */ cy_writel(&ctl_addr->loc_addr_base, WIN_CREG); cy_writel(&cust->cpu_stop, 0); cy_writel(&ctl_addr->loc_addr_base, WIN_RAM); udelay(100); /* clear memory */ for (tmp = base_addr; tmp < base_addr + RAM_SIZE; tmp++) cy_writeb(tmp, 255); if (mailbox != 0) { /* set window to last 512K of RAM */ cy_writel(&ctl_addr->loc_addr_base, WIN_RAM + RAM_SIZE); for (tmp = base_addr; tmp < base_addr + RAM_SIZE; tmp++) cy_writeb(tmp, 255); /* set window to beginning of RAM */ cy_writel(&ctl_addr->loc_addr_base, WIN_RAM); } retval = __cyz_load_fw(fw, "Cyclom-Z", mailbox, base_addr, NULL); release_firmware(fw); if (retval) goto err; /* finish boot and start boards */ cy_writel(&ctl_addr->loc_addr_base, WIN_CREG); cy_writel(&cust->cpu_start, 0); cy_writel(&ctl_addr->loc_addr_base, WIN_RAM); i = 0; while ((status = readl(&fid->signature)) != ZFIRM_ID && i++ < 40) msleep(100); if (status != ZFIRM_ID) { if (status == ZFIRM_HLT) { dev_err(&pdev->dev, "you need an external power supply " "for this number of ports. Firmware halted and " "board reset.\n"); retval = -EIO; goto err; } dev_warn(&pdev->dev, "fid->signature = 0x%x... Waiting " "some more time\n", status); while ((status = readl(&fid->signature)) != ZFIRM_ID && i++ < 200) msleep(100); if (status != ZFIRM_ID) { dev_err(&pdev->dev, "Board not started in 20 seconds! " "Giving up. (fid->signature = 0x%x)\n", status); dev_info(&pdev->dev, "*** Warning ***: if you are " "upgrading the FW, please power cycle the " "system before loading the new FW to the " "Cyclades-Z.\n"); if (__cyz_fpga_loaded(ctl_addr)) plx_init(pdev, irq, ctl_addr); retval = -EIO; goto err; } dev_dbg(&pdev->dev, "Firmware started after %d seconds.\n", i / 10); } pt_zfwctrl = base_addr + readl(&fid->zfwctrl_addr); dev_dbg(&pdev->dev, "fid=> %p, zfwctrl_addr=> %x, npt_zfwctrl=> %p\n", base_addr + ID_ADDRESS, readl(&fid->zfwctrl_addr), base_addr + readl(&fid->zfwctrl_addr)); nchan = readl(&pt_zfwctrl->board_ctrl.n_channel); dev_info(&pdev->dev, "Cyclades-Z FW loaded: version = %x, ports = %u\n", readl(&pt_zfwctrl->board_ctrl.fw_version), nchan); if (nchan == 0) { dev_warn(&pdev->dev, "no Cyclades-Z ports were found. Please " "check the connection between the Z host card and the " "serial expanders.\n"); if (__cyz_fpga_loaded(ctl_addr)) plx_init(pdev, irq, ctl_addr); dev_info(&pdev->dev, "Null number of ports detected. Board " "reset.\n"); retval = 0; goto err; } cy_writel(&pt_zfwctrl->board_ctrl.op_system, C_OS_LINUX); cy_writel(&pt_zfwctrl->board_ctrl.dr_version, DRIVER_VERSION); /* Early firmware failed to start looking for commands. This enables firmware interrupts for those commands. 
*/ cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) | (1 << 17)); cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) | 0x00030800UL); return nchan; err_rel: release_firmware(fw); err: return retval; } static int cy_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct cyclades_card *card; void __iomem *addr0 = NULL, *addr2 = NULL; char *card_name = NULL; u32 uninitialized_var(mailbox); unsigned int device_id, nchan = 0, card_no, i, j; unsigned char plx_ver; int retval, irq; retval = pci_enable_device(pdev); if (retval) { dev_err(&pdev->dev, "cannot enable device\n"); goto err; } /* read PCI configuration area */ irq = pdev->irq; device_id = pdev->device & ~PCI_DEVICE_ID_MASK; #if defined(__alpha__) if (device_id == PCI_DEVICE_ID_CYCLOM_Y_Lo) { /* below 1M? */ dev_err(&pdev->dev, "Cyclom-Y/PCI not supported for low " "addresses on Alpha systems.\n"); retval = -EIO; goto err_dis; } #endif if (device_id == PCI_DEVICE_ID_CYCLOM_Z_Lo) { dev_err(&pdev->dev, "Cyclades-Z/PCI not supported for low " "addresses\n"); retval = -EIO; goto err_dis; } if (pci_resource_flags(pdev, 2) & IORESOURCE_IO) { dev_warn(&pdev->dev, "PCI I/O bit incorrectly set. Ignoring " "it...\n"); pdev->resource[2].flags &= ~IORESOURCE_IO; } retval = pci_request_regions(pdev, "cyclades"); if (retval) { dev_err(&pdev->dev, "failed to reserve resources\n"); goto err_dis; } retval = -EIO; if (device_id == PCI_DEVICE_ID_CYCLOM_Y_Lo || device_id == PCI_DEVICE_ID_CYCLOM_Y_Hi) { card_name = "Cyclom-Y"; addr0 = ioremap_nocache(pci_resource_start(pdev, 0), CyPCI_Yctl); if (addr0 == NULL) { dev_err(&pdev->dev, "can't remap ctl region\n"); goto err_reg; } addr2 = ioremap_nocache(pci_resource_start(pdev, 2), CyPCI_Ywin); if (addr2 == NULL) { dev_err(&pdev->dev, "can't remap base region\n"); goto err_unmap; } nchan = CyPORTS_PER_CHIP * cyy_init_card(addr2, 1); if (nchan == 0) { dev_err(&pdev->dev, "Cyclom-Y PCI host card with no " "Serial-Modules\n"); goto err_unmap; } } else if (device_id == PCI_DEVICE_ID_CYCLOM_Z_Hi) { struct RUNTIME_9060 __iomem *ctl_addr; ctl_addr = addr0 = ioremap_nocache(pci_resource_start(pdev, 0), CyPCI_Zctl); if (addr0 == NULL) { dev_err(&pdev->dev, "can't remap ctl region\n"); goto err_reg; } /* Disable interrupts on the PLX before resetting it */ cy_writew(&ctl_addr->intr_ctrl_stat, readw(&ctl_addr->intr_ctrl_stat) & ~0x0900); plx_init(pdev, irq, addr0); mailbox = readl(&ctl_addr->mail_box_0); addr2 = ioremap_nocache(pci_resource_start(pdev, 2), mailbox == ZE_V1 ? CyPCI_Ze_win : CyPCI_Zwin); if (addr2 == NULL) { dev_err(&pdev->dev, "can't remap base region\n"); goto err_unmap; } if (mailbox == ZE_V1) { card_name = "Cyclades-Ze"; } else { card_name = "Cyclades-8Zo"; #ifdef CY_PCI_DEBUG if (mailbox == ZO_V1) { cy_writel(&ctl_addr->loc_addr_base, WIN_CREG); dev_info(&pdev->dev, "Cyclades-8Zo/PCI: FPGA " "id %lx, ver %lx\n", (ulong)(0xff & readl(&((struct CUSTOM_REG *)addr2)-> fpga_id)), (ulong)(0xff & readl(&((struct CUSTOM_REG *)addr2)-> fpga_version))); cy_writel(&ctl_addr->loc_addr_base, WIN_RAM); } else { dev_info(&pdev->dev, "Cyclades-Z/PCI: New " "Cyclades-Z board. FPGA not loaded\n"); } #endif /* The following clears the firmware id word. This ensures that the driver will not attempt to talk to the board until it has been properly initialized. 
*/ if ((mailbox == ZO_V1) || (mailbox == ZO_V2)) cy_writel(addr2 + ID_ADDRESS, 0L); } retval = cyz_load_fw(pdev, addr2, addr0, irq); if (retval <= 0) goto err_unmap; nchan = retval; } if ((cy_next_channel + nchan) > NR_PORTS) { dev_err(&pdev->dev, "Cyclades-8Zo/PCI found, but no " "channels are available. Change NR_PORTS in " "cyclades.c and recompile kernel.\n"); goto err_unmap; } /* fill the next cy_card structure available */ for (card_no = 0; card_no < NR_CARDS; card_no++) { card = &cy_card[card_no]; if (card->base_addr == NULL) break; } if (card_no == NR_CARDS) { /* no more cy_cards available */ dev_err(&pdev->dev, "Cyclades-8Zo/PCI found, but no " "more cards can be used. Change NR_CARDS in " "cyclades.c and recompile kernel.\n"); goto err_unmap; } if (device_id == PCI_DEVICE_ID_CYCLOM_Y_Lo || device_id == PCI_DEVICE_ID_CYCLOM_Y_Hi) { /* allocate IRQ */ retval = request_irq(irq, cyy_interrupt, IRQF_SHARED, "Cyclom-Y", card); if (retval) { dev_err(&pdev->dev, "could not allocate IRQ\n"); goto err_unmap; } card->num_chips = nchan / CyPORTS_PER_CHIP; } else { struct FIRM_ID __iomem *firm_id = addr2 + ID_ADDRESS; struct ZFW_CTRL __iomem *zfw_ctrl; zfw_ctrl = addr2 + (readl(&firm_id->zfwctrl_addr) & 0xfffff); card->hw_ver = mailbox; card->num_chips = (unsigned int)-1; card->board_ctrl = &zfw_ctrl->board_ctrl; #ifdef CONFIG_CYZ_INTR /* allocate IRQ only if board has an IRQ */ if (irq != 0 && irq != 255) { retval = request_irq(irq, cyz_interrupt, IRQF_SHARED, "Cyclades-Z", card); if (retval) { dev_err(&pdev->dev, "could not allocate IRQ\n"); goto err_unmap; } } #endif /* CONFIG_CYZ_INTR */ } /* set cy_card */ card->base_addr = addr2; card->ctl_addr.p9050 = addr0; card->irq = irq; card->bus_index = 1; card->first_line = cy_next_channel; card->nports = nchan; retval = cy_init_card(card); if (retval) goto err_null; pci_set_drvdata(pdev, card); if (device_id == PCI_DEVICE_ID_CYCLOM_Y_Lo || device_id == PCI_DEVICE_ID_CYCLOM_Y_Hi) { /* enable interrupts in the PCI interface */ plx_ver = readb(addr2 + CyPLX_VER) & 0x0f; switch (plx_ver) { case PLX_9050: cy_writeb(addr0 + 0x4c, 0x43); break; case PLX_9060: case PLX_9080: default: /* Old boards, use PLX_9060 */ { struct RUNTIME_9060 __iomem *ctl_addr = addr0; plx_init(pdev, irq, ctl_addr); cy_writew(&ctl_addr->intr_ctrl_stat, readw(&ctl_addr->intr_ctrl_stat) | 0x0900); break; } } } dev_info(&pdev->dev, "%s/PCI #%d found: %d channels starting from " "port %d.\n", card_name, card_no + 1, nchan, cy_next_channel); for (j = 0, i = cy_next_channel; i < cy_next_channel + nchan; i++, j++) tty_port_register_device(&card->ports[j].port, cy_serial_driver, i, &pdev->dev); cy_next_channel += nchan; return 0; err_null: card->base_addr = NULL; free_irq(irq, card); err_unmap: iounmap(addr0); if (addr2) iounmap(addr2); err_reg: pci_release_regions(pdev); err_dis: pci_disable_device(pdev); err: return retval; } static void cy_pci_remove(struct pci_dev *pdev) { struct cyclades_card *cinfo = pci_get_drvdata(pdev); unsigned int i, channel; /* non-Z with old PLX */ if (!cy_is_Z(cinfo) && (readb(cinfo->base_addr + CyPLX_VER) & 0x0f) == PLX_9050) cy_writeb(cinfo->ctl_addr.p9050 + 0x4c, 0); else #ifndef CONFIG_CYZ_INTR if (!cy_is_Z(cinfo)) #endif cy_writew(&cinfo->ctl_addr.p9060->intr_ctrl_stat, readw(&cinfo->ctl_addr.p9060->intr_ctrl_stat) & ~0x0900); iounmap(cinfo->base_addr); if (cinfo->ctl_addr.p9050) iounmap(cinfo->ctl_addr.p9050); if (cinfo->irq #ifndef CONFIG_CYZ_INTR && !cy_is_Z(cinfo) #endif /* CONFIG_CYZ_INTR */ ) free_irq(cinfo->irq, cinfo); 
pci_release_regions(pdev); cinfo->base_addr = NULL; for (channel = 0, i = cinfo->first_line; i < cinfo->first_line + cinfo->nports; i++, channel++) { tty_unregister_device(cy_serial_driver, i); tty_port_destroy(&cinfo->ports[channel].port); } cinfo->nports = 0; kfree(cinfo->ports); } static struct pci_driver cy_pci_driver = { .name = "cyclades", .id_table = cy_pci_dev_id, .probe = cy_pci_probe, .remove = cy_pci_remove }; #endif static int cyclades_proc_show(struct seq_file *m, void *v) { struct cyclades_port *info; unsigned int i, j; __u32 cur_jifs = jiffies; seq_puts(m, "Dev TimeOpen BytesOut IdleOut BytesIn " "IdleIn Overruns Ldisc\n"); /* Output one line for each known port */ for (i = 0; i < NR_CARDS; i++) for (j = 0; j < cy_card[i].nports; j++) { info = &cy_card[i].ports[j]; if (info->port.count) { /* XXX is the ldisc num worth this? */ struct tty_struct *tty; struct tty_ldisc *ld; int num = 0; tty = tty_port_tty_get(&info->port); if (tty) { ld = tty_ldisc_ref(tty); if (ld) { num = ld->ops->num; tty_ldisc_deref(ld); } tty_kref_put(tty); } seq_printf(m, "%3d %8lu %10lu %8lu " "%10lu %8lu %9lu %6d\n", info->line, (cur_jifs - info->idle_stats.in_use) / HZ, info->idle_stats.xmit_bytes, (cur_jifs - info->idle_stats.xmit_idle)/ HZ, info->idle_stats.recv_bytes, (cur_jifs - info->idle_stats.recv_idle)/ HZ, info->idle_stats.overruns, num); } else seq_printf(m, "%3d %8lu %10lu %8lu " "%10lu %8lu %9lu %6ld\n", info->line, 0L, 0L, 0L, 0L, 0L, 0L, 0L); } return 0; } static int cyclades_proc_open(struct inode *inode, struct file *file) { return single_open(file, cyclades_proc_show, NULL); } static const struct file_operations cyclades_proc_fops = { .owner = THIS_MODULE, .open = cyclades_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /* The serial driver boot-time initialization code! Hardware I/O ports are mapped to character special devices on a first found, first allocated manner. That is, this code searches for Cyclom cards in the system. As each is found, it is probed to discover how many chips (and thus how many ports) are present. These ports are mapped to the tty ports 32 and upward in monotonic fashion. If an 8-port card is replaced with a 16-port card, the port mapping on a following card will shift. This approach is different from what is used in the other serial device driver because the Cyclom is more properly a multiplexer, not just an aggregation of serial ports on one card. If there are more cards with more ports than have been statically allocated above, a warning is printed and the extra ports are ignored. 
*/ static const struct tty_operations cy_ops = { .open = cy_open, .close = cy_close, .write = cy_write, .put_char = cy_put_char, .flush_chars = cy_flush_chars, .write_room = cy_write_room, .chars_in_buffer = cy_chars_in_buffer, .flush_buffer = cy_flush_buffer, .ioctl = cy_ioctl, .throttle = cy_throttle, .unthrottle = cy_unthrottle, .set_termios = cy_set_termios, .stop = cy_stop, .start = cy_start, .hangup = cy_hangup, .break_ctl = cy_break, .wait_until_sent = cy_wait_until_sent, .tiocmget = cy_tiocmget, .tiocmset = cy_tiocmset, .get_icount = cy_get_icount, .proc_fops = &cyclades_proc_fops, }; static int __init cy_init(void) { unsigned int nboards; int retval = -ENOMEM; cy_serial_driver = alloc_tty_driver(NR_PORTS); if (!cy_serial_driver) goto err; printk(KERN_INFO "Cyclades driver " CY_VERSION "\n"); /* Initialize the tty_driver structure */ cy_serial_driver->driver_name = "cyclades"; cy_serial_driver->name = "ttyC"; cy_serial_driver->major = CYCLADES_MAJOR; cy_serial_driver->minor_start = 0; cy_serial_driver->type = TTY_DRIVER_TYPE_SERIAL; cy_serial_driver->subtype = SERIAL_TYPE_NORMAL; cy_serial_driver->init_termios = tty_std_termios; cy_serial_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; cy_serial_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; tty_set_operations(cy_serial_driver, &cy_ops); retval = tty_register_driver(cy_serial_driver); if (retval) { printk(KERN_ERR "Couldn't register Cyclades serial driver\n"); goto err_frtty; } /* the code below is responsible to find the boards. Each different type of board has its own detection routine. If a board is found, the next cy_card structure available is set by the detection routine. These functions are responsible for checking the availability of cy_card and cy_port data structures and updating the cy_next_channel. */ /* look for isa boards */ nboards = cy_detect_isa(); #ifdef CONFIG_PCI /* look for pci boards */ retval = pci_register_driver(&cy_pci_driver); if (retval && !nboards) { tty_unregister_driver(cy_serial_driver); goto err_frtty; } #endif return 0; err_frtty: put_tty_driver(cy_serial_driver); err: return retval; } /* cy_init */ static void __exit cy_cleanup_module(void) { struct cyclades_card *card; unsigned int i, e1; #ifndef CONFIG_CYZ_INTR del_timer_sync(&cyz_timerlist); #endif /* CONFIG_CYZ_INTR */ e1 = tty_unregister_driver(cy_serial_driver); if (e1) printk(KERN_ERR "failed to unregister Cyclades serial " "driver(%d)\n", e1); #ifdef CONFIG_PCI pci_unregister_driver(&cy_pci_driver); #endif for (i = 0; i < NR_CARDS; i++) { card = &cy_card[i]; if (card->base_addr) { /* clear interrupt */ cy_writeb(card->base_addr + Cy_ClrIntr, 0); iounmap(card->base_addr); if (card->ctl_addr.p9050) iounmap(card->ctl_addr.p9050); if (card->irq #ifndef CONFIG_CYZ_INTR && !cy_is_Z(card) #endif /* CONFIG_CYZ_INTR */ ) free_irq(card->irq, card); for (e1 = card->first_line; e1 < card->first_line + card->nports; e1++) tty_unregister_device(cy_serial_driver, e1); kfree(card->ports); } } put_tty_driver(cy_serial_driver); } /* cy_cleanup_module */ module_init(cy_init); module_exit(cy_cleanup_module); MODULE_LICENSE("GPL"); MODULE_VERSION(CY_VERSION); MODULE_ALIAS_CHARDEV_MAJOR(CYCLADES_MAJOR); MODULE_FIRMWARE("cyzfirm.bin");
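The cyy_baud_calc() routine earlier in this file picks a CD1400 clock-option prescaler and baud-period register from the chip clock and the requested rate. The stand-alone sketch below mirrors that arithmetic so a divisor can be checked by hand; it is an illustrative user-space program with hypothetical names, not part of the driver.

#include <stdio.h>

/* Mirror of the driver's prescaler search: co_val starts at 2048 and is
 * divided by 4 on each step, stopping once clock / co_val / baud exceeds 63,
 * then the baud-period register is rounded to the nearest integer. */
static void cd1400_baud_calc(unsigned int clock_hz, unsigned int baud,
			     unsigned int *co, unsigned int *bpr)
{
	unsigned int c, co_val;

	if (baud == 0) {	/* the driver zeroes all four registers for baud 0 */
		*co = *bpr = 0;
		return;
	}
	for (c = 4, co_val = 2048; c; c--, co_val >>= 2) {
		if (clock_hz / co_val / baud > 63)
			break;
	}
	*bpr = (clock_hz / co_val * 2 / baud + 1) / 2;
	if (*bpr > 255)
		*bpr = 255;
	*co = c;
}

int main(void)
{
	unsigned int co, bpr;

	/* 60 MHz chip clock (CD1400 rev. J or later), 9600 baud */
	cd1400_baud_calc(60000000, 9600, &co, &bpr);
	printf("co=%u bpr=%u\n", co, bpr);
	return 0;
}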
HRTKernel/Hacker_Kernel_SM-G92X_MM
drivers/tty/cyclades.c
C
gpl-2.0
112,895
/* * Copyright (C) 2001,2002,2003 Broadcom Corporation * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* * BCM1250-specific PCI support * * This module provides the glue between Linux's PCI subsystem * and the hardware. We basically provide glue for accessing * configuration space, and set up the translation for I/O * space accesses. * * To access configuration space, we use ioremap. In the 32-bit * kernel, this consumes either 4 or 8 page table pages, and 16MB of * kernel mapped memory. Hopefully neither of these should be a huge * problem. */ #include <linux/types.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/console.h> #include <linux/tty.h> #include <linux/vt.h> #include <asm/io.h> #include <asm/sibyte/sb1250_defs.h> #include <asm/sibyte/sb1250_regs.h> #include <asm/sibyte/sb1250_scd.h> #include <asm/sibyte/board.h> /* * Macros for calculating offsets into config space given a device * structure or dev/fun/reg */ #define CFGOFFSET(bus, devfn, where) (((bus)<<16) + ((devfn)<<8) + (where)) #define CFGADDR(bus, devfn, where) CFGOFFSET((bus)->number, (devfn), where) static void *cfg_space; #define PCI_BUS_ENABLED 1 #define LDT_BUS_ENABLED 2 #define PCI_DEVICE_MODE 4 static int sb1250_bus_status; #define PCI_BRIDGE_DEVICE 0 #define LDT_BRIDGE_DEVICE 1 #ifdef CONFIG_SIBYTE_HAS_LDT /* * HT's level-sensitive interrupts require EOI, which is generated * through a 4MB memory-mapped region */ unsigned long ldt_eoi_space; #endif /* * Read/write 32-bit values in config space. */ static inline u32 READCFG32(u32 addr) { return *(u32 *) (cfg_space + (addr & ~3)); } static inline void WRITECFG32(u32 addr, u32 data) { *(u32 *) (cfg_space + (addr & ~3)) = data; } int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { return dev->irq; } /* Do platform specific device initialization at pci_enable_device() time */ int pcibios_plat_dev_init(struct pci_dev *dev) { return 0; } /* * Some checks before doing config cycles: * In PCI Device Mode, hide everything on bus 0 except the LDT host * bridge. Otherwise, access is controlled by bridge MasterEn bits. */ static int sb1250_pci_can_access(struct pci_bus *bus, int devfn) { u32 devno; if (!(sb1250_bus_status & (PCI_BUS_ENABLED | PCI_DEVICE_MODE))) return 0; if (bus->number == 0) { devno = PCI_SLOT(devfn); if (devno == LDT_BRIDGE_DEVICE) return (sb1250_bus_status & LDT_BUS_ENABLED) != 0; else if (sb1250_bus_status & PCI_DEVICE_MODE) return 0; else return 1; } else return 1; } /* * Read/write access functions for various sizes of values * in config space. Return all 1's for disallowed accesses * for a kludgy but adequate simulation of master aborts. 
*/ static int sb1250_pcibios_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 * val) { u32 data = 0; if ((size == 2) && (where & 1)) return PCIBIOS_BAD_REGISTER_NUMBER; else if ((size == 4) && (where & 3)) return PCIBIOS_BAD_REGISTER_NUMBER; if (sb1250_pci_can_access(bus, devfn)) data = READCFG32(CFGADDR(bus, devfn, where)); else data = 0xFFFFFFFF; if (size == 1) *val = (data >> ((where & 3) << 3)) & 0xff; else if (size == 2) *val = (data >> ((where & 3) << 3)) & 0xffff; else *val = data; return PCIBIOS_SUCCESSFUL; } static int sb1250_pcibios_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { u32 cfgaddr = CFGADDR(bus, devfn, where); u32 data = 0; if ((size == 2) && (where & 1)) return PCIBIOS_BAD_REGISTER_NUMBER; else if ((size == 4) && (where & 3)) return PCIBIOS_BAD_REGISTER_NUMBER; if (!sb1250_pci_can_access(bus, devfn)) return PCIBIOS_BAD_REGISTER_NUMBER; data = READCFG32(cfgaddr); if (size == 1) data = (data & ~(0xff << ((where & 3) << 3))) | (val << ((where & 3) << 3)); else if (size == 2) data = (data & ~(0xffff << ((where & 3) << 3))) | (val << ((where & 3) << 3)); else data = val; WRITECFG32(cfgaddr, data); return PCIBIOS_SUCCESSFUL; } struct pci_ops sb1250_pci_ops = { .read = sb1250_pcibios_read, .write = sb1250_pcibios_write, }; static struct resource sb1250_mem_resource = { .name = "SB1250 PCI MEM", .start = 0x40000000UL, .end = 0x5fffffffUL, .flags = IORESOURCE_MEM, }; static struct resource sb1250_io_resource = { .name = "SB1250 PCI I/O", .start = 0x00000000UL, .end = 0x01ffffffUL, .flags = IORESOURCE_IO, }; struct pci_controller sb1250_controller = { .pci_ops = &sb1250_pci_ops, .mem_resource = &sb1250_mem_resource, .io_resource = &sb1250_io_resource, }; static int __init sb1250_pcibios_init(void) { void __iomem *io_map_base; uint32_t cmdreg; uint64_t reg; /* CFE will assign PCI resources */ pci_set_flags(PCI_PROBE_ONLY); /* Avoid ISA compat ranges. */ PCIBIOS_MIN_IO = 0x00008000UL; PCIBIOS_MIN_MEM = 0x01000000UL; /* Set I/O resource limits. */ ioport_resource.end = 0x01ffffffUL; /* 32MB accessible by sb1250 */ iomem_resource.end = 0xffffffffUL; /* no HT support yet */ cfg_space = ioremap(A_PHYS_LDTPCI_CFG_MATCH_BITS, 16 * 1024 * 1024); /* * See if the PCI bus has been configured by the firmware. */ reg = __raw_readq(IOADDR(A_SCD_SYSTEM_CFG)); if (!(reg & M_SYS_PCI_HOST)) { sb1250_bus_status |= PCI_DEVICE_MODE; } else { cmdreg = READCFG32(CFGOFFSET (0, PCI_DEVFN(PCI_BRIDGE_DEVICE, 0), PCI_COMMAND)); if (!(cmdreg & PCI_COMMAND_MASTER)) { printk ("PCI: Skipping PCI probe. Bus is not initialized.\n"); iounmap(cfg_space); return 0; } sb1250_bus_status |= PCI_BUS_ENABLED; } /* * Establish mappings in KSEG2 (kernel virtual) to PCI I/O * space. Use "match bytes" policy to make everything look * little-endian. So, you need to also set * CONFIG_SWAP_IO_SPACE, but this is the combination that * works correctly with most of Linux's drivers. * XXX ehs: Should this happen in PCI Device mode? */ io_map_base = ioremap(A_PHYS_LDTPCI_IO_MATCH_BYTES, 1024 * 1024); sb1250_controller.io_map_base = (unsigned long)io_map_base; set_io_port_base((unsigned long)io_map_base); #ifdef CONFIG_SIBYTE_HAS_LDT /* * Also check the LDT bridge's enable, just in case we didn't * initialize that one. */ cmdreg = READCFG32(CFGOFFSET(0, PCI_DEVFN(LDT_BRIDGE_DEVICE, 0), PCI_COMMAND)); if (cmdreg & PCI_COMMAND_MASTER) { sb1250_bus_status |= LDT_BUS_ENABLED; /* * Need bits 23:16 to convey vector number. 
Note that * this consumes 4MB of kernel-mapped memory * (Kseg2/Kseg3) for 32-bit kernel. */ ldt_eoi_space = (unsigned long) ioremap(A_PHYS_LDT_SPECIAL_MATCH_BYTES, 4 * 1024 * 1024); } #endif register_pci_controller(&sb1250_controller); #ifdef CONFIG_VGA_CONSOLE console_lock(); do_take_over_console(&vga_con, 0, MAX_NR_CONSOLES - 1, 1); console_unlock(); #endif return 0; } arch_initcall(sb1250_pcibios_init);
prakhya/linux_sai
arch/mips/pci/pci-sb1250.c
C
gpl-2.0
7,687
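The pci-sb1250.c record above reaches PCI configuration space through a flat ioremap window: bus, devfn and register are packed into a byte offset, and sub-word reads are carved out of an aligned 32-bit access. A minimal userspace sketch of that packing and byte-lane extraction follows; it uses no kernel headers and the sample values are made up for illustration.

#include <stdio.h>
#include <stdint.h>

/* Same packing as CFGOFFSET in pci-sb1250.c: (bus << 16) + (devfn << 8) + reg */
static uint32_t cfg_offset(uint32_t bus, uint32_t devfn, uint32_t where)
{
	return (bus << 16) + (devfn << 8) + where;
}

/* Pull a 1- or 2-byte field out of an aligned 32-bit config word,
 * mirroring the shifts in sb1250_pcibios_read() */
static uint32_t extract(uint32_t data, int where, int size)
{
	int shift = (where & 3) << 3;

	if (size == 1)
		return (data >> shift) & 0xff;
	if (size == 2)
		return (data >> shift) & 0xffff;
	return data;
}

int main(void)
{
	uint32_t word = 0x12345678;	/* hypothetical config dword */

	printf("offset  = 0x%06x\n", cfg_offset(0, 0x08, 0x04));
	printf("byte@6  = 0x%02x\n", extract(word, 6, 1));	/* prints 0x34 */
	printf("half@6  = 0x%04x\n", extract(word, 6, 2));	/* prints 0x1234 */
	return 0;
}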
/* * linux/fs/hpfs/dnode.c * * Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999 * * handling directory dnode tree - adding, deleteing & searching for dirents */ #include "hpfs_fn.h" static loff_t get_pos(struct dnode *d, struct hpfs_dirent *fde) { struct hpfs_dirent *de; struct hpfs_dirent *de_end = dnode_end_de(d); int i = 1; for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { if (de == fde) return ((loff_t) le32_to_cpu(d->self) << 4) | (loff_t)i; i++; } printk("HPFS: get_pos: not_found\n"); return ((loff_t)le32_to_cpu(d->self) << 4) | (loff_t)1; } void hpfs_add_pos(struct inode *inode, loff_t *pos) { struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); int i = 0; loff_t **ppos; if (hpfs_inode->i_rddir_off) for (; hpfs_inode->i_rddir_off[i]; i++) if (hpfs_inode->i_rddir_off[i] == pos) return; if (!(i&0x0f)) { if (!(ppos = kmalloc((i+0x11) * sizeof(loff_t*), GFP_NOFS))) { printk("HPFS: out of memory for position list\n"); return; } if (hpfs_inode->i_rddir_off) { memcpy(ppos, hpfs_inode->i_rddir_off, i * sizeof(loff_t)); kfree(hpfs_inode->i_rddir_off); } hpfs_inode->i_rddir_off = ppos; } hpfs_inode->i_rddir_off[i] = pos; hpfs_inode->i_rddir_off[i + 1] = NULL; } void hpfs_del_pos(struct inode *inode, loff_t *pos) { struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); loff_t **i, **j; if (!hpfs_inode->i_rddir_off) goto not_f; for (i = hpfs_inode->i_rddir_off; *i; i++) if (*i == pos) goto fnd; goto not_f; fnd: for (j = i + 1; *j; j++) ; *i = *(j - 1); *(j - 1) = NULL; if (j - 1 == hpfs_inode->i_rddir_off) { kfree(hpfs_inode->i_rddir_off); hpfs_inode->i_rddir_off = NULL; } return; not_f: /*printk("HPFS: warning: position pointer %p->%08x not found\n", pos, (int)*pos);*/ return; } static void for_all_poss(struct inode *inode, void (*f)(loff_t *, loff_t, loff_t), loff_t p1, loff_t p2) { struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); loff_t **i; if (!hpfs_inode->i_rddir_off) return; for (i = hpfs_inode->i_rddir_off; *i; i++) (*f)(*i, p1, p2); return; } static void hpfs_pos_subst(loff_t *p, loff_t f, loff_t t) { if (*p == f) *p = t; } /*void hpfs_hpfs_pos_substd(loff_t *p, loff_t f, loff_t t) { if ((*p & ~0x3f) == (f & ~0x3f)) *p = (t & ~0x3f) | (*p & 0x3f); }*/ static void hpfs_pos_ins(loff_t *p, loff_t d, loff_t c) { if ((*p & ~0x3f) == (d & ~0x3f) && (*p & 0x3f) >= (d & 0x3f)) { int n = (*p & 0x3f) + c; if (n > 0x3f) printk("HPFS: hpfs_pos_ins: %08x + %d\n", (int)*p, (int)c >> 8); else *p = (*p & ~0x3f) | n; } } static void hpfs_pos_del(loff_t *p, loff_t d, loff_t c) { if ((*p & ~0x3f) == (d & ~0x3f) && (*p & 0x3f) >= (d & 0x3f)) { int n = (*p & 0x3f) - c; if (n < 1) printk("HPFS: hpfs_pos_ins: %08x - %d\n", (int)*p, (int)c >> 8); else *p = (*p & ~0x3f) | n; } } static struct hpfs_dirent *dnode_pre_last_de(struct dnode *d) { struct hpfs_dirent *de, *de_end, *dee = NULL, *deee = NULL; de_end = dnode_end_de(d); for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { deee = dee; dee = de; } return deee; } static struct hpfs_dirent *dnode_last_de(struct dnode *d) { struct hpfs_dirent *de, *de_end, *dee = NULL; de_end = dnode_end_de(d); for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { dee = de; } return dee; } static void set_last_pointer(struct super_block *s, struct dnode *d, dnode_secno ptr) { struct hpfs_dirent *de; if (!(de = dnode_last_de(d))) { hpfs_error(s, "set_last_pointer: empty dnode %08x", le32_to_cpu(d->self)); return; } if (hpfs_sb(s)->sb_chk) { if (de->down) { hpfs_error(s, "set_last_pointer: dnode %08x has already last pointer 
%08x", le32_to_cpu(d->self), de_down_pointer(de)); return; } if (le16_to_cpu(de->length) != 32) { hpfs_error(s, "set_last_pointer: bad last dirent in dnode %08x", le32_to_cpu(d->self)); return; } } if (ptr) { le32_add_cpu(&d->first_free, 4); if (le32_to_cpu(d->first_free) > 2048) { hpfs_error(s, "set_last_pointer: too long dnode %08x", le32_to_cpu(d->self)); le32_add_cpu(&d->first_free, -4); return; } de->length = cpu_to_le16(36); de->down = 1; *(__le32 *)((char *)de + 32) = cpu_to_le32(ptr); } } /* Add an entry to dnode and don't care if it grows over 2048 bytes */ struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d, const unsigned char *name, unsigned namelen, secno down_ptr) { struct hpfs_dirent *de; struct hpfs_dirent *de_end = dnode_end_de(d); unsigned d_size = de_size(namelen, down_ptr); for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { int c = hpfs_compare_names(s, name, namelen, de->name, de->namelen, de->last); if (!c) { hpfs_error(s, "name (%c,%d) already exists in dnode %08x", *name, namelen, le32_to_cpu(d->self)); return NULL; } if (c < 0) break; } memmove((char *)de + d_size, de, (char *)de_end - (char *)de); memset(de, 0, d_size); if (down_ptr) { *(__le32 *)((char *)de + d_size - 4) = cpu_to_le32(down_ptr); de->down = 1; } de->length = cpu_to_le16(d_size); de->not_8x3 = hpfs_is_name_long(name, namelen); de->namelen = namelen; memcpy(de->name, name, namelen); le32_add_cpu(&d->first_free, d_size); return de; } /* Delete dirent and don't care about its subtree */ static void hpfs_delete_de(struct super_block *s, struct dnode *d, struct hpfs_dirent *de) { if (de->last) { hpfs_error(s, "attempt to delete last dirent in dnode %08x", le32_to_cpu(d->self)); return; } d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) - le16_to_cpu(de->length)); memmove(de, de_next_de(de), le32_to_cpu(d->first_free) + (char *)d - (char *)de); } static void fix_up_ptrs(struct super_block *s, struct dnode *d) { struct hpfs_dirent *de; struct hpfs_dirent *de_end = dnode_end_de(d); dnode_secno dno = le32_to_cpu(d->self); for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) if (de->down) { struct quad_buffer_head qbh; struct dnode *dd; if ((dd = hpfs_map_dnode(s, de_down_pointer(de), &qbh))) { if (le32_to_cpu(dd->up) != dno || dd->root_dnode) { dd->up = cpu_to_le32(dno); dd->root_dnode = 0; hpfs_mark_4buffers_dirty(&qbh); } hpfs_brelse4(&qbh); } } } /* Add an entry to dnode and do dnode splitting if required */ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno, const unsigned char *name, unsigned namelen, struct hpfs_dirent *new_de, dnode_secno down_ptr) { struct quad_buffer_head qbh, qbh1, qbh2; struct dnode *d, *ad, *rd, *nd = NULL; dnode_secno adno, rdno; struct hpfs_dirent *de; struct hpfs_dirent nde; unsigned char *nname; int h; int pos; struct buffer_head *bh; struct fnode *fnode; int c1, c2 = 0; if (!(nname = kmalloc(256, GFP_NOFS))) { printk("HPFS: out of memory, can't add to dnode\n"); return 1; } go_up: if (namelen >= 256) { hpfs_error(i->i_sb, "hpfs_add_to_dnode: namelen == %d", namelen); kfree(nd); kfree(nname); return 1; } if (!(d = hpfs_map_dnode(i->i_sb, dno, &qbh))) { kfree(nd); kfree(nname); return 1; } go_up_a: if (hpfs_sb(i->i_sb)->sb_chk) if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "hpfs_add_to_dnode")) { hpfs_brelse4(&qbh); kfree(nd); kfree(nname); return 1; } if (le32_to_cpu(d->first_free) + de_size(namelen, down_ptr) <= 2048) { loff_t t; copy_de(de=hpfs_add_de(i->i_sb, d, name, namelen, down_ptr), new_de); t = get_pos(d, 
de); for_all_poss(i, hpfs_pos_ins, t, 1); for_all_poss(i, hpfs_pos_subst, 4, t); for_all_poss(i, hpfs_pos_subst, 5, t + 1); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); kfree(nd); kfree(nname); return 0; } if (!nd) if (!(nd = kmalloc(0x924, GFP_NOFS))) { /* 0x924 is a max size of dnode after adding a dirent with max name length. We alloc this only once. There must not be any error while splitting dnodes, otherwise the whole directory, not only file we're adding, would be lost. */ printk("HPFS: out of memory for dnode splitting\n"); hpfs_brelse4(&qbh); kfree(nname); return 1; } memcpy(nd, d, le32_to_cpu(d->first_free)); copy_de(de = hpfs_add_de(i->i_sb, nd, name, namelen, down_ptr), new_de); for_all_poss(i, hpfs_pos_ins, get_pos(nd, de), 1); h = ((char *)dnode_last_de(nd) - (char *)nd) / 2 + 10; if (!(ad = hpfs_alloc_dnode(i->i_sb, le32_to_cpu(d->up), &adno, &qbh1))) { hpfs_error(i->i_sb, "unable to alloc dnode - dnode tree will be corrupted"); hpfs_brelse4(&qbh); kfree(nd); kfree(nname); return 1; } i->i_size += 2048; i->i_blocks += 4; pos = 1; for (de = dnode_first_de(nd); (char *)de_next_de(de) - (char *)nd < h; de = de_next_de(de)) { copy_de(hpfs_add_de(i->i_sb, ad, de->name, de->namelen, de->down ? de_down_pointer(de) : 0), de); for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | pos, ((loff_t)adno << 4) | pos); pos++; } copy_de(new_de = &nde, de); memcpy(nname, de->name, de->namelen); name = nname; namelen = de->namelen; for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | pos, 4); down_ptr = adno; set_last_pointer(i->i_sb, ad, de->down ? de_down_pointer(de) : 0); de = de_next_de(de); memmove((char *)nd + 20, de, le32_to_cpu(nd->first_free) + (char *)nd - (char *)de); le32_add_cpu(&nd->first_free, -((char *)de - (char *)nd - 20)); memcpy(d, nd, le32_to_cpu(nd->first_free)); for_all_poss(i, hpfs_pos_del, (loff_t)dno << 4, pos); fix_up_ptrs(i->i_sb, ad); if (!d->root_dnode) { ad->up = d->up; dno = le32_to_cpu(ad->up); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); goto go_up; } if (!(rd = hpfs_alloc_dnode(i->i_sb, le32_to_cpu(d->up), &rdno, &qbh2))) { hpfs_error(i->i_sb, "unable to alloc dnode - dnode tree will be corrupted"); hpfs_brelse4(&qbh); hpfs_brelse4(&qbh1); kfree(nd); kfree(nname); return 1; } i->i_size += 2048; i->i_blocks += 4; rd->root_dnode = 1; rd->up = d->up; if (!(fnode = hpfs_map_fnode(i->i_sb, le32_to_cpu(d->up), &bh))) { hpfs_free_dnode(i->i_sb, rdno); hpfs_brelse4(&qbh); hpfs_brelse4(&qbh1); hpfs_brelse4(&qbh2); kfree(nd); kfree(nname); return 1; } fnode->u.external[0].disk_secno = cpu_to_le32(rdno); mark_buffer_dirty(bh); brelse(bh); hpfs_i(i)->i_dno = rdno; d->up = ad->up = cpu_to_le32(rdno); d->root_dnode = ad->root_dnode = 0; hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); qbh = qbh2; set_last_pointer(i->i_sb, rd, dno); dno = rdno; d = rd; goto go_up_a; } /* * Add an entry to directory btree. * I hate such crazy directory structure. * It's easy to read but terrible to write. * I wrote this directory code 4 times. * I hope, now it's finally bug-free. 
*/ int hpfs_add_dirent(struct inode *i, const unsigned char *name, unsigned namelen, struct hpfs_dirent *new_de) { struct hpfs_inode_info *hpfs_inode = hpfs_i(i); struct dnode *d; struct hpfs_dirent *de, *de_end; struct quad_buffer_head qbh; dnode_secno dno; int c; int c1, c2 = 0; dno = hpfs_inode->i_dno; down: if (hpfs_sb(i->i_sb)->sb_chk) if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "hpfs_add_dirent")) return 1; if (!(d = hpfs_map_dnode(i->i_sb, dno, &qbh))) return 1; de_end = dnode_end_de(d); for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { if (!(c = hpfs_compare_names(i->i_sb, name, namelen, de->name, de->namelen, de->last))) { hpfs_brelse4(&qbh); return -1; } if (c < 0) { if (de->down) { dno = de_down_pointer(de); hpfs_brelse4(&qbh); goto down; } break; } } hpfs_brelse4(&qbh); if (hpfs_check_free_dnodes(i->i_sb, FREE_DNODES_ADD)) { c = 1; goto ret; } i->i_version++; c = hpfs_add_to_dnode(i, dno, name, namelen, new_de, 0); ret: return c; } /* * Find dirent with higher name in 'from' subtree and move it to 'to' dnode. * Return the dnode we moved from (to be checked later if it's empty) */ static secno move_to_top(struct inode *i, dnode_secno from, dnode_secno to) { dnode_secno dno, ddno; dnode_secno chk_up = to; struct dnode *dnode; struct quad_buffer_head qbh; struct hpfs_dirent *de, *nde; int a; loff_t t; int c1, c2 = 0; dno = from; while (1) { if (hpfs_sb(i->i_sb)->sb_chk) if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "move_to_top")) return 0; if (!(dnode = hpfs_map_dnode(i->i_sb, dno, &qbh))) return 0; if (hpfs_sb(i->i_sb)->sb_chk) { if (le32_to_cpu(dnode->up) != chk_up) { hpfs_error(i->i_sb, "move_to_top: up pointer from %08x should be %08x, is %08x", dno, chk_up, le32_to_cpu(dnode->up)); hpfs_brelse4(&qbh); return 0; } chk_up = dno; } if (!(de = dnode_last_de(dnode))) { hpfs_error(i->i_sb, "move_to_top: dnode %08x has no last de", dno); hpfs_brelse4(&qbh); return 0; } if (!de->down) break; dno = de_down_pointer(de); hpfs_brelse4(&qbh); } while (!(de = dnode_pre_last_de(dnode))) { dnode_secno up = le32_to_cpu(dnode->up); hpfs_brelse4(&qbh); hpfs_free_dnode(i->i_sb, dno); i->i_size -= 2048; i->i_blocks -= 4; for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | 1, 5); if (up == to) return to; if (!(dnode = hpfs_map_dnode(i->i_sb, up, &qbh))) return 0; if (dnode->root_dnode) { hpfs_error(i->i_sb, "move_to_top: got to root_dnode while moving from %08x to %08x", from, to); hpfs_brelse4(&qbh); return 0; } de = dnode_last_de(dnode); if (!de || !de->down) { hpfs_error(i->i_sb, "move_to_top: dnode %08x doesn't point down to %08x", up, dno); hpfs_brelse4(&qbh); return 0; } le32_add_cpu(&dnode->first_free, -4); le16_add_cpu(&de->length, -4); de->down = 0; hpfs_mark_4buffers_dirty(&qbh); dno = up; } t = get_pos(dnode, de); for_all_poss(i, hpfs_pos_subst, t, 4); for_all_poss(i, hpfs_pos_subst, t + 1, 5); if (!(nde = kmalloc(le16_to_cpu(de->length), GFP_NOFS))) { hpfs_error(i->i_sb, "out of memory for dirent - directory will be corrupted"); hpfs_brelse4(&qbh); return 0; } memcpy(nde, de, le16_to_cpu(de->length)); ddno = de->down ? 
de_down_pointer(de) : 0; hpfs_delete_de(i->i_sb, dnode, de); set_last_pointer(i->i_sb, dnode, ddno); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); a = hpfs_add_to_dnode(i, to, nde->name, nde->namelen, nde, from); kfree(nde); if (a) return 0; return dno; } /* * Check if a dnode is empty and delete it from the tree * (chkdsk doesn't like empty dnodes) */ static void delete_empty_dnode(struct inode *i, dnode_secno dno) { struct hpfs_inode_info *hpfs_inode = hpfs_i(i); struct quad_buffer_head qbh; struct dnode *dnode; dnode_secno down, up, ndown; int p; struct hpfs_dirent *de; int c1, c2 = 0; try_it_again: if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "delete_empty_dnode")) return; if (!(dnode = hpfs_map_dnode(i->i_sb, dno, &qbh))) return; if (le32_to_cpu(dnode->first_free) > 56) goto end; if (le32_to_cpu(dnode->first_free) == 52 || le32_to_cpu(dnode->first_free) == 56) { struct hpfs_dirent *de_end; int root = dnode->root_dnode; up = le32_to_cpu(dnode->up); de = dnode_first_de(dnode); down = de->down ? de_down_pointer(de) : 0; if (hpfs_sb(i->i_sb)->sb_chk) if (root && !down) { hpfs_error(i->i_sb, "delete_empty_dnode: root dnode %08x is empty", dno); goto end; } hpfs_brelse4(&qbh); hpfs_free_dnode(i->i_sb, dno); i->i_size -= 2048; i->i_blocks -= 4; if (root) { struct fnode *fnode; struct buffer_head *bh; struct dnode *d1; struct quad_buffer_head qbh1; if (hpfs_sb(i->i_sb)->sb_chk) if (up != i->i_ino) { hpfs_error(i->i_sb, "bad pointer to fnode, dnode %08x, pointing to %08x, should be %08lx", dno, up, (unsigned long)i->i_ino); return; } if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) { d1->up = cpu_to_le32(up); d1->root_dnode = 1; hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } if ((fnode = hpfs_map_fnode(i->i_sb, up, &bh))) { fnode->u.external[0].disk_secno = cpu_to_le32(down); mark_buffer_dirty(bh); brelse(bh); } hpfs_inode->i_dno = down; for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | 1, (loff_t) 12); return; } if (!(dnode = hpfs_map_dnode(i->i_sb, up, &qbh))) return; p = 1; de_end = dnode_end_de(dnode); for (de = dnode_first_de(dnode); de < de_end; de = de_next_de(de), p++) if (de->down) if (de_down_pointer(de) == dno) goto fnd; hpfs_error(i->i_sb, "delete_empty_dnode: pointer to dnode %08x not found in dnode %08x", dno, up); goto end; fnd: for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | 1, ((loff_t)up << 4) | p); if (!down) { de->down = 0; le16_add_cpu(&de->length, -4); le32_add_cpu(&dnode->first_free, -4); memmove(de_next_de(de), (char *)de_next_de(de) + 4, (char *)dnode + le32_to_cpu(dnode->first_free) - (char *)de_next_de(de)); } else { struct dnode *d1; struct quad_buffer_head qbh1; *(dnode_secno *) ((void *) de + le16_to_cpu(de->length) - 4) = down; if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) { d1->up = cpu_to_le32(up); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } } } else { hpfs_error(i->i_sb, "delete_empty_dnode: dnode %08x, first_free == %03x", dno, le32_to_cpu(dnode->first_free)); goto end; } if (!de->last) { struct hpfs_dirent *de_next = de_next_de(de); struct hpfs_dirent *de_cp; struct dnode *d1; struct quad_buffer_head qbh1; if (!de_next->down) goto endm; ndown = de_down_pointer(de_next); if (!(de_cp = kmalloc(le16_to_cpu(de->length), GFP_NOFS))) { printk("HPFS: out of memory for dtree balancing\n"); goto endm; } memcpy(de_cp, de, le16_to_cpu(de->length)); hpfs_delete_de(i->i_sb, dnode, de); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | p, 4); for_all_poss(i, hpfs_pos_del, 
((loff_t)up << 4) | p, 1); if (de_cp->down) if ((d1 = hpfs_map_dnode(i->i_sb, de_down_pointer(de_cp), &qbh1))) { d1->up = cpu_to_le32(ndown); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } hpfs_add_to_dnode(i, ndown, de_cp->name, de_cp->namelen, de_cp, de_cp->down ? de_down_pointer(de_cp) : 0); /*printk("UP-TO-DNODE: %08x (ndown = %08x, down = %08x, dno = %08x)\n", up, ndown, down, dno);*/ dno = up; kfree(de_cp); goto try_it_again; } else { struct hpfs_dirent *de_prev = dnode_pre_last_de(dnode); struct hpfs_dirent *de_cp; struct dnode *d1; struct quad_buffer_head qbh1; dnode_secno dlp; if (!de_prev) { hpfs_error(i->i_sb, "delete_empty_dnode: empty dnode %08x", up); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); dno = up; goto try_it_again; } if (!de_prev->down) goto endm; ndown = de_down_pointer(de_prev); if ((d1 = hpfs_map_dnode(i->i_sb, ndown, &qbh1))) { struct hpfs_dirent *del = dnode_last_de(d1); dlp = del->down ? de_down_pointer(del) : 0; if (!dlp && down) { if (le32_to_cpu(d1->first_free) > 2044) { if (hpfs_sb(i->i_sb)->sb_chk >= 2) { printk("HPFS: warning: unbalanced dnode tree, see hpfs.txt 4 more info\n"); printk("HPFS: warning: terminating balancing operation\n"); } hpfs_brelse4(&qbh1); goto endm; } if (hpfs_sb(i->i_sb)->sb_chk >= 2) { printk("HPFS: warning: unbalanced dnode tree, see hpfs.txt 4 more info\n"); printk("HPFS: warning: goin'on\n"); } le16_add_cpu(&del->length, 4); del->down = 1; le32_add_cpu(&d1->first_free, 4); } if (dlp && !down) { le16_add_cpu(&del->length, -4); del->down = 0; le32_add_cpu(&d1->first_free, -4); } else if (down) *(__le32 *) ((void *) del + le16_to_cpu(del->length) - 4) = cpu_to_le32(down); } else goto endm; if (!(de_cp = kmalloc(le16_to_cpu(de_prev->length), GFP_NOFS))) { printk("HPFS: out of memory for dtree balancing\n"); hpfs_brelse4(&qbh1); goto endm; } hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); memcpy(de_cp, de_prev, le16_to_cpu(de_prev->length)); hpfs_delete_de(i->i_sb, dnode, de_prev); if (!de_prev->down) { le16_add_cpu(&de_prev->length, 4); de_prev->down = 1; le32_add_cpu(&dnode->first_free, 4); } *(__le32 *) ((void *) de_prev + le16_to_cpu(de_prev->length) - 4) = cpu_to_le32(ndown); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | (p - 1), 4); for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | p, ((loff_t)up << 4) | (p - 1)); if (down) if ((d1 = hpfs_map_dnode(i->i_sb, de_down_pointer(de), &qbh1))) { d1->up = cpu_to_le32(ndown); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } hpfs_add_to_dnode(i, ndown, de_cp->name, de_cp->namelen, de_cp, dlp); dno = up; kfree(de_cp); goto try_it_again; } endm: hpfs_mark_4buffers_dirty(&qbh); end: hpfs_brelse4(&qbh); } /* Delete dirent from directory */ int hpfs_remove_dirent(struct inode *i, dnode_secno dno, struct hpfs_dirent *de, struct quad_buffer_head *qbh, int depth) { struct dnode *dnode = qbh->data; dnode_secno down = 0; loff_t t; if (de->first || de->last) { hpfs_error(i->i_sb, "hpfs_remove_dirent: attempt to delete first or last dirent in dnode %08x", dno); hpfs_brelse4(qbh); return 1; } if (de->down) down = de_down_pointer(de); if (depth && (de->down || (de == dnode_first_de(dnode) && de_next_de(de)->last))) { if (hpfs_check_free_dnodes(i->i_sb, FREE_DNODES_DEL)) { hpfs_brelse4(qbh); return 2; } } i->i_version++; for_all_poss(i, hpfs_pos_del, (t = get_pos(dnode, de)) + 1, 1); hpfs_delete_de(i->i_sb, dnode, de); hpfs_mark_4buffers_dirty(qbh); hpfs_brelse4(qbh); if (down) { dnode_secno a = move_to_top(i, 
down, dno); for_all_poss(i, hpfs_pos_subst, 5, t); if (a) delete_empty_dnode(i, a); return !a; } delete_empty_dnode(i, dno); return 0; } void hpfs_count_dnodes(struct super_block *s, dnode_secno dno, int *n_dnodes, int *n_subdirs, int *n_items) { struct dnode *dnode; struct quad_buffer_head qbh; struct hpfs_dirent *de; dnode_secno ptr, odno = 0; int c1, c2 = 0; int d1, d2 = 0; go_down: if (n_dnodes) (*n_dnodes)++; if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, dno, &c1, &c2, "hpfs_count_dnodes #1")) return; ptr = 0; go_up: if (!(dnode = hpfs_map_dnode(s, dno, &qbh))) return; if (hpfs_sb(s)->sb_chk) if (odno && odno != -1 && le32_to_cpu(dnode->up) != odno) hpfs_error(s, "hpfs_count_dnodes: bad up pointer; dnode %08x, down %08x points to %08x", odno, dno, le32_to_cpu(dnode->up)); de = dnode_first_de(dnode); if (ptr) while(1) { if (de->down) if (de_down_pointer(de) == ptr) goto process_de; if (de->last) { hpfs_brelse4(&qbh); hpfs_error(s, "hpfs_count_dnodes: pointer to dnode %08x not found in dnode %08x, got here from %08x", ptr, dno, odno); return; } de = de_next_de(de); } next_de: if (de->down) { odno = dno; dno = de_down_pointer(de); hpfs_brelse4(&qbh); goto go_down; } process_de: if (!de->first && !de->last && de->directory && n_subdirs) (*n_subdirs)++; if (!de->first && !de->last && n_items) (*n_items)++; if ((de = de_next_de(de)) < dnode_end_de(dnode)) goto next_de; ptr = dno; dno = le32_to_cpu(dnode->up); if (dnode->root_dnode) { hpfs_brelse4(&qbh); return; } hpfs_brelse4(&qbh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, ptr, &d1, &d2, "hpfs_count_dnodes #2")) return; odno = -1; goto go_up; } static struct hpfs_dirent *map_nth_dirent(struct super_block *s, dnode_secno dno, int n, struct quad_buffer_head *qbh, struct dnode **dn) { int i; struct hpfs_dirent *de, *de_end; struct dnode *dnode; dnode = hpfs_map_dnode(s, dno, qbh); if (!dnode) return NULL; if (dn) *dn=dnode; de = dnode_first_de(dnode); de_end = dnode_end_de(dnode); for (i = 1; de < de_end; i++, de = de_next_de(de)) { if (i == n) { return de; } if (de->last) break; } hpfs_brelse4(qbh); hpfs_error(s, "map_nth_dirent: n too high; dnode = %08x, requested %08x", dno, n); return NULL; } dnode_secno hpfs_de_as_down_as_possible(struct super_block *s, dnode_secno dno) { struct quad_buffer_head qbh; dnode_secno d = dno; dnode_secno up = 0; struct hpfs_dirent *de; int c1, c2 = 0; again: if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, d, &c1, &c2, "hpfs_de_as_down_as_possible")) return d; if (!(de = map_nth_dirent(s, d, 1, &qbh, NULL))) return dno; if (hpfs_sb(s)->sb_chk) if (up && le32_to_cpu(((struct dnode *)qbh.data)->up) != up) hpfs_error(s, "hpfs_de_as_down_as_possible: bad up pointer; dnode %08x, down %08x points to %08x", up, d, le32_to_cpu(((struct dnode *)qbh.data)->up)); if (!de->down) { hpfs_brelse4(&qbh); return d; } up = d; d = de_down_pointer(de); hpfs_brelse4(&qbh); goto again; } struct hpfs_dirent *map_pos_dirent(struct inode *inode, loff_t *posp, struct quad_buffer_head *qbh) { loff_t pos; unsigned c; dnode_secno dno; struct hpfs_dirent *de, *d; struct hpfs_dirent *up_de; struct hpfs_dirent *end_up_de; struct dnode *dnode; struct dnode *up_dnode; struct quad_buffer_head qbh0; pos = *posp; dno = pos >> 6 << 2; pos &= 077; if (!(de = map_nth_dirent(inode->i_sb, dno, pos, qbh, &dnode))) goto bail; /* Going to the next dirent */ if ((d = de_next_de(de)) < dnode_end_de(dnode)) { if (!(++*posp & 077)) { hpfs_error(inode->i_sb, "map_pos_dirent: pos crossed dnode boundary; pos = %08llx", (unsigned long long)*posp); goto 
bail; } /* We're going down the tree */ if (d->down) { *posp = ((loff_t) hpfs_de_as_down_as_possible(inode->i_sb, de_down_pointer(d)) << 4) + 1; } return de; } /* Going up */ if (dnode->root_dnode) goto bail; if (!(up_dnode = hpfs_map_dnode(inode->i_sb, le32_to_cpu(dnode->up), &qbh0))) goto bail; end_up_de = dnode_end_de(up_dnode); c = 0; for (up_de = dnode_first_de(up_dnode); up_de < end_up_de; up_de = de_next_de(up_de)) { if (!(++c & 077)) hpfs_error(inode->i_sb, "map_pos_dirent: pos crossed dnode boundary; dnode = %08x", le32_to_cpu(dnode->up)); if (up_de->down && de_down_pointer(up_de) == dno) { *posp = ((loff_t) le32_to_cpu(dnode->up) << 4) + c; hpfs_brelse4(&qbh0); return de; } } hpfs_error(inode->i_sb, "map_pos_dirent: pointer to dnode %08x not found in parent dnode %08x", dno, le32_to_cpu(dnode->up)); hpfs_brelse4(&qbh0); bail: *posp = 12; return de; } /* Find a dirent in tree */ struct hpfs_dirent *map_dirent(struct inode *inode, dnode_secno dno, const unsigned char *name, unsigned len, dnode_secno *dd, struct quad_buffer_head *qbh) { struct dnode *dnode; struct hpfs_dirent *de; struct hpfs_dirent *de_end; int c1, c2 = 0; if (!S_ISDIR(inode->i_mode)) hpfs_error(inode->i_sb, "map_dirent: not a directory\n"); again: if (hpfs_sb(inode->i_sb)->sb_chk) if (hpfs_stop_cycles(inode->i_sb, dno, &c1, &c2, "map_dirent")) return NULL; if (!(dnode = hpfs_map_dnode(inode->i_sb, dno, qbh))) return NULL; de_end = dnode_end_de(dnode); for (de = dnode_first_de(dnode); de < de_end; de = de_next_de(de)) { int t = hpfs_compare_names(inode->i_sb, name, len, de->name, de->namelen, de->last); if (!t) { if (dd) *dd = dno; return de; } if (t < 0) { if (de->down) { dno = de_down_pointer(de); hpfs_brelse4(qbh); goto again; } break; } } hpfs_brelse4(qbh); return NULL; } /* * Remove empty directory. In normal cases it is only one dnode with two * entries, but we must handle also such obscure cases when it's a tree * of empty dnodes. */ void hpfs_remove_dtree(struct super_block *s, dnode_secno dno) { struct quad_buffer_head qbh; struct dnode *dnode; struct hpfs_dirent *de; dnode_secno d1, d2, rdno = dno; while (1) { if (!(dnode = hpfs_map_dnode(s, dno, &qbh))) return; de = dnode_first_de(dnode); if (de->last) { if (de->down) d1 = de_down_pointer(de); else goto error; hpfs_brelse4(&qbh); hpfs_free_dnode(s, dno); dno = d1; } else break; } if (!de->first) goto error; d1 = de->down ? de_down_pointer(de) : 0; de = de_next_de(de); if (!de->last) goto error; d2 = de->down ? de_down_pointer(de) : 0; hpfs_brelse4(&qbh); hpfs_free_dnode(s, dno); do { while (d1) { if (!(dnode = hpfs_map_dnode(s, dno = d1, &qbh))) return; de = dnode_first_de(dnode); if (!de->last) goto error; d1 = de->down ? de_down_pointer(de) : 0; hpfs_brelse4(&qbh); hpfs_free_dnode(s, dno); } d1 = d2; d2 = 0; } while (d1); return; error: hpfs_brelse4(&qbh); hpfs_free_dnode(s, dno); hpfs_error(s, "directory %08x is corrupted or not empty", rdno); } /* * Find dirent for specified fnode. Use truncated 15-char name in fnode as * a help for searching. 
*/ struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno, struct fnode *f, struct quad_buffer_head *qbh) { unsigned char *name1; unsigned char *name2; int name1len, name2len; struct dnode *d; dnode_secno dno, downd; struct fnode *upf; struct buffer_head *bh; struct hpfs_dirent *de, *de_end; int c; int c1, c2 = 0; int d1, d2 = 0; name1 = f->name; if (!(name2 = kmalloc(256, GFP_NOFS))) { printk("HPFS: out of memory, can't map dirent\n"); return NULL; } if (f->len <= 15) memcpy(name2, name1, name1len = name2len = f->len); else { memcpy(name2, name1, 15); memset(name2 + 15, 0xff, 256 - 15); /*name2[15] = 0xff;*/ name1len = 15; name2len = 256; } if (!(upf = hpfs_map_fnode(s, le32_to_cpu(f->up), &bh))) { kfree(name2); return NULL; } if (!fnode_is_dir(upf)) { brelse(bh); hpfs_error(s, "fnode %08x has non-directory parent %08x", fno, le32_to_cpu(f->up)); kfree(name2); return NULL; } dno = le32_to_cpu(upf->u.external[0].disk_secno); brelse(bh); go_down: downd = 0; go_up: if (!(d = hpfs_map_dnode(s, dno, qbh))) { kfree(name2); return NULL; } de_end = dnode_end_de(d); de = dnode_first_de(d); if (downd) { while (de < de_end) { if (de->down) if (de_down_pointer(de) == downd) goto f; de = de_next_de(de); } hpfs_error(s, "pointer to dnode %08x not found in dnode %08x", downd, dno); hpfs_brelse4(qbh); kfree(name2); return NULL; } next_de: if (le32_to_cpu(de->fnode) == fno) { kfree(name2); return de; } c = hpfs_compare_names(s, name1, name1len, de->name, de->namelen, de->last); if (c < 0 && de->down) { dno = de_down_pointer(de); hpfs_brelse4(qbh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, dno, &c1, &c2, "map_fnode_dirent #1")) { kfree(name2); return NULL; } goto go_down; } f: if (le32_to_cpu(de->fnode) == fno) { kfree(name2); return de; } c = hpfs_compare_names(s, name2, name2len, de->name, de->namelen, de->last); if (c < 0 && !de->last) goto not_found; if ((de = de_next_de(de)) < de_end) goto next_de; if (d->root_dnode) goto not_found; downd = dno; dno = le32_to_cpu(d->up); hpfs_brelse4(qbh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, downd, &d1, &d2, "map_fnode_dirent #2")) { kfree(name2); return NULL; } goto go_up; not_found: hpfs_brelse4(qbh); hpfs_error(s, "dirent for fnode %08x not found", fno); kfree(name2); return NULL; }
wusijie/android_kernel_oneplus_msm8994
fs/hpfs/dnode.c
C
gpl-2.0
31,194
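The dnode.c record above encodes every readdir position as the dnode sector number shifted left by four with the entry index in the low bits (get_pos), and decodes it again in map_pos_dirent with pos >> 6 << 2 and pos & 077. A small standalone sketch of that encode/decode pair, with a hypothetical dnode number, is shown below.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t pos_t;	/* stand-in for the kernel's loff_t */

/* Encode: position = (dnode sector << 4) | entry index, as in get_pos().
 * Dnode sectors are multiples of 4, so the low 6 bits of the position
 * are free to hold the index. */
static pos_t hpfs_encode_pos(uint32_t dno, unsigned idx)
{
	return ((pos_t)dno << 4) | idx;
}

/* Decode, mirroring map_pos_dirent(): dno = pos >> 6 << 2, idx = pos & 077 */
static void hpfs_decode_pos(pos_t pos, uint32_t *dno, unsigned *idx)
{
	*dno = (uint32_t)(pos >> 6 << 2);
	*idx = (unsigned)(pos & 077);
}

int main(void)
{
	uint32_t dno;
	unsigned idx;
	pos_t pos = hpfs_encode_pos(0x124, 5);	/* hypothetical dnode 0x124, entry 5 */

	hpfs_decode_pos(pos, &dno, &idx);
	printf("pos=0x%llx dno=0x%x idx=%u\n", (unsigned long long)pos, dno, idx);
	return 0;
}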
/* * YAFFS: Yet another FFS. A NAND-flash specific file system. * * Copyright (C) 2002-2010 Aleph One Ltd. * for Toby Churchill Ltd and Brightstar Engineering * * Created by Charles Manning <charles@aleph1.co.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* * This module provides the interface between yaffs_nand.c and the * MTD API. This version is used when the MTD interface supports the * 'mtd_oob_ops' style calls to read_oob and write_oob, circa 2.6.17, * and we have small-page NAND device. * * These functions are invoked via function pointers in yaffs_nand.c. * This replaces functionality provided by functions in yaffs_mtdif.c * and the yaffs_tags compatability functions in yaffs_tagscompat.c that are * called in yaffs_mtdif.c when the function pointers are NULL. * We assume the MTD layer is performing ECC (use_nand_ecc is true). */ #include "yportenv.h" #include "yaffs_trace.h" #include "yaffs_guts.h" #include "yaffs_packedtags1.h" #include "yaffs_tagscompat.h" /* for yaffs_calc_tags_ecc */ #include "yaffs_linux.h" #include "linux/kernel.h" #include "linux/version.h" #include "linux/types.h" #include "linux/mtd/mtd.h" #ifndef CONFIG_YAFFS_9BYTE_TAGS # define YTAG1_SIZE 8 #else # define YTAG1_SIZE 9 #endif /* Write a chunk (page) of data to NAND. * * Caller always provides ExtendedTags data which are converted to a more * compact (packed) form for storage in NAND. A mini-ECC runs over the * contents of the tags meta-data; used to valid the tags when read. * * - Pack ExtendedTags to packed_tags1 form * - Compute mini-ECC for packed_tags1 * - Write data and packed tags to NAND. * * Note: Due to the use of the packed_tags1 meta-data which does not include * a full sequence number (as found in the larger packed_tags2 form) it is * necessary for Yaffs to re-write a chunk/page (just once) to mark it as * discarded and dirty. This is not ideal: newer NAND parts are supposed * to be written just once. When Yaffs performs this operation, this * function is called with a NULL data pointer -- calling MTD write_oob * without data is valid usage (2.6.17). * * Any underlying MTD error results in YAFFS_FAIL. * Returns YAFFS_OK or YAFFS_FAIL. */ int nandmtd1_write_chunk_tags(struct yaffs_dev *dev, int nand_chunk, const u8 * data, const struct yaffs_ext_tags *etags) { struct mtd_info *mtd = yaffs_dev_to_mtd(dev); int chunk_bytes = dev->data_bytes_per_chunk; loff_t addr = ((loff_t) nand_chunk) * chunk_bytes; struct mtd_oob_ops ops; struct yaffs_packed_tags1 pt1; int retval; /* we assume that packed_tags1 and struct yaffs_tags are compatible */ compile_time_assertion(sizeof(struct yaffs_packed_tags1) == 12); compile_time_assertion(sizeof(struct yaffs_tags) == 8); yaffs_pack_tags1(&pt1, etags); yaffs_calc_tags_ecc((struct yaffs_tags *)&pt1); /* When deleting a chunk, the upper layer provides only skeletal * etags, one with is_deleted set. However, we need to update the * tags, not erase them completely. So we use the NAND write property * that only zeroed-bits stick and set tag bytes to all-ones and * zero just the (not) deleted bit. 
*/ #ifndef CONFIG_YAFFS_9BYTE_TAGS if (etags->is_deleted) { memset(&pt1, 0xff, 8); /* clear delete status bit to indicate deleted */ pt1.deleted = 0; } #else ((u8 *) & pt1)[8] = 0xff; if (etags->is_deleted) { memset(&pt1, 0xff, 8); /* zero page_status byte to indicate deleted */ ((u8 *) & pt1)[8] = 0; } #endif memset(&ops, 0, sizeof(ops)); ops.mode = MTD_OOB_AUTO; ops.len = (data) ? chunk_bytes : 0; ops.ooblen = YTAG1_SIZE; ops.datbuf = (u8 *) data; ops.oobbuf = (u8 *) & pt1; retval = mtd->write_oob(mtd, addr, &ops); if (retval) { yaffs_trace(YAFFS_TRACE_MTD, "write_oob failed, chunk %d, mtd error %d", nand_chunk, retval); } return retval ? YAFFS_FAIL : YAFFS_OK; } /* Return with empty ExtendedTags but add ecc_result. */ static int rettags(struct yaffs_ext_tags *etags, int ecc_result, int retval) { if (etags) { memset(etags, 0, sizeof(*etags)); etags->ecc_result = ecc_result; } return retval; } /* Read a chunk (page) from NAND. * * Caller expects ExtendedTags data to be usable even on error; that is, * all members except ecc_result and block_bad are zeroed. * * - Check ECC results for data (if applicable) * - Check for blank/erased block (return empty ExtendedTags if blank) * - Check the packed_tags1 mini-ECC (correct if necessary/possible) * - Convert packed_tags1 to ExtendedTags * - Update ecc_result and block_bad members to refect state. * * Returns YAFFS_OK or YAFFS_FAIL. */ int nandmtd1_read_chunk_tags(struct yaffs_dev *dev, int nand_chunk, u8 * data, struct yaffs_ext_tags *etags) { struct mtd_info *mtd = yaffs_dev_to_mtd(dev); int chunk_bytes = dev->data_bytes_per_chunk; loff_t addr = ((loff_t) nand_chunk) * chunk_bytes; int eccres = YAFFS_ECC_RESULT_NO_ERROR; struct mtd_oob_ops ops; struct yaffs_packed_tags1 pt1; int retval; int deleted; memset(&ops, 0, sizeof(ops)); ops.mode = MTD_OOB_AUTO; ops.len = (data) ? chunk_bytes : 0; ops.ooblen = YTAG1_SIZE; ops.datbuf = data; ops.oobbuf = (u8 *) & pt1; /* Read page and oob using MTD. * Check status and determine ECC result. */ retval = mtd->read_oob(mtd, addr, &ops); if (retval) { yaffs_trace(YAFFS_TRACE_MTD, "read_oob failed, chunk %d, mtd error %d", nand_chunk, retval); } switch (retval) { case 0: /* no error */ break; case -EUCLEAN: /* MTD's ECC fixed the data */ eccres = YAFFS_ECC_RESULT_FIXED; dev->n_ecc_fixed++; break; case -EBADMSG: /* MTD's ECC could not fix the data */ dev->n_ecc_unfixed++; /* fall into... */ default: rettags(etags, YAFFS_ECC_RESULT_UNFIXED, 0); etags->block_bad = (mtd->block_isbad) (mtd, addr); return YAFFS_FAIL; } /* Check for a blank/erased chunk. */ if (yaffs_check_ff((u8 *) & pt1, 8)) { /* when blank, upper layers want ecc_result to be <= NO_ERROR */ return rettags(etags, YAFFS_ECC_RESULT_NO_ERROR, YAFFS_OK); } #ifndef CONFIG_YAFFS_9BYTE_TAGS /* Read deleted status (bit) then return it to it's non-deleted * state before performing tags mini-ECC check. pt1.deleted is * inverted. */ deleted = !pt1.deleted; pt1.deleted = 1; #else deleted = (yaffs_count_bits(((u8 *) & pt1)[8]) < 7); #endif /* Check the packed tags mini-ECC and correct if necessary/possible. */ retval = yaffs_check_tags_ecc((struct yaffs_tags *)&pt1); switch (retval) { case 0: /* no tags error, use MTD result */ break; case 1: /* recovered tags-ECC error */ dev->n_tags_ecc_fixed++; if (eccres == YAFFS_ECC_RESULT_NO_ERROR) eccres = YAFFS_ECC_RESULT_FIXED; break; default: /* unrecovered tags-ECC error */ dev->n_tags_ecc_unfixed++; return rettags(etags, YAFFS_ECC_RESULT_UNFIXED, YAFFS_FAIL); } /* Unpack the tags to extended form and set ECC result. 
* [set should_be_ff just to keep yaffs_unpack_tags1 happy] */ pt1.should_be_ff = 0xFFFFFFFF; yaffs_unpack_tags1(etags, &pt1); etags->ecc_result = eccres; /* Set deleted state */ etags->is_deleted = deleted; return YAFFS_OK; } /* Mark a block bad. * * This is a persistant state. * Use of this function should be rare. * * Returns YAFFS_OK or YAFFS_FAIL. */ int nandmtd1_mark_block_bad(struct yaffs_dev *dev, int block_no) { struct mtd_info *mtd = yaffs_dev_to_mtd(dev); int blocksize = dev->param.chunks_per_block * dev->data_bytes_per_chunk; int retval; yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "marking block %d bad", block_no); retval = mtd->block_markbad(mtd, (loff_t) blocksize * block_no); return (retval) ? YAFFS_FAIL : YAFFS_OK; } /* Check any MTD prerequists. * * Returns YAFFS_OK or YAFFS_FAIL. */ static int nandmtd1_test_prerequists(struct mtd_info *mtd) { /* 2.6.18 has mtd->ecclayout->oobavail */ /* 2.6.21 has mtd->ecclayout->oobavail and mtd->oobavail */ int oobavail = mtd->ecclayout->oobavail; if (oobavail < YTAG1_SIZE) { yaffs_trace(YAFFS_TRACE_ERROR, "mtd device has only %d bytes for tags, need %d", oobavail, YTAG1_SIZE); return YAFFS_FAIL; } return YAFFS_OK; } /* Query for the current state of a specific block. * * Examine the tags of the first chunk of the block and return the state: * - YAFFS_BLOCK_STATE_DEAD, the block is marked bad * - YAFFS_BLOCK_STATE_NEEDS_SCANNING, the block is in use * - YAFFS_BLOCK_STATE_EMPTY, the block is clean * * Always returns YAFFS_OK. */ int nandmtd1_query_block(struct yaffs_dev *dev, int block_no, enum yaffs_block_state *state_ptr, u32 * seq_ptr) { struct mtd_info *mtd = yaffs_dev_to_mtd(dev); int chunk_num = block_no * dev->param.chunks_per_block; loff_t addr = (loff_t) chunk_num * dev->data_bytes_per_chunk; struct yaffs_ext_tags etags; int state = YAFFS_BLOCK_STATE_DEAD; int seqnum = 0; int retval; /* We don't yet have a good place to test for MTD config prerequists. * Do it here as we are called during the initial scan. */ if (nandmtd1_test_prerequists(mtd) != YAFFS_OK) return YAFFS_FAIL; retval = nandmtd1_read_chunk_tags(dev, chunk_num, NULL, &etags); etags.block_bad = (mtd->block_isbad) (mtd, addr); if (etags.block_bad) { yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "block %d is marked bad", block_no); state = YAFFS_BLOCK_STATE_DEAD; } else if (etags.ecc_result != YAFFS_ECC_RESULT_NO_ERROR) { /* bad tags, need to look more closely */ state = YAFFS_BLOCK_STATE_NEEDS_SCANNING; } else if (etags.chunk_used) { state = YAFFS_BLOCK_STATE_NEEDS_SCANNING; seqnum = etags.seq_number; } else { state = YAFFS_BLOCK_STATE_EMPTY; } *state_ptr = state; *seq_ptr = seqnum; /* query always succeeds */ return YAFFS_OK; }
TesterTerbon/kernel_trebon
fs/yaffs2/yaffs_mtdif1.c
C
gpl-2.0
9,866
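The yaffs_mtdif1.c record above relies on the NAND property that programming can only clear bits, so a chunk is marked deleted by rewriting its tag bytes as all-ones with just the deleted flag zeroed. The sketch below models that property in plain C; the choice of bit 0 in the last byte is purely illustrative, not the real packed_tags1 layout.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* NAND programming can only clear bits (1 -> 0), so re-programming a page
 * behaves like old &= new. Rewriting the tag bytes as all-0xFF with only
 * the "deleted" flag cleared therefore changes exactly one bit in flash. */
static void nand_reprogram(uint8_t *flash, const uint8_t *data, size_t n)
{
	for (size_t i = 0; i < n; i++)
		flash[i] &= data[i];	/* bits can only go to 0 */
}

int main(void)
{
	uint8_t flash[8]  = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf1 };
	uint8_t update[8];

	memset(update, 0xff, sizeof(update));	/* leave everything alone ... */
	update[7] &= (uint8_t)~0x01;		/* ... except a hypothetical
						   "deleted" bit (bit 0, last byte) */
	nand_reprogram(flash, update, sizeof(flash));

	for (int i = 0; i < 8; i++)
		printf("%02x ", flash[i]);	/* f1 -> f0, all other bytes intact */
	printf("\n");
	return 0;
}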
/* * Fujitsu B-series Lifebook PS/2 TouchScreen driver * * Copyright (c) 2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2005 Kenan Esau <kenan.esau@conan.de> * * TouchScreen detection, absolute mode setting and packet layout is taken from * Harald Hoyer's description of the device. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #include <linux/input.h> #include <linux/serio.h> #include <linux/libps2.h> #include <linux/dmi.h> #include <linux/slab.h> #include "psmouse.h" #include "lifebook.h" struct lifebook_data { struct input_dev *dev2; /* Relative device */ char phys[32]; }; static bool lifebook_present; static const char *desired_serio_phys; static int lifebook_limit_serio3(const struct dmi_system_id *d) { desired_serio_phys = "isa0060/serio3"; return 0; } static bool lifebook_use_6byte_proto; static int lifebook_set_6byte_proto(const struct dmi_system_id *d) { lifebook_use_6byte_proto = true; return 0; } static const struct dmi_system_id __initconst lifebook_dmi_table[] = { { /* FLORA-ie 55mi */ .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "FLORA-ie 55mi"), }, }, { /* LifeBook B */ .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "Lifebook B Series"), }, }, { /* LifeBook B */ .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook B Series"), }, }, { /* Lifebook B */ .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK B Series"), }, }, { /* Lifebook B-2130 */ .matches = { DMI_MATCH(DMI_BOARD_NAME, "ZEPHYR"), }, }, { /* Lifebook B213x/B2150 */ .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook B2131/B2133/B2150"), }, }, { /* Zephyr */ .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "ZEPHYR"), }, }, { /* Panasonic CF-18 */ .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "CF-18"), }, .callback = lifebook_limit_serio3, }, { /* Panasonic CF-28 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"), DMI_MATCH(DMI_PRODUCT_NAME, "CF-28"), }, .callback = lifebook_set_6byte_proto, }, { /* Panasonic CF-29 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"), DMI_MATCH(DMI_PRODUCT_NAME, "CF-29"), }, .callback = lifebook_set_6byte_proto, }, { /* Panasonic CF-72 */ .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "CF-72"), }, .callback = lifebook_set_6byte_proto, }, { /* Lifebook B142 */ .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook B142"), }, }, { } }; void __init lifebook_module_init(void) { lifebook_present = dmi_check_system(lifebook_dmi_table); } static psmouse_ret_t lifebook_process_byte(struct psmouse *psmouse) { struct lifebook_data *priv = psmouse->private; struct input_dev *dev1 = psmouse->dev; struct input_dev *dev2 = priv ? priv->dev2 : NULL; unsigned char *packet = psmouse->packet; bool relative_packet = packet[0] & 0x08; if (relative_packet || !lifebook_use_6byte_proto) { if (psmouse->pktcnt != 3) return PSMOUSE_GOOD_DATA; } else { switch (psmouse->pktcnt) { case 1: return (packet[0] & 0xf8) == 0x00 ? PSMOUSE_GOOD_DATA : PSMOUSE_BAD_DATA; case 2: return PSMOUSE_GOOD_DATA; case 3: return ((packet[2] & 0x30) << 2) == (packet[2] & 0xc0) ? PSMOUSE_GOOD_DATA : PSMOUSE_BAD_DATA; case 4: return (packet[3] & 0xf8) == 0xc0 ? PSMOUSE_GOOD_DATA : PSMOUSE_BAD_DATA; case 5: return (packet[4] & 0xc0) == (packet[2] & 0xc0) ? 
PSMOUSE_GOOD_DATA : PSMOUSE_BAD_DATA; case 6: if (((packet[5] & 0x30) << 2) != (packet[5] & 0xc0)) return PSMOUSE_BAD_DATA; if ((packet[5] & 0xc0) != (packet[1] & 0xc0)) return PSMOUSE_BAD_DATA; break; /* report data */ } } if (relative_packet) { if (!dev2) printk(KERN_WARNING "lifebook.c: got relative packet " "but no relative device set up\n"); } else { if (lifebook_use_6byte_proto) { input_report_abs(dev1, ABS_X, ((packet[1] & 0x3f) << 6) | (packet[2] & 0x3f)); input_report_abs(dev1, ABS_Y, 4096 - (((packet[4] & 0x3f) << 6) | (packet[5] & 0x3f))); } else { input_report_abs(dev1, ABS_X, (packet[1] | ((packet[0] & 0x30) << 4))); input_report_abs(dev1, ABS_Y, 1024 - (packet[2] | ((packet[0] & 0xC0) << 2))); } input_report_key(dev1, BTN_TOUCH, packet[0] & 0x04); input_sync(dev1); } if (dev2) { if (relative_packet) { input_report_rel(dev2, REL_X, ((packet[0] & 0x10) ? packet[1] - 256 : packet[1])); input_report_rel(dev2, REL_Y, -(int)((packet[0] & 0x20) ? packet[2] - 256 : packet[2])); } input_report_key(dev2, BTN_LEFT, packet[0] & 0x01); input_report_key(dev2, BTN_RIGHT, packet[0] & 0x02); input_sync(dev2); } return PSMOUSE_FULL_PACKET; } static int lifebook_absolute_mode(struct psmouse *psmouse) { struct ps2dev *ps2dev = &psmouse->ps2dev; unsigned char param; if (psmouse_reset(psmouse)) return -1; /* * Enable absolute output -- ps2_command fails always but if * you leave this call out the touchsreen will never send * absolute coordinates */ param = lifebook_use_6byte_proto ? 0x08 : 0x07; ps2_command(ps2dev, &param, PSMOUSE_CMD_SETRES); return 0; } static void lifebook_relative_mode(struct psmouse *psmouse) { struct ps2dev *ps2dev = &psmouse->ps2dev; unsigned char param = 0x06; ps2_command(ps2dev, &param, PSMOUSE_CMD_SETRES); } static void lifebook_set_resolution(struct psmouse *psmouse, unsigned int resolution) { static const unsigned char params[] = { 0, 1, 2, 2, 3 }; unsigned char p; if (resolution == 0 || resolution > 400) resolution = 400; p = params[resolution / 100]; ps2_command(&psmouse->ps2dev, &p, PSMOUSE_CMD_SETRES); psmouse->resolution = 50 << p; } static void lifebook_disconnect(struct psmouse *psmouse) { struct lifebook_data *priv = psmouse->private; psmouse_reset(psmouse); if (priv) { input_unregister_device(priv->dev2); kfree(priv); } psmouse->private = NULL; } int lifebook_detect(struct psmouse *psmouse, bool set_properties) { if (!lifebook_present) return -1; if (desired_serio_phys && strcmp(psmouse->ps2dev.serio->phys, desired_serio_phys)) return -1; if (set_properties) { psmouse->vendor = "Fujitsu"; psmouse->name = "Lifebook TouchScreen"; } return 0; } static int lifebook_create_relative_device(struct psmouse *psmouse) { struct input_dev *dev2; struct lifebook_data *priv; int error = -ENOMEM; priv = kzalloc(sizeof(struct lifebook_data), GFP_KERNEL); dev2 = input_allocate_device(); if (!priv || !dev2) goto err_out; priv->dev2 = dev2; snprintf(priv->phys, sizeof(priv->phys), "%s/input1", psmouse->ps2dev.serio->phys); dev2->phys = priv->phys; dev2->name = "PS/2 Touchpad"; dev2->id.bustype = BUS_I8042; dev2->id.vendor = 0x0002; dev2->id.product = PSMOUSE_LIFEBOOK; dev2->id.version = 0x0000; dev2->dev.parent = &psmouse->ps2dev.serio->dev; dev2->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); dev2->relbit[BIT_WORD(REL_X)] = BIT_MASK(REL_X) | BIT_MASK(REL_Y); dev2->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT); error = input_register_device(priv->dev2); if (error) goto err_out; psmouse->private = priv; return 0; err_out: input_free_device(dev2); 
kfree(priv); return error; } int lifebook_init(struct psmouse *psmouse) { struct input_dev *dev1 = psmouse->dev; int max_coord = lifebook_use_6byte_proto ? 4096 : 1024; if (lifebook_absolute_mode(psmouse)) return -1; dev1->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY); dev1->relbit[0] = 0; dev1->keybit[BIT_WORD(BTN_MOUSE)] = 0; dev1->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_abs_params(dev1, ABS_X, 0, max_coord, 0, 0); input_set_abs_params(dev1, ABS_Y, 0, max_coord, 0, 0); if (!desired_serio_phys) { if (lifebook_create_relative_device(psmouse)) { lifebook_relative_mode(psmouse); return -1; } } psmouse->protocol_handler = lifebook_process_byte; psmouse->set_resolution = lifebook_set_resolution; psmouse->disconnect = lifebook_disconnect; psmouse->reconnect = lifebook_absolute_mode; psmouse->model = lifebook_use_6byte_proto ? 6 : 3; /* * Use packet size = 3 even when using 6-byte protocol because * that's what POLL will return on Lifebooks (according to spec). */ psmouse->pktsize = 3; return 0; }
schmatzler/zte-kernel-tureis
drivers/input/mouse/lifebook.c
C
gpl-2.0
8,392
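In the lifebook.c record above, the short 3-byte protocol carries 10-bit absolute coordinates whose two high bits ride in the first packet byte. The following sketch decodes a hypothetical absolute packet the same way lifebook_process_byte does; the packet contents are invented for the example.

#include <stdio.h>
#include <stdint.h>

/* Absolute-coordinate decode for the 3-byte Lifebook packet, as in the
 * short-protocol branch of lifebook_process_byte(). Bit 3 of packet[0]
 * clear means an absolute (touchscreen) packet. */
static void lifebook_decode(const uint8_t p[3], int *x, int *y, int *touch)
{
	*x = p[1] | ((p[0] & 0x30) << 4);
	*y = 1024 - (p[2] | ((p[0] & 0xC0) << 2));
	*touch = !!(p[0] & 0x04);
}

int main(void)
{
	/* hypothetical absolute packet with the touch bit set */
	uint8_t pkt[3] = { 0x34, 0x80, 0x40 };
	int x, y, touch;

	lifebook_decode(pkt, &x, &y, &touch);
	printf("x=%d y=%d touch=%d\n", x, y, touch);	/* x=896 y=960 touch=1 */
	return 0;
}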
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="<?php print $language->language ?>" lang="<?php print $language->language ?>" dir="<?php print $language->dir ?>">
<head>
  <title><?php print $head_title; ?></title>
  <?php print $head; ?>
  <?php print $styles; ?>
  <?php print $scripts; ?>
</head>
<body class="<?php print $classes; ?>">
  <?php print $page_top; ?>
  <div id="branding">
    <?php if ($title): ?><h1 class="page-title"><?php print $title; ?></h1><?php endif; ?>
  </div>
  <div id="page">
    <?php if ($sidebar_first): ?>
      <div id="sidebar-first" class="sidebar">
        <?php if ($logo): ?>
          <img id="logo" src="<?php print $logo ?>" alt="<?php print $site_name ?>" />
        <?php endif; ?>
        <?php print $sidebar_first ?>
      </div>
    <?php endif; ?>
    <div id="content" class="clearfix">
      <?php if ($messages): ?>
        <div id="console"><?php print $messages; ?></div>
      <?php endif; ?>
      <?php if ($help): ?>
        <div id="help">
          <?php print $help; ?>
        </div>
      <?php endif; ?>
      <?php print $content; ?>
    </div>
  </div>
  <?php print $page_bottom; ?>
</body>
</html>
stjaymz/ss
themes/seven/maintenance-page.tpl.php
PHP
gpl-2.0
1,310
/* * Generic i2c interface for ALSA * * (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de> * Modified for the ALSA driver by Jaroslav Kysela <perex@perex.cz> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/string.h> #include <linux/errno.h> #include <sound/core.h> #include <sound/i2c.h> MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("Generic i2c interface for ALSA"); MODULE_LICENSE("GPL"); static int snd_i2c_bit_sendbytes(struct snd_i2c_device *device, unsigned char *bytes, int count); static int snd_i2c_bit_readbytes(struct snd_i2c_device *device, unsigned char *bytes, int count); static int snd_i2c_bit_probeaddr(struct snd_i2c_bus *bus, unsigned short addr); static struct snd_i2c_ops snd_i2c_bit_ops = { .sendbytes = snd_i2c_bit_sendbytes, .readbytes = snd_i2c_bit_readbytes, .probeaddr = snd_i2c_bit_probeaddr, }; static int snd_i2c_bus_free(struct snd_i2c_bus *bus) { struct snd_i2c_bus *slave; struct snd_i2c_device *device; if (snd_BUG_ON(!bus)) return -EINVAL; while (!list_empty(&bus->devices)) { device = snd_i2c_device(bus->devices.next); snd_i2c_device_free(device); } if (bus->master) list_del(&bus->buses); else { while (!list_empty(&bus->buses)) { slave = snd_i2c_slave_bus(bus->buses.next); snd_device_free(bus->card, slave); } } if (bus->private_free) bus->private_free(bus); kfree(bus); return 0; } static int snd_i2c_bus_dev_free(struct snd_device *device) { struct snd_i2c_bus *bus = device->device_data; return snd_i2c_bus_free(bus); } int snd_i2c_bus_create(struct snd_card *card, const char *name, struct snd_i2c_bus *master, struct snd_i2c_bus **ri2c) { struct snd_i2c_bus *bus; int err; static struct snd_device_ops ops = { .dev_free = snd_i2c_bus_dev_free, }; *ri2c = NULL; bus = kzalloc(sizeof(*bus), GFP_KERNEL); if (bus == NULL) return -ENOMEM; mutex_init(&bus->lock_mutex); INIT_LIST_HEAD(&bus->devices); INIT_LIST_HEAD(&bus->buses); bus->card = card; bus->ops = &snd_i2c_bit_ops; if (master) { list_add_tail(&bus->buses, &master->buses); bus->master = master; } strlcpy(bus->name, name, sizeof(bus->name)); err = snd_device_new(card, SNDRV_DEV_BUS, bus, &ops); if (err < 0) { snd_i2c_bus_free(bus); return err; } *ri2c = bus; return 0; } EXPORT_SYMBOL(snd_i2c_bus_create); int snd_i2c_device_create(struct snd_i2c_bus *bus, const char *name, unsigned char addr, struct snd_i2c_device **rdevice) { struct snd_i2c_device *device; *rdevice = NULL; if (snd_BUG_ON(!bus)) return -EINVAL; device = kzalloc(sizeof(*device), GFP_KERNEL); if (device == NULL) return -ENOMEM; device->addr = addr; strlcpy(device->name, name, sizeof(device->name)); list_add_tail(&device->list, &bus->devices); device->bus = bus; *rdevice = device; return 0; } EXPORT_SYMBOL(snd_i2c_device_create); int snd_i2c_device_free(struct snd_i2c_device *device) { if (device->bus) 
list_del(&device->list); if (device->private_free) device->private_free(device); kfree(device); return 0; } EXPORT_SYMBOL(snd_i2c_device_free); int snd_i2c_sendbytes(struct snd_i2c_device *device, unsigned char *bytes, int count) { return device->bus->ops->sendbytes(device, bytes, count); } EXPORT_SYMBOL(snd_i2c_sendbytes); int snd_i2c_readbytes(struct snd_i2c_device *device, unsigned char *bytes, int count) { return device->bus->ops->readbytes(device, bytes, count); } EXPORT_SYMBOL(snd_i2c_readbytes); int snd_i2c_probeaddr(struct snd_i2c_bus *bus, unsigned short addr) { return bus->ops->probeaddr(bus, addr); } EXPORT_SYMBOL(snd_i2c_probeaddr); /* * bit-operations */ static inline void snd_i2c_bit_hw_start(struct snd_i2c_bus *bus) { if (bus->hw_ops.bit->start) bus->hw_ops.bit->start(bus); } static inline void snd_i2c_bit_hw_stop(struct snd_i2c_bus *bus) { if (bus->hw_ops.bit->stop) bus->hw_ops.bit->stop(bus); } static void snd_i2c_bit_direction(struct snd_i2c_bus *bus, int clock, int data) { if (bus->hw_ops.bit->direction) bus->hw_ops.bit->direction(bus, clock, data); } static void snd_i2c_bit_set(struct snd_i2c_bus *bus, int clock, int data) { bus->hw_ops.bit->setlines(bus, clock, data); } #if 0 static int snd_i2c_bit_clock(struct snd_i2c_bus *bus) { if (bus->hw_ops.bit->getclock) return bus->hw_ops.bit->getclock(bus); return -ENXIO; } #endif static int snd_i2c_bit_data(struct snd_i2c_bus *bus, int ack) { return bus->hw_ops.bit->getdata(bus, ack); } static void snd_i2c_bit_start(struct snd_i2c_bus *bus) { snd_i2c_bit_hw_start(bus); snd_i2c_bit_direction(bus, 1, 1); /* SCL - wr, SDA - wr */ snd_i2c_bit_set(bus, 1, 1); snd_i2c_bit_set(bus, 1, 0); snd_i2c_bit_set(bus, 0, 0); } static void snd_i2c_bit_stop(struct snd_i2c_bus *bus) { snd_i2c_bit_set(bus, 0, 0); snd_i2c_bit_set(bus, 1, 0); snd_i2c_bit_set(bus, 1, 1); snd_i2c_bit_hw_stop(bus); } static void snd_i2c_bit_send(struct snd_i2c_bus *bus, int data) { snd_i2c_bit_set(bus, 0, data); snd_i2c_bit_set(bus, 1, data); snd_i2c_bit_set(bus, 0, data); } static int snd_i2c_bit_ack(struct snd_i2c_bus *bus) { int ack; snd_i2c_bit_set(bus, 0, 1); snd_i2c_bit_set(bus, 1, 1); snd_i2c_bit_direction(bus, 1, 0); /* SCL - wr, SDA - rd */ ack = snd_i2c_bit_data(bus, 1); snd_i2c_bit_direction(bus, 1, 1); /* SCL - wr, SDA - wr */ snd_i2c_bit_set(bus, 0, 1); return ack ? 
-EIO : 0; } static int snd_i2c_bit_sendbyte(struct snd_i2c_bus *bus, unsigned char data) { int i, err; for (i = 7; i >= 0; i--) snd_i2c_bit_send(bus, !!(data & (1 << i))); err = snd_i2c_bit_ack(bus); if (err < 0) return err; return 0; } static int snd_i2c_bit_readbyte(struct snd_i2c_bus *bus, int last) { int i; unsigned char data = 0; snd_i2c_bit_set(bus, 0, 1); snd_i2c_bit_direction(bus, 1, 0); /* SCL - wr, SDA - rd */ for (i = 7; i >= 0; i--) { snd_i2c_bit_set(bus, 1, 1); if (snd_i2c_bit_data(bus, 0)) data |= (1 << i); snd_i2c_bit_set(bus, 0, 1); } snd_i2c_bit_direction(bus, 1, 1); /* SCL - wr, SDA - wr */ snd_i2c_bit_send(bus, !!last); return data; } static int snd_i2c_bit_sendbytes(struct snd_i2c_device *device, unsigned char *bytes, int count) { struct snd_i2c_bus *bus = device->bus; int err, res = 0; if (device->flags & SND_I2C_DEVICE_ADDRTEN) return -EIO; /* not yet implemented */ snd_i2c_bit_start(bus); err = snd_i2c_bit_sendbyte(bus, device->addr << 1); if (err < 0) { snd_i2c_bit_hw_stop(bus); return err; } while (count-- > 0) { err = snd_i2c_bit_sendbyte(bus, *bytes++); if (err < 0) { snd_i2c_bit_hw_stop(bus); return err; } res++; } snd_i2c_bit_stop(bus); return res; } static int snd_i2c_bit_readbytes(struct snd_i2c_device *device, unsigned char *bytes, int count) { struct snd_i2c_bus *bus = device->bus; int err, res = 0; if (device->flags & SND_I2C_DEVICE_ADDRTEN) return -EIO; /* not yet implemented */ snd_i2c_bit_start(bus); err = snd_i2c_bit_sendbyte(bus, (device->addr << 1) | 1); if (err < 0) { snd_i2c_bit_hw_stop(bus); return err; } while (count-- > 0) { err = snd_i2c_bit_readbyte(bus, count == 0); if (err < 0) { snd_i2c_bit_hw_stop(bus); return err; } *bytes++ = (unsigned char)err; res++; } snd_i2c_bit_stop(bus); return res; } static int snd_i2c_bit_probeaddr(struct snd_i2c_bus *bus, unsigned short addr) { int err; if (addr & 0x8000) /* 10-bit address */ return -EIO; /* not yet implemented */ if (addr & 0x7f80) /* invalid address */ return -EINVAL; snd_i2c_bit_start(bus); err = snd_i2c_bit_sendbyte(bus, addr << 1); snd_i2c_bit_stop(bus); return err; } static int __init alsa_i2c_init(void) { return 0; } static void __exit alsa_i2c_exit(void) { } module_init(alsa_i2c_init) module_exit(alsa_i2c_exit)
scruiser/kernel
sound/i2c/i2c.c
C
gpl-2.0
8,516
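The i2c.c record above bit-bangs bytes MSB first and shifts the 7-bit device address left so bit 0 can carry the read/write flag, while snd_i2c_bit_probeaddr rejects 10-bit or out-of-range addresses up front. A small sketch of the bit order and the address check, using a hypothetical address, follows.

#include <stdio.h>
#include <stdint.h>

/* Mirror of the bit order used by snd_i2c_bit_sendbyte(): bits go out
 * MSB first; here they are just printed instead of driving SDA/SCL. */
static void emit_byte(uint8_t data)
{
	for (int i = 7; i >= 0; i--)
		printf("%d", !!(data & (1 << i)));
	printf("\n");
}

/* Address sanity check equivalent to snd_i2c_bit_probeaddr(): 10-bit and
 * out-of-range addresses are rejected before any bus traffic. */
static int addr_ok(unsigned short addr)
{
	if (addr & 0x8000)	/* 10-bit addressing: not handled */
		return 0;
	if (addr & 0x7f80)	/* more than 7 bits: invalid */
		return 0;
	return 1;
}

int main(void)
{
	uint8_t addr = 0x50;	/* hypothetical 7-bit device address */

	printf("addr valid: %d\n", addr_ok(addr));
	emit_byte((uint8_t)(addr << 1));		/* write cycle: R/W bit = 0 */
	emit_byte((uint8_t)((addr << 1) | 1));	/* read cycle:  R/W bit = 1 */
	return 0;
}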
/* * Verbose error logging for ATAPI CD/DVD devices. * * Copyright (C) 1994-1996 Scott Snyder <snyder@fnald0.fnal.gov> * Copyright (C) 1996-1998 Erik Andersen <andersee@debian.org> * Copyright (C) 1998-2000 Jens Axboe <axboe@suse.de> */ #include <linux/kernel.h> #include <linux/blkdev.h> #include <linux/cdrom.h> #include <scsi/scsi.h> #ifndef CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS void ide_cd_log_error(const char *name, struct request *failed_command, struct request_sense *sense) { /* Suppress printing unit attention and `in progress of becoming ready' errors when we're not being verbose. */ if (sense->sense_key == UNIT_ATTENTION || (sense->sense_key == NOT_READY && (sense->asc == 4 || sense->asc == 0x3a))) return; printk(KERN_ERR "%s: error code: 0x%02x sense_key: 0x%02x " "asc: 0x%02x ascq: 0x%02x\n", name, sense->error_code, sense->sense_key, sense->asc, sense->ascq); } #else /* The generic packet command opcodes for CD/DVD Logical Units, * From Table 57 of the SFF8090 Ver. 3 (Mt. Fuji) draft standard. */ static const struct { unsigned short packet_command; const char * const text; } packet_command_texts[] = { { GPCMD_TEST_UNIT_READY, "Test Unit Ready" }, { GPCMD_REQUEST_SENSE, "Request Sense" }, { GPCMD_FORMAT_UNIT, "Format Unit" }, { GPCMD_INQUIRY, "Inquiry" }, { GPCMD_START_STOP_UNIT, "Start/Stop Unit" }, { GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL, "Prevent/Allow Medium Removal" }, { GPCMD_READ_FORMAT_CAPACITIES, "Read Format Capacities" }, { GPCMD_READ_CDVD_CAPACITY, "Read Cd/Dvd Capacity" }, { GPCMD_READ_10, "Read 10" }, { GPCMD_WRITE_10, "Write 10" }, { GPCMD_SEEK, "Seek" }, { GPCMD_WRITE_AND_VERIFY_10, "Write and Verify 10" }, { GPCMD_VERIFY_10, "Verify 10" }, { GPCMD_FLUSH_CACHE, "Flush Cache" }, { GPCMD_READ_SUBCHANNEL, "Read Subchannel" }, { GPCMD_READ_TOC_PMA_ATIP, "Read Table of Contents" }, { GPCMD_READ_HEADER, "Read Header" }, { GPCMD_PLAY_AUDIO_10, "Play Audio 10" }, { GPCMD_GET_CONFIGURATION, "Get Configuration" }, { GPCMD_PLAY_AUDIO_MSF, "Play Audio MSF" }, { GPCMD_PLAYAUDIO_TI, "Play Audio TrackIndex" }, { GPCMD_GET_EVENT_STATUS_NOTIFICATION, "Get Event Status Notification" }, { GPCMD_PAUSE_RESUME, "Pause/Resume" }, { GPCMD_STOP_PLAY_SCAN, "Stop Play/Scan" }, { GPCMD_READ_DISC_INFO, "Read Disc Info" }, { GPCMD_READ_TRACK_RZONE_INFO, "Read Track Rzone Info" }, { GPCMD_RESERVE_RZONE_TRACK, "Reserve Rzone Track" }, { GPCMD_SEND_OPC, "Send OPC" }, { GPCMD_MODE_SELECT_10, "Mode Select 10" }, { GPCMD_REPAIR_RZONE_TRACK, "Repair Rzone Track" }, { GPCMD_MODE_SENSE_10, "Mode Sense 10" }, { GPCMD_CLOSE_TRACK, "Close Track" }, { GPCMD_BLANK, "Blank" }, { GPCMD_SEND_EVENT, "Send Event" }, { GPCMD_SEND_KEY, "Send Key" }, { GPCMD_REPORT_KEY, "Report Key" }, { GPCMD_LOAD_UNLOAD, "Load/Unload" }, { GPCMD_SET_READ_AHEAD, "Set Read-ahead" }, { GPCMD_READ_12, "Read 12" }, { GPCMD_GET_PERFORMANCE, "Get Performance" }, { GPCMD_SEND_DVD_STRUCTURE, "Send DVD Structure" }, { GPCMD_READ_DVD_STRUCTURE, "Read DVD Structure" }, { GPCMD_SET_STREAMING, "Set Streaming" }, { GPCMD_READ_CD_MSF, "Read CD MSF" }, { GPCMD_SCAN, "Scan" }, { GPCMD_SET_SPEED, "Set Speed" }, { GPCMD_PLAY_CD, "Play CD" }, { GPCMD_MECHANISM_STATUS, "Mechanism Status" }, { GPCMD_READ_CD, "Read CD" }, }; /* From Table 303 of the SFF8090 Ver. 3 (Mt. Fuji) draft standard. 
*/ static const char * const sense_key_texts[16] = { "No sense data", "Recovered error", "Not ready", "Medium error", "Hardware error", "Illegal request", "Unit attention", "Data protect", "Blank check", "(reserved)", "(reserved)", "Aborted command", "(reserved)", "(reserved)", "Miscompare", "(reserved)", }; /* From Table 304 of the SFF8090 Ver. 3 (Mt. Fuji) draft standard. */ static const struct { unsigned long asc_ascq; const char * const text; } sense_data_texts[] = { { 0x000000, "No additional sense information" }, { 0x000011, "Play operation in progress" }, { 0x000012, "Play operation paused" }, { 0x000013, "Play operation successfully completed" }, { 0x000014, "Play operation stopped due to error" }, { 0x000015, "No current audio status to return" }, { 0x010c0a, "Write error - padding blocks added" }, { 0x011700, "Recovered data with no error correction applied" }, { 0x011701, "Recovered data with retries" }, { 0x011702, "Recovered data with positive head offset" }, { 0x011703, "Recovered data with negative head offset" }, { 0x011704, "Recovered data with retries and/or CIRC applied" }, { 0x011705, "Recovered data using previous sector ID" }, { 0x011800, "Recovered data with error correction applied" }, { 0x011801, "Recovered data with error correction and retries applied"}, { 0x011802, "Recovered data - the data was auto-reallocated" }, { 0x011803, "Recovered data with CIRC" }, { 0x011804, "Recovered data with L-EC" }, { 0x015d00, "Failure prediction threshold exceeded" " - Predicted logical unit failure" }, { 0x015d01, "Failure prediction threshold exceeded" " - Predicted media failure" }, { 0x015dff, "Failure prediction threshold exceeded - False" }, { 0x017301, "Power calibration area almost full" }, { 0x020400, "Logical unit not ready - cause not reportable" }, /* Following is misspelled in ATAPI 2.6, _and_ in Mt. 
Fuji */ { 0x020401, "Logical unit not ready" " - in progress [sic] of becoming ready" }, { 0x020402, "Logical unit not ready - initializing command required" }, { 0x020403, "Logical unit not ready - manual intervention required" }, { 0x020404, "Logical unit not ready - format in progress" }, { 0x020407, "Logical unit not ready - operation in progress" }, { 0x020408, "Logical unit not ready - long write in progress" }, { 0x020600, "No reference position found (media may be upside down)" }, { 0x023000, "Incompatible medium installed" }, { 0x023a00, "Medium not present" }, { 0x025300, "Media load or eject failed" }, { 0x025700, "Unable to recover table of contents" }, { 0x030300, "Peripheral device write fault" }, { 0x030301, "No write current" }, { 0x030302, "Excessive write errors" }, { 0x030c00, "Write error" }, { 0x030c01, "Write error - Recovered with auto reallocation" }, { 0x030c02, "Write error - auto reallocation failed" }, { 0x030c03, "Write error - recommend reassignment" }, { 0x030c04, "Compression check miscompare error" }, { 0x030c05, "Data expansion occurred during compress" }, { 0x030c06, "Block not compressible" }, { 0x030c07, "Write error - recovery needed" }, { 0x030c08, "Write error - recovery failed" }, { 0x030c09, "Write error - loss of streaming" }, { 0x031100, "Unrecovered read error" }, { 0x031106, "CIRC unrecovered error" }, { 0x033101, "Format command failed" }, { 0x033200, "No defect spare location available" }, { 0x033201, "Defect list update failure" }, { 0x035100, "Erase failure" }, { 0x037200, "Session fixation error" }, { 0x037201, "Session fixation error writin lead-in" }, { 0x037202, "Session fixation error writin lead-out" }, { 0x037300, "CD control error" }, { 0x037302, "Power calibration area is full" }, { 0x037303, "Power calibration area error" }, { 0x037304, "Program memory area / RMA update failure" }, { 0x037305, "Program memory area / RMA is full" }, { 0x037306, "Program memory area / RMA is (almost) full" }, { 0x040200, "No seek complete" }, { 0x040300, "Write fault" }, { 0x040900, "Track following error" }, { 0x040901, "Tracking servo failure" }, { 0x040902, "Focus servo failure" }, { 0x040903, "Spindle servo failure" }, { 0x041500, "Random positioning error" }, { 0x041501, "Mechanical positioning or changer error" }, { 0x041502, "Positioning error detected by read of medium" }, { 0x043c00, "Mechanical positioning or changer error" }, { 0x044000, "Diagnostic failure on component (ASCQ)" }, { 0x044400, "Internal CD/DVD logical unit failure" }, { 0x04b600, "Media load mechanism failed" }, { 0x051a00, "Parameter list length error" }, { 0x052000, "Invalid command operation code" }, { 0x052100, "Logical block address out of range" }, { 0x052102, "Invalid address for write" }, { 0x052400, "Invalid field in command packet" }, { 0x052600, "Invalid field in parameter list" }, { 0x052601, "Parameter not supported" }, { 0x052602, "Parameter value invalid" }, { 0x052700, "Write protected media" }, { 0x052c00, "Command sequence error" }, { 0x052c03, "Current program area is not empty" }, { 0x052c04, "Current program area is empty" }, { 0x053001, "Cannot read medium - unknown format" }, { 0x053002, "Cannot read medium - incompatible format" }, { 0x053900, "Saving parameters not supported" }, { 0x054e00, "Overlapped commands attempted" }, { 0x055302, "Medium removal prevented" }, { 0x055500, "System resource failure" }, { 0x056300, "End of user area encountered on this track" }, { 0x056400, "Illegal mode for this track or incompatible medium" }, { 0x056f00, 
"Copy protection key exchange failure" " - Authentication failure" }, { 0x056f01, "Copy protection key exchange failure - Key not present" }, { 0x056f02, "Copy protection key exchange failure" " - Key not established" }, { 0x056f03, "Read of scrambled sector without authentication" }, { 0x056f04, "Media region code is mismatched to logical unit" }, { 0x056f05, "Drive region must be permanent" " / region reset count error" }, { 0x057203, "Session fixation error - incomplete track in session" }, { 0x057204, "Empty or partially written reserved track" }, { 0x057205, "No more RZONE reservations are allowed" }, { 0x05bf00, "Loss of streaming" }, { 0x062800, "Not ready to ready transition, medium may have changed" }, { 0x062900, "Power on, reset or hardware reset occurred" }, { 0x062a00, "Parameters changed" }, { 0x062a01, "Mode parameters changed" }, { 0x062e00, "Insufficient time for operation" }, { 0x063f00, "Logical unit operating conditions have changed" }, { 0x063f01, "Microcode has been changed" }, { 0x065a00, "Operator request or state change input (unspecified)" }, { 0x065a01, "Operator medium removal request" }, { 0x0bb900, "Play operation aborted" }, /* Here we use 0xff for the key (not a valid key) to signify * that these can have _any_ key value associated with them... */ { 0xff0401, "Logical unit is in process of becoming ready" }, { 0xff0400, "Logical unit not ready, cause not reportable" }, { 0xff0402, "Logical unit not ready, initializing command required" }, { 0xff0403, "Logical unit not ready, manual intervention required" }, { 0xff0500, "Logical unit does not respond to selection" }, { 0xff0800, "Logical unit communication failure" }, { 0xff0802, "Logical unit communication parity error" }, { 0xff0801, "Logical unit communication time-out" }, { 0xff2500, "Logical unit not supported" }, { 0xff4c00, "Logical unit failed self-configuration" }, { 0xff3e00, "Logical unit has not self-configured yet" }, }; void ide_cd_log_error(const char *name, struct request *failed_command, struct request_sense *sense) { int i; const char *s = "bad sense key!"; char buf[80]; printk(KERN_ERR "ATAPI device %s:\n", name); if (sense->error_code == 0x70) printk(KERN_CONT " Error: "); else if (sense->error_code == 0x71) printk(" Deferred Error: "); else if (sense->error_code == 0x7f) printk(KERN_CONT " Vendor-specific Error: "); else printk(KERN_CONT " Unknown Error Type: "); if (sense->sense_key < ARRAY_SIZE(sense_key_texts)) s = sense_key_texts[sense->sense_key]; printk(KERN_CONT "%s -- (Sense key=0x%02x)\n", s, sense->sense_key); if (sense->asc == 0x40) { sprintf(buf, "Diagnostic failure on component 0x%02x", sense->ascq); s = buf; } else { int lo = 0, mid, hi = ARRAY_SIZE(sense_data_texts); unsigned long key = (sense->sense_key << 16); key |= (sense->asc << 8); if (!(sense->ascq >= 0x80 && sense->ascq <= 0xdd)) key |= sense->ascq; s = NULL; while (hi > lo) { mid = (lo + hi) / 2; if (sense_data_texts[mid].asc_ascq == key || sense_data_texts[mid].asc_ascq == (0xff0000|key)) { s = sense_data_texts[mid].text; break; } else if (sense_data_texts[mid].asc_ascq > key) hi = mid; else lo = mid + 1; } } if (s == NULL) { if (sense->asc > 0x80) s = "(vendor-specific error)"; else s = "(reserved error code)"; } printk(KERN_ERR " %s -- (asc=0x%02x, ascq=0x%02x)\n", s, sense->asc, sense->ascq); if (failed_command != NULL) { int lo = 0, mid, hi = ARRAY_SIZE(packet_command_texts); s = NULL; while (hi > lo) { mid = (lo + hi) / 2; if (packet_command_texts[mid].packet_command == failed_command->cmd[0]) { s = 
packet_command_texts[mid].text; break; } if (packet_command_texts[mid].packet_command > failed_command->cmd[0]) hi = mid; else lo = mid + 1; } printk(KERN_ERR " The failed \"%s\" packet command " "was: \n \"", s); for (i = 0; i < BLK_MAX_CDB; i++) printk(KERN_CONT "%02x ", failed_command->cmd[i]); printk(KERN_CONT "\"\n"); } /* The SKSV bit specifies validity of the sense_key_specific * in the next two commands. It is bit 7 of the first byte. * In the case of NOT_READY, if SKSV is set the drive can * give us nice ETA readings. */ if (sense->sense_key == NOT_READY && (sense->sks[0] & 0x80)) { int progress = (sense->sks[1] << 8 | sense->sks[2]) * 100; printk(KERN_ERR " Command is %02d%% complete\n", progress / 0xffff); } if (sense->sense_key == ILLEGAL_REQUEST && (sense->sks[0] & 0x80) != 0) { printk(KERN_ERR " Error in %s byte %d", (sense->sks[0] & 0x40) != 0 ? "command packet" : "command data", (sense->sks[1] << 8) + sense->sks[2]); if ((sense->sks[0] & 0x40) != 0) printk(KERN_CONT " bit %d", sense->sks[0] & 0x07); printk(KERN_CONT "\n"); } } #endif
SerenityS/android_kernel_lge_msm8974
drivers/ide/ide-cd_verbose.c
C
gpl-2.0
13,840
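ide_cd_log_error() in drivers/ide/ide-cd_verbose.c above packs the sense key, ASC and ASCQ into one 24-bit value (sense_key << 16 | asc << 8 | ascq) and binary-searches the sorted sense_data_texts table, with 0xff in the key position acting as a wildcard. A minimal self-contained sketch of that key-packing and lookup idea; the three-entry table and function names here are illustrative, not the kernel's:

#include <stdio.h>

struct sense_text {
	unsigned long key;	/* sense_key << 16 | asc << 8 | ascq */
	const char *text;
};

/* Illustrative table, kept sorted by key so a binary search works. */
static const struct sense_text table[] = {
	{ 0x023a00, "Medium not present" },
	{ 0x052000, "Invalid command operation code" },
	{ 0x062800, "Not ready to ready transition" },
};

static const char *sense_lookup(unsigned sense_key, unsigned asc, unsigned ascq)
{
	unsigned long key = (sense_key << 16) | (asc << 8) | ascq;
	int lo = 0, hi = sizeof(table) / sizeof(table[0]);

	while (hi > lo) {
		int mid = (lo + hi) / 2;

		if (table[mid].key == key)
			return table[mid].text;
		if (table[mid].key > key)
			hi = mid;
		else
			lo = mid + 1;
	}
	return "(unknown sense data)";
}

int main(void)
{
	printf("%s\n", sense_lookup(0x02, 0x3a, 0x00));	/* Medium not present */
	return 0;
}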
<?php
namespace TYPO3\CMS\Rtehtmlarea\Extension;

/**
 * This file is part of the TYPO3 CMS project.
 *
 * It is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License, either version 2
 * of the License, or any later version.
 *
 * For the full copyright and license information, please read the
 * LICENSE.txt file that was distributed with this source code.
 *
 * The TYPO3 project - inspiring people to share!
 */

/**
 * TextStyle plugin for htmlArea RTE
 *
 * @author Stanislas Rolland <typo3(arobas)sjbr.ca>
 */
class TextStyle extends \TYPO3\CMS\Rtehtmlarea\RteHtmlAreaApi {

	protected $extensionKey = 'rtehtmlarea'; // The key of the extension that is extending htmlArea RTE
	protected $pluginName = 'TextStyle'; // The name of the plugin registered by the extension
	protected $relativePathToLocallangFile = 'extensions/TextStyle/locallang.xlf'; // Path to this main locallang file of the extension relative to the extension dir.
	protected $relativePathToSkin = ''; // Path to the skin (css) file relative to the extension dir.
	protected $htmlAreaRTE; // Reference to the invoking object
	protected $thisConfig; // Reference to RTE PageTSConfig
	protected $toolbar; // Reference to RTE toolbar array
	protected $LOCAL_LANG; // Frontend language array
	protected $pluginButtons = 'textstyle'; // The comma separated list of button names that the extension is adding to the htmlArea RTE toolbar
	protected $pluginLabels = 'textstylelabel'; // The comma separated list of label names that the extension is adding to the htmlArea RTE toolbar

	// The name-converting array, converting the button names used in the RTE PageTSConfig to the button id's used by the JS scripts
	protected $convertToolbarForHtmlAreaArray = array(
		'textstylelabel' => 'I[text_style]',
		'textstyle' => 'TextStyle'
	);

	protected $requiresClassesConfiguration = TRUE;

}
mohsinabbas/typo3_src-6.2.4-blank
typo3/sysext/rtehtmlarea/Classes/Extension/TextStyle.php
PHP
gpl-2.0
1,932
/* * Copyright (C) 2011,2013 by Jonathan Naylor G4KLX * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "RemoteProtocolHandler.h" #include "DStarDefines.h" #include "SHA256.h" #include "Utils.h" const unsigned int BUFFER_LENGTH = 2000U; CRemoteProtocolHandler::CRemoteProtocolHandler(unsigned int port, const wxString& address) : m_socket(address, port), m_address(), m_port(0U), m_loggedIn(false), m_type(RPHT_NONE), m_inBuffer(NULL), m_inLength(0U), m_outBuffer(NULL) { wxASSERT(port > 0U); m_inBuffer = new unsigned char[BUFFER_LENGTH]; m_outBuffer = new unsigned char[BUFFER_LENGTH]; } CRemoteProtocolHandler::~CRemoteProtocolHandler() { delete[] m_outBuffer; delete[] m_inBuffer; } bool CRemoteProtocolHandler::open() { return m_socket.open(); } RPH_TYPE CRemoteProtocolHandler::readType() { m_type = RPHT_NONE; in_addr address; unsigned int port; int length = m_socket.read(m_inBuffer, BUFFER_LENGTH, address, port); if (length <= 0) return m_type; // CUtils::dump(wxT("Incoming"), m_inBuffer, length); if (::memcmp(m_inBuffer, "LIN", 3U) == 0) { m_loggedIn = false; m_address = address; m_port = port; m_type = RPHT_LOGIN; return m_type; } if (address.s_addr == inet_addr("127.0.0.1")) { if (::memcmp(m_inBuffer, "LKS", 3U) == 0) { m_inLength = length; m_type = RPHT_LINKSCR; return m_type; } } if (m_loggedIn) { if (address.s_addr != m_address.s_addr || port != m_port) { sendNAK(wxT("You are not logged in")); return m_type; } } m_inLength = length; if (::memcmp(m_inBuffer, "SHA", 3U) == 0) { if (m_loggedIn) { sendNAK(wxT("Someone is already logged in")); return m_type; } m_type = RPHT_HASH; return m_type; } else if (::memcmp(m_inBuffer, "GCS", 3U) == 0) { if (!m_loggedIn) { sendNAK(wxT("You are not logged in")); return m_type; } m_type = RPHT_CALLSIGNS; return m_type; } else if (::memcmp(m_inBuffer, "GRP", 3U) == 0) { if (!m_loggedIn) { sendNAK(wxT("You are not logged in")); return m_type; } m_type = RPHT_REPEATER; return m_type; } else if (::memcmp(m_inBuffer, "GSN", 3U) == 0) { if (!m_loggedIn) { sendNAK(wxT("You are not logged in")); return m_type; } m_type = RPHT_STARNET; return m_type; } else if (::memcmp(m_inBuffer, "LNK", 3U) == 0) { if (!m_loggedIn) { sendNAK(wxT("You are not logged in")); return m_type; } m_type = RPHT_LINK; return m_type; } else if (::memcmp(m_inBuffer, "UNL", 3U) == 0) { if (!m_loggedIn) { sendNAK(wxT("You are not logged in")); return m_type; } m_type = RPHT_UNLINK; return m_type; } else if (::memcmp(m_inBuffer, "LGO", 3U) == 0) { if (!m_loggedIn) { sendNAK(wxT("You are not logged in")); return m_type; } m_type = RPHT_LOGOFF; return m_type; } else if (::memcmp(m_inBuffer, "LOG", 3U) == 0) { if (!m_loggedIn) return m_type; m_type = RPHT_LOGOUT; return m_type; } else { if (!m_loggedIn) { sendNAK(wxT("You are not logged in")); return m_type; } m_type = RPHT_UNKNOWN; return m_type; } } bool CRemoteProtocolHandler::readHash(const wxString& 
password, wxUint32 random) { if (m_type != RPHT_HASH) return false; unsigned char* hash = m_inBuffer + 3U; unsigned int len = password.Len() + sizeof(wxUint32); unsigned char* in = new unsigned char[len]; unsigned char* out = new unsigned char[32U]; ::memcpy(in, &random, sizeof(wxUint32)); for (unsigned int i = 0U; i < password.Len(); i++) in[i + sizeof(unsigned int)] = password.GetChar(i); CSHA256 sha256; sha256.buffer(in, len, out); bool res = ::memcmp(out, hash, 32U) == 0; delete[] in; delete[] out; return res; } wxString CRemoteProtocolHandler::readRepeater() { if (m_type != RPHT_REPEATER) return wxEmptyString; wxString callsign((char*)(m_inBuffer + 3U), wxConvLocal, LONG_CALLSIGN_LENGTH); return callsign; } wxString CRemoteProtocolHandler::readStarNetGroup() { if (m_type != RPHT_STARNET) return wxEmptyString; wxString callsign((char*)(m_inBuffer + 3U), wxConvLocal, LONG_CALLSIGN_LENGTH); return callsign; } bool CRemoteProtocolHandler::readLogoff(wxString& callsign, wxString& user) { if (m_type != RPHT_LOGOFF) return false; callsign = wxString((char*)(m_inBuffer + 3U), wxConvLocal, LONG_CALLSIGN_LENGTH); user = wxString((char*)(m_inBuffer + 3U + LONG_CALLSIGN_LENGTH), wxConvLocal, LONG_CALLSIGN_LENGTH); return true; } bool CRemoteProtocolHandler::readLink(wxString& callsign, RECONNECT& reconnect, wxString& reflector) { if (m_type != RPHT_LINK) return false; callsign = wxString((char*)(m_inBuffer + 3U), wxConvLocal, LONG_CALLSIGN_LENGTH); wxInt32 temp; ::memcpy(&temp, m_inBuffer + 3U + LONG_CALLSIGN_LENGTH, sizeof(wxInt32)); reconnect = RECONNECT(wxINT32_SWAP_ON_BE(temp)); reflector = wxString((char*)(m_inBuffer + 3U + LONG_CALLSIGN_LENGTH + sizeof(wxInt32)), wxConvLocal, LONG_CALLSIGN_LENGTH); if (reflector.IsSameAs(wxT(" "))) reflector.Clear(); return true; } bool CRemoteProtocolHandler::readUnlink(wxString& callsign, PROTOCOL& protocol, wxString& reflector) { if (m_type != RPHT_UNLINK) return false; callsign = wxString((char*)(m_inBuffer + 3U), wxConvLocal, LONG_CALLSIGN_LENGTH); wxInt32 temp; ::memcpy(&temp, m_inBuffer + 3U + LONG_CALLSIGN_LENGTH, sizeof(wxInt32)); protocol = PROTOCOL(wxINT32_SWAP_ON_BE(temp)); reflector = wxString((char*)(m_inBuffer + 3U + LONG_CALLSIGN_LENGTH + sizeof(wxInt32)), wxConvLocal, LONG_CALLSIGN_LENGTH); return true; } bool CRemoteProtocolHandler::readLinkScr(wxString& callsign, RECONNECT& reconnect, wxString& reflector) { if (m_type != RPHT_LINKSCR) return false; callsign = wxString((char*)(m_inBuffer + 3U), wxConvLocal, LONG_CALLSIGN_LENGTH); reflector = wxString((char*)(m_inBuffer + 3U + LONG_CALLSIGN_LENGTH), wxConvLocal, LONG_CALLSIGN_LENGTH); wxString rec = wxString((char*)(m_inBuffer + 3U + 2U * LONG_CALLSIGN_LENGTH), wxConvLocal, 1U); unsigned long val; rec.ToULong(&val); reconnect = RECONNECT(val); if (reflector.IsSameAs(wxT(" "))) reflector.Clear(); return true; } bool CRemoteProtocolHandler::sendCallsigns(const wxArrayString& repeaters, const wxArrayString& starNets) { unsigned char* p = m_outBuffer; ::memcpy(p, "CAL", 3U); p += 3U; for (unsigned int n = 0U; n < repeaters.GetCount(); n++) { *p++ = 'R'; ::memset(p, ' ' , LONG_CALLSIGN_LENGTH); for (unsigned int i = 0U; i < repeaters.Item(n).Len(); i++) p[i] = repeaters.Item(n).GetChar(i); p += LONG_CALLSIGN_LENGTH; } for (unsigned int n = 0U; n < starNets.GetCount(); n++) { *p++ = 'S'; ::memset(p, ' ' , LONG_CALLSIGN_LENGTH); for (unsigned int i = 0U; i < starNets.Item(n).Len(); i++) p[i] = starNets.Item(n).GetChar(i); p += LONG_CALLSIGN_LENGTH; } // CUtils::dump(wxT("Outgoing"), m_outBuffer, p 
- m_outBuffer); return m_socket.write(m_outBuffer, p - m_outBuffer, m_address, m_port); } bool CRemoteProtocolHandler::sendRepeater(const CRemoteRepeaterData& data) { unsigned char* p = m_outBuffer; ::memcpy(p, "RPT", 3U); p += 3U; ::memset(p, ' ', LONG_CALLSIGN_LENGTH); for (unsigned int i = 0U; i < data.getCallsign().Len(); i++) p[i] = data.getCallsign().GetChar(i); p += LONG_CALLSIGN_LENGTH; wxInt32 reconnect = wxINT32_SWAP_ON_BE(data.getReconnect()); ::memcpy(p, &reconnect, sizeof(wxInt32)); p += sizeof(wxInt32); ::memset(p, ' ', LONG_CALLSIGN_LENGTH); for (unsigned int i = 0U; i < data.getReflector().Len(); i++) p[i] = data.getReflector().GetChar(i); p += LONG_CALLSIGN_LENGTH; for (unsigned int n = 0U; n < data.getLinkCount(); n++) { CRemoteLinkData& link = data.getLink(n); ::memset(p, ' ', LONG_CALLSIGN_LENGTH); for (unsigned int i = 0U; i < link.getCallsign().Len(); i++) p[i] = link.getCallsign().GetChar(i); p += LONG_CALLSIGN_LENGTH; wxInt32 protocol = wxINT32_SWAP_ON_BE(link.getProtocol()); ::memcpy(p, &protocol, sizeof(wxInt32)); p += sizeof(wxInt32); wxInt32 linked = wxINT32_SWAP_ON_BE(link.isLinked()); ::memcpy(p, &linked, sizeof(wxInt32)); p += sizeof(wxInt32); wxInt32 direction = wxINT32_SWAP_ON_BE(link.getDirection()); ::memcpy(p, &direction, sizeof(wxInt32)); p += sizeof(wxInt32); wxInt32 dongle = wxINT32_SWAP_ON_BE(link.isDongle()); ::memcpy(p, &dongle, sizeof(wxInt32)); p += sizeof(wxInt32); } // CUtils::dump(wxT("Outgoing"), m_outBuffer, p - m_outBuffer); return m_socket.write(m_outBuffer, p - m_outBuffer, m_address, m_port); } bool CRemoteProtocolHandler::sendStarNetGroup(const CRemoteStarNetGroup& data) { unsigned char* p = m_outBuffer; ::memcpy(p, "SNT", 3U); p += 3U; ::memset(p, ' ', LONG_CALLSIGN_LENGTH); for (unsigned int i = 0U; i < data.getCallsign().Len(); i++) p[i] = data.getCallsign().GetChar(i); p += LONG_CALLSIGN_LENGTH; ::memset(p, ' ', LONG_CALLSIGN_LENGTH); for (unsigned int i = 0U; i < data.getLogoff().Len(); i++) p[i] = data.getLogoff().GetChar(i); p += LONG_CALLSIGN_LENGTH; wxUint32 timer = wxUINT32_SWAP_ON_BE(data.getTimer()); ::memcpy(p, &timer, sizeof(wxUint32)); p += sizeof(wxUint32); wxUint32 timeout = wxUINT32_SWAP_ON_BE(data.getTimeout()); ::memcpy(p, &timeout, sizeof(wxUint32)); p += sizeof(wxUint32); for (unsigned int n = 0U; n < data.getUserCount(); n++) { CRemoteStarNetUser& user = data.getUser(n); ::memset(p, ' ', LONG_CALLSIGN_LENGTH); for (unsigned int i = 0U; i < user.getCallsign().Len(); i++) p[i] = user.getCallsign().GetChar(i); p += LONG_CALLSIGN_LENGTH; wxUint32 timer = wxUINT32_SWAP_ON_BE(user.getTimer()); ::memcpy(p, &timer, sizeof(wxUint32)); p += sizeof(wxUint32); wxUint32 timeout = wxUINT32_SWAP_ON_BE(user.getTimeout()); ::memcpy(p, &timeout, sizeof(wxUint32)); p += sizeof(wxUint32); } // CUtils::dump(wxT("Outgoing"), m_outBuffer, p - m_outBuffer); return m_socket.write(m_outBuffer, p - m_outBuffer, m_address, m_port); } void CRemoteProtocolHandler::setLoggedIn(bool set) { m_loggedIn = set; } void CRemoteProtocolHandler::close() { m_socket.close(); } bool CRemoteProtocolHandler::sendACK() { ::memcpy(m_outBuffer + 0U, "ACK", 3U); // CUtils::dump(wxT("Outgoing"), m_outBuffer, 3U); return m_socket.write(m_outBuffer, 3U, m_address, m_port); } bool CRemoteProtocolHandler::sendNAK(const wxString& text) { ::memcpy(m_outBuffer + 0U, "NAK", 3U); ::memset(m_outBuffer + 3U, 0x00U, text.Len() + 1U); for (unsigned int i = 0U; i < text.Len(); i++) m_outBuffer[i + 3U] = text.GetChar(i); // CUtils::dump(wxT("Outgoing"), m_outBuffer, 3U + 
text.Len() + 1U); return m_socket.write(m_outBuffer, 3U + text.Len() + 1U, m_address, m_port); } bool CRemoteProtocolHandler::sendRandom(wxUint32 random) { ::memcpy(m_outBuffer + 0U, "RND", 3U); wxUint32 temp = wxUINT32_SWAP_ON_BE(random); ::memcpy(m_outBuffer + 3U, &temp, sizeof(wxUint32)); // CUtils::dump(wxT("Outgoing"), m_outBuffer, 3U + sizeof(wxUint32)); return m_socket.write(m_outBuffer, 3U + sizeof(wxUint32), m_address, m_port); }
n8ohu/DMRRepeater
ircDDBGateway/Common/RemoteProtocolHandler.cpp
C++
gpl-2.0
12,097
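CRemoteProtocolHandler::readHash() above authenticates a login by hashing the 32-bit random challenge concatenated with the password and comparing the SHA-256 digest against the 32 bytes the client sent. A rough C equivalent of that check, assuming OpenSSL is available for the digest; the buffer layout mirrors the wxWidgets code and the function name is illustrative:

#include <stdint.h>
#include <string.h>
#include <openssl/sha.h>

/* Returns 1 when `received` (32 bytes) equals SHA-256(random || password). */
static int check_login_hash(uint32_t random, const char *password,
			    const unsigned char *received)
{
	unsigned char in[sizeof(uint32_t) + 256];
	unsigned char digest[SHA256_DIGEST_LENGTH];
	size_t pw_len = strlen(password);

	if (pw_len > 256)
		return 0;

	/* Challenge first, then the password bytes, as in readHash() above. */
	memcpy(in, &random, sizeof(uint32_t));
	memcpy(in + sizeof(uint32_t), password, pw_len);

	SHA256(in, sizeof(uint32_t) + pw_len, digest);

	return memcmp(digest, received, SHA256_DIGEST_LENGTH) == 0;
}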
<?php // Template Name: Portfolio Three Column get_header(); ?> <?php $content_css = 'width:100%'; $sidebar_css = 'display:none'; $content_class = ''; $sidebar_exists = false; $sidebar_left = ''; $double_sidebars = false; $sidebar_1 = get_post_meta( $post->ID, 'sbg_selected_sidebar_replacement', true ); $sidebar_2 = get_post_meta( $post->ID, 'sbg_selected_sidebar_2_replacement', true ); if( $smof_data['pages_global_sidebar'] ) { if( $smof_data['pages_sidebar'] != 'None' ) { $sidebar_1 = array( $smof_data['pages_sidebar'] ); } else { $sidebar_1 = ''; } if( $smof_data['pages_sidebar_2'] != 'None' ) { $sidebar_2 = array( $smof_data['pages_sidebar_2'] ); } else { $sidebar_2 = ''; } } if( ( is_array( $sidebar_1 ) && ( $sidebar_1[0] || $sidebar_1[0] === '0' ) ) && ( is_array( $sidebar_2 ) && ( $sidebar_2[0] || $sidebar_2[0] === '0' ) ) ) { $double_sidebars = true; } if( is_array( $sidebar_1 ) && ( $sidebar_1[0] || $sidebar_1[0] === '0' ) ) { $sidebar_exists = true; } else { $sidebar_exists = false; } if( ! $sidebar_exists ) { $content_css = 'width:100%'; $sidebar_css = 'display:none'; $sidebar_exists = false; } elseif(get_post_meta($post->ID, 'pyre_sidebar_position', true) == 'left') { $content_css = 'float:right;'; $sidebar_css = 'float:left;'; $content_class = 'portfolio-two-sidebar'; $sidebar_exists = true; $sidebar_left = 1; } elseif(get_post_meta($post->ID, 'pyre_sidebar_position', true) == 'right') { $content_css = 'float:left;'; $sidebar_css = 'float:right;'; $content_class = 'portfolio-two-sidebar'; $sidebar_exists = true; } elseif(get_post_meta($post->ID, 'pyre_sidebar_position', true) == 'default' || ! metadata_exists( 'post', $post->ID, 'pyre_sidebar_position' )) { $content_class = 'portfolio-two-sidebar'; if($smof_data['default_sidebar_pos'] == 'Left') { $content_css = 'float:right;'; $sidebar_css = 'float:left;'; $sidebar_exists = true; $sidebar_left = 1; } elseif($smof_data['default_sidebar_pos'] == 'Right') { $content_css = 'float:left;'; $sidebar_css = 'float:right;'; $sidebar_exists = true; $sidebar_left = 2; } } if(get_post_meta($post->ID, 'pyre_sidebar_position', true) == 'right') { $sidebar_left = 2; } if( $smof_data['pages_global_sidebar'] ) { if( $smof_data['pages_sidebar'] != 'None' ) { $sidebar_1 = $smof_data['pages_sidebar']; if( $smof_data['default_sidebar_pos'] == 'Right' ) { $content_css = 'float:left;'; $sidebar_css = 'float:right;'; $sidebar_left = 2; } else { $content_css = 'float:right;'; $sidebar_css = 'float:left;'; $sidebar_left = 1; } } if( $smof_data['pages_sidebar_2'] != 'None' ) { $sidebar_2 = $smof_data['pages_sidebar_2']; } if( $smof_data['pages_sidebar'] != 'None' && $smof_data['pages_sidebar_2'] != 'None' ) { $double_sidebars = true; } } else { $sidebar_1 = '0'; $sidebar_2 = '0'; } if($double_sidebars == true) { $content_css = 'float:left;'; $sidebar_css = 'float:left;'; $sidebar_2_css = 'float:left;'; } else { $sidebar_left = 1; } ?> <div id="content" class="portfolio portfolio-three <?php echo $content_class; ?>" style="<?php echo $content_css; ?>"> <?php while(have_posts()): the_post(); ?> <div id="post-<?php the_ID(); ?>" <?php post_class(); ?>> <?php echo avada_render_rich_snippets_for_pages(); ?> <?php echo avada_featured_images_for_pages(); ?> <div class="post-content"> <?php the_content(); ?> <?php avada_link_pages(); ?> </div> </div> <?php $current_page_id = $post->ID; ?> <?php endwhile; ?> <?php if(is_front_page()) { $paged = (get_query_var('page')) ? get_query_var('page') : 1; } else { $paged = (get_query_var('paged')) ? 
get_query_var('paged') : 1; } $args = array( 'post_type' => 'avada_portfolio', 'paged' => $paged, 'posts_per_page' => $smof_data['portfolio_items'], ); $pcats = get_post_meta(get_the_ID(), 'pyre_portfolio_category', true); if($pcats && $pcats[0] == 0) { unset($pcats[0]); } if($pcats){ $args['tax_query'][] = array( 'taxonomy' => 'portfolio_category', 'field' => 'term_id', 'terms' => $pcats ); } $gallery = new WP_Query($args); $portfolio_taxs = array(); if(is_array($gallery->posts) && !empty($gallery->posts)) { foreach($gallery->posts as $gallery_post) { $post_taxs = wp_get_post_terms($gallery_post->ID, 'portfolio_category', array("fields" => "all")); if(is_array($post_taxs) && !empty($post_taxs)) { foreach($post_taxs as $post_tax) { if(is_array($pcats) && !empty($pcats) && (in_array($post_tax->term_id, $pcats) || in_array($post_tax->parent, $pcats )) ) { $portfolio_taxs[urldecode($post_tax->slug)] = $post_tax->name; } if(empty($pcats) || !isset($pcats)) { $portfolio_taxs[urldecode($post_tax->slug)] = $post_tax->name; } } } } } $all_terms = get_terms('portfolio_category'); $sorted_taxs = array(); if( !empty( $all_terms ) && is_array( $all_terms ) ) { foreach( $all_terms as $term ) { if( array_key_exists ( urldecode($term->slug) , $portfolio_taxs ) ) { $sorted_taxs[urldecode($term->slug)] = $term->name; } } } $portfolio_taxs = $sorted_taxs; $portfolio_category = get_terms('portfolio_category'); if( ! post_password_required($post->ID) ): if(is_array($portfolio_taxs) && !empty($portfolio_taxs) && get_post_meta($post->ID, 'pyre_portfolio_filters', true) != 'no'): ?> <ul class="portfolio-tabs clearfix"> <li class="active"><a data-filter="*" href="#"><?php echo __('All', 'Avada'); ?></a></li> <?php foreach($portfolio_taxs as $portfolio_tax_slug => $portfolio_tax_name): ?> <li><a data-filter=".<?php echo $portfolio_tax_slug; ?>" href="#"><?php echo $portfolio_tax_name; ?></a></li> <?php endforeach; ?> </ul> <?php endif; ?> <div class="portfolio-wrapper"> <?php $custom_colulmn_spacing = false; if( get_post_meta($post->ID, 'pyre_portfolio_column_spacing', true) || get_post_meta($post->ID, 'pyre_portfolio_column_spacing', true) === '0' ) { $custom_colulmn_spacing = true; $col_spacing = get_post_meta($post->ID, 'pyre_portfolio_column_spacing', true) / 2; echo sprintf( '<style type="text/css">.portfolio-wrapper{margin: 0 %spx;}.portfolio-wrapper .col-spacing{padding:%spx;}</style>', ( -1 ) * $col_spacing, $col_spacing ); } else if( $smof_data['portfolio_column_spacing'] ) { $custom_colulmn_spacing = true; $col_spacing = $smof_data['portfolio_column_spacing'] / 2; echo sprintf( '<style type="text/css">.portfolio-wrapper{margin: 0 %spx;}.portfolio-wrapper .col-spacing{padding:%spx;}</style>', ( -1 ) * $col_spacing, $col_spacing ); } while($gallery->have_posts()): $gallery->the_post(); if($pcats) { $permalink = tf_addUrlParameter(get_permalink(), 'portfolioID', $current_page_id); } else { $permalink = get_permalink(); } if(has_post_thumbnail() || get_post_meta($post->ID, 'pyre_video', true)): ?> <?php $item_classes = ''; $item_cats = get_the_terms($post->ID, 'portfolio_category'); if($item_cats): foreach($item_cats as $item_cat) { $item_classes .= urldecode($item_cat->slug) . 
' '; } endif; if( $custom_colulmn_spacing ) { $item_classes .= ' col-spacing'; } $featured_image_size = avada_set_portfolio_image_size( $current_page_id ); ?> <div class="portfolio-item <?php echo $item_classes; ?>"> <?php echo avada_render_rich_snippets_for_pages(); ?> <?php echo avada_image_rollover( get_the_ID(), $featured_image_size, $permalink ); ?> </div> <?php endif; endwhile; ?> </div> <?php themefusion_pagination($gallery->max_num_pages, $range = 2); ?> <?php endif; ?> </div> <?php if( $sidebar_exists == true ): ?> <?php wp_reset_query(); ?> <div id="sidebar" class="sidebar" style="<?php echo $sidebar_css; ?>"> <?php if($sidebar_left == 1) { generated_dynamic_sidebar($sidebar_1); } if($sidebar_left == 2) { generated_dynamic_sidebar_2($sidebar_2); } ?> </div> <?php if( $double_sidebars == true ): ?> <div id="sidebar-2" class="sidebar" style="<?php echo $sidebar_2_css; ?>"> <?php if($sidebar_left == 1) { generated_dynamic_sidebar_2($sidebar_2); } if($sidebar_left == 2) { generated_dynamic_sidebar($sidebar_1); } ?> </div> <?php endif; ?> <?php endif; ?> <?php get_footer(); ?>
larrywq/iboardauto
wp-content/themes/Avada/portfolio-three-column.php
PHP
gpl-2.0
8,501
/* Mednafen - Multi-system Emulator * * Copyright notice for this file: * Copyright (C) 2002 Xodnizel * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "mapinc.h" static uint8 PRGBanks[2], IREMCon, CHRBanks[8], WRAM[8192]; static void DoCHR(void) { for(int x = 0; x < 8; x++) setchr1(x * 1024, CHRBanks[x]); } static void DoPRG(void) { if(IREMCon & 2) { setprg8(0xC000, PRGBanks[0]); setprg8(0x8000, 0xFE); } else { setprg8(0x8000, PRGBanks[0]); setprg8(0xC000, 0xFE); } setprg8(0xa000, PRGBanks[1]); } static void DoMirroring(void) { setmirror((IREMCon & 1) ? MI_H : MI_V); } static DECLFW(Mapper32_write) { switch(A>>12) { case 0x8: PRGBanks[0] = V; DoPRG(); break; case 0x9:IREMCon = V; DoPRG(); DoMirroring(); break; case 0xa:PRGBanks[1] = V;DoPRG(); break; } if((A&0xF000)==0xb000) { CHRBanks[A & 7] = V; DoCHR(); } } static void Power(CartInfo *info) { PRGBanks[0] = 0; PRGBanks[1] = 1; for(int x = 0; x < 8; x++) CHRBanks[x] = x; IREMCon = 0; setprg16(0xc000, 0x7F); DoPRG(); DoCHR(); DoMirroring(); setprg8r(0x10, 0x6000, 0); if(!info->battery) memset(WRAM, 0xFF, 8192); } static int StateAction(StateMem *sm, int load, int data_only) { SFORMAT StateRegs[] = { SFARRAY(PRGBanks, 2), SFARRAY(CHRBanks, 8), SFVAR(IREMCon), SFARRAY(WRAM, 8192), SFEND }; int ret = MDFNSS_StateAction(sm, load, data_only, StateRegs, "MAPR"); if(load) { DoPRG(); DoCHR(); DoMirroring(); } return(ret); } int Mapper32_Init(CartInfo *info) { info->Power = Power; info->StateAction = StateAction; SetupCartPRGMapping(0x10, WRAM, 8192, 1); SetWriteHandler(0x8000,0xffff,Mapper32_write); SetReadHandler(0x6000, 0xFFFF, CartBR); SetWriteHandler(0x6000, 0x7FFF, CartBW); if(info->battery) { memset(WRAM, 0xFF, 8192); info->SaveGame[0] = WRAM; info->SaveGameLen[0] = 8192; } return(1); }
bazzinotti/mednafen
src/nes/boards/32.cpp
C++
gpl-2.0
2,600
<?php /* * Copyright (c) 2009, Tracmor, LLC * * This file is part of Tracmor. * * Tracmor is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Tracmor is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Tracmor; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* $Id: fedex-tags.php,v 1.2 2006/08/15 22:52:14 hunter Exp $ This module contains all the required codes needed by the FedEx Ship Manager API. All hash key lookups need to be done with lower case keys. I wanted to follow the FedEx Tagged Transaction Guide as close as possible so some of the names are pretty long. FedEx does occasionally change these tags so please check that you always have the most current version of this module. Changes should not break existing code. All changes are documented in a Changes file. Copyright (c) 2004 Vermonster LLC All rights reserved. */ class FedExTags { /** * load FedEx services => Transaction Types, Applicable Carrier * * @var FE_TT * @access public */ protected $FE_TT = array ( 'ground_close' => array ('007','FDXG'), 'express_cancel' => array ('023','FDXE'), 'express_ship' => array ('021','FDXE'), 'express_global_rate' => array ('022','FDXE'), 'services_avail' => array ('019','FDXE'), 'services_rate' => array ('025',''), 'fedex_locater' => array ('410',''), 'express_email' => array ('037','FDXE'), 'express_cancel_email' => array ('033','FDXE'), 'express_tag' => array ('046','FDXE'), 'express_cancel_tag' => array ('047','FDXE'), 'express_tag_avail' => array ('048','FDXE'), 'ground_ship' => array ('021','FDXG'), 'ground_cancel' => array ('023','FDXG'), 'subscribe' => array ('211',''), 'ground_global_rate' => array ('022','FDXG'), 'ground_tag' => array ('044','FDXE'), 'ground_cancel_tag' => array ('045','FDXE'), 'ground_email' => array ('037','FDXE'), 'ground_cancel_email' => array ('033','FDXE'), 'sig_proof_delivery' => array ('405',''), 'track' => array ('403',''), 'address_validate' => array ('415',''), 'get_location' => array ('018','FDXE'), 'send_version' => array ('070','') ); /** * A List of FedEx Service Types * * @var FE_ST * @access public */ protected $FE_ST = array ( '01' => 'Express Priority Overnight', '03' => 'Express Economy Two Day', '05' => 'Express Standard Overnight', '06' => 'Express First Overnight', '20' => 'Express Saver', '70' => 'Freight Overnight', '80' => 'Freight Two Day', '83' => 'Freight Express Saver', '86' => 'Freight International Priority', '90' => 'Ground Home Delivery', '92' => 'Ground Business Delivery' ); /** * A List of FedEx Express Pay Types * * @var FE_PT * @access public */ static protected $FE_PT = array ( '1' => 'Sender', '2' => 'Recipient', '3' => 'Third Party', ); /** * A List of FedEx Ground Pay Types * * @var FG_PT * @access public */ static protected $FG_PT = array ( '1' => 'Sender', '2' => 'Collect', '3' => 'Third Party', '5' => 'Recipient' ); /** * A List of FedEx Label Printer Types * * @var FG_LPT * @access public */ static protected $FE_LPT = array ( '1' => 'Laser Printer', '2' => 'EPL2 Thermal Printer', '5' => 'ZPL Thermal Printer' ); /** * A 
List of FedEx Label Format Types * * @var FG_LFT * @access public */ static protected $FE_LFT = array ( '3' => '354 - 4x6 with Doc Tab at the bottom', '4' => '435 - 4x6 without Doc Tab', //'5' => 'Plain paper', '6' => '354 - 4x6 with Doc Tab at the top' ); /** * A list of all the FedEx tags. An attempt to make items more human readable * * @var FE_RE * @access public */ protected $FE_RE = array( 0 => 'transaction_code', 1 => 'customer_transaction_identifier', 2 => 'transaction_error_code', 3 => 'transaction_error_message', 4 => 'sender_company', 5 => 'sender_address_line_1', 6 => 'sender_address_line_2', 7 => 'sender_city', 8 => 'sender_state', 9 => 'sender_postal_code', 10 => 'sender_fedex_express_account_number', 11 => 'recipient_company', 12 => 'recipient_contact_name', 13 => 'recipient_address_line_1', 14 => 'recipient_address_line_2', 15 => 'recipient_city', 16 => 'recipient_state', 17 => 'recipient_postal_code', 18 => 'recipient_phone_number', 20 => 'payer_account_number', 23 => 'pay_type', 24 => 'ship_date', 25 => 'reference_information', 27 => 'cod_flag', 28 => 'cod_return_tracking_number', 29 => 'tracking_number', 30 => 'ursa_code', 32 => 'sender_contact_name', 33 => 'service_commitment', 38 => 'sender_department', 40 => 'alcohol_type', 41 => 'alcohol_packaging', 44 => 'hal_address', 46 => 'hal_city', 47 => 'hal_state', 48 => 'hal_postal_code', 49 => 'hal_phone_number', 50 => 'recipient_country', 51 => 'signature_release_ok_flag', 52 => 'alcohol_packages', 57 => 'dim_height', 58 => 'dim_width', 59 => 'dim_length', 65 => 'astra_barcode', 66 => 'broker_name', 67 => 'broker_phone_number', 68 => 'customs_declared_value_currency_type', 70 => 'duties_pay_type', 71 => 'duties_payer_account', 72 => 'terms_of_sale', 73 => 'parties_to_transaction', 74 => 'country_of_ultimate_destination', 75 => 'weight_units', 76 => 'commodity_number_of_pieces', 79 => 'description_of_contents', 80 => 'country_of_manufacturer', 81 => 'harmonized_code', 82 => 'unit_quantity', 83 => 'export_license_number', 84 => 'export_license_expiration_date', 99 => 'end_of_record', 112 => 'total_weight', 113 => 'commercial_invoice_flag', 116 => 'package_total', 117 => 'sender_country_code', 118 => 'recipient_irs', 120 => 'ci_marks_and_numbers', 169 => 'importer_country', 170 => 'importer_name', 171 => 'importer_company', 172 => 'importer_address_line_1', 173 => 'importer_address_line_2', 174 => 'importer_city', 175 => 'importer_state', 176 => 'importer_postal_code', 177 => 'importer_account_number', 178 => 'importer_number_phone', 180 => 'importer_id', 183 => 'sender_phone_number', 186 => 'cod_add_freight_charges_flag', 188 => 'label_buffer_data_stream', 190 => 'document_pib_shipment_flag', 194 => 'delivery_day', 195 => 'destination', 198 => 'destination_location_id', 404 => 'commodity_license_exception', 409 => 'delivery_date', 411 => 'cod_return_label_buffer_data_stream', 413 => 'nafta_flag', 414 => 'commodity_unit_of_measure', 418 => 'ci_comments', 431 => 'dim_weight_used_flag', 439 => 'cod_return_contact_name', 440 => 'residential_delivery_flag', 496 => 'freight_service_commitment', 498 => 'meter_number', 526 => 'form_id', 527 => 'cod_return_form_id', 528 => 'commodity_eccn', 535 => 'cod_return', 536 => 'cod_return_service_commitment', 543 => 'cod_return_collect_plus_freight_amount', 557 => 'message_type_code', 558 => 'message_code', 559 => 'message_text', 600 => 'forwarding_agent_routed_export_transaction_indicator', 602 => 'exporter_ein_ssn_indicator', 603 => 'int-con_company_name', 604 => 'int-con_contact_name', 605 
=> 'int-con_address_line_1', 606 => 'int-con_address_line_2', 607 => 'int-con_city', 608 => 'int-con_state', 609 => 'int-con_zip', 610 => 'int-con_phone_number', 611 => 'int-con_country', 1005 => 'manifest_invoic_e_file_name', 1006 => 'domain_name', 1007 => 'close_manifest_date', 1008 => 'package_ready_time', 1009 => 'time_companyclose', 1010 => 'courier_remarks', 1011 => 'dispatch_number', 1012 => 'dispatch_location', 1013 => 'dispatch_message', 1032 => 'duties_payer_country_code', 1089 => 'rate_scale', 1090 => 'rate_currency_type', 1092 => 'rate_zone', 1096 => 'origin_location_id', 1099 => 'volume_units', 1101 => 'payer_credit_card_number', 1102 => 'payer_credit_card_type', 1103 => 'sender_fax', 1104 => 'payer_credit_card_expiration_date', 1115 => 'ship_time', 1116 => 'dim_units', 1117 => 'package_sequence', 1118 => 'release_authorization_number', 1119 => 'future_day_shipment', 1120 => 'inside_pickup_flag', 1121 => 'inside_delivery_flag', 1123 => 'master_tracking_number', 1124 => 'master_form_id', 1137 => 'ursa_uned_prefix', 1139 => 'sender_irs_ein_number', 1145 => 'recipient_department', 1159 => 'scan_description', 1160 => 'scan_location_city', 1161 => 'scan_location_state', 1162 => 'scan_date', 1163 => 'scan_time', 1164 => 'scan_location_country', 1167 => 'disp_exception_cd', 1168 => 'status_exception_cd', 1174 => 'bso_flag', 1178 => 'ursa_suffix_code', 1179 => 'broker_fdx_account_number', 1180 => 'broker_company', 1181 => 'broker_line_1_address', 1182 => 'broker_line_2_address', 1183 => 'broker_city', 1184 => 'broker_state', 1185 => 'broker_postal_code', 1186 => 'broker_country_code', 1187 => 'broker_id_number', 1193 => 'ship_delete_message', 1195 => 'payer_country_code', 1200 => 'hold_at_location_hal_flag', 1201 => 'sender_email_address', 1202 => 'recipient_email_address', 1203 => 'optional_ship_alert_message', 1204 => 'ship_alert_email_address', 1206 => 'ship_alert_notification_flag', 1208 => 'no_indirect_delivery_flag_signature_required', 1210 => 'purpose_of_shipment', 1211 => 'pod_address', 1213 => 'proactive_notification_flag', 1237 => 'cod_return_phone', 1238 => 'cod_return_company', 1239 => 'cod_return_department', 1240 => 'cod_return_address_1', 1241 => 'cod_return_address_2', 1242 => 'cod_return_city', 1243 => 'cod_return_state', 1244 => 'cod_return_postal_code', 1253 => 'packaging_list_enclosed_flag', 1265 => 'hold_at_location_contact_name', 1266 => 'saturday_delivery_flag', 1267 => 'saturday_pickup_flag', 1268 => 'dry_ice_flag', 1271 => 'shippers_load_and_count_slac', 1272 => 'booking_number', 1273 => 'packaging_type', 1274 => 'service_type', 1286 => 'exporter_ppi-_contact_name', 1287 => 'exporter_ppi-company_name', 1288 => 'exporter_ppi-address_line_1', 1289 => 'exporter_ppi-address_line_2', 1290 => 'exporter_ppi-city', 1291 => 'exporter_ppi-state', 1292 => 'exporter_ppi-zip', 1293 => 'exporter_ppi-country', 1294 => 'exporter_ppi-phone_number', 1295 => 'exporter_ppi-ein_ssn', 1297 => 'customer_invoice_number', 1300 => 'purchase_order_number', 1331 => 'dangerous', 1332 => 'alcohol_flag', 1333 => 'drop_off_type', 1337 => 'package_content_information', 1339 => 'estimated_delivery_date', 1340 => 'estimated_delivery_time', 1341 => 'sender_pager_number', 1342 => 'recipient_pager_number', 1343 => 'broker_email_address', 1344 => 'broker_fax_number', 1346 => 'emerge_shipment_identifier', 1347 => 'emerge_merchant_identifier', 1349 => 'aes_filing_status', 1350 => 'xtn_suffix_number', 1352 => 'sender_ein_ssn_identificator', 1358 => 'aes_ftsr_exemption_number', 1359 => 
'sed_legend_number', 1364 => 'dispatch_message_code', 1365 => 'dispatch_date', 1366 => 'close_manifest_time', 1367 => 'close_manifest_data_buffer', 1368 => 'label_type', 1369 => 'label_printer_type', 1370 => 'label_media_type', 1371 => 'manifest_only_request_flag', 1372 => 'manifest_total', 1376 => 'rate_weight_unit_of_measure', 1377 => 'dim_weight_unit_of_measure', 1391 => 'client_revision_indicator', 1392 => 'inbound_visibility_block_shipment_data_indicator', 1394 => 'shipment_content_records_total', 1395 => 'part_number', 1396 => 'sku_item_upc', 1397 => 'receive_quantity', 1398 => 'description', 1399 => 'aes_entry_number', 1400 => 'total_shipment_weight', 1401 => 'total_package_weight', 1402 => 'billed_weight', 1403 => 'dim_weight', 1404 => 'total_volume', 1405 => 'alcohol_volume', 1406 => 'dry_ice_weight', 1407 => 'commodity_weight', 1408 => 'commodity_unit_value', 1409 => 'cod_amount', 1410 => 'commodity_customs_value', 1411 => 'total_customs_value', 1412 => 'freight_charge', 1413 => 'insurance_charge', 1414 => 'taxes_miscellaneous_charge', 1415 => 'declared_value_carriage_value', 1416 => 'base_rate_amount', 1417 => 'total_surcharge_amount', 1418 => 'total_discount_amount', 1419 => 'net_charge_amount', 1420 => 'total_rebate_amount', 1429 => 'list_variable_handling_charge_amount', 1431 => 'list_total_customer_charge', 1432 => 'cod_customer_amount', 1450 => 'more_data_indicator', 1451 => 'sequence_number', 1452 => 'last_tracking_number', 1453 => 'track_reference_type', 1454 => 'track_reference', 1456 => 'spod_type_request', 1458 => 'spod_fax_recipient_name', 1459 => 'spod_fax_recipient_number', 1460 => 'spod_fax_sender_name', 1461 => 'spod_fax_sender_phone_number', 1462 => 'language_indicator', 1463 => 'spod_fax_recipient_company_name_mail', 1464 => 'spod_fax_recipient_address_line_1_mail', 1465 => 'spod_fax_recipient_address_line_2_mail', 1466 => 'spod_fax_recipient_city_mail', 1467 => 'spod_fax_recipient_state_mail', 1468 => 'spod_fax_recipient_zip_postal_code_mail', 1469 => 'spod_fax_recipient_country_mail', 1470 => 'spod_fax_confirmation', 1471 => 'spod_letter', 1472 => 'spod_ground_recipient_name', 1473 => 'spod_ground_recipient_company_name', 1474 => 'spod_ground_recipient_address_line_1', 1475 => 'spod_ground_recipient_address_line_2', 1476 => 'spod_ground_recipient_city', 1477 => 'spod_ground_recipient_state_province', 1478 => 'spod_ground_recipient_zip_postal_code', 1479 => 'spod_ground_recipient_country', 1480 => 'more_information', 1507 => 'list_total_surcharge_amount', 1525 => 'effective_net_discount', 1528 => 'list_net_charge_amount', 1529 => 'rate_indicator_1_numeric_valid_values', 1530 => 'list_base_rate_amount', 1531 => 'list_total_discount_amount', 1532 => 'list_total_rebate_amount', 1534 => 'detail_scan_indicator', 1535 => 'paging_token', 1536 => 'number_of_relationships', 1537 => 'search_relationship_string', 1538 => 'search_relationship_type_code', 1551 => 'delivery_notification_flag', 1552 => 'language_code', 1553 => 'shipper_delivery_notification_flag', 1554 => 'shipper_ship_alert_flag', 1555 => 'shipper_language_code', 1556 => 'recipient_delivery_notification_flag', 1557 => 'recipient_ship_alert_flag', 1558 => 'recipient_language_code', 1559 => 'broker_delivery_notification_flag', 1560 => 'broker_ship_alert_flag', 1561 => 'broker_language_code', 1562 => 'fedex_staffed_location_flag', 1563 => 'fedex_self_service_location_indicator', 1564 => 'fasc', 1565 => 'latest_express_dropoff_flag', 1566 => 'express_dropoff_after_time', 1567 => 
'fedex_location_intersection_street_address', 1568 => 'distance', 1569 => 'hours_of_operation', 1570 => 'hours_of_operation_sat', 1571 => 'last_express_dropoff', 1572 => 'last_express_dropoff_sat', 1573 => 'express_service_flag', 1574 => 'location_count', 1575 => 'fedex_location_business_name', 1576 => 'fedex_location_business_type', 1577 => 'fedex_location_city', 1578 => 'fedex_location_state', 1579 => 'fedex_location_postal_code', 1580 => 'dangerous_goods_flag', 1581 => 'saturday_service_flag', 1582 => 'begin_date', 1583 => 'end_date', 1584 => 'tracking_groups', 1590 => 'csp_solution_type', 1591 => 'csp_solution_indicator', 1600 => 'urbanization_code', 1601 => 'maximum_match_count', 1602 => 'score', 1603 => 'single_house_number_match_flag', 1604 => 'actual_match_count', 1604 => 'rank', 1606 => 'variable_handling_charge_level', 1607 => 'doc_tab_header_1', 1608 => 'doc_tab_header_2', 1609 => 'doc_tab_header_3', 1610 => 'doc_tab_header_4', 1611 => 'doc_tab_header_5', 1612 => 'doc_tab_header_6', 1613 => 'doc_tab_header_7', 1614 => 'doc_tab_header_8', 1615 => 'doc_tab_header_9', 1616 => 'doc_tab_header_10', 1617 => 'doc_tab_header_11', 1618 => 'doc_tab_header_12', 1624 => 'doc_tab_field_1', 1625 => 'doc_tab_field_2', 1626 => 'doc_tab_field_3', 1627 => 'doc_tab_field_4', 1628 => 'doc_tab_field_5', 1629 => 'doc_tab_field_6', 1630 => 'doc_tab_field_7', 1631 => 'doc_tab_field_8', 1632 => 'doc_tab_field_9', 1633 => 'doc_tab_field_10', 1634 => 'doc_tab_field_11', 1635 => 'doc_tab_field_12', 1636 => 'delivery_area_surcharge', 1637 => 'list_delivery_area_surcharge', 1638 => 'fuel_surcharge', 1639 => 'list_fuel_surcharge', 1640 => 'fice_surcharge', 1642 => 'value_added_tax', 1644 => 'offshore_surcharge', 1645 => 'list_offshore_surcharge', 1649 => 'other_surcharges', 1650 => 'list_other_surcharges', 1704 => 'service_type_description', 1705 => 'deliver_to', 1706 => 'signed_for', 1707 => 'delivery_time', 1711 => 'status_exception', 1713 => 'tracking_cod_flag', 1715 => 'number_of_track_activities', 1716 => 'delivery_reattempt_date', 1717 => 'delivery_reattempt_time', 1718 => 'package_type_description', 1720 => 'delivery_date_numeric', 1721 => 'tracking_activity_line_1', 1722 => 'tracking_activity_line_2', 1723 => 'tracking_activity_line_3', 1724 => 'tracking_activity_line_4', 1725 => 'tracking_activity_line_5', 1726 => 'tracking_activity_line_6', 1727 => 'tracking_activity_line_7', 1728 => 'tracking_activity_line_8', 1729 => 'tracking_activity_line_9', 1730 => 'tracking_activity_line_10', 1731 => 'tracking_activity_line_11', 1732 => 'tracking_activity_line_12', 1733 => 'tracking_activity_line_13', 1734 => 'tracking_activity_line_14', 1735 => 'tracking_activity_line_15', 1960 => 'exception_notification_flag', 1961 => 'shipper_exception_notification_flag', 1962 => 'recipient_exception_notification_flag', 2254 => 'recipient_fax_number', 2382 => 'return_shipment_indicator', 2399 => 'signature_option', 2973 => 'doc_tab_type_indicator', 3000 => 'cod_type_collection', 3001 => 'fedex_ground_purchase_order', 3002 => 'fedex_ground_invoice', 3003 => 'fedex_ground_customer_reference', 3008 => 'autopod_flag', 3009 => 'aod_flag', 3010 => 'oversize_flag', 3011 => 'other_oversize_flag', 3018 => 'nonstandard_container_flag', 3019 => 'fedex_signature_home_delivery_flag', 3020 => 'fedex_home_delivery_type', 3023 => 'fedex_home_delivery_date', 3024 => 'fedex_home_delivery_phone_number', 3025 => 'carrier_code', 3028 => 'ground_account_number', 3033 => 'oversize_package_total', 3034 => 'prp_control_number', 3035 => 
'ship_alert_fax_number', 3044 => 'package_location_for_pickup', 3045 => 'cod_return_reference_indicator', 3046 => 'additional_handling_detected', 3053 => 'multiweight_net_charge', 3090 => 'last_ground_dropoff', 3091 => 'last_ground_dropoff_sat', 3092 => 'ground_service_flag', 3124 => 'oversize_classification', 4003 => 'subscriber_contact_name', 4004 => 'subscriber_password_reminder', 4007 => 'subscriber_company_name', 4008 => 'subscriber_address_line_1', 4009 => 'subscriber_address_line_2', 4011 => 'subscriber_city_name', 4012 => 'subscriber_state_code', 4013 => 'subscriber_postal_code', 4014 => 'subscriber_country_code', 4015 => 'subscriber_phone_number', 4017 => 'subscriber_pager_number', 4018 => 'subscriber_email_address', 4021 => 'subscription_service_name', 4022 => 'subscriber_fax_number', 9744 => 'vendor_product_name', 9745 => 'vendor_product_platform', 9746 => 'vendor_product_version', ); protected $FE_SE = array(); /** Constructor build the FE_RE in reverse order */ function FedExTags () { foreach ($this->FE_RE as $key => $value) { $this->FE_SE[$value] = $key; } } /** * Look up a field name and convert it to a FedEx tag * * @param string $in field to look for * @return string * @access public */ function fieldNameToTag($in) { list ($num, $name, $mult) = $this->splitField($in); return "$num$mult"; } /** * Look up a FedEx tag and convert it to a field name * * @param string $in field to look for * @return string * @access public */ function fieldTagToName($in) { list($num, $name, $mult) = $this->splitField($in); return "$name$mult"; } /** * Split field to find tag and name * * @param string $tag * @return string * @access public */ function splitField($tag) { if (preg_match('/^(.*?)(-[1-9]\d*)?\z/', $tag, $hits)) { $base = $hits[1]; if (isset($hits[2])) $multi = $hits[2]; } else { die ("Invalid field tag or name `$tag'"); } if (empty($multi)) $multi = ''; //echo "[$base] [$multi]"; $num = preg_match('/^\d+\z/', $base) ? 
$base : $this->FE_SE[strtolower($base)]; $name = $this->FE_RE[$num]; if (!$name) die("Invalid FE_RE `$num'"); return array($num, $name, $multi); } /** * get service type * * @param string $tag * @return string * @access public */ function service_type($in) { foreach ($this->FE_ST as $key => $value) { if ($key==$in) { return $value; } } } /** * get array of Express pay types * * @return string * @access public */ static public function get_express_pay_types() { return self::$FE_PT; } /** * get array of Ground pay types * * @return string * @access public */ static public function get_ground_pay_types() { return self::$FG_PT; } /** * get Ground pay type * * @param string $tag * @return string * @access public */ static public function ground_pay_type($in) { foreach (self::$FG_PT as $key => $value) { if ($key==$in) { return $value; } } } /** * get Express pay type * * @param string $tag * @return string * @access public */ static public function express_pay_type($in) { foreach (self::$FE_PT as $key => $value) { if ($key==$in) { return $value; } } } /** * get array of FedEx Label Printer Types * * @return string * @access public */ static public function get_label_printer_types() { return self::$FE_LPT; } /** * get array of FedEx Label Format Types * * @return string * @access public */ static public function get_label_format_types() { return self::$FE_LFT; } /** * get FedEx Label Printer Type * * @param string $tag * @return string * @access public */ static public function label_printer_type($in) { foreach (self::$FE_LPT as $key => $value) { if ($key==$in) { return $value; } } } /** * get FedEx Label Format Type * * @param string $tag * @return string * @access public */ static public function label_format_type($in) { foreach (self::$FE_LFT as $key => $value) { if ($key==$in) { return $value; } } } } ?>
hongkeat/tracmor
shipping/fedex-tags.class.php
PHP
gpl-2.0
27,203
<?php /** * Demo (fake data) reviews content loader. * * PHP version 7 * * Copyright (C) Villanova University 2018. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * @category VuFind2 * @package Content * @author Demian Katz <demian.katz@villanova.edu> * @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License * @link http://vufind.org/wiki/vufind2:developer_manual Wiki */ namespace VuFind\Content\Reviews; /** * Demo (fake data) reviews content loader. * * @category VuFind2 * @package Content * @author Demian Katz <demian.katz@villanova.edu> * @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License * @link http://vufind.org/wiki/vufind2:developer_manual Wiki */ class Demo extends \VuFind\Content\AbstractBase { /** * This method is responsible for generating fake author note data for testing * purposes. * * @param string $key API key * @param \VuFindCode\ISBN $isbnObj ISBN object * * @throws \Exception * @return array Returns array with table of contents data. */ public function loadByIsbn($key, \VuFindCode\ISBN $isbnObj) { // Initialize return value: return [ ['Content' => 'Demo review key: ' . $key], ['Content' => 'Demo review ISBN: ' . $isbnObj->get13()], ]; } }
j4lib/vufind
module/VuFind/src/VuFind/Content/Reviews/Demo.php
PHP
gpl-2.0
1,998
# # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2000-2006 Donald N. Allingham # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # """ Parses the lds.xml file to build the temple/code maps """ from ..const import DATA_DIR import os import logging from xml.parsers.expat import ParserCreate from ..const import GRAMPS_LOCALE as glocale _ = glocale.translation.gettext LOG = logging.getLogger(".") class LdsTemples: """ Parsing class for the LDS temples file """ def __init__(self): """ Parses the lds.xml file to load the LDS temple code to name maps """ self.__temple_codes = {} self.__temple_to_abrev = {} self.__current_temple = "" self.__tlist = [] lds_filename = os.path.expanduser(os.path.join(DATA_DIR, "lds.xml")) try: parser = ParserCreate() parser.StartElementHandler = self.__start_element parser.EndElementHandler = self.__end_element parser.CharacterDataHandler = self.__characters with open(lds_filename, 'rb') as xml_file: parser.ParseFile(xml_file) except Exception as msg: LOG.error(str(msg)) def is_valid_code(self, code): """ returns True if the code is a valid LDS temple code according to the lds.xml file """ return self.__temple_to_abrev.get(code) is not None def is_valid_name(self, name): """ returns True if the name matches a temple name (not code) in the lds.xml file """ return self.__temple_codes.get(name) is not None def code(self, name): """ returns the LDS Temple code that corresponds to the name """ return self.__temple_codes.get(name, _("Unknown")) def name(self, code): """ returns the name associated with the LDS Temple code """ return self.__temple_to_abrev.get(code, _("Unknown")) def name_code_data(self): """ returns a list of temple codes, temple name tuples """ return sorted([(code, name) for name, code in self.__temple_codes.items()], key=lambda v: v[1]) def __start_element(self, tag, attrs): """ XML parsing function that is called when an XML element is first found """ self.__tlist = [] if tag == "temple": self.__current_temple = attrs.get('name') def __end_element(self, tag): """ XML parsing function that is called when an XML element is closed """ text = ''.join(self.__tlist) if tag == "code": if self.__temple_codes.get(self.__current_temple) is None: self.__temple_codes[self.__current_temple] = text self.__temple_to_abrev[text] = self.__current_temple def __characters(self, data): """ XML parsing function that collects text data """ self.__tlist.append(data) TEMPLES = LdsTemples()
SNoiraud/gramps
gramps/gen/utils/lds.py
Python
gpl-2.0
3,753
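The lds.py module above is a compact instance of the expat push-parser pattern: a start-element handler notes the current <temple> name, a character-data handler accumulates text, and an end-element handler commits the <code>-to-name mapping once the tag closes. As a hedged sketch only, the same callback pattern in C with libexpat might look as follows; the <temple>/<code> element names mirror the lds.xml layout implied above, while the sample buffer, the parse_state struct and the handler names are invented for illustration.

#include <stdio.h>
#include <string.h>
#include <expat.h>

struct parse_state {
    char current_temple[128];   /* value of the name= attribute on <temple> */
    char text[128];             /* character data collected inside the current element */
    size_t text_len;
};

static void XMLCALL on_start(void *ud, const XML_Char *tag, const XML_Char **atts)
{
    struct parse_state *st = ud;
    st->text_len = 0;                       /* reset the text buffer, as __start_element does */
    if (strcmp(tag, "temple") == 0) {
        st->current_temple[0] = '\0';
        for (int i = 0; atts[i]; i += 2)    /* attributes arrive as name/value pairs */
            if (strcmp(atts[i], "name") == 0)
                snprintf(st->current_temple, sizeof st->current_temple, "%s", atts[i + 1]);
    }
}

static void XMLCALL on_chardata(void *ud, const XML_Char *s, int len)
{
    struct parse_state *st = ud;
    size_t room = sizeof st->text - 1 - st->text_len;
    size_t n = (size_t)len < room ? (size_t)len : room;
    memcpy(st->text + st->text_len, s, n);  /* accumulate, like __characters */
    st->text_len += n;
}

static void XMLCALL on_end(void *ud, const XML_Char *tag)
{
    struct parse_state *st = ud;
    st->text[st->text_len] = '\0';
    if (strcmp(tag, "code") == 0)           /* map code -> temple, like __end_element */
        printf("code %s -> temple %s\n", st->text, st->current_temple);
}

int main(void)
{
    /* made-up input standing in for lds.xml */
    const char *xml =
        "<temples><temple name=\"Example Temple\"><code>EXMPL</code></temple></temples>";
    struct parse_state st = {0};
    XML_Parser p = XML_ParserCreate(NULL);
    XML_SetUserData(p, &st);
    XML_SetElementHandler(p, on_start, on_end);
    XML_SetCharacterDataHandler(p, on_chardata);
    if (XML_Parse(p, xml, (int)strlen(xml), 1) == XML_STATUS_ERROR)
        fprintf(stderr, "parse error: %s\n", XML_ErrorString(XML_GetErrorCode(p)));
    XML_ParserFree(p);
    return 0;
}

The design point carried over from lds.py is the same in either language: the text buffer is reset on every opening tag and only interpreted once the matching closing tag is seen.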
/* GStreamer * Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu> * Copyright (C) <2004> Thomas Vander Stichele <thomas at apestaart dot org> * Copyright (C) 2006 Wim Taymans <wim at fluendo dot com> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, * Boston, MA 02110-1301, USA. */ /** * SECTION:element-multifdsink * @see_also: tcpserversink * * This plugin writes incoming data to a set of file descriptors. The * file descriptors can be added to multifdsink by emitting the #GstMultiFdSink::add signal. * For each descriptor added, the #GstMultiFdSink::client-added signal will be called. * * The multifdsink element needs to be set into READY, PAUSED or PLAYING state * before operations such as adding clients are possible. * * A client can also be added with the #GstMultiFdSink::add-full signal * that allows for more control over what and how much data a client * initially receives. * * Clients can be removed from multifdsink by emitting the #GstMultiFdSink::remove signal. For * each descriptor removed, the #GstMultiFdSink::client-removed signal will be called. The * #GstMultiFdSink::client-removed signal can also be fired when multifdsink decides that a * client is not active anymore or, depending on the value of the * #GstMultiFdSink:recover-policy property, if the client is reading too slowly. * In all cases, multifdsink will never close a file descriptor itself. * The user of multifdsink is responsible for closing all file descriptors. * This can for example be done in response to the #GstMultiFdSink::client-fd-removed signal. * Note that multifdsink still has a reference to the file descriptor when the * #GstMultiFdSink::client-removed signal is emitted, so that "get-stats" can be performed on * the descriptor; it is therefore not safe to close the file descriptor in * the #GstMultiFdSink::client-removed signal handler, and you should use the * #GstMultiFdSink::client-fd-removed signal to safely close the fd. * * Multifdsink internally keeps a queue of the incoming buffers and uses a * separate thread to send the buffers to the clients. This ensures that no * client write can block the pipeline and that clients can read with different * speeds. * * When adding a client to multifdsink, the #GstMultiFdSink:sync-method property will define * which buffer in the queued buffers will be sent first to the client. Clients * can be sent the most recent buffer (which might not be decodable by the * client if it is not a keyframe), the next keyframe received in * multifdsink (which can take some time depending on the keyframe rate), or the * last received keyframe (which will cause a simple burst-on-connect). * Multifdsink will always keep at least one keyframe in its internal buffers * when the sync-mode is set to latest-keyframe. 
* * There are additional values for the #GstMultiFdSink:sync-method * property to allow finer control over burst-on-connect behaviour. By selecting * the 'burst' method a minimum burst size can be chosen, 'burst-keyframe' * additionally requires that the burst begin with a keyframe, and * 'burst-with-keyframe' attempts to burst beginning with a keyframe, but will * prefer a minimum burst size even if it requires not starting with a keyframe. * * Multifdsink can be instructed to keep at least a minimum amount of data * expressed in time or byte units in its internal queues with the * #GstMultiFdSink:time-min and #GstMultiFdSink:bytes-min properties respectively. * These properties are useful if the application adds clients with the * #GstMultiFdSink::add-full signal to make sure that a burst connect can * actually be honored. * * When streaming data, clients are allowed to read at a different rate than * the rate at which multifdsink receives data. If the client is reading too * fast, no data will be send to the client until multifdsink receives more * data. If the client, however, reads too slowly, data for that client will be * queued up in multifdsink. Two properties control the amount of data * (buffers) that is queued in multifdsink: #GstMultiFdSink:buffers-max and * #GstMultiFdSink:buffers-soft-max. A client that falls behind by * #GstMultiFdSink:buffers-max is removed from multifdsink forcibly. * * A client with a lag of at least #GstMultiFdSink:buffers-soft-max enters the recovery * procedure which is controlled with the #GstMultiFdSink:recover-policy property. * A recover policy of NONE will do nothing, RESYNC_LATEST will send the most recently * received buffer as the next buffer for the client, RESYNC_SOFT_LIMIT * positions the client to the soft limit in the buffer queue and * RESYNC_KEYFRAME positions the client at the most recent keyframe in the * buffer queue. * * multifdsink will by default synchronize on the clock before serving the * buffers to the clients. This behaviour can be disabled by setting the sync * property to FALSE. Multifdsink will by default not do QoS and will never * drop late buffers. 
*/ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <gst/gst-i18n-plugin.h> #include <sys/ioctl.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include <string.h> #include <fcntl.h> #include <sys/types.h> #include <sys/socket.h> #include <sys/stat.h> #include <netinet/in.h> #ifdef HAVE_FIONREAD_IN_SYS_FILIO #include <sys/filio.h> #endif #include "gstmultifdsink.h" #define NOT_IMPLEMENTED 0 GST_DEBUG_CATEGORY_STATIC (multifdsink_debug); #define GST_CAT_DEFAULT (multifdsink_debug) /* MultiFdSink signals and args */ enum { /* methods */ SIGNAL_ADD, SIGNAL_ADD_BURST, SIGNAL_REMOVE, SIGNAL_REMOVE_FLUSH, SIGNAL_GET_STATS, /* signals */ SIGNAL_CLIENT_ADDED, SIGNAL_CLIENT_REMOVED, SIGNAL_CLIENT_FD_REMOVED, LAST_SIGNAL }; /* this is really arbitrarily chosen */ #define DEFAULT_HANDLE_READ TRUE enum { PROP_0, PROP_HANDLE_READ, PROP_LAST }; static void gst_multi_fd_sink_stop_pre (GstMultiHandleSink * mhsink); static void gst_multi_fd_sink_stop_post (GstMultiHandleSink * mhsink); static gboolean gst_multi_fd_sink_start_pre (GstMultiHandleSink * mhsink); static gpointer gst_multi_fd_sink_thread (GstMultiHandleSink * mhsink); static void gst_multi_fd_sink_add (GstMultiFdSink * sink, int fd); static void gst_multi_fd_sink_add_full (GstMultiFdSink * sink, int fd, GstSyncMethod sync, GstFormat min_format, guint64 min_value, GstFormat max_format, guint64 max_value); static void gst_multi_fd_sink_remove (GstMultiFdSink * sink, int fd); static void gst_multi_fd_sink_remove_flush (GstMultiFdSink * sink, int fd); static GstStructure *gst_multi_fd_sink_get_stats (GstMultiFdSink * sink, int fd); static void gst_multi_fd_sink_emit_client_added (GstMultiHandleSink * mhsink, GstMultiSinkHandle handle); static void gst_multi_fd_sink_emit_client_removed (GstMultiHandleSink * mhsink, GstMultiSinkHandle handle, GstClientStatus status); static GstMultiHandleClient *gst_multi_fd_sink_new_client (GstMultiHandleSink * mhsink, GstMultiSinkHandle handle, GstSyncMethod sync_method); static void gst_multi_fd_sink_client_free (GstMultiHandleSink * m, GstMultiHandleClient * client); static int gst_multi_fd_sink_client_get_fd (GstMultiHandleClient * client); static void gst_multi_fd_sink_handle_debug (GstMultiSinkHandle handle, gchar debug[30]); static gpointer gst_multi_fd_sink_handle_hash_key (GstMultiSinkHandle handle); static void gst_multi_fd_sink_hash_adding (GstMultiHandleSink * mhsink, GstMultiHandleClient * mhclient); static void gst_multi_fd_sink_hash_removing (GstMultiHandleSink * mhsink, GstMultiHandleClient * mhclient); static void gst_multi_fd_sink_hash_changed (GstMultiHandleSink * mhsink); static void gst_multi_fd_sink_set_property (GObject * object, guint prop_id, const GValue * value, GParamSpec * pspec); static void gst_multi_fd_sink_get_property (GObject * object, guint prop_id, GValue * value, GParamSpec * pspec); #define gst_multi_fd_sink_parent_class parent_class G_DEFINE_TYPE (GstMultiFdSink, gst_multi_fd_sink, GST_TYPE_MULTI_HANDLE_SINK); static guint gst_multi_fd_sink_signals[LAST_SIGNAL] = { 0 }; static void gst_multi_fd_sink_class_init (GstMultiFdSinkClass * klass) { GObjectClass *gobject_class; GstElementClass *gstelement_class; GstMultiHandleSinkClass *gstmultihandlesink_class; gobject_class = (GObjectClass *) klass; gstelement_class = (GstElementClass *) klass; gstmultihandlesink_class = (GstMultiHandleSinkClass *) klass; gobject_class->set_property = gst_multi_fd_sink_set_property; gobject_class->get_property = gst_multi_fd_sink_get_property; /** * GstMultiFdSink::handle-read * * Handle 
read requests from clients and discard the data. */ g_object_class_install_property (gobject_class, PROP_HANDLE_READ, g_param_spec_boolean ("handle-read", "Handle Read", "Handle client reads and discard the data", DEFAULT_HANDLE_READ, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); /** * GstMultiFdSink::add: * @gstmultifdsink: the multifdsink element to emit this signal on * @fd: the file descriptor to add to multifdsink * * Hand the given open file descriptor to multifdsink to write to. */ gst_multi_fd_sink_signals[SIGNAL_ADD] = g_signal_new ("add", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_LAST | G_SIGNAL_ACTION, G_STRUCT_OFFSET (GstMultiFdSinkClass, add), NULL, NULL, g_cclosure_marshal_generic, G_TYPE_NONE, 1, G_TYPE_INT); /** * GstMultiFdSink::add-full: * @gstmultifdsink: the multifdsink element to emit this signal on * @fd: the file descriptor to add to multifdsink * @sync: the sync method to use * @format_min: the format of @value_min * @value_min: the minimum amount of data to burst expressed in * @format_min units. * @format_max: the format of @value_max * @value_max: the maximum amount of data to burst expressed in * @format_max units. * * Hand the given open file descriptor to multifdsink to write to and * specify the burst parameters for the new connection. */ gst_multi_fd_sink_signals[SIGNAL_ADD_BURST] = g_signal_new ("add-full", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_LAST | G_SIGNAL_ACTION, G_STRUCT_OFFSET (GstMultiFdSinkClass, add_full), NULL, NULL, g_cclosure_marshal_generic, G_TYPE_NONE, 6, G_TYPE_INT, GST_TYPE_SYNC_METHOD, GST_TYPE_FORMAT, G_TYPE_UINT64, GST_TYPE_FORMAT, G_TYPE_UINT64); /** * GstMultiFdSink::remove: * @gstmultifdsink: the multifdsink element to emit this signal on * @fd: the file descriptor to remove from multifdsink * * Remove the given open file descriptor from multifdsink. */ gst_multi_fd_sink_signals[SIGNAL_REMOVE] = g_signal_new ("remove", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_LAST | G_SIGNAL_ACTION, G_STRUCT_OFFSET (GstMultiFdSinkClass, remove), NULL, NULL, g_cclosure_marshal_generic, G_TYPE_NONE, 1, G_TYPE_INT); /** * GstMultiFdSink::remove-flush: * @gstmultifdsink: the multifdsink element to emit this signal on * @fd: the file descriptor to remove from multifdsink * * Remove the given open file descriptor from multifdsink after flushing all * the pending data to the fd. */ gst_multi_fd_sink_signals[SIGNAL_REMOVE_FLUSH] = g_signal_new ("remove-flush", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_LAST | G_SIGNAL_ACTION, G_STRUCT_OFFSET (GstMultiFdSinkClass, remove_flush), NULL, NULL, g_cclosure_marshal_generic, G_TYPE_NONE, 1, G_TYPE_INT); /** * GstMultiFdSink::get-stats: * @gstmultifdsink: the multifdsink element to emit this signal on * @fd: the file descriptor to get stats of from multifdsink * * Get statistics about @fd. This function returns a GValueArray to ease * automatic wrapping for bindings. * * Returns: a GValueArray with the statistics. The array contains guint64 * values that represent respectively: total number of bytes sent, time * when the client was added, time when the client was * disconnected/removed, time the client is/was active, last activity * time (in epoch seconds), number of buffers dropped. * All times are expressed in nanoseconds (GstClockTime). * The array can be 0-length if the client was not found. 
*/ gst_multi_fd_sink_signals[SIGNAL_GET_STATS] = g_signal_new ("get-stats", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_LAST | G_SIGNAL_ACTION, G_STRUCT_OFFSET (GstMultiFdSinkClass, get_stats), NULL, NULL, g_cclosure_marshal_generic, GST_TYPE_STRUCTURE, 1, G_TYPE_INT); /** * GstMultiFdSink::client-added: * @gstmultifdsink: the multifdsink element that emitted this signal * @fd: the file descriptor that was added to multifdsink * * The given file descriptor was added to multifdsink. This signal will * be emitted from the streaming thread so application should be prepared * for that. */ gst_multi_fd_sink_signals[SIGNAL_CLIENT_ADDED] = g_signal_new ("client-added", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_LAST, 0, NULL, NULL, g_cclosure_marshal_generic, G_TYPE_NONE, 1, G_TYPE_INT); /** * GstMultiFdSink::client-removed: * @gstmultifdsink: the multifdsink element that emitted this signal * @fd: the file descriptor that is to be removed from multifdsink * @status: the reason why the client was removed * * The given file descriptor is about to be removed from multifdsink. This * signal will be emitted from the streaming thread so applications should * be prepared for that. * * @gstmultifdsink still holds a handle to @fd so it is possible to call * the get-stats signal from this callback. For the same reason it is * not safe to close() and reuse @fd in this callback. */ gst_multi_fd_sink_signals[SIGNAL_CLIENT_REMOVED] = g_signal_new ("client-removed", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_LAST, 0, NULL, NULL, g_cclosure_marshal_generic, G_TYPE_NONE, 2, G_TYPE_INT, GST_TYPE_CLIENT_STATUS); /** * GstMultiFdSink::client-fd-removed: * @gstmultifdsink: the multifdsink element that emitted this signal * @fd: the file descriptor that was removed from multifdsink * * The given file descriptor was removed from multifdsink. This signal will * be emitted from the streaming thread so applications should be prepared * for that. * * In this callback, @gstmultifdsink has removed all the information * associated with @fd and it is therefore not possible to call get-stats * with @fd. It is however safe to close() and reuse @fd in the callback. 
*/ gst_multi_fd_sink_signals[SIGNAL_CLIENT_FD_REMOVED] = g_signal_new ("client-fd-removed", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_LAST, 0, NULL, NULL, g_cclosure_marshal_generic, G_TYPE_NONE, 1, G_TYPE_INT); gst_element_class_set_static_metadata (gstelement_class, "Multi filedescriptor sink", "Sink/Network", "Send data to multiple filedescriptors", "Thomas Vander Stichele <thomas at apestaart dot org>, " "Wim Taymans <wim@fluendo.com>"); klass->add = GST_DEBUG_FUNCPTR (gst_multi_fd_sink_add); klass->add_full = GST_DEBUG_FUNCPTR (gst_multi_fd_sink_add_full); klass->remove = GST_DEBUG_FUNCPTR (gst_multi_fd_sink_remove); klass->remove_flush = GST_DEBUG_FUNCPTR (gst_multi_fd_sink_remove_flush); klass->get_stats = GST_DEBUG_FUNCPTR (gst_multi_fd_sink_get_stats); gstmultihandlesink_class->emit_client_added = gst_multi_fd_sink_emit_client_added; gstmultihandlesink_class->emit_client_removed = gst_multi_fd_sink_emit_client_removed; gstmultihandlesink_class->stop_pre = GST_DEBUG_FUNCPTR (gst_multi_fd_sink_stop_pre); gstmultihandlesink_class->stop_post = GST_DEBUG_FUNCPTR (gst_multi_fd_sink_stop_post); gstmultihandlesink_class->start_pre = GST_DEBUG_FUNCPTR (gst_multi_fd_sink_start_pre); gstmultihandlesink_class->thread = GST_DEBUG_FUNCPTR (gst_multi_fd_sink_thread); gstmultihandlesink_class->new_client = GST_DEBUG_FUNCPTR (gst_multi_fd_sink_new_client); gstmultihandlesink_class->client_free = gst_multi_fd_sink_client_free; gstmultihandlesink_class->client_get_fd = GST_DEBUG_FUNCPTR (gst_multi_fd_sink_client_get_fd); gstmultihandlesink_class->handle_debug = GST_DEBUG_FUNCPTR (gst_multi_fd_sink_handle_debug); gstmultihandlesink_class->handle_hash_key = GST_DEBUG_FUNCPTR (gst_multi_fd_sink_handle_hash_key); gstmultihandlesink_class->hash_changed = GST_DEBUG_FUNCPTR (gst_multi_fd_sink_hash_changed); gstmultihandlesink_class->hash_adding = GST_DEBUG_FUNCPTR (gst_multi_fd_sink_hash_adding); gstmultihandlesink_class->hash_removing = GST_DEBUG_FUNCPTR (gst_multi_fd_sink_hash_removing); GST_DEBUG_CATEGORY_INIT (multifdsink_debug, "multifdsink", 0, "FD sink"); } static void gst_multi_fd_sink_init (GstMultiFdSink * this) { GstMultiHandleSink *mhsink = GST_MULTI_HANDLE_SINK (this); mhsink->handle_hash = g_hash_table_new (g_direct_hash, g_direct_equal); this->handle_read = DEFAULT_HANDLE_READ; } /* methods to emit signals */ static void gst_multi_fd_sink_emit_client_added (GstMultiHandleSink * mhsink, GstMultiSinkHandle handle) { g_signal_emit (mhsink, gst_multi_fd_sink_signals[SIGNAL_CLIENT_ADDED], 0, handle.fd); } static void gst_multi_fd_sink_emit_client_removed (GstMultiHandleSink * mhsink, GstMultiSinkHandle handle, GstClientStatus status) { g_signal_emit (mhsink, gst_multi_fd_sink_signals[SIGNAL_CLIENT_REMOVED], 0, handle.fd, status); } static void gst_multi_fd_sink_client_free (GstMultiHandleSink * mhsink, GstMultiHandleClient * client) { g_signal_emit (mhsink, gst_multi_fd_sink_signals[SIGNAL_CLIENT_FD_REMOVED], 0, client->handle.fd); } /* action signals */ static void gst_multi_fd_sink_add (GstMultiFdSink * sink, int fd) { GstMultiSinkHandle handle; handle.fd = fd; gst_multi_handle_sink_add (GST_MULTI_HANDLE_SINK_CAST (sink), handle); } static void gst_multi_fd_sink_add_full (GstMultiFdSink * sink, int fd, GstSyncMethod sync, GstFormat min_format, guint64 min_value, GstFormat max_format, guint64 max_value) { GstMultiSinkHandle handle; handle.fd = fd; gst_multi_handle_sink_add_full (GST_MULTI_HANDLE_SINK_CAST (sink), handle, sync, min_format, min_value, max_format, max_value); } static void 
gst_multi_fd_sink_remove (GstMultiFdSink * sink, int fd) { GstMultiSinkHandle handle; handle.fd = fd; gst_multi_handle_sink_remove (GST_MULTI_HANDLE_SINK_CAST (sink), handle); } static void gst_multi_fd_sink_remove_flush (GstMultiFdSink * sink, int fd) { GstMultiSinkHandle handle; handle.fd = fd; gst_multi_handle_sink_remove_flush (GST_MULTI_HANDLE_SINK_CAST (sink), handle); } static GstStructure * gst_multi_fd_sink_get_stats (GstMultiFdSink * sink, int fd) { GstMultiSinkHandle handle; handle.fd = fd; return gst_multi_handle_sink_get_stats (GST_MULTI_HANDLE_SINK_CAST (sink), handle); } /* vfuncs */ static GstMultiHandleClient * gst_multi_fd_sink_new_client (GstMultiHandleSink * mhsink, GstMultiSinkHandle handle, GstSyncMethod sync_method) { struct stat statbuf; GstTCPClient *client; GstMultiHandleClient *mhclient; GstMultiFdSink *sink = GST_MULTI_FD_SINK (mhsink); GstMultiHandleSinkClass *mhsinkclass = GST_MULTI_HANDLE_SINK_GET_CLASS (mhsink); /* create client datastructure */ client = g_new0 (GstTCPClient, 1); mhclient = (GstMultiHandleClient *) client; mhclient->handle = handle; gst_poll_fd_init (&client->gfd); client->gfd.fd = mhclient->handle.fd; gst_multi_handle_sink_client_init (mhclient, sync_method); mhsinkclass->handle_debug (handle, mhclient->debug); /* set the socket to non blocking */ if (fcntl (handle.fd, F_SETFL, O_NONBLOCK) < 0) { GST_ERROR_OBJECT (mhsink, "failed to make socket %s non-blocking: %s", mhclient->debug, g_strerror (errno)); } /* we always read from a client */ gst_poll_add_fd (sink->fdset, &client->gfd); /* we don't try to read from write only fds */ if (sink->handle_read) { gint flags; flags = fcntl (handle.fd, F_GETFL, 0); if ((flags & O_ACCMODE) != O_WRONLY) { gst_poll_fd_ctl_read (sink->fdset, &client->gfd, TRUE); } } /* figure out the mode, can't use send() for non sockets */ if (fstat (handle.fd, &statbuf) == 0 && S_ISSOCK (statbuf.st_mode)) { client->is_socket = TRUE; gst_multi_handle_sink_setup_dscp_client (mhsink, mhclient); } return mhclient; } static int gst_multi_fd_sink_client_get_fd (GstMultiHandleClient * client) { GstTCPClient *tclient = (GstTCPClient *) client; return tclient->gfd.fd; } static void gst_multi_fd_sink_handle_debug (GstMultiSinkHandle handle, gchar debug[30]) { g_snprintf (debug, 30, "[fd %5d]", handle.fd); } static gpointer gst_multi_fd_sink_handle_hash_key (GstMultiSinkHandle handle) { return GINT_TO_POINTER (handle.fd); } static void gst_multi_fd_sink_hash_changed (GstMultiHandleSink * mhsink) { GstMultiFdSink *sink = GST_MULTI_FD_SINK (mhsink); gst_poll_restart (sink->fdset); } /* handle a read on a client fd, * which either indicates a close or should be ignored * returns FALSE if some error occured or the client closed. 
*/ static gboolean gst_multi_fd_sink_handle_client_read (GstMultiFdSink * sink, GstTCPClient * client) { int avail, fd; gboolean ret; GstMultiHandleClient *mhclient = (GstMultiHandleClient *) client; fd = client->gfd.fd; if (ioctl (fd, FIONREAD, &avail) < 0) goto ioctl_failed; GST_DEBUG_OBJECT (sink, "%s select reports client read of %d bytes", mhclient->debug, avail); ret = TRUE; if (avail == 0) { /* client sent close, so remove it */ GST_DEBUG_OBJECT (sink, "%s client asked for close, removing", mhclient->debug); mhclient->status = GST_CLIENT_STATUS_CLOSED; ret = FALSE; } else if (avail < 0) { GST_WARNING_OBJECT (sink, "%s avail < 0, removing", mhclient->debug); mhclient->status = GST_CLIENT_STATUS_ERROR; ret = FALSE; } else { guint8 dummy[512]; gint nread; /* just Read 'n' Drop, could also just drop the client as it's not supposed * to write to us except for closing the socket, I guess it's because we * like to listen to our customers. */ do { /* this is the maximum we can read */ gint to_read = MIN (avail, 512); GST_DEBUG_OBJECT (sink, "%s client wants us to read %d bytes", mhclient->debug, to_read); nread = read (fd, dummy, to_read); if (nread < 0) { GST_WARNING_OBJECT (sink, "%s could not read %d bytes: %s (%d)", mhclient->debug, to_read, g_strerror (errno), errno); mhclient->status = GST_CLIENT_STATUS_ERROR; ret = FALSE; break; } else if (nread == 0) { GST_WARNING_OBJECT (sink, "%s 0 bytes in read, removing", mhclient->debug); mhclient->status = GST_CLIENT_STATUS_ERROR; ret = FALSE; break; } avail -= nread; } while (avail > 0); } return ret; /* ERRORS */ ioctl_failed: { GST_WARNING_OBJECT (sink, "%s ioctl failed: %s (%d)", mhclient->debug, g_strerror (errno), errno); mhclient->status = GST_CLIENT_STATUS_ERROR; return FALSE; } } /* Handle a write on a client, * which indicates a read request from a client. * * For each client we maintain a queue of GstBuffers that contain the raw bytes * we need to send to the client. * * We first check to see if we need to send streamheaders. If so, we queue them. * * Then we run into the main loop that tries to send as many buffers as * possible. It will first exhaust the mhclient->sending queue and if the queue * is empty, it will pick a buffer from the global queue. * * Sending the buffers from the mhclient->sending queue is basically writing * the bytes to the socket and maintaining a count of the bytes that were * sent. When the buffer is completely sent, it is removed from the * mhclient->sending queue and we try to pick a new buffer for sending. * * When the sending returns a partial buffer we stop sending more data as * the next send operation could block. * * This function returns FALSE if some error occurred. 
*/ static gboolean gst_multi_fd_sink_handle_client_write (GstMultiFdSink * sink, GstTCPClient * client) { gboolean more; gboolean flushing; GstClockTime now; GTimeVal nowtv; GstMultiHandleSink *mhsink = GST_MULTI_HANDLE_SINK (sink); GstMultiHandleSinkClass *mhsinkclass = GST_MULTI_HANDLE_SINK_GET_CLASS (mhsink); GstMultiHandleClient *mhclient = (GstMultiHandleClient *) client; int fd = mhclient->handle.fd; flushing = mhclient->status == GST_CLIENT_STATUS_FLUSHING; more = TRUE; do { gint maxsize; g_get_current_time (&nowtv); now = GST_TIMEVAL_TO_TIME (nowtv); if (!mhclient->sending) { /* client is not working on a buffer */ if (mhclient->bufpos == -1) { /* client is too fast, remove from write queue until new buffer is * available */ /* FIXME: specific */ gst_poll_fd_ctl_write (sink->fdset, &client->gfd, FALSE); /* if we flushed out all of the client buffers, we can stop */ if (mhclient->flushcount == 0) goto flushed; return TRUE; } else { /* client can pick a buffer from the global queue */ GstBuffer *buf; GstClockTime timestamp; /* for new connections, we need to find a good spot in the * bufqueue to start streaming from */ if (mhclient->new_connection && !flushing) { gint position = gst_multi_handle_sink_new_client_position (mhsink, mhclient); if (position >= 0) { /* we got a valid spot in the queue */ mhclient->new_connection = FALSE; mhclient->bufpos = position; } else { /* cannot send data to this client yet */ /* FIXME: specific */ gst_poll_fd_ctl_write (sink->fdset, &client->gfd, FALSE); return TRUE; } } /* we flushed all remaining buffers, no need to get a new one */ if (mhclient->flushcount == 0) goto flushed; /* grab buffer */ buf = g_array_index (mhsink->bufqueue, GstBuffer *, mhclient->bufpos); mhclient->bufpos--; /* update stats */ timestamp = GST_BUFFER_TIMESTAMP (buf); if (mhclient->first_buffer_ts == GST_CLOCK_TIME_NONE) mhclient->first_buffer_ts = timestamp; if (timestamp != -1) mhclient->last_buffer_ts = timestamp; /* decrease flushcount */ if (mhclient->flushcount != -1) mhclient->flushcount--; GST_LOG_OBJECT (sink, "%s client %p at position %d", mhclient->debug, client, mhclient->bufpos); /* queueing a buffer will ref it */ mhsinkclass->client_queue_buffer (mhsink, mhclient, buf); /* need to start from the first byte for this new buffer */ mhclient->bufoffset = 0; } } /* see if we need to send something */ if (mhclient->sending) { ssize_t wrote; GstBuffer *head; GstMapInfo info; guint8 *data; /* pick first buffer from list */ head = GST_BUFFER (mhclient->sending->data); if (!gst_buffer_map (head, &info, GST_MAP_READ)) g_return_val_if_reached (FALSE); data = info.data; maxsize = info.size - mhclient->bufoffset; /* FIXME: specific */ /* try to write the complete buffer */ #ifdef MSG_NOSIGNAL #define FLAGS MSG_NOSIGNAL #else #define FLAGS 0 #endif if (client->is_socket) { wrote = send (fd, data + mhclient->bufoffset, maxsize, FLAGS); } else { wrote = write (fd, data + mhclient->bufoffset, maxsize); } gst_buffer_unmap (head, &info); if (wrote < 0) { /* hmm error.. 
*/ if (errno == EAGAIN) { /* nothing serious, resource was unavailable, try again later */ more = FALSE; } else if (errno == ECONNRESET) { goto connection_reset; } else { goto write_error; } } else { if (wrote < maxsize) { /* partial write means that the client cannot read more and we should * stop sending more */ GST_LOG_OBJECT (sink, "partial write on %s of %" G_GSSIZE_FORMAT " bytes", mhclient->debug, wrote); mhclient->bufoffset += wrote; more = FALSE; } else { /* complete buffer was written, we can proceed to the next one */ mhclient->sending = g_slist_remove (mhclient->sending, head); gst_buffer_unref (head); /* make sure we start from byte 0 for the next buffer */ mhclient->bufoffset = 0; } /* update stats */ mhclient->bytes_sent += wrote; mhclient->last_activity_time = now; mhsink->bytes_served += wrote; } } } while (more); return TRUE; /* ERRORS */ flushed: { GST_DEBUG_OBJECT (sink, "%s flushed, removing", mhclient->debug); mhclient->status = GST_CLIENT_STATUS_REMOVED; return FALSE; } connection_reset: { GST_DEBUG_OBJECT (sink, "%s connection reset by peer, removing", mhclient->debug); mhclient->status = GST_CLIENT_STATUS_CLOSED; return FALSE; } write_error: { GST_WARNING_OBJECT (sink, "%s could not write, removing client: %s (%d)", mhclient->debug, g_strerror (errno), errno); mhclient->status = GST_CLIENT_STATUS_ERROR; return FALSE; } } static void gst_multi_fd_sink_hash_adding (GstMultiHandleSink * mhsink, GstMultiHandleClient * mhclient) { GstMultiFdSink *sink = GST_MULTI_FD_SINK (mhsink); GstTCPClient *client = (GstTCPClient *) mhclient; gst_poll_fd_ctl_write (sink->fdset, &client->gfd, TRUE); } static void gst_multi_fd_sink_hash_removing (GstMultiHandleSink * mhsink, GstMultiHandleClient * mhclient) { GstMultiFdSink *sink = GST_MULTI_FD_SINK (mhsink); GstTCPClient *client = (GstTCPClient *) mhclient; gst_poll_remove_fd (sink->fdset, &client->gfd); } /* Handle the clients. Basically does a blocking select for one * of the client fds to become read or writable. We also have a * filedescriptor to receive commands on that we need to check. * * After going out of the select call, we read and write to all * clients that can do so. Badly behaving clients are put on a * garbage list and removed. */ static void gst_multi_fd_sink_handle_clients (GstMultiFdSink * sink) { int result; GList *clients, *next; gboolean try_again; GstMultiFdSinkClass *fclass; guint cookie; GstMultiHandleSink *mhsink = GST_MULTI_HANDLE_SINK (sink); int fd; fclass = GST_MULTI_FD_SINK_GET_CLASS (sink); do { try_again = FALSE; /* check for: * - server socket input (ie, new client connections) * - client socket input (ie, clients saying goodbye) * - client socket output (ie, client reads) */ GST_LOG_OBJECT (sink, "waiting on action on fdset"); result = gst_poll_wait (sink->fdset, mhsink->timeout != 0 ? mhsink->timeout : GST_CLOCK_TIME_NONE); /* Handle the special case in which the sink is not receiving more buffers * and will not disconnect inactive client in the streaming thread. 
*/ if (G_UNLIKELY (result == 0)) { GstClockTime now; GTimeVal nowtv; g_get_current_time (&nowtv); now = GST_TIMEVAL_TO_TIME (nowtv); CLIENTS_LOCK (mhsink); for (clients = mhsink->clients; clients; clients = next) { GstTCPClient *client; GstMultiHandleClient *mhclient; client = (GstTCPClient *) clients->data; mhclient = (GstMultiHandleClient *) client; next = g_list_next (clients); if (mhsink->timeout > 0 && now - mhclient->last_activity_time > mhsink->timeout) { mhclient->status = GST_CLIENT_STATUS_SLOW; gst_multi_handle_sink_remove_client_link (mhsink, clients); } } CLIENTS_UNLOCK (mhsink); return; } else if (result < 0) { GST_WARNING_OBJECT (sink, "wait failed: %s (%d)", g_strerror (errno), errno); if (errno == EBADF) { /* ok, so one or more of the fds is invalid. We loop over them to find * the ones that give an error to the F_GETFL fcntl. */ CLIENTS_LOCK (mhsink); restart: cookie = mhsink->clients_cookie; for (clients = mhsink->clients; clients; clients = next) { GstTCPClient *client; GstMultiHandleClient *mhclient; long flags; int res; if (cookie != mhsink->clients_cookie) { GST_DEBUG_OBJECT (sink, "Cookie changed finding bad fd"); goto restart; } client = (GstTCPClient *) clients->data; mhclient = (GstMultiHandleClient *) client; next = g_list_next (clients); fd = client->gfd.fd; res = fcntl (fd, F_GETFL, &flags); if (res == -1) { GST_WARNING_OBJECT (sink, "fnctl failed for %d, removing: %s (%d)", fd, g_strerror (errno), errno); if (errno == EBADF) { mhclient->status = GST_CLIENT_STATUS_ERROR; /* releases the CLIENTS lock */ gst_multi_handle_sink_remove_client_link (mhsink, clients); } } } CLIENTS_UNLOCK (mhsink); /* after this, go back in the select loop as the read/writefds * are not valid */ try_again = TRUE; } else if (errno == EINTR) { /* interrupted system call, just redo the wait */ try_again = TRUE; } else if (errno == EBUSY) { /* the call to gst_poll_wait() was flushed */ return; } else { /* this is quite bad... 
*/ GST_ELEMENT_ERROR (sink, RESOURCE, READ, (NULL), ("select failed: %s (%d)", g_strerror (errno), errno)); return; } } else { GST_LOG_OBJECT (sink, "wait done: %d sockets with events", result); } } while (try_again); /* subclasses can check fdset with this virtual function */ if (fclass->wait) fclass->wait (sink, sink->fdset); /* Check the clients */ CLIENTS_LOCK (mhsink); restart2: cookie = mhsink->clients_cookie; for (clients = mhsink->clients; clients; clients = next) { GstTCPClient *client; GstMultiHandleClient *mhclient; if (mhsink->clients_cookie != cookie) { GST_DEBUG_OBJECT (sink, "Restarting loop, cookie out of date"); goto restart2; } client = (GstTCPClient *) clients->data; mhclient = (GstMultiHandleClient *) client; next = g_list_next (clients); if (mhclient->status != GST_CLIENT_STATUS_FLUSHING && mhclient->status != GST_CLIENT_STATUS_OK) { gst_multi_handle_sink_remove_client_link (mhsink, clients); continue; } if (gst_poll_fd_has_closed (sink->fdset, &client->gfd)) { mhclient->status = GST_CLIENT_STATUS_CLOSED; gst_multi_handle_sink_remove_client_link (mhsink, clients); continue; } if (gst_poll_fd_has_error (sink->fdset, &client->gfd)) { GST_WARNING_OBJECT (sink, "gst_poll_fd_has_error for %d", client->gfd.fd); mhclient->status = GST_CLIENT_STATUS_ERROR; gst_multi_handle_sink_remove_client_link (mhsink, clients); continue; } if (gst_poll_fd_can_read (sink->fdset, &client->gfd)) { /* handle client read */ if (!gst_multi_fd_sink_handle_client_read (sink, client)) { gst_multi_handle_sink_remove_client_link (mhsink, clients); continue; } } if (gst_poll_fd_can_write (sink->fdset, &client->gfd)) { /* handle client write */ if (!gst_multi_fd_sink_handle_client_write (sink, client)) { gst_multi_handle_sink_remove_client_link (mhsink, clients); continue; } } } CLIENTS_UNLOCK (mhsink); } /* we handle the client communication in another thread so that we do not block * the gstreamer thread while we select() on the client fds */ static gpointer gst_multi_fd_sink_thread (GstMultiHandleSink * mhsink) { GstMultiFdSink *sink = GST_MULTI_FD_SINK (mhsink); while (mhsink->running) { gst_multi_fd_sink_handle_clients (sink); } return NULL; } static void gst_multi_fd_sink_set_property (GObject * object, guint prop_id, const GValue * value, GParamSpec * pspec) { GstMultiFdSink *multifdsink; multifdsink = GST_MULTI_FD_SINK (object); switch (prop_id) { case PROP_HANDLE_READ: multifdsink->handle_read = g_value_get_boolean (value); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); break; } } static void gst_multi_fd_sink_get_property (GObject * object, guint prop_id, GValue * value, GParamSpec * pspec) { GstMultiFdSink *multifdsink; multifdsink = GST_MULTI_FD_SINK (object); switch (prop_id) { case PROP_HANDLE_READ: g_value_set_boolean (value, multifdsink->handle_read); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); break; } } static gboolean gst_multi_fd_sink_start_pre (GstMultiHandleSink * mhsink) { GstMultiFdSink *mfsink = GST_MULTI_FD_SINK (mhsink); GST_INFO_OBJECT (mfsink, "starting"); if ((mfsink->fdset = gst_poll_new (TRUE)) == NULL) goto socket_pair; return TRUE; /* ERRORS */ socket_pair: { GST_ELEMENT_ERROR (mfsink, RESOURCE, OPEN_READ_WRITE, (NULL), GST_ERROR_SYSTEM); return FALSE; } } static gboolean multifdsink_hash_remove (gpointer key, gpointer value, gpointer data) { return TRUE; } static void gst_multi_fd_sink_stop_pre (GstMultiHandleSink * mhsink) { GstMultiFdSink *mfsink = GST_MULTI_FD_SINK (mhsink); gst_poll_set_flushing 
(mfsink->fdset, TRUE); } static void gst_multi_fd_sink_stop_post (GstMultiHandleSink * mhsink) { GstMultiFdSink *mfsink = GST_MULTI_FD_SINK (mhsink); if (mfsink->fdset) { gst_poll_free (mfsink->fdset); mfsink->fdset = NULL; } g_hash_table_foreach_remove (mhsink->handle_hash, multifdsink_hash_remove, mfsink); }
veo-labs/gst-plugins-base
gst/tcp/gstmultifdsink.c
C
gpl-2.0
39,319
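The SECTION documentation at the top of gstmultifdsink.c explains that applications hand already-connected descriptors to the element through the "add"/"add-full" action signals, remove them with "remove", and close them themselves only after "client-fd-removed" fires. Below is a minimal, hedged sketch of that flow; it serves a test tone to stdout as the single client, and the pipeline string, the run duration and the required plugin set (GStreamer core plus the base and good plugin packages) are assumptions made for the example, not anything prescribed by the file above.

#include <gst/gst.h>
#include <unistd.h>

/* multifdsink never closes descriptors itself (see the docs above), so the
 * application closes the fd once "client-fd-removed" has fired. */
static void
on_client_fd_removed (GstElement * sink, gint fd, gpointer user_data)
{
  g_print ("client fd %d removed\n", fd);
  if (fd != STDOUT_FILENO)      /* keep stdout open for this demo */
    close (fd);
}

int
main (int argc, char *argv[])
{
  GstElement *pipeline, *sink;

  gst_init (&argc, &argv);

  /* invented pipeline: encode a test tone and serve it through multifdsink */
  pipeline = gst_parse_launch (
      "audiotestsrc ! wavenc ! multifdsink name=sink", NULL);
  if (pipeline == NULL)
    return 1;
  sink = gst_bin_get_by_name (GST_BIN (pipeline), "sink");

  g_signal_connect (sink, "client-fd-removed",
      G_CALLBACK (on_client_fd_removed), NULL);

  /* the element must be at least READY before clients can be added */
  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  /* hand a writable descriptor to the sink */
  g_signal_emit_by_name (sink, "add", STDOUT_FILENO);

  g_usleep (3 * G_USEC_PER_SEC);        /* stream for a few seconds */

  g_signal_emit_by_name (sink, "remove", STDOUT_FILENO);
  gst_element_set_state (pipeline, GST_STATE_NULL);
  gst_object_unref (sink);
  gst_object_unref (pipeline);
  return 0;
}

A TCP server would instead pass the descriptor returned by accept() to the "add" signal, or use "add-full" when it wants to request a burst-on-connect for that particular client.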
/* * Broadcom SiliconBackplane chipcommon serial flash interface * * Copyright (C) 2015, Broadcom Corporation. All Rights Reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * * $Id: sflash.c 345826 2012-07-19 06:34:55Z $ */ #include <bcm_cfg.h> #include <typedefs.h> #include <osl.h> #include <bcmutils.h> #include <siutils.h> #include <hndsoc.h> #include <sbhndcpu.h> #include <sbchipc.h> #include <bcmdevs.h> #include <sflash.h> #ifdef BCMDBG #define SFL_MSG(args) printf args #else #define SFL_MSG(args) #endif /* BCMDBG */ /* Private global state */ static struct sflash sflash; /* Issue a serial flash command */ static INLINE void sflash_cmd(osl_t *osh, chipcregs_t *cc, uint opcode) { W_REG(osh, &cc->flashcontrol, SFLASH_START | opcode); while (R_REG(osh, &cc->flashcontrol) & SFLASH_BUSY); } static bool firsttime = TRUE; /* Initialize serial flash access */ struct sflash * sflash_init(si_t *sih, chipcregs_t *cc) { uint32 id, id2; const char *name = ""; osl_t *osh; ASSERT(sih); osh = si_osh(sih); bzero(&sflash, sizeof(sflash)); sflash.type = sih->cccaps & CC_CAP_FLASH_MASK; switch (sflash.type) { case SFLASH_ST: /* Probe for ST chips */ name = "ST compatible"; sflash_cmd(osh, cc, SFLASH_ST_DP); W_REG(osh, &cc->flashaddress, 0); sflash_cmd(osh, cc, SFLASH_ST_RES); id = R_REG(osh, &cc->flashdata); sflash.blocksize = 64 * 1024; switch (id) { case 0x11: /* ST M25P20 2 Mbit Serial Flash */ sflash.numblocks = 4; break; case 0x12: /* ST M25P40 4 Mbit Serial Flash */ sflash.numblocks = 8; break; case 0x13: sflash_cmd(osh, cc, SFLASH_MXIC_RDID); id = R_REG(osh, &cc->flashdata); if (id == SFLASH_MXIC_MFID) { /* MXIC MX25L8006E 8 Mbit Serial Flash */ sflash.blocksize = 4 * 1024; sflash.numblocks = 16 * 16; } else { /* ST M25P80 8 Mbit Serial Flash */ sflash.numblocks = 16; } break; case 0x14: /* ST M25P16 16 Mbit Serial Flash */ sflash.numblocks = 32; break; case 0x15: /* ST M25P32 32 Mbit Serial Flash */ sflash.numblocks = 64; break; case 0x16: /* ST M25P64 64 Mbit Serial Flash */ sflash.numblocks = 128; break; case 0x17: /* ST M25FL128 128 Mbit Serial Flash */ sflash.numblocks = 256; break; case 0xbf: /* All of the following flashes are SST with * 4KB subsectors. Others should be added but * We'll have to revamp the way we identify them * since RES is not eough to disambiguate them. 
*/ name = "SST"; sflash.blocksize = 4 * 1024; W_REG(osh, &cc->flashaddress, 1); sflash_cmd(osh, cc, SFLASH_ST_RES); id2 = R_REG(osh, &cc->flashdata); switch (id2) { case 1: /* SST25WF512 512 Kbit Serial Flash */ sflash.numblocks = 16; break; case 0x48: /* SST25VF512 512 Kbit Serial Flash */ sflash.numblocks = 16; break; case 2: /* SST25WF010 1 Mbit Serial Flash */ sflash.numblocks = 32; break; case 0x49: /* SST25VF010 1 Mbit Serial Flash */ sflash.numblocks = 32; break; case 3: /* SST25WF020 2 Mbit Serial Flash */ sflash.numblocks = 64; break; case 0x43: /* SST25VF020 2 Mbit Serial Flash */ sflash.numblocks = 64; break; case 4: /* SST25WF040 4 Mbit Serial Flash */ sflash.numblocks = 128; break; case 0x44: /* SST25VF040 4 Mbit Serial Flash */ sflash.numblocks = 128; break; case 0x8d: /* SST25VF040B 4 Mbit Serial Flash */ sflash.numblocks = 128; break; case 5: /* SST25WF080 8 Mbit Serial Flash */ sflash.numblocks = 256; break; case 0x8e: /* SST25VF080B 8 Mbit Serial Flash */ sflash.numblocks = 256; break; case 0x41: /* SST25VF016 16 Mbit Serial Flash */ sflash.numblocks = 512; break; case 0x4a: /* SST25VF032 32 Mbit Serial Flash */ sflash.numblocks = 1024; break; case 0x4b: /* SST25VF064 64 Mbit Serial Flash */ sflash.numblocks = 2048; break; } break; } break; case SFLASH_AT: /* Probe for Atmel chips */ name = "Atmel"; sflash_cmd(osh, cc, SFLASH_AT_STATUS); id = R_REG(osh, &cc->flashdata) & 0x3c; switch (id) { case 0xc: /* Atmel AT45DB011 1Mbit Serial Flash */ sflash.blocksize = 256; sflash.numblocks = 512; break; case 0x14: /* Atmel AT45DB021 2Mbit Serial Flash */ sflash.blocksize = 256; sflash.numblocks = 1024; break; case 0x1c: /* Atmel AT45DB041 4Mbit Serial Flash */ sflash.blocksize = 256; sflash.numblocks = 2048; break; case 0x24: /* Atmel AT45DB081 8Mbit Serial Flash */ sflash.blocksize = 256; sflash.numblocks = 4096; break; case 0x2c: /* Atmel AT45DB161 16Mbit Serial Flash */ sflash.blocksize = 512; sflash.numblocks = 4096; break; case 0x34: /* Atmel AT45DB321 32Mbit Serial Flash */ sflash.blocksize = 512; sflash.numblocks = 8192; break; case 0x3c: /* Atmel AT45DB642 64Mbit Serial Flash */ sflash.blocksize = 1024; sflash.numblocks = 8192; break; } break; } sflash.size = sflash.blocksize * sflash.numblocks; sflash.phybase = SI_FLASH2; if (firsttime) printf("Found an %s serial flash with %d %dKB blocks; total size %dMB\n", name, sflash.numblocks, sflash.blocksize / 1024, sflash.size / (1024 * 1024)); firsttime = FALSE; return sflash.size ? &sflash : NULL; } /* Read len bytes starting at offset into buf. Returns number of bytes read. */ int sflash_read(si_t *sih, chipcregs_t *cc, uint offset, uint len, uchar *buf) { uint8 *from, *to; int cnt, i; ASSERT(sih); if (!len) return 0; if ((offset + len) > sflash.size) return -22; if ((len >= 4) && (offset & 3)) cnt = 4 - (offset & 3); else if ((len >= 4) && ((uintptr)buf & 3)) cnt = 4 - ((uintptr)buf & 3); else cnt = len; if (sih->ccrev == 12) from = (uint8 *)OSL_UNCACHED((void *)SI_FLASH2 + offset); else from = (uint8 *)OSL_CACHED((void *)SI_FLASH2 + offset); to = (uint8 *)buf; if (cnt < 4) { for (i = 0; i < cnt; i ++) { /* Cannot use R_REG because in bigendian that will * xor the address and we don't want that here. */ *to = *from; from ++; to ++; } return cnt; } while (cnt >= 4) { *(uint32 *)to = *(uint32 *)from; from += 4; to += 4; cnt -= 4; } return (len - cnt); } /* Poll for command completion. Returns zero when complete. 
*/ int sflash_poll(si_t *sih, chipcregs_t *cc, uint offset) { osl_t *osh; ASSERT(sih); osh = si_osh(sih); if (offset >= sflash.size) return -22; switch (sflash.type) { case SFLASH_ST: /* Check for ST Write In Progress bit */ sflash_cmd(osh, cc, SFLASH_ST_RDSR); return R_REG(osh, &cc->flashdata) & SFLASH_ST_WIP; case SFLASH_AT: /* Check for Atmel Ready bit */ sflash_cmd(osh, cc, SFLASH_AT_STATUS); return !(R_REG(osh, &cc->flashdata) & SFLASH_AT_READY); } return 0; } /* Write len bytes starting at offset into buf. Returns number of bytes * written. Caller should poll for completion. */ #define ST_RETRIES 3 #ifdef IL_BIGENDIAN #ifdef BCMHND74K #define GET_BYTE(ptr) (*(uint8 *)((uint32)(ptr) ^ 7)) #else /* !74K, bcm33xx */ #define GET_BYTE(ptr) (*(uint8 *)((uint32)(ptr) ^ 3)) #endif /* BCMHND74K */ #else /* !IL_BIGENDIAN */ #define GET_BYTE(ptr) (*(ptr)) #endif /* IL_BIGENDIAN */ int sflash_write(si_t *sih, chipcregs_t *cc, uint offset, uint length, const uchar *buffer) { struct sflash *sfl; uint off = offset, len = length; const uint8 *buf = buffer; uint8 data; int ret = 0, ntry = 0; bool is4712b0; uint32 page, byte, mask; osl_t *osh; ASSERT(sih); osh = si_osh(sih); if (!len) return 0; sfl = &sflash; if ((off + len) > sfl->size) return -22; switch (sfl->type) { case SFLASH_ST: is4712b0 = (CHIPID(sih->chip) == BCM4712_CHIP_ID) && (CHIPREV(sih->chiprev) == 3); /* Enable writes */ retry: sflash_cmd(osh, cc, SFLASH_ST_WREN); off = offset; len = length; buf = buffer; ntry++; if (is4712b0) { mask = 1 << 14; W_REG(osh, &cc->flashaddress, off); data = GET_BYTE(buf); buf++; W_REG(osh, &cc->flashdata, data); /* Set chip select */ OR_REG(osh, &cc->gpioout, mask); /* Issue a page program with the first byte */ sflash_cmd(osh, cc, SFLASH_ST_PP); ret = 1; off++; len--; while (len > 0) { if ((off & 255) == 0) { /* Page boundary, drop cs and return */ AND_REG(osh, &cc->gpioout, ~mask); OSL_DELAY(1); if (!sflash_poll(sih, cc, off)) { /* Flash rejected command */ if (ntry <= ST_RETRIES) goto retry; else return -11; } return ret; } else { /* Write single byte */ data = GET_BYTE(buf); buf++; sflash_cmd(osh, cc, data); } ret++; off++; len--; } /* All done, drop cs */ AND_REG(osh, &cc->gpioout, ~mask); OSL_DELAY(1); if (!sflash_poll(sih, cc, off)) { /* Flash rejected command */ if (ntry <= ST_RETRIES) goto retry; else return -12; } } else if (sih->ccrev >= 20) { W_REG(osh, &cc->flashaddress, off); data = GET_BYTE(buf); buf++; W_REG(osh, &cc->flashdata, data); /* Issue a page program with CSA bit set */ sflash_cmd(osh, cc, SFLASH_ST_CSA | SFLASH_ST_PP); ret = 1; off++; len--; while (len > 0) { if ((off & 255) == 0) { /* Page boundary, poll droping cs and return */ W_REG(NULL, &cc->flashcontrol, 0); OSL_DELAY(1); if (sflash_poll(sih, cc, off) == 0) { /* Flash rejected command */ SFL_MSG(("sflash: pp rejected, ntry: %d," " off: %d/%d, len: %d/%d, ret:" "%d\n", ntry, off, offset, len, length, ret)); if (ntry <= ST_RETRIES) goto retry; else return -11; } return ret; } else { /* Write single byte */ data = GET_BYTE(buf); buf++; sflash_cmd(osh, cc, SFLASH_ST_CSA | data); } ret++; off++; len--; } /* All done, drop cs & poll */ W_REG(NULL, &cc->flashcontrol, 0); OSL_DELAY(1); if (sflash_poll(sih, cc, off) == 0) { /* Flash rejected command */ SFL_MSG(("sflash: pp rejected, ntry: %d, off: %d/%d," " len: %d/%d, ret: %d\n", ntry, off, offset, len, length, ret)); if (ntry <= ST_RETRIES) goto retry; else return -12; } } else { ret = 1; W_REG(osh, &cc->flashaddress, off); data = GET_BYTE(buf); buf++; W_REG(osh, &cc->flashdata, 
data); /* Page program */ sflash_cmd(osh, cc, SFLASH_ST_PP); } break; case SFLASH_AT: mask = sfl->blocksize - 1; page = (off & ~mask) << 1; byte = off & mask; /* Read main memory page into buffer 1 */ if (byte || (len < sfl->blocksize)) { W_REG(osh, &cc->flashaddress, page); sflash_cmd(osh, cc, SFLASH_AT_BUF1_LOAD); /* 250 us for AT45DB321B */ SPINWAIT(sflash_poll(sih, cc, off), 1000); ASSERT(!sflash_poll(sih, cc, off)); } /* Write into buffer 1 */ for (ret = 0; (ret < (int)len) && (byte < sfl->blocksize); ret++) { W_REG(osh, &cc->flashaddress, byte++); W_REG(osh, &cc->flashdata, *buf++); sflash_cmd(osh, cc, SFLASH_AT_BUF1_WRITE); } /* Write buffer 1 into main memory page */ W_REG(osh, &cc->flashaddress, page); sflash_cmd(osh, cc, SFLASH_AT_BUF1_PROGRAM); break; } return ret; } /* Erase a region. Returns number of bytes scheduled for erasure. * Caller should poll for completion. */ int sflash_erase(si_t *sih, chipcregs_t *cc, uint offset) { struct sflash *sfl; osl_t *osh; ASSERT(sih); osh = si_osh(sih); sfl = &sflash; if (offset >= sfl->size) return -22; switch (sfl->type) { case SFLASH_ST: sflash_cmd(osh, cc, SFLASH_ST_WREN); W_REG(osh, &cc->flashaddress, offset); /* Newer flashes have "sub-sectors" which can be erased independently * with a new command: ST_SSE. The ST_SE command erases 64KB just as * before. */ sflash_cmd(osh, cc, (sfl->blocksize < (64 * 1024)) ? SFLASH_ST_SSE : SFLASH_ST_SE); return sfl->blocksize; case SFLASH_AT: W_REG(osh, &cc->flashaddress, offset << 1); sflash_cmd(osh, cc, SFLASH_AT_PAGE_ERASE); return sfl->blocksize; } return 0; } /* * writes the appropriate range of flash, a NULL buf simply erases * the region of flash */ int sflash_commit(si_t *sih, chipcregs_t *cc, uint offset, uint len, const uchar *buf) { struct sflash *sfl; uchar *block = NULL, *cur_ptr, *blk_ptr; uint blocksize = 0, mask, cur_offset, cur_length, cur_retlen, remainder; uint blk_offset, blk_len, copied; int bytes, ret = 0; osl_t *osh; ASSERT(sih); osh = si_osh(sih); /* Check address range */ if (len <= 0) return 0; sfl = &sflash; if ((offset + len) > sfl->size) return -1; blocksize = sfl->blocksize; mask = blocksize - 1; /* Allocate a block of mem */ if (!(block = MALLOC(osh, blocksize))) return -1; while (len) { /* Align offset */ cur_offset = offset & ~mask; cur_length = blocksize; cur_ptr = block; remainder = blocksize - (offset & mask); if (len < remainder) cur_retlen = len; else cur_retlen = remainder; /* buf == NULL means erase only */ if (buf) { /* Copy existing data into holding block if necessary */ if ((offset & mask) || (len < blocksize)) { blk_offset = cur_offset; blk_len = cur_length; blk_ptr = cur_ptr; /* Copy entire block */ while (blk_len) { copied = sflash_read(sih, cc, blk_offset, blk_len, blk_ptr); blk_offset += copied; blk_len -= copied; blk_ptr += copied; } } /* Copy input data into holding block */ memcpy(cur_ptr + (offset & mask), buf, cur_retlen); } /* Erase block */ if ((ret = sflash_erase(sih, cc, (uint) cur_offset)) < 0) goto done; while (sflash_poll(sih, cc, (uint) cur_offset)); /* buf == NULL means erase only */ if (!buf) { offset += cur_retlen; len -= cur_retlen; continue; } /* Write holding block */ while (cur_length > 0) { if ((bytes = sflash_write(sih, cc, (uint) cur_offset, (uint) cur_length, (uchar *) cur_ptr)) < 0) { ret = bytes; goto done; } while (sflash_poll(sih, cc, (uint) cur_offset)); cur_offset += bytes; cur_length -= bytes; cur_ptr += bytes; } offset += cur_retlen; len -= cur_retlen; buf += cur_retlen; } ret = len; done: if (block) MFREE(osh, block, 
blocksize); return ret; }
artemh/asuswrt-merlin
release/src-rt-7.14.114.x/src/shared/sflash.c
C
gpl-2.0
15,127
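sflash.c above exposes a small probe/read/write API: sflash_init() identifies the part, sflash_commit() erases and rewrites a region while polling for completion internally, and sflash_read()/sflash_poll() cover readback and command status. The sketch below shows one plausible caller doing a verified update; it is illustrative only, builds solely inside the same Broadcom HND source tree whose headers it includes, and the name update_region as well as the sih/cc handles are stand-ins for objects the surrounding firmware code would already own (typically obtained through the siutils layer).

#include <typedefs.h>
#include <osl.h>
#include <siutils.h>
#include <sbchipc.h>
#include <sflash.h>

/* Write `len` bytes at `offset` and read them back to verify.
 * Returns 0 on success, -1 on any probe, bounds, write or verify failure. */
int
update_region(si_t *sih, chipcregs_t *cc, uint offset, const uchar *data, uint len)
{
	struct sflash *sfl;
	uchar readback[256];
	uint done = 0, want, i;
	int got;

	/* Probe the part; returns NULL when no supported serial flash is found */
	sfl = sflash_init(sih, cc);
	if (sfl == NULL || (offset + len) > sfl->size)
		return -1;

	/* Read-modify-erase-write of the whole range, see sflash_commit() above */
	if (sflash_commit(sih, cc, offset, len, data) < 0)
		return -1;

	/* Verify in small chunks */
	while (done < len) {
		want = len - done;
		if (want > sizeof(readback))
			want = sizeof(readback);
		got = sflash_read(sih, cc, offset + done, want, readback);
		if (got <= 0)
			return -1;
		for (i = 0; i < (uint)got; i++)
			if (readback[i] != data[done + i])
				return -1;
		done += (uint)got;
	}
	return 0;
}

Note that sflash_read() can legitimately return fewer bytes than requested when the source or destination is not word aligned, which is why the verify loop advances by whatever it actually got rather than by the chunk size it asked for.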
/* * Copyright (C) 2011 - 2012 mooege project - http://www.mooege.org * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ namespace Mooege.Core.MooNet.Helpers { public static class NotificationTypeHelper { /// <summary> /// Returns the NotificationType for the given notification. /// </summary> /// <param name="notification">The notification</param> /// <returns><see cref="NotificationType"/></returns> public static NotificationType GetNotificationType(this bnet.protocol.notification.Notification notification) { switch (notification.Type) { case "WHISPER": return NotificationType.Whisper; } return NotificationType.Unknown; } /// <summary> /// Notification types /// </summary> public enum NotificationType { Unknown, Whisper } } }
mdz444/mooege
src/Mooege/Core/MooNet/Helpers/NotificationTypeHelper.cs
C#
gpl-2.0
1,645
/*
   Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */

/*
**  my_print_defaults.c:
**  Print all parameters in a default file that will be given to some program.
**
**  Written by Monty
*/

#include <my_global.h>
#include <my_sys.h>
#include <m_string.h>
#include <my_getopt.h>
#include "my_default.h"

const char *config_file="my";                   /* Default config file */
static char *my_login_path;
static my_bool show_passwords;
uint verbose= 0, opt_defaults_file_used= 0;
const char *default_dbug_option="d:t:o,/tmp/my_print_defaults.trace";

static struct my_option my_long_options[] =
{
  /*
    NB: --config-file is troublesome, because get_defaults_options() doesn't
    know about it, but we pretend --config-file is like --defaults-file.  In
    fact they behave differently: see the comments at the top of
    mysys/default.c for how --defaults-file should behave.

    This --config-file option behaves as:
    - If it has a directory name part (absolute or relative), then only this
      file is read; no error is given if the file doesn't exist
    - If the file has no directory name part, the standard locations are
      searched for a file of this name (and standard filename extensions are
      added if the file has no extension)
  */
  {"config-file", 'c',
   "Deprecated, please use --defaults-file instead. "
   "Name of config file to read; if no extension is given, default "
   "extension (e.g., .ini or .cnf) will be added",
   &config_file, &config_file, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#ifdef DBUG_OFF
  {"debug", '#', "This is a non-debug version. Catch this and exit",
   0, 0, 0, GET_DISABLED, OPT_ARG, 0, 0, 0, 0, 0, 0},
#else
  {"debug", '#', "Output debug log", &default_dbug_option,
   &default_dbug_option, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
#endif
  {"defaults-file", 'c',
   "Like --config-file, except: if first option, "
   "then read this file only, do not read global or per-user config "
   "files; should be the first option",
   &config_file, &config_file, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"defaults-extra-file", 'e',
   "Read this file after the global config file and before the config "
   "file in the user's home directory; should be the first option",
   &my_defaults_extra_file, &my_defaults_extra_file, 0, GET_STR,
   REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"defaults-group-suffix", 'g',
   "In addition to the given groups, read also groups with this suffix",
   &my_defaults_group_suffix, &my_defaults_group_suffix, 0, GET_STR,
   REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"extra-file", 'e', "Deprecated. Synonym for --defaults-extra-file.",
   &my_defaults_extra_file, &my_defaults_extra_file, 0, GET_STR,
   REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"no-defaults", 'n', "Ignore reading of default option file(s), "
   "except for login file.",
   0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"login-path", 'l', "Path to be read from under the login file.",
   &my_login_path, &my_login_path, 0, GET_STR, REQUIRED_ARG,
   0, 0, 0, 0, 0, 0},
  {"show", 's', "Show passwords in plain text.",
   &show_passwords, &show_passwords, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"help", '?', "Display this help message and exit.",
   0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"verbose", 'v', "Increase the output level",
   0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"version", 'V', "Output version information and exit.",
   0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
  {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};


static void usage(my_bool version)
{
  printf("%s Ver 1.6 for %s at %s\n", my_progname, SYSTEM_TYPE, MACHINE_TYPE);
  if (version)
    return;
  puts("This software comes with ABSOLUTELY NO WARRANTY. This is free software,\nand you are welcome to modify and redistribute it under the GPL license\n");
  puts("Prints all arguments that are given to some program using the default files");
  printf("Usage: %s [OPTIONS] groups\n", my_progname);
  my_print_help(my_long_options);
  my_print_default_files(config_file);
  my_print_variables(my_long_options);
  printf("\nExample usage:\n%s --defaults-file=example.cnf client mysql\n",
         my_progname);
}


static my_bool
get_one_option(int optid, const struct my_option *opt MY_ATTRIBUTE((unused)),
               char *argument MY_ATTRIBUTE((unused)))
{
  switch (optid) {
    case 'c':
      opt_defaults_file_used= 1;
      break;
    case 'n':
      break;
    case 'I':
    case '?':
      usage(0);
      exit(0);
    case 'v':
      verbose++;
      break;
    case 'V':
      usage(1);
      exit(0);
    case '#':
      DBUG_PUSH(argument ? argument : default_dbug_option);
      break;
  }
  return 0;
}


static int get_options(int *argc, char ***argv)
{
  int ho_error;

  if ((ho_error= handle_options(argc, argv, my_long_options, get_one_option)))
    exit(ho_error);

  if (*argc < 1)
  {
    usage(0);
    return 1;
  }
  return 0;
}


int main(int argc, char **argv)
{
  int count, error, args_used;
  char **load_default_groups, *tmp_arguments[6];
  char **argument, **arguments, **org_argv;
  char *defaults, *extra_defaults, *group_suffix, *login_path;
  MY_INIT(argv[0]);

  org_argv= argv;
  args_used= get_defaults_options(argc, argv, &defaults, &extra_defaults,
                                  &group_suffix, &login_path, FALSE);

  /* Copy defaults-xxx arguments & program name */
  count= args_used + 1;
  arguments= tmp_arguments;
  memcpy((char*) arguments, (char*) org_argv, count * sizeof(*org_argv));
  arguments[count]= 0;

  /* Check out the args */
  if (!(load_default_groups= (char**) my_malloc((argc + 1) * sizeof(char*),
                                                MYF(MY_WME))))
    exit(1);
  if (get_options(&argc, &argv))
    exit(1);
  memcpy((char*) load_default_groups, (char*) argv,
         (argc + 1) * sizeof(*argv));

  if ((error= load_defaults(config_file, (const char **) load_default_groups,
                            &count, &arguments)))
  {
    if (verbose && opt_defaults_file_used)
    {
      if (error == 1)
        fprintf(stderr, "WARNING: Defaults file '%s' not found!\n",
                config_file);
      /* This error is not available now. For the future */
      if (error == 2)
        fprintf(stderr, "WARNING: Defaults file '%s' is not a regular file!\n",
                config_file);
    }
    error= 2;
    exit(error);
  }

  for (argument= arguments + 1; *argument; argument++)
    if (!my_getopt_is_args_separator(*argument)) /* skip arguments separator */
    {
      if (!show_passwords && strncmp(*argument, "--password", 10) == 0)
        puts("--password=*****");
      else
        puts(*argument);
    }

  my_free(load_default_groups);
  free_defaults(arguments);
  exit(0);
}
Jun-Yuan/percona-server
extra/my_print_defaults.c
C
gpl-2.0
7,307
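For context, what my_print_defaults prints is exactly the option list that load_defaults() prepends to argv for a real client program. The following is a minimal, hypothetical sketch (not part of the repository above) of a client consuming the same defaults with the mysys calls already used in that file; the group names "client" and "example" are made-up placeholders.

#include <my_global.h>
#include <my_sys.h>
#include "my_default.h"

static const char *groups[]= { "client", "example", 0 };  /* placeholder group names */

int main(int argc, char **argv)
{
  int i;
  MY_INIT(argv[0]);

  /* Prepend the [client]/[example] options from the default files to argv,
     the same strings my_print_defaults would list for these groups. */
  if (load_defaults("my", groups, &argc, &argv))
    return 1;

  for (i= 1; i < argc; i++)
    puts(argv[i]);             /* print the injected options */

  free_defaults(argv);         /* releases the array allocated by load_defaults */
  my_end(0);
  return 0;
}

A real client would pass the resulting argc/argv on to handle_options() instead of printing them; the sketch only shows where the defaults enter the argument vector.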
/* Copyright (C) 2003 MySQL AB

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */

#include "common.h"
#include <NdbTest.hpp>
#include <NdbMain.h>

SQLRETURN SQLHENVFREE_check, SQLHDBC_check;

// NDB_COMMAND(SQLTest1, ......., 65535)

int NDBT_ALLOCHANDLE_HDBC()
{
  SQLHENV henv;
  SQLHDBC hdbc;

  /*****************************HDBC Handle*****************************/
  SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &henv);
  SQLHDBC_check = SQLAllocHandle(SQL_HANDLE_DBC, henv, &hdbc);

  if (SQLHDBC_check == -1)
  {
    return NDBT_ProgramExit(NDBT_FAILED);
  }

  if (SQLHDBC_check == 0)
  {
    return 0;
  }

  SQLHENVFREE_check = SQLFreeHandle(SQL_HANDLE_ENV, henv);

  if (SQLHENVFREE_check == -1)
  {
    // Deallocate any allocated memory, if it exists
    return(-1);
    //return NDBT_ProgramExit(NDBT_FAILED);
  }

  if (SQLHENVFREE_check == 0)
  {
    return 0;
  }

  // Other SQLRETURN codes (e.g. SQL_SUCCESS_WITH_INFO) are treated as success
  // so that the function always returns a value.
  return 0;
}
project-zerus/mariadb
storage/ndb/test/odbc/client/NDBT_ALLOCHANDLE_HDBC.cpp
C++
gpl-2.0
1,565
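As a point of reference, the canonical ODBC 3.x allocation sequence that the test above exercises looks roughly as follows. This is an illustrative sketch only, using standard sql.h/sqlext.h calls (SQLSetEnvAttr, the SQL_SUCCEEDED macro); the function name alloc_and_free_connection is made up and is not part of the NDB test suite.

#include <sql.h>
#include <sqlext.h>

int alloc_and_free_connection(void)
{
  SQLHENV henv= SQL_NULL_HENV;
  SQLHDBC hdbc= SQL_NULL_HDBC;
  SQLRETURN rc;

  rc= SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &henv);
  if (!SQL_SUCCEEDED(rc))
    return -1;

  /* ODBC 3.x requires declaring the version before a connection handle
     can be allocated on the environment. */
  SQLSetEnvAttr(henv, SQL_ATTR_ODBC_VERSION, (SQLPOINTER) SQL_OV_ODBC3, 0);

  rc= SQLAllocHandle(SQL_HANDLE_DBC, henv, &hdbc);
  if (SQL_SUCCEEDED(rc))
    SQLFreeHandle(SQL_HANDLE_DBC, hdbc);   /* free the child handle first */

  SQLFreeHandle(SQL_HANDLE_ENV, henv);     /* then the environment */
  return SQL_SUCCEEDED(rc) ? 0 : -1;
}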
/************************************************************* * * MathJax/jax/output/SVG/fonts/Gyre-Pagella/Script/Regular/Main.js * * Copyright (c) 2013 The MathJax Consortium * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ MathJax.OutputJax['SVG'].FONTDATA.FONTS['GyrePagellaMathJax_Script'] = { directory: 'Script/Regular', family: 'GyrePagellaMathJax_Script', id: 'GYREPAGELLASCRIPT', 0x20: [0,0,250,0,0,''], 0xA0: [0,0,250,0,0,''], 0x210A: [398,398,508,-62,562,'299 -36c-173 -97 -338 -114 -338 -221c0 -26 43 -117 176 -117c31 0 98 3 115 93zM562 215c-51 -85 -111 -147 -173 -194l-54 -273c-26 -135 -162 -146 -198 -146c-144 0 -199 99 -199 141c0 124 182 137 365 243l17 86c-35 -47 -85 -80 -137 -80 c-104 0 -160 73 -160 164c0 126 111 242 237 242c48 0 85 -22 114 -53l4 -3c13 0 35 48 61 48h21l-66 -344c56 44 109 101 153 176zM371 325c-28 31 -63 49 -111 49c-81 0 -161 -110 -161 -232c0 -76 35 -126 84 -126c60 0 124 52 151 127'], 0x210B: [798,18,790,55,844,'233 267c-98 -47 -163 -124 -163 -220c0 -26 12 -40 30 -40c25 0 60 27 89 84c11 22 22 68 44 176zM844 215l-37 -72c-48 -94 -104 -151 -176 -151c-68 0 -89 61 -89 122c0 30 4 46 47 267l-270 -83c-32 -163 -42 -206 -63 -233c-34 -55 -90 -83 -134 -83 c-37 0 -67 21 -67 65c0 104 74 192 183 244l58 297c6 28 10 53 10 76c0 51 -17 89 -59 121l10 13c93 -69 115 -161 115 -212c0 -10 -1 -19 -3 -26l-45 -236l270 82c42 209 63 303 85 332c24 34 57 42 72 42c16 0 68 -8 68 -99c0 -102 -56 -218 -147 -270 c-30 -156 -58 -297 -58 -349c0 -31 7 -45 30 -45c29 0 94 25 149 131l36 74zM803 686c0 62 -28 78 -44 78c-22 0 -45 -124 -82 -326c82 51 126 154 126 248'], 0x2110: [787,8,577,75,647,'647 722c0 -69 -52 -91 -93 -91c-12 0 -25 2 -39 6c-17 -47 -25 -168 -90 -507c-5 -26 -15 -49 -28 -68c5 -1 9 -1 14 -1c93 0 165 83 205 161l15 -7c-34 -68 -119 -169 -220 -169c-9 0 -17 1 -26 2c-35 -38 -87 -56 -138 -56c-87 0 -172 52 -172 138 c0 73 75 151 110 169l6 -13c-34 -17 -100 -92 -100 -156c0 -61 62 -112 127 -112c36 0 71 15 97 50c-15 0 -16 9 -16 14c0 8 7 14 14 14c0 0 4 0 16 -6c9 18 17 39 22 65c57 290 68 453 98 509c-71 34 -150 82 -207 82c-65 0 -91 -18 -137 -108l-14 7c47 93 78 125 151 125 c65 0 148 -52 220 -86c49 70 100 103 136 103c30 0 49 -23 49 -65zM632 722c0 27 -7 45 -20 45c-17 0 -46 -31 -88 -109c10 -3 21 -4 30 -4c32 0 78 8 78 68'], 0x2112: [755,8,789,52,842,'842 215l-36 -72c-48 -94 -103 -151 -175 -151c-114 0 -192 76 -262 128c-58 -82 -166 -128 -239 -128c-45 0 -78 18 -78 55c0 113 161 147 233 147c15 0 28 -4 37 -5c15 50 33 153 88 430c18 95 100 136 178 136c87 0 171 -50 171 -136c0 -73 -74 -152 -109 -170l-6 14 c33 17 100 92 100 156c0 61 -63 112 -128 112c-56 0 -113 -39 -132 -138c-47 -241 -68 -363 -91 -427c101 -48 162 -148 262 -148c34 0 93 47 135 130l38 74zM311 156c-18 8 -42 12 -65 12c-82 0 -178 -48 -178 -121c0 -22 20 -35 50 -35c121 0 193 144 193 144'], 0x211B: [764,8,905,55,958,'958 215l-31 -62c-53 -104 -77 -161 -150 -161c-127 0 -218 242 -293 343c-6 6 -7 11 -7 16c0 9 12 35 56 42c9 -11 26 -42 68 -42c152 0 163 155 163 207c0 87 -66 167 -182 167c-41 0 -89 -9 -144 -33c35 -54 44 -110 44 -143c0 -9 0 -17 -1 -22 c-6 -55 -6 -38 -75 -394c-19 -95 -102 -141 
-180 -141c-87 0 -171 52 -171 138c0 73 74 151 109 169l8 -13c-34 -17 -102 -92 -102 -156c0 -61 64 -112 129 -112c56 0 112 38 131 137l78 399c5 28 9 53 9 77c0 16 -2 32 -6 46c-100 -58 -151 -161 -151 -251 c0 -50 16 -97 49 -130l-11 -10c-35 35 -54 86 -54 140c0 97 55 206 162 270c-9 20 -25 39 -47 55l10 13c24 -18 43 -37 59 -57c60 30 115 42 166 42c147 0 248 -103 248 -202c0 -101 -98 -219 -241 -219h-20c77 -128 124 -312 195 -312c65 0 87 48 137 144l31 62'], 0x212C: [764,8,887,47,940,'636 406c-24 9 -52 15 -85 15c-65 0 -93 -10 -93 -21c0 -23 48 -26 85 -26c34 0 67 12 93 32zM764 183c0 33 -10 143 -91 202c-36 -21 -80 -34 -130 -34c-52 0 -101 16 -101 49c0 21 38 45 109 45c38 0 73 -8 106 -20c36 38 58 91 58 143c0 78 -48 165 -172 165 c-57 0 -107 -14 -152 -36c39 -56 48 -115 48 -150c0 -7 0 -15 -1 -20l-58 -301c-6 -26 -13 -50 -22 -71c79 -51 144 -139 232 -139c48 0 93 14 124 43c0 0 -20 3 -20 15c0 8 7 14 15 14c5 0 14 -6 23 -10c21 27 32 62 32 105zM940 215c-33 -67 -70 -158 -166 -162 c-47 -39 -112 -61 -184 -61c-102 0 -175 86 -245 138c-60 -90 -160 -138 -228 -138c-40 0 -70 18 -70 55c0 52 53 144 203 144c14 0 28 -1 41 -5c4 13 7 27 11 43l65 325c5 27 9 52 9 74c0 21 -4 39 -9 56c-95 -60 -150 -164 -150 -258c0 -50 14 -96 48 -130l-10 -10 c-37 36 -53 87 -53 140c0 101 57 211 157 276c-9 18 -24 35 -43 49l9 13c21 -15 39 -33 53 -50c47 26 103 43 165 43c172 0 242 -99 242 -190c0 -60 -31 -121 -83 -163c85 -48 140 -131 140 -209c0 -48 -18 -90 -49 -123c73 13 96 80 133 150zM282 164c-9 2 -18 4 -28 4 c-141 0 -192 -74 -192 -121c0 -22 20 -31 46 -31c47 0 120 37 169 135c3 4 4 9 5 13'], 0x212F: [398,8,406,21,459,'459 215l-30 -59c-58 -116 -155 -164 -239 -164c-91 0 -169 57 -169 154c0 74 61 252 236 252c71 0 112 -33 112 -87c0 -51 -47 -150 -187 -150c-31 0 -57 7 -79 17c-5 -23 -7 -43 -7 -54c0 -77 43 -108 100 -108c74 0 169 54 217 148l32 58zM307 311c0 47 -18 63 -50 63 c-88 0 -132 -102 -150 -179c20 -10 45 -18 75 -18c83 0 125 90 125 134'], 0x2130: [757,8,569,55,623,'623 215l-30 -59c-61 -118 -169 -164 -273 -164c-137 0 -265 82 -265 203c0 85 63 174 158 220c-23 28 -36 63 -36 104c0 117 113 238 244 238c111 0 165 -58 165 -130c0 -38 -14 -81 -44 -123l-12 11c29 39 42 79 42 113c0 64 -48 105 -149 105 c-154 0 -172 -201 -172 -252c0 -19 3 -35 8 -49c27 7 56 13 87 13c43 0 75 -19 75 -45c0 -31 -49 -49 -70 -49c-45 0 -84 16 -116 42c-91 -56 -102 -176 -102 -210c0 -110 93 -167 197 -167c94 0 194 45 247 148l31 58zM400 400c0 11 -26 21 -54 21c-30 0 -55 -4 -77 -11 c16 -23 42 -36 83 -36c17 0 48 13 48 26'], 0x2131: [802,8,754,55,835,'835 768c0 -20 -21 -53 -32 -69c-26 6 -50 7 -74 7c-92 0 -174 -30 -258 -48c10 -21 17 -46 17 -73c0 -13 -2 -27 -4 -43l-46 -238c44 -2 79 -5 107 -5c114 0 91 20 118 121c4 17 18 23 33 23c22 0 46 -15 46 -36c0 -5 -1 -9 -4 -14c-20 -42 -40 -69 -59 -89 c-7 -15 -9 -31 -9 -44c0 -39 24 -66 58 -66c49 0 59 24 64 35l15 -8c-6 -13 -19 -44 -79 -44c-42 0 -79 36 -79 90c0 3 2 13 2 16c-20 -11 -44 -15 -71 -15c-38 0 -83 6 -146 12l-28 -150c-19 -95 -102 -138 -180 -138c-87 0 -171 52 -171 138h15c0 -61 64 -112 129 -112 c56 0 113 38 133 137l26 128c-15 0 -30 2 -46 2l17 23h34l45 222c5 29 9 55 9 77c0 14 -1 27 -4 40c-15 -1 -31 -2 -46 -2c-172 0 -245 70 -245 112c0 32 33 45 77 45c109 0 207 -65 247 -109c103 32 209 92 369 92c14 0 20 -6 20 -17zM402 681c-48 96 -155 104 -193 104 c-40 0 -71 -10 -71 -28c0 -25 53 -84 196 -84c24 0 46 3 68 8'], 0x2133: [783,73,940,47,1011,'1011 763c0 -13 -14 -56 -23 -64c-19 7 -48 37 -81 37c-6 0 -11 -1 -18 -4l-82 -576c-5 -34 -7 -61 -7 -83c0 -34 9 -52 33 -52h2c27 1 76 69 106 127l38 74l14 -7l-36 -72c-63 -123 -96 -151 -169 -151c-51 0 -64 39 -64 90c0 25 3 52 7 78l62 447l-291 -490 c0 -6 1 -12 1 
-18c0 -68 -38 -129 -69 -172c-11 12 -17 30 -17 50c0 26 9 54 20 74c-3 21 -9 44 -14 74l-80 464c-5 32 -13 58 -22 81c-119 -656 -166 -678 -212 -678c-23 0 -47 22 -62 51c8 4 15 9 28 9c0 0 6 -1 10 -1c5 -24 14 -43 24 -43h3c21 0 65 10 192 695 c-19 29 -47 48 -83 64l5 16c80 -33 177 -85 195 -190l76 -441l322 546c36 59 86 78 133 78c20 0 41 -4 59 -10v-3'], 0x2134: [398,8,471,23,524,'360 289c0 49 -21 85 -92 85c-150 0 -172 -197 -172 -260c0 -61 25 -98 96 -98c43 0 75 18 98 45c-8 3 -10 8 -10 13c0 7 6 14 13 14c0 0 2 0 14 -5c49 77 53 191 53 206zM524 215c-34 -68 -72 -162 -173 -162h-6c-42 -37 -94 -61 -153 -61c-96 0 -169 69 -169 165 c0 129 123 241 245 241c104 0 161 -74 161 -163c0 -59 -26 -121 -68 -166c86 5 110 78 149 153'], 0x1D49C: [783,8,783,33,836,'836 215l-36 -72c-60 -117 -115 -151 -158 -151c-61 0 -96 74 -104 139l-11 111h-231c-33 -72 -66 -137 -92 -181c-25 -43 -57 -69 -96 -69c-42 0 -75 39 -75 72c0 4 1 8 2 11l14 -3l-1 -8c0 -22 25 -56 60 -56c21 0 47 17 75 66c25 43 55 102 86 168h-34l7 31h41 c77 164 159 358 180 428c-14 30 -38 50 -73 66l5 16c130 -51 168 -162 172 -201l34 -309h67l-6 -31h-59l11 -90c10 -96 22 -136 53 -136c23 0 70 39 118 132l37 74zM523 273l-34 316c-3 29 -7 53 -13 75c-30 -85 -99 -248 -165 -391h212'], 0x1D49E: [757,8,568,55,621,'621 215l-29 -59c-62 -118 -164 -164 -262 -164c-208 0 -275 160 -275 304c0 132 74 461 382 461c113 0 160 -64 160 -136c0 -65 -39 -136 -106 -170l-6 14c57 29 96 95 96 154c0 61 -41 114 -144 114c-280 0 -306 -387 -306 -452c0 -146 69 -265 217 -265 c86 0 179 49 229 148l30 58'], 0x1D49F: [764,8,867,39,920,'920 215c-33 -68 -94 -130 -196 -130h-9c-53 -56 -120 -93 -195 -93c-84 0 -135 59 -192 106c-64 -77 -158 -106 -206 -106c-41 0 -83 18 -83 55c0 52 57 129 164 129c31 0 58 -8 83 -21c10 22 18 47 23 74l65 325c6 27 10 52 10 74c0 23 -4 45 -12 65 c-116 -57 -185 -165 -185 -264c0 -49 18 -96 55 -133l-11 -10c-40 40 -59 91 -59 144c0 104 70 220 189 281c-9 14 -22 29 -37 40l9 13c18 -14 34 -28 47 -44c49 22 108 37 171 37c210 0 298 -150 298 -318c0 -120 -46 -250 -121 -339c91 1 138 46 178 122zM276 134 c-22 12 -46 18 -73 18c-99 0 -148 -64 -148 -105c0 -22 35 -31 61 -31c49 0 117 39 160 118zM771 454c0 149 -60 279 -220 279c-58 0 -112 -10 -157 -30c43 -58 53 -119 53 -156c0 -7 0 -15 -1 -20l-59 -301c-7 -43 -24 -79 -45 -109c56 -45 104 -101 178 -101 c51 0 98 31 137 80c-4 3 -4 5 -4 9c0 7 6 15 13 15c4 0 6 -3 6 -3c60 87 99 218 99 337'], 0x1D4A2: [757,406,637,51,690,'690 215c-45 -77 -98 -134 -153 -180l-56 -287c-26 -135 -162 -154 -199 -154c-143 0 -199 107 -199 149c0 128 185 139 368 252l18 88c-53 -57 -130 -91 -202 -91c-112 0 -216 80 -216 272c0 153 71 493 386 493c113 0 160 -64 160 -136c0 -65 -39 -136 -106 -170l-6 14 c57 29 96 95 96 154c0 61 -41 114 -144 114c-280 0 -313 -408 -313 -518c0 -147 66 -199 144 -199c73 0 156 44 208 105l39 201h78l-51 -261c48 42 94 94 134 161zM447 -27c-174 -104 -340 -119 -340 -230c0 -26 43 -125 175 -125c31 0 99 11 116 101'], 0x1D4A5: [787,406,679,55,749,'733 722c0 27 -6 45 -19 45c-17 0 -47 -31 -89 -109c11 -3 21 -4 30 -4c33 0 78 8 78 68zM749 722c0 -69 -52 -91 -94 -91c-11 0 -24 2 -39 6c-17 -44 -21 -143 -86 -481l-28 -147c80 48 157 116 216 213l14 -7c-64 -107 -147 -177 -234 -228l-46 -239 c-26 -135 -162 -154 -198 -154c-143 0 -199 107 -199 149c0 115 176 135 361 221l36 191c58 290 68 453 98 509c-70 34 -150 82 -207 82c-65 0 -91 -18 -136 -108l-15 7c47 93 78 125 151 125c65 0 148 -52 220 -86c49 70 100 103 136 103c30 0 50 -23 50 -65zM412 -57 c-174 -80 -334 -100 -334 -200c0 -26 43 -125 176 -125c31 0 98 11 115 101'], 0x1D4A6: [788,8,881,62,935,'589 722c0 23 -4 33 -9 33c-13 0 -35 -48 -56 -108c32 8 65 29 65 75zM935 215l-31 -62c-54 
-104 -77 -161 -150 -161c-127 0 -165 288 -240 390c-30 -37 -54 -75 -72 -109l-29 -143c-19 -95 -100 -138 -178 -138c-87 0 -173 52 -173 138c0 73 75 151 110 169l7 -13 c-33 -17 -101 -92 -101 -156c0 -61 65 -112 130 -112c56 0 113 38 133 137l72 374c8 38 19 73 30 103c-80 22 -113 109 -165 109c-22 0 -89 -31 -91 -147h-15c1 122 74 163 106 163c63 0 94 -90 172 -111c36 85 88 135 121 135c19 0 34 -19 34 -59c0 -59 -43 -84 -86 -91 c-11 -34 -21 -71 -28 -102l-37 -191c95 151 309 361 412 434c19 15 30 16 38 16c9 0 14 -5 14 -11c-7 -59 -7 -87 -64 -91c-6 0 -13 2 -19 8c-33 -11 -164 -127 -272 -252c109 -117 107 -426 191 -426c65 0 85 48 134 144l31 62'], 0x1D4A9: [783,8,832,42,896,'896 719c0 -14 -2 -27 -5 -33c-16 0 -42 12 -68 12c-29 0 -56 -14 -68 -71l-111 -572c14 -26 32 -47 58 -47c42 0 82 41 133 140l36 74l14 -7l-36 -72c-48 -94 -90 -151 -144 -151c-65 0 -134 44 -172 139l-182 458l-38 86c-50 -281 -84 -446 -110 -544 c-29 -110 -69 -139 -100 -139c-39 0 -61 35 -61 76h15c0 -38 20 -60 46 -60c32 0 56 48 76 127c25 98 60 269 116 572c-18 28 -39 44 -74 60l5 16c80 -33 159 -89 203 -190l181 -456l18 -49l105 541c21 96 95 128 158 128c2 -4 5 -21 5 -38'], 0x1D4AA: [757,8,788,53,841,'841 215c-34 -68 -71 -153 -173 -153c-30 0 -57 6 -79 11c-68 -50 -152 -81 -248 -81c-184 0 -288 115 -288 291c0 244 171 474 438 474c182 0 288 -113 288 -292c0 -146 -63 -288 -171 -378c19 -5 38 -9 60 -9c91 0 124 74 159 144zM705 502c0 142 -61 231 -214 231 c-291 0 -364 -322 -364 -487c0 -141 60 -230 214 -230c84 0 150 28 202 71c-10 5 -24 7 -24 20c0 7 5 14 13 14c6 0 10 -3 32 -16c108 108 141 289 141 397'], 0x1D4AB: [764,8,833,55,887,'887 215c-39 -76 -136 -176 -284 -176c-73 0 -144 26 -204 69c-27 -78 -103 -116 -173 -116c-87 0 -171 52 -171 138h15c0 -61 64 -112 129 -112c56 0 112 38 131 137l3 14c-51 61 -83 139 -83 226c0 104 45 229 156 299c-9 22 -25 41 -47 57l10 13c25 -18 43 -37 59 -57 c49 26 112 42 186 42c174 0 261 -130 261 -254c0 -135 -107 -308 -321 -308c-56 0 -108 15 -133 28c-14 -73 -14 -82 -17 -91c59 -45 128 -69 199 -69c141 0 234 97 271 167zM798 510c0 106 -54 215 -199 215c-63 0 -117 -13 -161 -33c37 -58 44 -115 44 -147 c0 -8 -1 -15 -1 -18c-5 -49 -6 -41 -55 -286c20 -11 58 -30 128 -30c216 0 244 238 244 299zM417 628c0 16 -1 32 -6 49c-104 -63 -146 -180 -146 -282c0 -78 28 -148 72 -205l71 364c4 23 9 47 9 74'], 0x1D4AC: [757,244,788,56,841,'707 454c0 156 -83 279 -218 279c-297 0 -358 -335 -358 -452c0 -143 72 -260 223 -291c265 111 353 304 353 464zM841 215c-112 -218 -303 -250 -408 -250c-25 0 -51 2 -73 5c-65 -27 -118 -57 -171 -105l271 -34c12 -1 22 -3 33 -3c61 0 100 26 115 58l12 19l27 -6 c-40 -95 -109 -141 -187 -141c-10 0 -21 1 -31 3c-237 32 -250 35 -263 35c-31 0 -46 -13 -66 -40l-27 2c9 14 15 26 23 37c72 100 152 148 228 182c-165 32 -268 143 -268 330c0 203 156 450 433 450c174 0 294 -141 294 -312c0 -162 -108 -350 -394 -462 c13 -1 31 -3 44 -3c247 0 351 158 394 242'], 0x1D4AE: [764,8,608,62,662,'662 215c-28 -54 -73 -103 -137 -127c-49 -61 -147 -96 -243 -96c-140 0 -220 73 -220 143c0 13 0 29 8 42c8 15 22 21 35 21c17 0 32 -11 32 -22s-13 -20 -13 -41c0 -50 24 -119 158 -119c61 0 126 23 165 70c-8 1 -15 6 -15 15c0 8 5 15 13 15c4 0 5 -2 18 -8 c12 21 18 44 18 71c0 167 -161 260 -172 411l-101 -200l-13 8l116 230c6 70 48 136 126 136l27 -44c-54 0 -93 -18 -114 -52c-3 -5 -17 -24 -17 -62c0 -131 226 -220 226 -427c0 -24 -6 -48 -18 -68c45 20 79 58 106 111'], 0x1D4AF: [897,8,555,43,971,'971 759l-12 -10c-72 60 -139 62 -183 62c-132 0 -203 -73 -301 -101c23 -35 42 -81 42 -134c0 -16 -1 -33 -5 -49l-76 -397c-4 -22 -13 -43 -24 -60c5 -1 11 -1 16 -1c93 0 126 75 166 153l14 -7c-33 -68 -79 -162 -180 -162c-9 0 -18 2 -26 3c-35 -43 -91 -64 
-145 -64 c-87 0 -171 52 -171 138h15c0 -61 63 -112 128 -112c38 0 75 17 103 57c-11 3 -15 10 -15 15c0 6 7 14 13 14c0 0 3 0 13 -5c8 15 15 34 18 56l82 414c6 26 8 49 8 71s-2 43 -10 63c-15 -2 -28 -4 -43 -4c-172 0 -355 89 -355 167c0 22 23 31 58 31c77 0 210 -44 285 -99 c25 -17 47 -36 65 -57c100 29 182 96 298 96c73 0 142 -11 222 -78zM425 735c-12 16 -28 32 -48 48c-75 58 -216 98 -282 98c-22 0 -36 -5 -36 -15c0 -66 162 -138 305 -138c21 0 42 3 61 7'], 0x1D4B0: [798,8,657,51,710,'710 215l-37 -72c-62 -123 -94 -151 -167 -151c-44 0 -56 29 -56 69c0 35 9 82 17 120c-89 -111 -187 -189 -241 -189c-96 0 -175 70 -175 219c0 187 106 318 106 455c0 49 -15 87 -57 119l9 13c93 -69 115 -161 115 -212c0 -76 -107 -245 -107 -416 c0 -85 30 -154 122 -154c39 0 143 82 234 200l104 533h78l-115 -593c-10 -44 -17 -79 -17 -103c0 -22 7 -32 27 -32h1c28 1 77 69 107 127l38 74'], 0x1D4B1: [816,8,606,52,659,'659 215c-34 -68 -71 -130 -134 -130c-26 0 -47 16 -65 44c-60 -86 -138 -137 -223 -137c-98 0 -185 73 -185 226c0 47 8 102 26 163c30 100 79 190 79 285c0 49 -15 87 -57 119l9 13c93 -69 115 -161 115 -212c0 -53 -46 -144 -68 -212c-21 -59 -36 -131 -36 -195 c0 -89 32 -163 127 -163c64 0 135 58 187 167c-31 84 -44 202 -44 290c0 133 21 219 43 265c22 47 52 78 91 78c69 0 73 -89 73 -148c0 -134 -16 -356 -128 -525c16 -27 35 -43 56 -43c48 0 82 50 120 122zM523 655c0 86 -16 117 -34 117c-11 0 -24 0 -42 -39 c-26 -58 -41 -151 -41 -260c0 -97 14 -199 37 -270c48 109 80 262 80 452'], 0x1D4B2: [816,8,948,48,1001,'1001 215c-34 -68 -73 -130 -135 -130c-26 0 -47 16 -65 44c-59 -86 -130 -137 -203 -137c-83 0 -144 52 -162 163c-58 -108 -115 -163 -225 -163c-94 0 -163 67 -163 206c0 193 109 331 109 468c0 49 -15 87 -57 119l9 13c93 -69 115 -161 115 -212 c0 -75 -113 -266 -113 -434c0 -77 27 -136 113 -136c127 0 188 208 234 417l27 138h78l-39 -198c-17 -88 -26 -158 -26 -212c0 -101 32 -145 112 -145c56 0 117 56 166 161c-32 83 -45 205 -45 296c0 133 22 219 44 265c22 47 52 78 91 78c69 0 73 -88 73 -146 c0 -225 -50 -401 -129 -527c16 -27 35 -43 56 -43c48 0 83 50 121 122zM865 655c0 90 -16 117 -39 117c-7 0 -21 -2 -38 -39c-26 -58 -42 -151 -42 -260c0 -100 15 -204 39 -277c47 110 80 266 80 459'], 0x1D4B3: [757,8,672,60,725,'725 215l-44 -88c-44 -85 -82 -135 -119 -135c-78 0 -124 52 -156 219l-9 46c-65 -144 -143 -265 -234 -265c-71 0 -103 47 -103 107c0 70 44 155 114 191l11 -21c-61 -31 -102 -109 -102 -170c0 -39 16 -83 80 -83c84 0 161 132 226 283l-44 224 c-23 115 -38 210 -81 210c-31 0 -61 -26 -126 -152l-14 8c70 139 93 168 137 168c91 0 132 -73 163 -236l18 -92c33 85 61 166 87 224c34 74 103 102 160 102c12 0 23 -1 34 -4c0 0 1 -2 1 -6c0 -17 -8 -56 -15 -62c-17 3 -49 22 -81 22c-27 0 -56 -13 -77 -60 c-28 -64 -62 -161 -101 -260l34 -176c23 -118 33 -193 78 -193c26 0 61 33 113 135l36 71'], 0x1D4B4: [798,406,649,51,702,'702 215c-51 -85 -110 -146 -173 -193l-53 -274c-26 -135 -159 -154 -198 -154c-143 0 -200 107 -200 149c0 124 183 137 367 243l37 192c-92 -127 -195 -186 -256 -186c-96 0 -175 70 -175 219c0 184 106 318 106 455c0 49 -15 87 -57 119l9 13 c93 -69 115 -161 115 -212c0 -83 -107 -239 -107 -416c0 -85 30 -154 122 -154c44 0 155 58 250 200l104 533h78l-137 -703c58 44 110 101 155 176zM441 -36c-174 -97 -340 -114 -340 -221c0 -26 45 -125 177 -125c31 0 99 22 115 101'], 0x1D4B5: [764,14,673,47,732,'732 745l-87 -98l-482 -546c75 -11 321 -48 414 -48c11 0 50 2 63 28l72 141l15 -7c-70 -136 -108 -229 -150 -229c-112 0 -353 50 -460 50c-14 0 -21 -7 -55 -44l-15 16l49 54l519 588c-86 20 -211 49 -298 49c-48 0 -86 -9 -97 -33l-9 -21l-16 6c40 82 47 104 52 108 c7 4 23 5 47 5c79 0 231 -18 307 -42c19 -7 33 -10 44 -10c19 0 31 10 51 29l19 21'], 0x1D4B6: 
[398,8,567,23,620,'620 215l-28 -54c-84 -161 -127 -169 -171 -169c-62 0 -86 44 -88 98c-35 -57 -91 -98 -150 -98c-104 0 -160 73 -160 164c0 129 114 242 237 242c77 0 111 -56 118 -56c13 0 38 48 64 48h21l-46 -234c-5 -29 -13 -66 -13 -96c0 -29 8 -51 32 -51c57 0 110 95 143 160 l27 53zM371 325c-28 31 -63 49 -111 49c-82 0 -161 -114 -161 -232c0 -76 35 -126 84 -126c63 0 128 56 154 136'], 0x1D4B7: [788,8,465,18,519,'352 257c0 71 -28 117 -76 117c-54 0 -110 -18 -154 -54c-6 -52 -14 -108 -26 -167c-1 -10 -2 -19 -2 -28c0 -72 56 -109 111 -109c33 0 60 17 82 44c-11 2 -14 9 -14 14c0 8 7 14 14 14c0 0 3 0 16 -6c32 53 49 126 49 175zM282 694c0 61 -103 78 -118 78l-3 -2 c-21 -26 -8 -172 -32 -401c71 112 153 249 153 325zM519 215c-34 -67 -72 -159 -169 -162c-38 -37 -87 -61 -145 -61c-110 0 -187 76 -187 147c0 5 0 12 2 17c75 387 67 584 97 623c4 5 39 9 47 9c18 0 134 -9 134 -96c0 -85 -90 -230 -163 -344c42 32 93 50 141 50 c101 0 152 -69 152 -156c0 -63 -24 -125 -63 -172c76 11 106 85 139 151'], 0x1D4B8: [398,8,406,21,459,'459 215l-30 -59c-58 -116 -155 -164 -239 -164c-91 0 -169 57 -169 154c0 75 62 252 236 252c75 0 123 -29 123 -78c0 -29 -17 -50 -43 -50c-17 0 -30 13 -30 30c0 16 10 30 25 38c-10 33 -52 36 -75 36c-119 0 -161 -188 -161 -250c0 -77 43 -108 100 -108 c74 0 169 54 217 148l32 58'], 0x1D4B9: [788,8,567,23,620,'554 753c0 4 -4 19 -4 19c-25 -18 -75 -284 -109 -476c26 2 48 38 63 104c36 149 50 279 50 353zM620 215l-28 -54c-84 -161 -124 -166 -171 -169c-61 0 -83 47 -87 100c-35 -58 -91 -100 -151 -100c-104 0 -160 73 -160 164c0 121 108 242 237 242c46 0 81 -21 109 -50 c31 179 80 420 112 439l70 1c11 0 18 -14 18 -35c0 -76 -14 -207 -49 -356c-19 -84 -48 -117 -79 -117h-4l-21 -124c-5 -27 -12 -64 -12 -94c0 -29 8 -53 32 -53c57 0 107 92 143 160l27 53zM365 329c-26 29 -59 45 -105 45c-77 0 -161 -114 -161 -232 c0 -76 35 -126 84 -126c63 0 128 56 154 136l1 8c0 4 10 75 27 169'], 0x1D4BB: [788,390,247,-83,300,'300 215c-45 -88 -101 -149 -147 -149h-70l-13 -66l-75 -390h-78l75 390c76 390 76 478 107 718c9 62 22 70 84 70c55 0 113 -39 113 -104c0 -168 -96 -267 -175 -368l-33 -218h72c42 0 95 65 126 124zM281 684c0 57 -46 88 -103 88c-25 0 -30 -6 -32 -58 c-6 -102 -4 -206 -22 -368c75 93 157 186 157 338'], 0x1D4BD: [788,8,524,-10,577,'577 215l-36 -72c-63 -120 -76 -151 -152 -151c-48 0 -61 33 -61 72c0 71 46 161 46 256c0 31 -6 54 -28 54c-55 0 -146 -58 -248 -224l-29 -150h-79l31 160c75 386 66 580 96 619c4 5 39 9 47 9c18 0 134 -9 134 -96s-96 -242 -171 -358l-19 -125 c90 129 174 189 238 189c54 0 104 -43 104 -115c0 -63 -46 -172 -46 -230c0 -20 6 -35 22 -35c25 0 43 17 101 130l37 74zM282 694c0 61 -103 78 -118 78l-3 -2c-21 -26 -5 -167 -30 -397c71 112 151 245 151 321'], 0x1D4BE: [632,8,244,30,298,'213 594c0 -31 -35 -56 -57 -56s-39 17 -39 38c0 31 35 56 57 56s39 -17 39 -38zM298 215l-38 -75c-47 -89 -101 -148 -174 -148c-44 0 -56 34 -56 76c0 30 5 59 23 147l-14 7c29 58 52 117 57 152h-23l2 16h81c9 0 16 -4 16 -9c-30 -100 -69 -303 -69 -332 c0 -18 5 -26 19 -26c21 0 74 28 124 125l37 74'], 0x1D4BF: [632,398,215,-309,268,'213 594c0 -31 -35 -56 -57 -56s-39 17 -39 38c0 31 35 56 57 56s39 -17 39 -38zM268 215c-53 -89 -113 -153 -174 -201l-52 -266c-26 -135 -162 -146 -198 -146c-120 0 -153 99 -153 141c0 120 152 136 317 234l45 238l-14 7c29 58 48 117 53 152h-19l2 16h81 c9 0 16 -4 16 -9c-17 -56 -12 -22 -38 -153l-36 -189c55 46 110 104 157 183zM4 -46c-154 -88 -290 -106 -290 -211c0 -26 21 -117 130 -117c31 0 97 3 114 93'], 0x1D4C0: [788,8,456,-10,510,'282 694c0 61 -103 78 -118 78l-3 -2c-21 -26 -5 -167 -31 -399c70 111 152 247 152 323zM510 215l-32 -62c-53 -104 -78 -161 -150 -161c-103 0 -107 93 -180 193c-27 -37 -52 -81 
-70 -130l-10 -55h-78l31 160c75 386 66 580 96 619c4 5 39 9 47 9c15 0 134 -8 134 -94 c0 -87 -98 -245 -173 -362l-27 -176l-4 -22c52 92 152 199 254 272c20 14 29 15 37 15c9 0 15 -5 15 -10c-6 -60 -7 -87 -65 -91c-6 0 -11 1 -18 8c-26 -9 -73 -38 -119 -86c105 -115 75 -226 130 -226c65 0 85 48 135 144l32 62'], 0x1D4C1: [788,8,265,17,319,'319 215l-38 -72c-47 -94 -104 -151 -177 -151c-68 0 -87 57 -87 119c0 16 1 33 4 49c75 386 66 580 96 619c4 5 39 9 47 9c15 0 134 -8 134 -94c0 -87 -98 -244 -172 -361c-6 -55 -15 -113 -28 -177c-4 -27 -11 -60 -11 -87c0 -30 7 -52 31 -52c30 0 93 23 149 131 l37 74zM282 694c0 61 -95 78 -113 78c0 0 -5 0 -8 -2c-21 -26 -5 -167 -31 -398c70 112 152 246 152 322'], 0x1D4C2: [398,8,753,12,806,'806 215l-36 -72c-65 -125 -82 -151 -154 -151c-48 0 -61 33 -61 72c0 71 48 167 48 256c0 31 -5 54 -28 54c-37 0 -93 -27 -169 -126c-4 -27 -11 -57 -17 -92l-30 -157l-78 2l31 159c8 45 23 109 23 160c0 31 -2 54 -26 54c-37 0 -93 -27 -171 -128l-48 -246h-78l41 215 l-14 7c29 58 52 117 57 152h-23l2 16h81c9 0 16 -4 16 -9l-22 -83c59 67 113 100 159 100c55 0 99 -38 99 -108c63 73 119 108 167 108c54 0 102 -43 102 -115c0 -62 -44 -171 -44 -228c0 -21 5 -37 22 -37c20 0 37 9 99 130l38 74'], 0x1D4C3: [398,8,520,12,573,'573 215l-36 -72c-59 -113 -78 -151 -152 -151c-48 0 -61 33 -61 72c0 71 48 167 48 256c0 31 -7 54 -29 54c-48 0 -125 -45 -213 -169l-40 -205h-78l41 215l-14 7c29 58 52 117 57 152h-23l2 16h81c9 0 16 -4 16 -9c-12 -38 -20 -68 -32 -122c77 95 149 139 203 139 c55 0 102 -43 102 -115c0 -67 -45 -176 -45 -230c0 -20 6 -35 23 -35c24 0 41 17 98 130l38 74'], 0x1D4C5: [398,398,485,-118,538,'365 248c0 73 -37 126 -100 126c-47 0 -88 -10 -122 -39c-6 -52 -26 -157 -45 -261c9 -4 16 -12 24 -22c28 -34 48 -44 99 -44c33 0 61 19 85 52c-15 0 -16 9 -16 14c0 8 6 14 14 14c0 0 3 0 16 -6c17 29 45 100 45 166zM538 215c-34 -67 -74 -159 -173 -162 c-37 -37 -87 -61 -144 -61c-55 0 -81 12 -112 50c-5 6 -10 11 -15 15c-30 -153 -89 -433 -125 -454l-70 -1c-12 0 -17 14 -17 35c0 221 79 358 134 411l31 170l-8 4l13 25c14 85 17 126 17 143h40c17 0 33 0 34 -3c3 -6 3 -10 3 -24c32 23 71 35 119 35 c119 0 177 -73 177 -165c0 -65 -30 -125 -61 -163c78 11 104 80 142 152zM10 23c-33 -44 -113 -179 -113 -386c0 -8 2 -17 3 -19c25 18 75 235 110 405'], 0x1D4C6: [398,397,486,23,540,'540 215c-30 -62 -58 -107 -136 -116l-84 -437c28 45 96 88 136 91l2 -16c-33 -5 -111 -44 -147 -128c-2 -3 -7 -6 -7 -6s-65 8 -70 10l90 475c-34 -54 -86 -96 -141 -96c-104 0 -160 73 -160 164c0 126 111 242 237 242c77 0 111 -56 118 -56c13 0 38 48 64 48h18 l-53 -276c61 8 83 41 118 108zM371 325c-28 31 -63 49 -111 49c-81 0 -161 -111 -161 -232c0 -76 35 -126 84 -126c50 0 99 33 130 87c-4 1 -19 4 -19 17c0 7 5 14 13 14c4 0 5 -3 18 -8c4 8 8 17 10 26'], 0x1D4C7: [421,0,442,39,495,'495 221c-6 -13 -18 -44 -78 -44c-40 0 -74 35 -74 84c0 21 5 43 18 67c-50 -19 -122 -107 -179 -260l-13 -68h-78l46 238c9 48 22 97 22 121c0 8 -2 13 -6 13c-23 0 -32 -26 -100 -157l-14 7c65 128 83 176 118 176c48 0 61 -46 61 -98c0 -56 -15 -119 -18 -134l-2 -10 c39 85 96 176 189 242c20 14 52 23 60 23c9 0 16 -4 16 -9c-7 -60 -30 -88 -87 -92c-12 -21 -17 -42 -17 -60c0 -39 25 -66 58 -66c50 0 60 24 65 35'], 0x1D4C8: [421,8,413,-26,467,'467 215c-29 -55 -57 -127 -121 -153c-31 -39 -92 -70 -185 -70s-187 54 -187 109c0 29 3 59 46 59c33 0 62 -20 62 -35c0 0 -3 -7 -7 -7c-6 0 -18 7 -27 7c-8 0 -14 -5 -14 -24c0 -44 45 -85 127 -85c65 0 99 18 115 46c-8 2 -16 7 -16 16c0 8 5 14 13 14c4 0 5 0 12 -4 c1 6 1 11 1 17c0 84 -170 137 -172 229l-61 -119l-14 7l87 171c12 19 31 28 56 28l27 -44c-28 0 -74 0 -74 -42c0 -66 234 -132 234 -210c0 -13 -2 -26 -9 -39c43 26 64 80 92 136'], 0x1D4C9: 
[655,8,286,23,339,'339 215l-36 -72c-48 -94 -104 -151 -177 -151c-66 0 -90 54 -90 115c0 39 10 74 17 108l-14 7c18 35 47 129 70 223h-86l4 23h87c17 73 30 144 30 187h59c9 0 15 -4 15 -9c-7 -25 -22 -95 -39 -178h143l-2 -23h-144l-56 -285c-4 -20 -12 -57 -12 -88c0 -30 6 -55 32 -55 c30 0 91 23 147 131l38 74'], 0x1D4CA: [390,8,540,39,593,'593 215l-37 -72c-62 -123 -79 -151 -152 -151c-44 0 -56 29 -56 69c0 30 7 66 13 100c-88 -119 -171 -169 -217 -169c-74 0 -104 48 -104 122c0 41 9 81 13 101l-14 7c29 58 52 117 57 152h-23l2 16h81c9 0 16 -4 16 -9c-17 -56 -20 -62 -46 -194 c-8 -44 -14 -84 -14 -114c0 -34 9 -57 32 -57c37 0 128 46 228 200l34 174h78l-46 -234c-9 -47 -18 -83 -18 -108c0 -21 6 -32 25 -32c27 0 66 74 96 132l38 74'], 0x1D4CB: [420,8,491,39,545,'545 215c-16 -32 -46 -43 -77 -43c-12 0 -25 1 -36 5c-61 -138 -184 -185 -245 -185c-57 0 -141 52 -141 156c0 21 3 39 7 67l-14 7c34 67 59 137 59 168h62c4 0 5 -3 5 -3l-1 -6c-4 -26 -44 -208 -44 -260c0 -77 35 -105 74 -105c53 0 145 65 183 188 c-30 24 -52 57 -52 99c0 70 49 117 88 117c46 0 49 -55 49 -98c0 -37 -4 -87 -24 -131c11 -2 21 -4 30 -4c14 0 47 4 62 35zM394 324c0 35 -9 49 -20 49c-15 0 -33 -31 -33 -70c0 -34 17 -62 40 -82c8 31 13 65 13 103'], 0x1D4CC: [420,8,649,39,702,'702 215c-16 -32 -47 -43 -78 -43c-12 0 -23 1 -35 5c-60 -138 -146 -185 -207 -185c-37 0 -75 22 -97 67c-35 -43 -80 -67 -135 -67c-54 0 -107 47 -107 142c0 32 4 49 10 81l-14 7c34 67 59 137 59 168h62c4 0 5 -3 5 -3l-1 -6l-38 -194c-5 -30 -10 -63 -10 -93 c0 -45 10 -78 45 -78c82 0 106 149 130 270h78l-19 -99c-9 -47 -12 -75 -12 -95c0 -54 18 -76 52 -76c53 0 121 62 148 184c-32 24 -56 60 -56 103c0 70 50 117 89 117c44 0 48 -46 48 -94c0 -39 -5 -89 -25 -135c11 -2 21 -4 30 -4c14 0 47 4 62 35zM551 324 c0 35 -9 49 -19 49c-16 0 -34 -31 -34 -70c0 -35 19 -65 44 -86c7 31 9 68 9 107'], 0x1D4CD: [398,8,488,25,541,'541 215l-44 -88c-45 -85 -84 -135 -121 -135c-59 0 -96 24 -126 150c-20 -46 -41 -87 -65 -117c-21 -24 -51 -33 -80 -33c-33 0 -80 22 -80 74h23c0 -41 42 -50 57 -50c25 0 45 2 63 23c28 33 52 86 74 142l-1 6c-23 116 -30 187 -67 187c-31 0 -56 -33 -121 -159l-14 7 c70 139 92 176 138 176c53 0 87 -26 114 -102l9 19c29 59 69 83 113 83c45 0 86 -35 86 -86c0 -22 -18 -36 -36 -36c-20 0 -35 17 -35 36c0 40 -4 62 -22 62c-34 0 -65 -28 -85 -70l-19 -40c6 -22 11 -48 18 -78c23 -118 22 -170 56 -170c26 0 62 33 114 135l37 71'], 0x1D4CE: [390,398,530,-39,584,'584 215c-51 -85 -111 -147 -173 -194l-52 -273c-26 -135 -164 -146 -200 -146c-143 0 -198 99 -198 141c0 124 181 137 364 243l33 170c-88 -117 -169 -164 -214 -164c-74 0 -104 48 -104 122c0 41 7 71 13 101l-14 7c29 58 52 117 57 152h-23l2 16h81c9 0 16 -4 16 -9 c-17 -56 -20 -62 -46 -194c-8 -44 -14 -84 -14 -114c0 -34 9 -57 32 -57c37 0 126 45 225 195l35 179h78l-45 -234l-21 -109c56 44 108 100 152 175zM321 -38c-173 -96 -337 -112 -337 -219c0 -26 42 -117 175 -117c31 0 98 3 115 93'], 0x1D4CF: [404,8,437,-13,490,'490 215l-71 -138c-43 -85 -87 -85 -104 -85c-110 0 -132 51 -236 51c-14 0 -32 -9 -71 -48l-21 13c26 39 55 75 99 93l217 192c-86 19 -131 48 -161 48c-17 0 -30 -9 -42 -34l-47 -92l-14 7c92 182 96 182 116 182c40 0 113 -44 180 -44c28 0 54 8 76 27l18 17l16 -18 l-323 -285c76 -2 117 -49 211 -49c10 0 57 1 79 46l64 124'], 0x1D4D0: [785,10,796,30,853,'853 216l-3 -5l-35 -69c-60 -117 -100 -152 -147 -152c-80 0 -125 75 -132 141l-12 108h-226c-34 -71 -65 -135 -91 -179s-60 -70 -99 -70c-47 0 -78 30 -78 74c0 4 1 9 3 13l19 -7c-1 -2 -1 -6 -1 -6c0 -33 23 -54 57 -54c14 0 40 10 73 65c23 42 53 99 84 164h-32l6 37 h43c79 170 164 371 181 433c-12 24 -30 42 -61 55l8 21c131 -50 179 -141 185 -203l34 -306h55l-7 -37h-44l9 -87c11 -99 21 -134 50 -134c20 0 54 
34 104 132l35 71l2 5zM520 276l-34 313c-2 27 -5 49 -9 70c-31 -86 -97 -243 -162 -383h205'], 0x1D4D1: [767,10,913,44,970,'641 406c-21 7 -47 13 -77 13c-48 0 -91 -8 -91 -19c0 -19 41 -23 83 -23c33 0 62 10 85 29zM762 183c0 32 -4 138 -76 197c-37 -20 -81 -32 -130 -32c-50 0 -104 16 -104 52c0 24 43 47 112 47c35 0 69 -5 102 -15c30 36 46 87 46 136c0 77 -46 163 -169 163 c-54 0 -104 -13 -146 -34c44 -57 58 -116 58 -152c0 -7 0 -13 -1 -18l-59 -301c-5 -27 -13 -52 -24 -74c80 -52 148 -134 232 -134c47 0 86 13 113 41c-9 3 -11 10 -11 15c0 9 7 17 17 17c6 0 9 -3 15 -5c16 26 25 57 25 97zM970 216l-3 -5c-32 -65 -69 -155 -166 -160 c-51 -41 -124 -61 -198 -61c-101 0 -178 83 -249 134c-65 -97 -181 -134 -237 -134c-42 0 -73 18 -73 57c0 53 55 147 206 147c9 0 18 -2 27 -3c4 12 6 25 9 38l65 325c7 35 12 66 12 93c0 11 -2 20 -3 29c-90 -60 -140 -160 -140 -250c0 -46 11 -93 47 -128l-13 -13 c-37 36 -55 88 -55 141c0 99 57 211 155 276c-8 17 -20 33 -39 47l11 18c21 -16 38 -32 54 -49c46 26 102 41 163 41c173 0 271 -87 271 -192c0 -60 -34 -120 -89 -161c86 -47 146 -129 146 -211c0 -48 -17 -88 -44 -120c65 16 86 78 121 146l2 5zM267 164c-4 0 -10 1 -13 1 c-140 0 -189 -73 -189 -118c0 -12 5 -29 43 -29c49 0 120 44 159 146'], 0x1D4D2: [759,10,568,39,625,'625 216l-2 -5l-29 -56c-61 -120 -165 -165 -264 -165c-222 0 -291 161 -291 306c0 132 70 463 398 463c114 0 162 -65 162 -138c0 -66 -39 -139 -106 -172l-9 19c56 29 95 94 95 151c0 60 -41 112 -142 112c-271 0 -290 -384 -290 -450c0 -144 66 -263 201 -263 c85 0 176 50 227 147l28 56l3 5'], 0x1D4D3: [767,10,880,36,937,'937 216l-2 -5c-24 -47 -78 -129 -198 -129h-5c-55 -56 -126 -92 -212 -92c-81 0 -131 54 -185 98c-65 -71 -156 -98 -213 -98c-41 0 -86 18 -86 57c0 53 59 131 167 131c27 0 51 -5 71 -14c8 19 15 40 20 65l65 325c5 27 9 52 9 74s-3 43 -10 61 c-114 -57 -181 -164 -181 -260c0 -49 17 -96 53 -131l-13 -13c-40 40 -61 91 -61 145c0 106 70 220 189 282c-10 13 -23 25 -37 37l13 18c18 -14 35 -27 50 -42c49 21 105 34 167 34c211 0 327 -151 327 -320c0 -119 -41 -248 -115 -336c83 4 128 45 165 118l3 5zM263 137 c-19 7 -38 13 -60 13c-98 0 -146 -63 -146 -103c0 -20 31 -29 59 -29c48 0 108 39 147 119zM755 454c0 148 -59 277 -217 277c-54 0 -104 -9 -147 -26c50 -54 73 -108 73 -154c0 -9 -1 -17 -2 -24l-59 -301c-9 -45 -26 -84 -49 -115c52 -45 97 -93 166 -93 c48 0 92 30 129 77c-8 4 -12 8 -12 14c0 8 7 17 16 17c0 0 4 0 6 -1c3 -1 4 -3 8 -4c55 87 88 216 88 333'], 0x1D4D4: [759,10,569,39,627,'627 216l-3 -5l-29 -56c-61 -120 -170 -165 -275 -165c-150 0 -281 82 -281 205c0 95 70 176 164 218c-27 29 -46 67 -46 111c0 10 0 21 3 31c23 125 142 204 270 204c110 0 162 -56 162 -127c0 -40 -16 -85 -47 -129l-17 13c28 39 41 79 41 112c0 82 -67 103 -139 103 c-92 0 -148 -74 -167 -178c-4 -23 -7 -42 -7 -60c0 -22 3 -42 9 -57c30 7 63 11 94 11c41 0 78 -18 78 -47c0 -6 -1 -13 -5 -19c-11 -17 -46 -33 -65 -33c-48 0 -93 13 -128 36c-79 -58 -91 -169 -91 -201c0 -109 80 -165 182 -165c93 0 191 46 245 147l30 56l2 5zM411 400 c0 8 -22 19 -52 19c-29 0 -53 -4 -76 -12c12 -13 36 -30 84 -30c18 0 44 14 44 23'], 0x1D4D5: [807,10,761,52,850,'850 772c0 -23 -19 -61 -31 -78l-1 -1h-2c-26 5 -53 6 -78 6c-89 0 -171 -26 -254 -42c11 -23 19 -49 19 -77c0 -12 -1 -25 -4 -38l-44 -235c39 -3 69 -5 92 -5c71 0 86 19 113 119c6 18 21 26 37 26c25 0 54 -15 54 -40c0 -5 -1 -10 -3 -16c-20 -39 -39 -65 -59 -84 c-6 -17 -10 -33 -10 -47c0 -38 23 -68 56 -68c48 0 57 21 61 29l2 5l20 -10l-3 -5c-6 -15 -19 -41 -80 -41c-43 0 -81 46 -81 100c0 4 1 7 1 11c-23 -12 -49 -16 -79 -16c-35 0 -75 7 -127 11l-28 -146c-18 -96 -100 -140 -195 -140c-88 0 -174 52 -174 140h21 c0 -60 62 -109 126 -109c49 0 97 36 117 134l26 127h-43h-5l21 29h1h31l46 219c7 37 11 68 
11 95v19c-11 -2 -24 -2 -37 -2c-172 0 -247 70 -247 115c0 36 39 50 85 50c51 0 112 -15 154 -40c34 -21 67 -43 93 -69c106 34 219 96 376 96c16 0 22 -9 22 -22zM395 683 c-9 23 -23 42 -48 63c-31 26 -84 39 -130 39c-44 0 -77 -11 -77 -28c0 -22 50 -81 194 -81c21 0 40 3 61 7'], 0x1D4D6: [759,408,664,35,722,'722 216l-3 -5c-39 -78 -98 -130 -153 -176l-56 -287c-26 -137 -154 -156 -228 -156c-144 0 -201 108 -201 151c0 124 182 140 365 240l16 77c-64 -60 -124 -70 -153 -70c-188 0 -274 106 -274 274c0 36 4 74 12 116c41 219 152 379 390 379c114 0 175 -65 175 -138 c0 -66 -40 -137 -106 -172l-9 19c57 29 95 94 95 151c0 60 -54 112 -155 112c-182 0 -242 -152 -281 -358c-12 -60 -18 -116 -18 -162c0 -119 43 -193 171 -193c50 0 114 34 160 85l12 57l31 165h109v-3c-3 -18 4 21 -32 -166l-17 -87c47 40 94 86 127 152l3 5zM441 -46 c-173 -91 -332 -107 -332 -211c0 -25 42 -123 173 -123c30 0 96 11 113 99'], 0x1D4D7: [801,22,803,39,861,'215 255c-91 -47 -155 -118 -155 -208c0 -17 5 -38 27 -38s56 25 86 83c10 21 21 63 42 163zM861 216l-3 -5l-35 -69c-48 -94 -105 -152 -179 -152c-78 0 -115 74 -115 144c0 9 0 17 1 26l42 213l-235 -71c-31 -165 -43 -210 -65 -238c-34 -56 -100 -86 -152 -86 c-46 0 -81 22 -81 69v4c0 100 78 184 182 236l60 301c5 28 9 54 9 76c0 51 -16 86 -57 119l13 18c99 -73 141 -147 141 -213c0 -9 -1 -19 -2 -28l-42 -228l234 71c41 204 63 306 86 337c26 35 72 43 101 43c17 0 69 -8 71 -97v-3c0 -99 -59 -208 -146 -264l-49 -263 c-4 -27 -11 -62 -11 -90c0 -26 5 -46 28 -46c3 0 9 0 13 1c49 13 91 45 133 129l36 71l2 5zM814 686c0 60 -27 76 -42 76s-35 -83 -76 -310c74 54 118 147 118 234'], 0x1D4D8: [789,10,590,73,663,'663 722c0 -71 -53 -94 -96 -94c-12 0 -24 3 -38 6c-17 -48 -23 -169 -88 -504c-5 -27 -16 -49 -28 -66h11c92 0 161 79 201 157l3 5l19 -10l-2 -5c-34 -68 -118 -168 -221 -168c-8 0 -18 0 -27 1c-38 -37 -93 -54 -150 -54c-88 0 -174 52 -174 140c0 74 75 153 110 172 l9 -19c-33 -16 -98 -91 -98 -153c0 -60 61 -109 124 -109c34 0 63 13 85 45c-13 4 -14 16 -14 16c0 9 7 17 17 17c0 0 3 0 6 -1l5 -3c8 17 13 36 18 60c60 303 69 464 102 516c-69 31 -144 69 -195 69c-53 0 -89 -15 -134 -103l-20 9c47 95 80 133 154 133 c67 0 145 -50 213 -83c49 62 104 93 146 93c36 0 62 -23 62 -67zM642 722c0 18 -2 42 -17 42c-14 0 -43 -24 -84 -104c9 -2 18 -3 26 -3c40 0 75 13 75 65'], 0x1D4D9: [789,408,692,39,764,'744 722c0 18 -3 42 -17 42c-15 0 -43 -24 -85 -104c9 -2 18 -3 26 -3c41 0 76 13 76 65zM764 722c0 -71 -53 -94 -96 -94c-11 0 -23 3 -37 6c-16 -45 -21 -143 -85 -478l-26 -138c79 48 159 106 207 203l2 5l20 -10l-3 -5c-54 -110 -144 -171 -231 -221l-47 -242 c-26 -138 -166 -156 -227 -156c-145 0 -202 108 -202 151c0 113 176 136 360 214l38 198c58 300 67 465 101 516c-69 32 -144 73 -195 73c-63 0 -89 -19 -134 -107l-19 9c47 95 79 133 153 133c60 0 141 -51 212 -85c50 64 105 95 147 95c36 0 62 -23 62 -67zM394 -69 c-170 -70 -326 -95 -326 -188c0 -25 41 -123 173 -123c19 0 94 6 113 99'], 0x1D4DA: [790,10,894,47,952,'952 216l-3 -5l-30 -59c-53 -104 -78 -162 -152 -162c-127 0 -190 269 -264 374c-23 -31 -44 -64 -60 -92l-27 -142c-18 -96 -92 -140 -194 -140c-88 0 -175 52 -175 140c0 74 75 153 110 172l11 -19c-34 -16 -100 -92 -100 -153c0 -60 63 -109 127 -109 c55 0 98 36 117 134l73 374c8 40 21 77 35 109c-65 30 -96 100 -142 100c-21 0 -87 -29 -88 -144h-21c1 124 75 165 109 165c57 0 89 -74 151 -102c42 78 99 127 139 127c17 0 43 -10 43 -62c0 -73 -71 -90 -91 -94c-10 -33 -19 -68 -26 -99l-35 -177 c97 151 299 351 399 423c21 14 38 15 46 15c10 0 19 -6 19 -13c-6 -61 -8 -96 -68 -100c-7 0 -16 9 -22 15c-33 -15 -160 -125 -267 -250v-1c107 -116 120 -423 201 -423c64 0 83 47 131 143l30 60l3 5zM590 722c0 22 -2 29 -5 31h-1c-9 0 -31 -34 -56 -102 c34 9 62 29 62 
71'], 0x1D4DB: [758,10,789,36,846,'846 216l-2 -5l-35 -69c-48 -94 -104 -152 -178 -152c-106 0 -186 66 -257 117c-59 -76 -153 -117 -249 -117c-51 0 -89 18 -89 57c0 53 55 155 232 155c14 0 28 -2 41 -4c15 53 34 158 85 421c18 96 104 139 194 139c88 0 174 -51 174 -139c0 -74 -76 -155 -111 -173 l-9 19c34 17 99 91 99 154c0 60 -61 109 -125 109c-49 0 -97 -36 -117 -135c-48 -243 -69 -367 -92 -430c92 -51 157 -136 248 -136c33 0 90 39 133 123l36 71l3 5zM294 155c-16 6 -30 10 -46 10c-141 0 -191 -73 -191 -118c0 -21 18 -33 48 -33c67 0 138 42 188 138'], 0x1D4DC: [785,77,966,43,1040,'1040 763c0 -14 -16 -58 -25 -66c-23 0 -48 36 -82 36c-5 0 -10 0 -15 -2l-82 -575c-3 -21 -7 -47 -7 -71c0 -36 7 -62 32 -62c24 2 71 64 104 127l36 71l3 5l19 -10l-2 -5l-36 -69c-63 -125 -97 -152 -171 -152c-56 0 -89 50 -89 127c0 13 0 29 3 43l57 406l-268 -450 c0 -5 2 -11 2 -17c0 -66 -46 -137 -85 -176c-10 9 -32 29 -32 54c0 26 9 54 19 74c-2 21 -9 44 -14 74l-79 464l-9 47c-115 -618 -162 -646 -210 -646c-26 0 -50 24 -65 52l-1 2c8 5 17 11 32 11c0 0 11 0 15 -2c0 -27 11 -43 19 -43h3c17 0 62 6 188 679 c-19 42 -46 61 -80 75l8 21c79 -32 191 -86 209 -192l74 -433l318 539c37 60 101 80 149 80c17 0 49 -3 62 -12v-4'], 0x1D4DD: [785,10,852,39,917,'917 724c0 -13 0 -51 -10 -51c-18 0 -46 17 -74 17c-33 0 -48 -27 -56 -63l-111 -572c14 -26 32 -45 56 -45c39 0 78 39 130 140l35 71l2 5l20 -10l-3 -5l-35 -69c-47 -91 -90 -152 -147 -152c-90 0 -162 44 -200 140l-182 458l-29 71c-49 -272 -83 -434 -108 -529 c-28 -110 -70 -140 -102 -140c-38 0 -64 31 -64 78h21c0 -34 17 -58 43 -58c30 0 53 43 74 125c25 98 60 269 116 572c-16 29 -34 43 -67 57l8 21c79 -32 172 -76 217 -191l181 -456c5 -12 9 -26 14 -40l104 531c18 95 92 132 160 132c7 0 7 -28 7 -37'], 0x1D4DE: [759,10,801,51,858,'858 216l-3 -5c-33 -68 -71 -151 -174 -151c-27 0 -57 4 -83 9c-70 -51 -157 -79 -251 -79c-186 0 -296 115 -296 296c0 277 203 473 447 473c185 0 296 -117 296 -297c0 -166 -71 -293 -170 -374c20 -5 38 -7 57 -7c90 0 120 70 155 140l3 5zM689 501 c0 140 -58 230 -191 230c-239 0 -345 -240 -345 -505c0 -127 50 -208 194 -208c73 0 133 26 182 69l-6 3c-9 4 -13 10 -13 17c0 9 6 17 15 17c0 0 5 0 8 -2c4 -2 12 -6 22 -10c78 83 134 229 134 389'], 0x1D4DF: [767,10,846,47,904,'904 216l-3 -5c-39 -76 -135 -175 -285 -175c-71 0 -143 24 -204 65c-31 -75 -113 -111 -192 -111c-89 0 -173 53 -173 140h21c0 -60 61 -109 124 -109c55 0 103 36 123 134l6 31c-45 58 -74 130 -74 209c0 103 44 226 152 298c-6 21 -19 39 -41 56l13 18 c23 -17 42 -36 58 -56c49 26 112 40 185 40c180 0 290 -137 290 -265c0 -13 -2 -26 -4 -39c-28 -145 -139 -262 -333 -262c-55 0 -104 13 -130 26l-16 -78c-1 -4 -1 -7 -2 -11c58 -41 128 -65 197 -65c139 0 230 94 267 164l2 5zM796 510c0 105 -54 213 -197 213 c-61 0 -111 -12 -154 -31c41 -59 53 -119 53 -152c0 -6 -1 -10 -1 -13c-6 -49 -7 -41 -55 -285c20 -11 57 -29 125 -29c119 0 197 102 223 237c4 19 6 40 6 60zM406 649c0 6 0 14 -2 21c-97 -65 -136 -177 -136 -275c0 -67 22 -128 58 -180l67 339c6 36 13 67 13 95'], 0x1D4E0: [759,250,801,53,858,'858 216l-3 -5c-113 -220 -305 -249 -409 -249c-25 0 -51 3 -73 5c-62 -26 -113 -54 -165 -100c99 -5 198 -30 298 -30c61 0 101 20 115 50l15 26l31 -13c-48 -95 -108 -150 -194 -150c-44 0 -235 41 -294 41c-31 0 -44 -11 -63 -37l-33 5c9 15 16 26 24 37 c70 97 148 145 222 179c-163 35 -276 146 -276 332c0 204 151 452 449 452c188 0 309 -141 309 -314c0 -160 -125 -364 -396 -461c10 -1 22 -1 31 -1c246 0 347 155 390 238l3 5zM705 454c0 154 -80 277 -203 277c-286 0 -342 -333 -342 -450c0 -142 57 -258 207 -289 c258 115 338 303 338 462'], 0x1D4E1: [767,10,943,39,975,'975 216l-3 -5l-29 -59c-54 -104 -78 -162 -153 -162c-146 0 -222 217 -296 318c-12 16 -14 29 -14 39c0 
27 28 35 47 46h1l19 2v-1c8 -9 17 -18 25 -29c12 -4 25 -11 42 -11c128 0 148 152 148 204c0 83 -70 165 -163 165c-83 0 -131 -12 -167 -33 c40 -58 53 -117 53 -150c0 -6 -1 -10 -1 -13c-7 -55 -7 -38 -76 -394c-18 -97 -115 -143 -195 -143c-88 0 -174 52 -174 140c0 75 77 155 113 173l11 -18c-36 -17 -103 -91 -103 -155c0 -60 62 -109 126 -109c55 0 96 36 116 134l78 399c5 28 9 54 9 77c0 10 -2 18 -3 27 l-45 -39c-59 -59 -86 -135 -86 -200c0 -43 12 -82 32 -110l-14 -13c-25 32 -39 75 -39 123c0 70 30 149 95 214c19 17 35 34 52 48c-8 26 -23 48 -49 68l26 18c24 -18 45 -39 61 -58c41 26 96 42 195 42c156 0 257 -96 257 -204c0 -102 -82 -222 -257 -222h-16 c74 -125 124 -307 191 -307c64 0 86 47 134 143l30 60l3 5'], 0x1D4E2: [767,10,615,60,672,'672 216l-2 -5c-25 -50 -65 -95 -123 -120c-44 -65 -140 -101 -265 -101c-142 0 -222 72 -222 145c0 13 0 29 8 43c9 17 27 24 44 24c18 0 40 -10 40 -24c0 0 0 -4 -1 -6c-7 -12 -12 -28 -12 -37c0 -49 22 -117 143 -117c55 0 116 22 152 68c-5 2 -11 8 -11 15 c0 10 6 17 15 17c0 0 4 0 7 -1l6 -4c9 20 14 42 14 66c0 163 -143 249 -157 402l-99 -195l-18 11l117 232c8 72 50 138 129 138h4l34 -49h-4c-81 0 -129 -42 -129 -112c0 -130 233 -219 233 -427c0 -20 -4 -39 -11 -57c37 22 63 54 86 99l3 5'], 0x1D4E3: [900,10,555,40,972,'972 762l-14 -16c-60 48 -114 54 -156 54h-26c-126 0 -195 -67 -288 -98c27 -39 44 -83 44 -133c0 -14 -2 -28 -4 -42l-77 -397c-4 -22 -12 -42 -21 -58c90 1 120 71 160 149l3 5l19 -10l-2 -5c-34 -68 -80 -160 -182 -160h-12c-36 -44 -94 -61 -159 -61 c-88 0 -174 52 -174 140h21c0 -60 61 -109 125 -109c51 0 77 34 88 53c-14 4 -15 12 -15 16c0 8 6 17 15 17c0 0 4 0 7 -2c0 0 4 -2 6 -4c7 16 12 33 16 54l82 414c6 34 10 64 10 89c0 12 -1 22 -2 32c-12 -1 -25 -2 -38 -2c-180 0 -358 96 -358 178c0 25 26 34 61 34 c77 0 210 -43 286 -99c24 -17 46 -37 65 -56c101 31 181 99 297 99c73 0 143 -16 223 -82zM419 736c-11 15 -25 30 -43 44c-76 57 -215 99 -281 99c-23 0 -34 -5 -34 -13c0 -63 157 -135 303 -135c18 0 38 1 55 5'], 0x1D4E4: [801,10,696,48,753,'753 216l-3 -5l-35 -69c-64 -125 -96 -152 -170 -152c-57 0 -78 57 -78 108c0 20 2 41 6 62l3 12c-85 -104 -182 -182 -237 -182c-106 0 -191 71 -191 223c0 48 8 106 27 169c26 86 84 194 84 303c0 40 -12 70 -48 99l13 17c97 -72 128 -161 128 -217 c0 -52 -43 -135 -67 -211c-24 -67 -45 -149 -45 -218c0 -99 45 -137 112 -137c37 0 139 78 232 199l104 534h109l-116 -595c-9 -44 -17 -79 -17 -101c0 -26 11 -32 26 -32c25 2 72 64 104 127l37 71l2 5'], 0x1D4E5: [819,10,632,49,689,'689 216l-3 -5c-33 -68 -70 -129 -135 -129c-24 0 -48 14 -67 39c-61 -82 -142 -131 -234 -131c-107 0 -201 75 -201 232c0 46 8 100 26 160c26 86 84 187 84 303c0 40 -12 70 -47 98l12 18c98 -74 127 -174 127 -224c0 -49 -47 -144 -66 -204c-22 -64 -41 -140 -41 -207 c0 -75 24 -148 119 -148c65 0 131 60 180 174c-35 82 -57 195 -57 281c0 133 21 220 43 267s55 79 95 79c38 0 95 -31 99 -94l2 -83c0 -203 -46 -383 -128 -503c24 -31 44 -36 54 -36c47 0 78 47 116 118l3 5zM520 655c0 94 -14 115 -35 115c-5 0 -20 -3 -35 -38 c-26 -57 -42 -150 -42 -259c0 -91 18 -183 46 -253c40 106 66 255 66 435'], 0x1D4E6: [819,10,987,49,1044,'1044 216l-3 -5c-33 -68 -71 -129 -136 -129c-25 0 -47 14 -65 39c-63 -82 -138 -131 -216 -131c-81 0 -143 48 -166 150c-56 -100 -115 -150 -221 -150c-99 0 -188 74 -188 228c0 47 8 102 26 164c26 86 84 187 84 303c0 40 -12 70 -47 98l12 18 c98 -74 127 -174 127 -224c0 -47 -47 -144 -66 -204c-24 -69 -46 -152 -46 -221s22 -134 111 -134c117 0 175 181 218 381l34 174h96l-39 -200c-18 -93 -29 -166 -29 -221c0 -93 30 -134 106 -134c56 0 119 57 169 168c-34 83 -51 200 -51 287c0 133 30 221 52 268 s59 78 99 78c83 0 88 -84 88 -166c0 -206 -56 -390 -139 -513c16 -24 33 -37 51 -37c45 0 78 44 117 118l2 
5zM888 655c0 115 -12 115 -23 115c-6 0 -20 -3 -36 -38c-26 -57 -41 -150 -41 -259c0 -89 12 -180 31 -251c40 107 69 254 69 433'], 0x1D4E7: [759,10,685,57,742,'742 216l-2 -5l-43 -85c-43 -83 -82 -136 -122 -136c-112 0 -152 53 -185 221l-4 19c-60 -125 -134 -240 -223 -240c-72 0 -106 48 -106 109c0 71 46 157 116 194l13 -26c-60 -32 -100 -107 -100 -168c0 -38 14 -81 77 -81c78 0 149 116 214 260l-48 245 c-25 124 -39 208 -78 208c-29 0 -59 -25 -124 -151l-19 10c70 139 95 169 140 169c106 0 160 -74 191 -238l12 -59c29 72 52 141 76 192c33 75 104 104 162 104c14 0 44 -1 44 -13c0 -17 -8 -65 -17 -73c-11 0 -56 22 -88 22c-26 0 -53 -5 -74 -50 c-26 -59 -57 -146 -92 -237l37 -198c25 -124 35 -191 76 -191c22 0 56 28 110 134l35 69l3 5'], 0x1D4E8: [801,408,688,48,745,'745 216l-3 -5c-43 -87 -109 -142 -171 -189l-54 -274c-27 -138 -174 -156 -213 -156c-144 0 -203 108 -203 151c0 120 173 139 351 231l38 194c-91 -121 -190 -178 -251 -178c-106 0 -191 72 -191 225c0 48 8 104 27 167c26 86 84 187 84 303c0 40 -12 70 -47 98l12 18 c97 -72 128 -161 128 -217c0 -52 -47 -148 -67 -211c-22 -67 -45 -149 -45 -217c0 -70 23 -138 112 -138c43 0 152 57 247 199l104 534h109l-135 -698c55 43 108 91 147 168l3 5zM447 -53c-166 -84 -317 -104 -317 -204c0 -25 43 -123 174 -123c30 0 83 21 99 99'], 0x1D4E9: [767,17,673,43,736,'736 745l-90 -99l-460 -522c81 -13 313 -47 404 -47h8c14 0 38 2 52 30l59 114l2 5l20 -10l-3 -5c-74 -143 -108 -228 -151 -228c-112 0 -353 51 -460 51c-13 0 -19 -7 -53 -43l-21 17l52 56l498 564c-86 19 -208 48 -293 48c-71 0 -87 -19 -93 -32l-12 -24l-21 9 c15 28 54 115 67 128l5 5c8 5 28 5 48 5c79 0 233 -18 308 -43c19 -6 32 -9 43 -9c18 0 30 8 49 27l21 24'], 0x1D4EA: [400,10,606,21,663,'381 324c-29 32 -64 48 -108 48c-79 0 -146 -91 -146 -230c0 -59 15 -124 69 -124c60 0 125 54 151 134zM663 216l-3 -5l-27 -51c-87 -161 -127 -170 -173 -170c-61 0 -109 41 -115 92c-36 -53 -90 -92 -149 -92c-117 0 -175 69 -175 161c0 147 116 249 252 249 c77 0 111 -53 118 -55c12 0 50 48 77 48h36l-45 -237c-5 -29 -13 -66 -13 -96c0 -25 5 -48 29 -48c55 0 109 97 140 158l26 51l3 5'], 0x1D4EB: [790,10,491,16,549,'351 273c0 61 -19 99 -62 99c-48 0 -97 -14 -138 -44c-7 -55 -14 -112 -26 -175c-3 -11 -4 -23 -4 -33c0 -67 44 -102 97 -102c29 0 52 16 72 42c-16 4 -17 13 -17 17c0 9 8 17 17 17c0 0 4 0 6 -2c3 -1 7 -4 11 -5c15 30 28 68 36 109c5 28 8 55 8 77zM293 694 c0 57 -95 74 -115 76l-1 -2c-18 -23 -3 -163 -20 -369c68 108 136 226 136 295zM549 216l-3 -5c-32 -65 -69 -155 -166 -160c-43 -42 -99 -61 -162 -61c-123 0 -202 76 -202 150c0 6 0 11 1 16c75 387 68 584 97 624c6 8 59 10 63 10c14 0 135 -7 136 -96v-2 c0 -84 -87 -225 -156 -334c41 26 87 42 132 42c105 0 167 -62 167 -158c0 -16 -2 -31 -5 -48c-9 -48 -27 -89 -51 -120c68 16 94 83 127 147l2 5'], 0x1D4EC: [400,10,432,20,489,'489 216l-3 -5l-28 -56c-59 -115 -143 -165 -255 -165c-120 0 -183 72 -183 167c0 13 1 26 5 41c31 121 128 202 245 202c102 0 125 -48 125 -80c0 -5 0 -9 -1 -14c-5 -26 -31 -45 -52 -45c-22 0 -39 19 -39 39c0 24 19 42 34 48c-7 11 -24 24 -67 24 c-74 0 -104 -60 -133 -174c-7 -28 -11 -52 -11 -76c0 -61 25 -104 77 -104c103 0 183 52 234 147l30 56l2 5'], 0x1D4ED: [790,10,606,21,663,'376 329c-26 27 -59 43 -103 43c-79 0 -147 -126 -147 -241c0 -69 26 -113 70 -113c60 0 125 55 151 135l1 7c0 4 11 75 28 169zM663 216l-3 -5l-27 -51c-83 -163 -125 -168 -173 -170c-62 0 -99 48 -110 102c-35 -59 -93 -102 -154 -102c-109 0 -175 65 -175 165 c0 154 134 245 252 245c44 0 79 -19 108 -46c65 365 96 426 112 435l97 1c12 0 21 -14 21 -37c0 -76 -14 -207 -49 -356c-17 -73 -48 -111 -84 -119l-20 -122c-6 -27 -12 -64 -12 -94c0 -37 12 -50 29 -50c3 0 7 0 11 1c41 10 77 55 129 157l26 51l3 5zM590 753 c0 8 -2 15 
-2 15c-21 -23 -64 -234 -106 -469c19 5 45 38 59 101c35 149 49 279 49 353'], 0x1D4EE: [400,10,419,18,476,'303 316c0 43 -14 56 -33 56c-49 0 -94 -35 -135 -182c17 -7 38 -11 60 -11c70 0 108 97 108 137zM476 216l-3 -5l-28 -56c-60 -116 -156 -165 -242 -165c-116 0 -185 63 -185 160c0 16 2 33 7 52c32 126 122 198 245 198c77 0 128 -31 128 -94c0 -59 -48 -149 -198 -149 c-26 0 -49 4 -70 12c-4 -19 -6 -36 -6 -53c0 -48 16 -98 85 -98c73 0 165 55 215 147l30 56l2 5'], 0x1D4EF: [790,393,274,-86,332,'332 216l-3 -5c-44 -86 -101 -147 -150 -147h-68l-12 -64l-76 -393h-109l76 393c88 387 74 478 106 718c9 63 29 72 103 72c56 0 113 -40 113 -106c0 -160 -88 -259 -165 -355l-30 -229h69c39 0 91 59 123 121l3 5zM291 684c0 56 -45 86 -101 86c-24 0 -26 -6 -29 -56 c-5 -98 2 -197 -10 -347c70 87 140 176 140 317'], 0x1D4F0: [400,400,534,-52,592,'308 -47c-171 -90 -331 -108 -331 -210c0 -25 41 -115 173 -115c29 0 96 3 113 91zM592 216l-3 -5c-39 -80 -99 -132 -156 -177l-56 -286c-27 -138 -172 -148 -227 -148c-145 0 -202 100 -202 143c0 124 182 140 365 239l16 79c-35 -43 -82 -71 -133 -71 c-108 0 -174 76 -174 171c0 148 134 239 251 239c47 0 85 -23 117 -55c10 0 36 48 62 48h50l-63 -327c49 42 97 87 130 155l3 5zM381 324c-29 32 -64 48 -108 48c-78 0 -147 -117 -147 -242c0 -69 26 -112 70 -112c60 0 121 51 149 126'], 0x1D4F1: [790,10,563,-13,620,'620 216l-38 -74c-33 -65 -53 -103 -76 -125c-25 -24 -67 -27 -91 -27c-38 0 -74 18 -74 84c0 34 9 69 14 87c25 83 35 139 35 173c0 27 -6 38 -18 38c-53 0 -143 -57 -246 -224l-28 -151h-111l31 163c76 386 65 580 96 620c6 8 59 10 63 10c14 0 135 -7 136 -96 c0 -86 -87 -227 -157 -338c-4 -43 -9 -88 -17 -136c87 122 169 180 233 180c67 0 112 -35 112 -126c0 -32 -6 -71 -17 -118c-5 -19 -21 -69 -21 -103c0 -24 8 -32 19 -32c6 0 15 2 25 10c14 11 42 57 73 119l39 76zM293 694c0 57 -95 74 -115 76l-1 -2c-8 -10 -9 -44 -9 -76 v-91c0 -54 0 -120 -7 -197c67 107 132 223 132 290'], 0x1D4F2: [649,10,270,34,328,'328 216l-39 -77c-47 -90 -103 -149 -177 -149c-60 0 -78 59 -78 117c0 18 2 36 5 53l12 53l-16 8l1 3c29 54 51 113 58 148h-37l3 21h122c9 0 18 -4 18 -12c-17 -57 -26 -94 -52 -225c-10 -52 -17 -86 -17 -107c0 -19 7 -23 17 -23c20 0 72 25 121 124l39 76zM255 597 c0 -39 -37 -73 -73 -73c-30 0 -55 23 -55 53c0 38 37 72 73 72c30 0 55 -22 55 -52'], 0x1D4F3: [647,400,228,-312,285,'285 216l-2 -3c-48 -80 -104 -139 -159 -186l-54 -279c-26 -138 -166 -148 -226 -148c-121 0 -156 100 -156 143c0 120 152 139 316 230l47 240l-16 8l1 3c29 54 49 113 54 148h-20l3 21h109c9 0 18 -4 18 -12v-1c-17 -56 -11 -21 -37 -152l-33 -168c48 41 95 95 135 164 l2 2zM241 582c-7 -29 -38 -59 -72 -59c-30 0 -55 23 -55 53c0 4 0 8 2 12c6 30 37 59 71 59c30 0 54 -23 54 -53c0 -4 1 -8 0 -12zM-1 -56c-151 -82 -282 -101 -282 -201c0 -25 19 -115 127 -115c30 0 95 3 112 91'], 0x1D4F4: [790,10,489,-3,546,'546 216l-32 -64c-54 -104 -84 -162 -158 -162c-100 0 -119 76 -187 170c-21 -31 -39 -66 -53 -105l-9 -55h-110l30 160c73 386 65 580 97 620c5 8 58 10 62 10c14 0 135 -7 136 -96v-2c0 -80 -84 -203 -154 -302c-7 -70 -16 -147 -33 -234l-1 -8c55 90 148 187 255 260 c19 13 30 16 37 16c13 0 21 -7 21 -16c0 -58 -9 -89 -70 -93c-8 0 -13 4 -19 10c-25 -9 -73 -36 -117 -79c104 -115 65 -228 115 -228c64 0 86 47 137 143l34 65zM302 694c0 57 -95 74 -115 76l-1 -2c-9 -11 -9 -50 -9 -84v-70c0 -50 0 -110 -5 -180c67 96 130 195 130 260'], 0x1D4F5: [790,10,291,16,348,'348 216l-39 -74c-49 -98 -107 -152 -179 -152c-88 0 -114 65 -114 131c0 13 0 26 2 39c74 386 65 580 96 620c6 8 59 10 63 10c14 0 135 -7 136 -96v-2c0 -80 -85 -215 -156 -321c-6 -65 -17 -136 -31 -215c-5 -26 -10 -60 -10 -87s5 -49 28 -49c4 0 9 0 13 1 c50 13 90 45 133 129l39 76zM293 694c0 57 -95 74 -115 76l-1 
-2c-8 -10 -9 -45 -9 -78v-84c0 -52 -2 -115 -7 -187c68 104 132 210 132 275'], 0x1D4F6: [400,10,805,10,862,'862 216l-38 -74c-34 -65 -54 -103 -78 -125c-22 -21 -44 -27 -78 -27c-63 0 -80 58 -80 109c0 25 4 48 7 62c7 21 42 116 42 172c0 28 -9 39 -23 39c-34 0 -91 -25 -172 -130c-1 -30 -6 -60 -12 -86l-28 -157l-109 2l29 159c6 25 33 117 33 172c0 20 -4 40 -20 40 c-35 0 -92 -27 -169 -128l-46 -244h-110l41 213l-16 8c27 56 52 114 59 151h-24l3 21h109c9 0 18 -4 18 -12v-1c-6 -25 -13 -46 -19 -73c71 79 123 93 154 93c82 0 103 -59 107 -113c81 95 137 113 172 113c43 0 109 -16 109 -133c0 -39 -8 -78 -16 -111 c-5 -18 -19 -68 -19 -101c0 -25 8 -34 19 -34c5 0 13 2 24 10c14 11 41 57 72 119l39 76'], 0x1D4F7: [400,10,559,10,616,'616 216l-37 -74c-34 -65 -55 -103 -78 -125c-21 -20 -43 -27 -77 -27c-63 0 -85 31 -85 83c0 25 6 54 12 87c7 32 46 122 46 174c0 22 -7 38 -28 38c-41 0 -118 -37 -210 -167l-39 -205h-110l41 213l-16 8l1 3c29 54 51 113 58 148h-24l3 21h109c9 0 18 -4 18 -12v-1 c-10 -35 -18 -63 -28 -111c74 90 143 131 197 131c87 0 116 -50 116 -118c0 -39 -9 -84 -22 -127c-5 -18 -21 -67 -21 -100c0 -25 8 -34 20 -34c5 0 14 2 24 10c15 9 41 57 72 119l39 76'], 0x1D4F8: [400,10,497,21,554,'355 282c0 68 -30 90 -74 90c-109 0 -159 -139 -159 -268c0 -55 20 -86 83 -86c42 0 69 16 90 43c-9 4 -13 9 -13 16c0 8 7 17 16 17c0 0 4 0 6 -2l8 -4c22 46 43 128 43 194zM554 216l-1 -3c-34 -67 -72 -162 -176 -162h-10c-46 -38 -104 -61 -162 -61 c-98 0 -184 71 -184 171c0 135 140 239 260 239c106 0 177 -79 177 -170c0 -67 -30 -118 -69 -158c78 7 105 78 144 152l1 2'], 0x1D4F9: [400,400,511,-134,568,'364 259c0 71 -29 121 -86 121c-47 0 -86 -17 -119 -46c-4 -35 -13 -90 -30 -178l-17 -88c4 -4 9 -9 13 -15c26 -33 58 -43 109 -43c30 0 55 17 75 50c-6 4 -9 9 -9 14c0 9 8 17 17 17c0 0 4 0 7 -1l1 -2c14 30 25 68 33 108c3 22 6 43 6 63zM568 216l-1 -3 c-34 -67 -77 -162 -180 -162h-1c-41 -38 -95 -61 -152 -61c-58 0 -97 14 -127 50c-33 -166 -87 -418 -124 -439l-97 -1c-16 0 -20 14 -20 37c0 96 12 216 79 338c12 22 33 48 55 69l21 116c23 125 31 183 31 208v25h57c24 0 43 -2 47 -3c3 -1 5 -13 5 -17v-6 c33 22 70 33 117 33c122 0 191 -78 191 -172c0 -12 0 -23 -2 -34c-8 -46 -29 -87 -59 -121c76 10 100 78 138 151l1 2zM-8 8c-10 -15 -19 -29 -26 -42c-62 -123 -79 -233 -79 -329c0 -5 1 -13 1 -13c22 30 68 204 104 384'], 0x1D4FA: [400,399,525,21,582,'582 216l-1 -3c-30 -60 -58 -108 -136 -117l-81 -422c31 41 92 79 131 82l2 -21c-33 -5 -110 -44 -145 -128c-1 -4 -5 -6 -9 -6c-6 0 -83 5 -96 8l-3 4l88 462c-34 -49 -84 -85 -136 -85c-108 0 -175 66 -175 166c0 13 1 26 4 40c23 125 135 204 248 204 c44 0 82 -20 113 -52v3l4 -5l1 -1c11 0 35 48 61 48h50l-46 -237l-7 -39c57 8 79 42 113 107l1 2zM381 324c-29 32 -64 48 -108 48c-66 0 -117 -61 -140 -178c-6 -26 -8 -51 -8 -72c0 -69 27 -104 71 -104c47 0 95 33 126 83c-7 2 -18 8 -18 19c0 9 7 17 16 17c0 0 4 0 6 -2 c3 -1 7 -2 12 -4c3 7 5 13 8 21'], 0x1D4FB: [424,3,481,38,540,'540 222l-2 -2c-6 -15 -21 -46 -82 -46c-41 0 -76 37 -76 87c0 20 4 41 15 63c-57 -20 -128 -107 -184 -256l-13 -71h-110l46 241c9 48 22 99 22 121c0 6 -3 10 -3 10c-20 0 -26 -23 -96 -158l-19 10c63 125 82 179 119 179c65 0 81 -66 81 -132c0 -35 -4 -68 -8 -92 c39 79 94 162 182 224c21 15 52 24 61 24c13 0 29 -4 31 -9v-3c-6 -61 -31 -91 -88 -95c-10 -19 -14 -37 -14 -54c0 -39 24 -67 54 -67c48 0 60 25 64 34l1 3'], 0x1D4FC: [424,10,426,-29,484,'484 216l-3 -5c-29 -56 -56 -121 -117 -149c-32 -41 -99 -72 -196 -72c-94 0 -197 53 -197 111c0 56 36 64 55 64c36 0 83 -25 83 -40v-5s-2 -4 -6 -4h-2c-7 0 -19 6 -27 6c-6 0 -12 -4 -12 -21c0 -42 26 -83 106 -83c61 0 86 17 96 42h-1c-17 4 -19 14 -19 18 c0 9 7 17 16 17c0 0 4 0 7 -1l3 -3v14c0 78 -119 132 -131 220c-34 -4 -61 -74 
-82 -114l-19 10c69 140 97 203 170 203l40 -50h-17c-41 0 -57 -15 -57 -39c0 -63 211 -119 211 -217c0 -7 -1 -15 -4 -23c36 27 55 74 81 126l2 5'], 0x1D4FD: [658,10,299,21,356,'356 216l-37 -74c-49 -94 -106 -152 -180 -152c-86 0 -104 62 -104 128c0 15 0 28 4 42l12 51l-16 7l1 3c19 35 46 129 69 221h-84l4 29h88c17 72 29 143 29 184v3h87c9 0 18 -4 18 -12v-1c-8 -24 -21 -92 -38 -174h129l-3 -29h-132l-55 -282c-6 -31 -15 -73 -15 -101 c0 -23 5 -39 23 -39c0 0 7 0 10 1c50 13 89 45 132 129l39 76'], 0x1D4FE: [393,10,579,35,636,'636 216l-3 -5l-35 -69c-64 -125 -81 -152 -155 -152c-46 0 -79 28 -79 101c0 13 1 26 3 39c-80 -99 -154 -140 -197 -140c-88 0 -132 49 -132 134c0 20 2 42 8 66l5 23l-16 8l1 3c29 54 51 113 58 148h-24l3 21h109c9 0 18 -4 18 -12v-1c-17 -56 -19 -61 -45 -193 c-4 -23 -17 -73 -17 -113c0 -41 13 -56 32 -56c33 0 113 39 207 172l39 200v3h109l-45 -237c-9 -47 -18 -84 -18 -108c0 -23 9 -30 22 -30c23 0 58 63 93 132l37 71l2 5'], 0x1D4FF: [423,10,511,35,568,'568 216l-2 -5c-20 -41 -60 -43 -78 -43c-10 0 -19 1 -29 4c-62 -136 -191 -182 -252 -182c-74 0 -164 53 -164 160c0 13 1 26 4 40l4 23l-16 8c34 65 60 139 60 169v3h91c6 0 8 -6 8 -6c0 -9 -3 -14 -4 -22l-35 -178c-5 -27 -8 -52 -8 -71c0 -72 30 -98 66 -98 c52 0 135 64 169 189c-31 23 -53 56 -53 96c0 73 60 120 108 120c39 0 60 -24 60 -81c0 -39 -7 -99 -29 -151c7 -1 13 -2 20 -2c14 0 44 2 58 32l3 5zM398 324c0 18 -4 47 -17 47s-31 -29 -31 -68c0 -30 17 -55 39 -74c6 28 9 60 9 95'], 0x1D500: [423,10,688,35,745,'745 216l-3 -5c-15 -33 -48 -43 -79 -43c-10 0 -22 2 -34 5c-60 -137 -153 -183 -221 -183c-36 0 -76 20 -100 63c-35 -41 -77 -63 -132 -63c-72 0 -136 49 -136 149c0 16 2 33 6 51l5 23l-16 8l1 3c34 66 59 136 59 166v3h91c5 0 8 -6 8 -6c0 -5 -2 -15 -3 -22l-36 -178 c-5 -30 -11 -65 -11 -93c0 -55 17 -76 43 -76c77 0 96 129 122 256l3 15h88l-19 -102c-8 -41 -12 -74 -12 -99c0 -54 18 -70 47 -70c52 0 118 63 146 186c-33 24 -56 57 -56 99c0 73 60 120 108 120c30 0 56 -19 58 -60v-12c0 -39 -10 -101 -35 -159c9 -2 18 -3 26 -3 c14 0 44 2 59 32l2 5zM575 324c0 18 -4 47 -17 47s-31 -29 -31 -68c0 -31 16 -57 40 -77c5 30 8 63 8 98'], 0x1D501: [400,10,514,35,571,'571 216l-3 -5l-43 -85c-43 -83 -83 -136 -123 -136c-60 0 -113 20 -147 127c-17 -36 -34 -69 -55 -94c-21 -24 -53 -33 -82 -33c-35 0 -83 23 -83 76h29c0 -39 39 -48 54 -48c25 0 42 3 60 22c25 28 46 72 65 120c-2 9 -4 18 -5 27c-23 124 -31 185 -64 185 c-27 0 -56 -33 -119 -161l-20 10l1 3c71 139 97 176 145 176c56 0 97 -22 128 -87l2 3c28 61 65 84 125 84c45 0 85 -36 85 -88c0 -25 -19 -42 -45 -42c-21 0 -40 17 -40 42c0 43 -2 60 -17 60c-33 0 -63 -29 -82 -69l-13 -25c9 -26 18 -56 24 -92c23 -112 25 -168 54 -168 c22 0 58 29 112 134l35 69l2 5'], 0x1D502: [393,400,558,-16,615,'615 216l-3 -5c-46 -78 -100 -134 -157 -180l-55 -283c-26 -138 -179 -148 -215 -148c-145 0 -201 100 -201 143c0 123 173 139 350 236l29 145c-78 -94 -151 -134 -193 -134c-105 0 -132 65 -132 131c0 21 2 44 8 69l5 23l-16 8l1 3c29 54 51 113 58 148h-24l3 21h109 c9 0 18 -4 18 -12v-1c-17 -56 -19 -61 -45 -193c-4 -22 -15 -71 -15 -110c0 -34 8 -59 30 -59c32 0 112 38 204 167l41 205h109v-3l-46 -231l-16 -91c48 42 91 91 130 156l2 5zM329 -49c-165 -88 -316 -107 -316 -208c0 -25 40 -115 172 -115c30 0 83 3 100 91'], 0x1D503: [408,10,437,-16,494,'494 216l-3 -5l-70 -136c-35 -66 -73 -85 -115 -85c-68 0 -146 50 -219 50c-23 0 -39 -7 -78 -47l-25 16c26 39 63 92 115 108l184 161c-81 20 -113 44 -141 44c-16 0 -35 -11 -47 -35l-40 -76l-20 10l1 3c93 182 97 183 119 183c41 0 113 -44 180 -44c28 0 52 6 75 26 l19 19l20 -22l-306 -269c52 -5 133 -52 196 -52c21 0 56 7 77 47l56 109l3 5'] }; 
MathJax.Ajax.loadComplete(MathJax.OutputJax.SVG.fontDir+"/Script/Regular/Main.js");
shayzluf/lazoozweb
wiki/extensions/Math/modules/MathJax/unpacked/jax/output/SVG/fonts/Gyre-Pagella/Script/Regular/Main.js
JavaScript
gpl-2.0
57,096
/* * xHCI host controller driver * * Copyright (C) 2008 Intel Corp. * * Author: Sarah Sharp * Some code borrowed from the Linux EHCI driver. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/usb.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/dmapool.h> #include <linux/dma-mapping.h> #include "xhci.h" #include "xhci-trace.h" /* * Allocates a generic ring segment from the ring pool, sets the dma address, * initializes the segment to zero, and sets the private next pointer to NULL. * * Section 4.11.1.1: * "All components of all Command and Transfer TRBs shall be initialized to '0'" */ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, unsigned int cycle_state, gfp_t flags) { struct xhci_segment *seg; dma_addr_t dma; int i; seg = kzalloc(sizeof *seg, flags); if (!seg) return NULL; seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma); if (!seg->trbs) { kfree(seg); return NULL; } memset(seg->trbs, 0, TRB_SEGMENT_SIZE); /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */ if (cycle_state == 0) { for (i = 0; i < TRBS_PER_SEGMENT; i++) seg->trbs[i].link.control |= TRB_CYCLE; } seg->dma = dma; seg->next = NULL; return seg; } static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg) { if (seg->trbs) { dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma); seg->trbs = NULL; } kfree(seg); } static void xhci_free_segments_for_ring(struct xhci_hcd *xhci, struct xhci_segment *first) { struct xhci_segment *seg; seg = first->next; while (seg != first) { struct xhci_segment *next = seg->next; xhci_segment_free(xhci, seg); seg = next; } xhci_segment_free(xhci, first); } /* * Make the prev segment point to the next segment. * * Change the last TRB in the prev segment to be a Link TRB which points to the * DMA address of the next segment. The caller needs to set any Link TRB * related flags, such as End TRB, Toggle Cycle, and no snoop. */ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev, struct xhci_segment *next, enum xhci_ring_type type) { u32 val; if (!prev || !next) return; prev->next = next; if (type != TYPE_EVENT) { prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = cpu_to_le64(next->dma); /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control); val &= ~TRB_TYPE_BITMASK; val |= TRB_TYPE(TRB_LINK); /* Always set the chain bit with 0.95 hardware */ /* Set chain bit for isoc rings on AMD 0.96 host */ if (xhci_link_trb_quirk(xhci) || (type == TYPE_ISOC && (xhci->quirks & XHCI_AMD_0x96_HOST))) val |= TRB_CHAIN; prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val); } } /* * Link the ring to the new segments. * Set Toggle Cycle for the new ring if needed. 
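* The new chain of segments is spliced in right after the current enqueue segment, so its free TRBs become usable immediately; if the enqueue segment was also the last segment, the Toggle Cycle bit is moved from the old last segment's link TRB to the new last segment.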
*/ static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring, struct xhci_segment *first, struct xhci_segment *last, unsigned int num_segs) { struct xhci_segment *next; if (!ring || !first || !last) return; next = ring->enq_seg->next; xhci_link_segments(xhci, ring->enq_seg, first, ring->type); xhci_link_segments(xhci, last, next, ring->type); ring->num_segs += num_segs; ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs; if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) { ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control &= ~cpu_to_le32(LINK_TOGGLE); last->trbs[TRBS_PER_SEGMENT-1].link.control |= cpu_to_le32(LINK_TOGGLE); ring->last_seg = last; } } /* XXX: Do we need the hcd structure in all these functions? */ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring) { if (!ring) return; if (ring->first_seg) xhci_free_segments_for_ring(xhci, ring->first_seg); kfree(ring); } static void xhci_initialize_ring_info(struct xhci_ring *ring, unsigned int cycle_state) { /* The ring is empty, so the enqueue pointer == dequeue pointer */ ring->enqueue = ring->first_seg->trbs; ring->enq_seg = ring->first_seg; ring->dequeue = ring->enqueue; ring->deq_seg = ring->first_seg; /* The ring is initialized to 0. The producer must write 1 to the cycle * bit to handover ownership of the TRB, so PCS = 1. The consumer must * compare CCS to the cycle bit to check ownership, so CCS = 1. * * New rings are initialized with cycle state equal to 1; if we are * handling ring expansion, set the cycle state equal to the old ring. */ ring->cycle_state = cycle_state; /* Not necessary for new rings, but needed for re-initialized rings */ ring->enq_updates = 0; ring->deq_updates = 0; /* * Each segment has a link TRB, and leave an extra TRB for SW * accounting purpose */ ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1; } /* Allocate segments and link them for a ring */ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, struct xhci_segment **first, struct xhci_segment **last, unsigned int num_segs, unsigned int cycle_state, enum xhci_ring_type type, gfp_t flags) { struct xhci_segment *prev; prev = xhci_segment_alloc(xhci, cycle_state, flags); if (!prev) return -ENOMEM; num_segs--; *first = prev; while (num_segs > 0) { struct xhci_segment *next; next = xhci_segment_alloc(xhci, cycle_state, flags); if (!next) { prev = *first; while (prev) { next = prev->next; xhci_segment_free(xhci, prev); prev = next; } return -ENOMEM; } xhci_link_segments(xhci, prev, next, type); prev = next; num_segs--; } xhci_link_segments(xhci, prev, *first, type); *last = prev; return 0; } /** * Create a new ring with zero or more segments. * * Link each segment together into a ring. * Set the end flag and the cycle toggle bit on the last segment. * See section 4.9.1 and figures 15 and 16. 
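* For example, a two-segment transfer ring is laid out as seg0 --link TRB--> seg1 --link TRB (LINK_TOGGLE)--> seg0: only the last segment's link TRB carries the Toggle Cycle bit, and event rings get no link TRBs at all.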
*/ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs, unsigned int cycle_state, enum xhci_ring_type type, gfp_t flags) { struct xhci_ring *ring; int ret; ring = kzalloc(sizeof *(ring), flags); if (!ring) return NULL; ring->num_segs = num_segs; INIT_LIST_HEAD(&ring->td_list); ring->type = type; if (num_segs == 0) return ring; ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg, &ring->last_seg, num_segs, cycle_state, type, flags); if (ret) goto fail; /* Only event ring does not use link TRB */ if (type != TYPE_EVENT) { /* See section 4.9.2.1 and 6.4.4.1 */ ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |= cpu_to_le32(LINK_TOGGLE); } xhci_initialize_ring_info(ring, cycle_state); return ring; fail: kfree(ring); return NULL; } void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, unsigned int ep_index) { int rings_cached; rings_cached = virt_dev->num_rings_cached; if (rings_cached < XHCI_MAX_RINGS_CACHED) { virt_dev->ring_cache[rings_cached] = virt_dev->eps[ep_index].ring; virt_dev->num_rings_cached++; xhci_dbg(xhci, "Cached old ring, " "%d ring%s cached\n", virt_dev->num_rings_cached, (virt_dev->num_rings_cached > 1) ? "s" : ""); } else { xhci_ring_free(xhci, virt_dev->eps[ep_index].ring); xhci_dbg(xhci, "Ring cache full (%d rings), " "freeing ring\n", virt_dev->num_rings_cached); } virt_dev->eps[ep_index].ring = NULL; } /* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue * pointers to the beginning of the ring. */ static void xhci_reinit_cached_ring(struct xhci_hcd *xhci, struct xhci_ring *ring, unsigned int cycle_state, enum xhci_ring_type type) { struct xhci_segment *seg = ring->first_seg; int i; do { memset(seg->trbs, 0, sizeof(union xhci_trb)*TRBS_PER_SEGMENT); if (cycle_state == 0) { for (i = 0; i < TRBS_PER_SEGMENT; i++) seg->trbs[i].link.control |= TRB_CYCLE; } /* All endpoint rings have link TRBs */ xhci_link_segments(xhci, seg, seg->next, type); seg = seg->next; } while (seg != ring->first_seg); ring->type = type; xhci_initialize_ring_info(ring, cycle_state); /* td list should be empty since all URBs have been cancelled, * but just in case... */ INIT_LIST_HEAD(&ring->td_list); } /* * Expand an existing ring. * Look for a cached ring or allocate a new ring which has same segment numbers * and link the two rings. */ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring, unsigned int num_trbs, gfp_t flags) { struct xhci_segment *first; struct xhci_segment *last; unsigned int num_segs; unsigned int num_segs_needed; int ret; num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) / (TRBS_PER_SEGMENT - 1); /* Allocate number of segments we needed, or double the ring size */ num_segs = ring->num_segs > num_segs_needed ? ring->num_segs : num_segs_needed; ret = xhci_alloc_segments_for_ring(xhci, &first, &last, num_segs, ring->cycle_state, ring->type, flags); if (ret) return -ENOMEM; xhci_link_rings(xhci, ring, first, last, num_segs); xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion, "ring expansion succeed, now has %d segments", ring->num_segs); return 0; } #define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 
64 : 32) static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, int type, gfp_t flags) { struct xhci_container_ctx *ctx; if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT)) return NULL; ctx = kzalloc(sizeof(*ctx), flags); if (!ctx) return NULL; ctx->type = type; ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024; if (type == XHCI_CTX_TYPE_INPUT) ctx->size += CTX_SIZE(xhci->hcc_params); ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma); if (!ctx->bytes) { kfree(ctx); return NULL; } memset(ctx->bytes, 0, ctx->size); return ctx; } static void xhci_free_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) { if (!ctx) return; dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma); kfree(ctx); } struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) { if (ctx->type != XHCI_CTX_TYPE_INPUT) return NULL; return (struct xhci_input_control_ctx *)ctx->bytes; } struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) { if (ctx->type == XHCI_CTX_TYPE_DEVICE) return (struct xhci_slot_ctx *)ctx->bytes; return (struct xhci_slot_ctx *) (ctx->bytes + CTX_SIZE(xhci->hcc_params)); } struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index) { /* increment ep index by offset of start of ep ctx array */ ep_index++; if (ctx->type == XHCI_CTX_TYPE_INPUT) ep_index++; return (struct xhci_ep_ctx *) (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params))); } /***************** Streams structures manipulation *************************/ static void xhci_free_stream_ctx(struct xhci_hcd *xhci, unsigned int num_stream_ctxs, struct xhci_stream_ctx *stream_ctx, dma_addr_t dma) { struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE) dma_free_coherent(&pdev->dev, sizeof(struct xhci_stream_ctx)*num_stream_ctxs, stream_ctx, dma); else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE) return dma_pool_free(xhci->small_streams_pool, stream_ctx, dma); else return dma_pool_free(xhci->medium_streams_pool, stream_ctx, dma); } /* * The stream context array for each endpoint with bulk streams enabled can * vary in size, based on: * - how many streams the endpoint supports, * - the maximum primary stream array size the host controller supports, * - and how many streams the device driver asks for. * * The stream context array must be a power of 2, and can be as small as * 64 bytes or as large as 1MB. 
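* (Each stream context is 16 bytes, so that is anywhere from 4 to 64K entries; the allocator below uses the small and medium stream DMA pools for the common small sizes and falls back to dma_alloc_coherent() for arrays larger than MEDIUM_STREAM_ARRAY_SIZE.)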
*/ static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci, unsigned int num_stream_ctxs, dma_addr_t *dma, gfp_t mem_flags) { struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE) return dma_alloc_coherent(&pdev->dev, sizeof(struct xhci_stream_ctx)*num_stream_ctxs, dma, mem_flags); else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE) return dma_pool_alloc(xhci->small_streams_pool, mem_flags, dma); else return dma_pool_alloc(xhci->medium_streams_pool, mem_flags, dma); } struct xhci_ring *xhci_dma_to_transfer_ring( struct xhci_virt_ep *ep, u64 address) { if (ep->ep_state & EP_HAS_STREAMS) return radix_tree_lookup(&ep->stream_info->trb_address_map, address >> TRB_SEGMENT_SHIFT); return ep->ring; } struct xhci_ring *xhci_stream_id_to_ring( struct xhci_virt_device *dev, unsigned int ep_index, unsigned int stream_id) { struct xhci_virt_ep *ep = &dev->eps[ep_index]; if (stream_id == 0) return ep->ring; if (!ep->stream_info) return NULL; if (stream_id > ep->stream_info->num_streams) return NULL; return ep->stream_info->stream_rings[stream_id]; } /* * Change an endpoint's internal structure so it supports stream IDs. The * number of requested streams includes stream 0, which cannot be used by device * drivers. * * The number of stream contexts in the stream context array may be bigger than * the number of streams the driver wants to use. This is because the number of * stream context array entries must be a power of two. * * We need a radix tree for mapping physical addresses of TRBs to which stream * ID they belong to. We need to do this because the host controller won't tell * us which stream ring the TRB came from. We could store the stream ID in an * event data TRB, but that doesn't help us for the cancellation case, since the * endpoint may stop before it reaches that event data TRB. * * The radix tree maps the upper portion of the TRB DMA address to a ring * segment that has the same upper portion of DMA addresses. For example, say I * have segments of size 1KB, that are always 64-byte aligned. A segment may * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the * key to the stream ID is 0x43244. I can use the DMA address of the TRB to * pass the radix tree a key to get the right stream ID: * * 0x10c90fff >> 10 = 0x43243 * 0x10c912c0 >> 10 = 0x43244 * 0x10c91400 >> 10 = 0x43245 * * Obviously, only those TRBs with DMA addresses that are within the segment * will make the radix tree return the stream ID for that ring. * * Caveats for the radix tree: * * The radix tree uses an unsigned long as a key pair. On 32-bit systems, an * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit * extended systems (where the DMA address can be bigger than 32-bits), * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that. 
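* In the code that shift is TRB_SEGMENT_SHIFT: the same shift is used when keys are inserted in xhci_alloc_stream_info() and when they are looked up in xhci_dma_to_transfer_ring().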
*/ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci, unsigned int num_stream_ctxs, unsigned int num_streams, gfp_t mem_flags) { struct xhci_stream_info *stream_info; u32 cur_stream; struct xhci_ring *cur_ring; unsigned long key; u64 addr; int ret; xhci_dbg(xhci, "Allocating %u streams and %u " "stream context array entries.\n", num_streams, num_stream_ctxs); if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) { xhci_dbg(xhci, "Command ring has no reserved TRBs available\n"); return NULL; } xhci->cmd_ring_reserved_trbs++; stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags); if (!stream_info) goto cleanup_trbs; stream_info->num_streams = num_streams; stream_info->num_stream_ctxs = num_stream_ctxs; /* Initialize the array of virtual pointers to stream rings. */ stream_info->stream_rings = kzalloc( sizeof(struct xhci_ring *)*num_streams, mem_flags); if (!stream_info->stream_rings) goto cleanup_info; /* Initialize the array of DMA addresses for stream rings for the HW. */ stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci, num_stream_ctxs, &stream_info->ctx_array_dma, mem_flags); if (!stream_info->stream_ctx_array) goto cleanup_ctx; memset(stream_info->stream_ctx_array, 0, sizeof(struct xhci_stream_ctx)*num_stream_ctxs); /* Allocate everything needed to free the stream rings later */ stream_info->free_streams_command = xhci_alloc_command(xhci, true, true, mem_flags); if (!stream_info->free_streams_command) goto cleanup_ctx; INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC); /* Allocate rings for all the streams that the driver will use, * and add their segment DMA addresses to the radix tree. * Stream 0 is reserved. */ for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { stream_info->stream_rings[cur_stream] = xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, mem_flags); cur_ring = stream_info->stream_rings[cur_stream]; if (!cur_ring) goto cleanup_rings; cur_ring->stream_id = cur_stream; /* Set deq ptr, cycle bit, and stream context type */ addr = cur_ring->first_seg->dma | SCT_FOR_CTX(SCT_PRI_TR) | cur_ring->cycle_state; stream_info->stream_ctx_array[cur_stream].stream_ring = cpu_to_le64(addr); xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n", cur_stream, (unsigned long long) addr); key = (unsigned long) (cur_ring->first_seg->dma >> TRB_SEGMENT_SHIFT); ret = radix_tree_insert(&stream_info->trb_address_map, key, cur_ring); if (ret) { xhci_ring_free(xhci, cur_ring); stream_info->stream_rings[cur_stream] = NULL; goto cleanup_rings; } } /* Leave the other unused stream ring pointers in the stream context * array initialized to zero. This will cause the xHC to give us an * error if the device asks for a stream ID we don't have setup (if it * was any other way, the host controller would assume the ring is * "empty" and wait forever for data to be queued to that stream ID). */ return stream_info; cleanup_rings: for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { cur_ring = stream_info->stream_rings[cur_stream]; if (cur_ring) { addr = cur_ring->first_seg->dma; radix_tree_delete(&stream_info->trb_address_map, addr >> TRB_SEGMENT_SHIFT); xhci_ring_free(xhci, cur_ring); stream_info->stream_rings[cur_stream] = NULL; } } xhci_free_command(xhci, stream_info->free_streams_command); cleanup_ctx: kfree(stream_info->stream_rings); cleanup_info: kfree(stream_info); cleanup_trbs: xhci->cmd_ring_reserved_trbs--; return NULL; } /* * Sets the MaxPStreams field and the Linear Stream Array field. 
* Sets the dequeue pointer to the stream context array. */ void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci, struct xhci_ep_ctx *ep_ctx, struct xhci_stream_info *stream_info) { u32 max_primary_streams; /* MaxPStreams is the number of stream context array entries, not the * number we're actually using. Must be in 2^(MaxPstreams + 1) format. * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc. */ max_primary_streams = fls(stream_info->num_stream_ctxs) - 2; xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, "Setting number of stream ctx array entries to %u", 1 << (max_primary_streams + 1)); ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK); ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams) | EP_HAS_LSA); ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma); } /* * Sets the MaxPStreams field and the Linear Stream Array field to 0. * Reinstalls the "normal" endpoint ring (at its previous dequeue mark, * not at the beginning of the ring). */ void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci, struct xhci_ep_ctx *ep_ctx, struct xhci_virt_ep *ep) { dma_addr_t addr; ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA)); addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue); ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state); } /* Frees all stream contexts associated with the endpoint, * * Caller should fix the endpoint context streams fields. */ void xhci_free_stream_info(struct xhci_hcd *xhci, struct xhci_stream_info *stream_info) { int cur_stream; struct xhci_ring *cur_ring; dma_addr_t addr; if (!stream_info) return; for (cur_stream = 1; cur_stream < stream_info->num_streams; cur_stream++) { cur_ring = stream_info->stream_rings[cur_stream]; if (cur_ring) { addr = cur_ring->first_seg->dma; radix_tree_delete(&stream_info->trb_address_map, addr >> TRB_SEGMENT_SHIFT); xhci_ring_free(xhci, cur_ring); stream_info->stream_rings[cur_stream] = NULL; } } xhci_free_command(xhci, stream_info->free_streams_command); xhci->cmd_ring_reserved_trbs--; if (stream_info->stream_ctx_array) xhci_free_stream_ctx(xhci, stream_info->num_stream_ctxs, stream_info->stream_ctx_array, stream_info->ctx_array_dma); if (stream_info) kfree(stream_info->stream_rings); kfree(stream_info); } /***************** Device context manipulation *************************/ static void xhci_init_endpoint_timer(struct xhci_hcd *xhci, struct xhci_virt_ep *ep) { init_timer(&ep->stop_cmd_timer); ep->stop_cmd_timer.data = (unsigned long) ep; ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog; ep->xhci = xhci; } static void xhci_free_tt_info(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, int slot_id) { struct list_head *tt_list_head; struct xhci_tt_bw_info *tt_info, *next; bool slot_found = false; /* If the device never made it past the Set Address stage, * it may not have the real_port set correctly. 
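* In that case real_port may still be 0 (or out of range), so the check below bails out rather than index rh_bw[] with a bogus port number.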
*/ if (virt_dev->real_port == 0 || virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) { xhci_dbg(xhci, "Bad real port.\n"); return; } tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts); list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) { /* Multi-TT hubs will have more than one entry */ if (tt_info->slot_id == slot_id) { slot_found = true; list_del(&tt_info->tt_list); kfree(tt_info); } else if (slot_found) { break; } } } int xhci_alloc_tt_info(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_device *hdev, struct usb_tt *tt, gfp_t mem_flags) { struct xhci_tt_bw_info *tt_info; unsigned int num_ports; int i, j; if (!tt->multi) num_ports = 1; else num_ports = hdev->maxchild; for (i = 0; i < num_ports; i++, tt_info++) { struct xhci_interval_bw_table *bw_table; tt_info = kzalloc(sizeof(*tt_info), mem_flags); if (!tt_info) goto free_tts; INIT_LIST_HEAD(&tt_info->tt_list); list_add(&tt_info->tt_list, &xhci->rh_bw[virt_dev->real_port - 1].tts); tt_info->slot_id = virt_dev->udev->slot_id; if (tt->multi) tt_info->ttport = i+1; bw_table = &tt_info->bw_table; for (j = 0; j < XHCI_MAX_INTERVAL; j++) INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints); } return 0; free_tts: xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id); return -ENOMEM; } /* All the xhci_tds in the ring's TD list should be freed at this point. * Should be called with xhci->lock held if there is any chance the TT lists * will be manipulated by the configure endpoint, allocate device, or update * hub functions while this function is removing the TT entries from the list. */ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) { struct xhci_virt_device *dev; int i; int old_active_eps = 0; /* Slot ID 0 is reserved */ if (slot_id == 0 || !xhci->devs[slot_id]) return; dev = xhci->devs[slot_id]; xhci->dcbaa->dev_context_ptrs[slot_id] = 0; if (!dev) return; if (dev->tt_info) old_active_eps = dev->tt_info->active_eps; for (i = 0; i < 31; ++i) { if (dev->eps[i].ring) xhci_ring_free(xhci, dev->eps[i].ring); if (dev->eps[i].stream_info) xhci_free_stream_info(xhci, dev->eps[i].stream_info); /* Endpoints on the TT/root port lists should have been removed * when usb_disable_device() was called for the device. * We can't drop them anyway, because the udev might have gone * away by this point, and we can't tell what speed it was. */ if (!list_empty(&dev->eps[i].bw_endpoint_list)) xhci_warn(xhci, "Slot %u endpoint %u " "not removed from BW list!\n", slot_id, i); } /* If this is a hub, free the TT(s) from the TT list */ xhci_free_tt_info(xhci, dev, slot_id); /* If necessary, update the number of active TTs on this root port */ xhci_update_tt_active_eps(xhci, dev, old_active_eps); if (dev->ring_cache) { for (i = 0; i < dev->num_rings_cached; i++) xhci_ring_free(xhci, dev->ring_cache[i]); kfree(dev->ring_cache); } if (dev->in_ctx) xhci_free_container_ctx(xhci, dev->in_ctx); if (dev->out_ctx) xhci_free_container_ctx(xhci, dev->out_ctx); kfree(xhci->devs[slot_id]); xhci->devs[slot_id] = NULL; } int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags) { struct xhci_virt_device *dev; int i; /* Slot ID 0 is reserved */ if (slot_id == 0 || xhci->devs[slot_id]) { xhci_warn(xhci, "Bad Slot ID %d\n", slot_id); return 0; } xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags); if (!xhci->devs[slot_id]) return 0; dev = xhci->devs[slot_id]; /* Allocate the (output) device context that will be used in the HC. 
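* The output context is the copy the controller itself reads and updates (the DCBAA entry set up at the end of this function points at it); the input context allocated just after is the one the driver fills in for Address Device and Configure Endpoint commands.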
*/ dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags); if (!dev->out_ctx) goto fail; xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id, (unsigned long long)dev->out_ctx->dma); /* Allocate the (input) device context for address device command */ dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags); if (!dev->in_ctx) goto fail; xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id, (unsigned long long)dev->in_ctx->dma); /* Initialize the cancellation list and watchdog timers for each ep */ for (i = 0; i < 31; i++) { xhci_init_endpoint_timer(xhci, &dev->eps[i]); INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list); INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list); } /* Allocate endpoint 0 ring */ dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, flags); if (!dev->eps[0].ring) goto fail; /* Allocate pointers to the ring cache */ dev->ring_cache = kzalloc( sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED, flags); if (!dev->ring_cache) goto fail; dev->num_rings_cached = 0; init_completion(&dev->cmd_completion); INIT_LIST_HEAD(&dev->cmd_list); dev->udev = udev; /* Point to output device context in dcbaa. */ xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma); xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n", slot_id, &xhci->dcbaa->dev_context_ptrs[slot_id], le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id])); return 1; fail: xhci_free_virt_device(xhci, slot_id); return 0; } void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci, struct usb_device *udev) { struct xhci_virt_device *virt_dev; struct xhci_ep_ctx *ep0_ctx; struct xhci_ring *ep_ring; virt_dev = xhci->devs[udev->slot_id]; ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0); ep_ring = virt_dev->eps[0].ring; /* * FIXME we don't keep track of the dequeue pointer very well after a * Set TR dequeue pointer, so we're setting the dequeue pointer of the * host to our enqueue pointer. This should only be called after a * configured device has reset, so all control transfers should have * been completed or cancelled before the reset. */ ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue) | ep_ring->cycle_state); } /* * The xHCI roothub may have ports of differing speeds in any order in the port * status registers. xhci->port_array provides an array of the port speed for * each offset into the port status registers. * * The xHCI hardware wants to know the roothub port number that the USB device * is attached to (or the roothub port its ancestor hub is attached to). All we * know is the index of that port under either the USB 2.0 or the USB 3.0 * roothub, but that doesn't give us the real index into the HW port status * registers. Call xhci_find_raw_port_number() to get real index. 
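* xhci_find_real_port_number() below picks the USB 3.0 shared_hcd or the USB 2.0 main_hcd based on the device speed, walks up to the device (or ancestor hub) sitting directly below the root hub, and passes its port number to xhci_find_raw_port_number().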
*/ static u32 xhci_find_real_port_number(struct xhci_hcd *xhci, struct usb_device *udev) { struct usb_device *top_dev; struct usb_hcd *hcd; if (udev->speed == USB_SPEED_SUPER) hcd = xhci->shared_hcd; else hcd = xhci->main_hcd; for (top_dev = udev; top_dev->parent && top_dev->parent->parent; top_dev = top_dev->parent) /* Found device below root hub */; return xhci_find_raw_port_number(hcd, top_dev->portnum); } /* Setup an xHCI virtual device for a Set Address command */ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev) { struct xhci_virt_device *dev; struct xhci_ep_ctx *ep0_ctx; struct xhci_slot_ctx *slot_ctx; u32 port_num; u32 max_packets; struct usb_device *top_dev; dev = xhci->devs[udev->slot_id]; /* Slot ID 0 is reserved */ if (udev->slot_id == 0 || !dev) { xhci_warn(xhci, "Slot ID %d is not assigned to this device\n", udev->slot_id); return -EINVAL; } ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0); slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx); /* 3) Only the control endpoint is valid - one endpoint context */ slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route); switch (udev->speed) { case USB_SPEED_SUPER: slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS); max_packets = MAX_PACKET(512); break; case USB_SPEED_HIGH: slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS); max_packets = MAX_PACKET(64); break; /* USB core guesses at a 64-byte max packet first for FS devices */ case USB_SPEED_FULL: slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS); max_packets = MAX_PACKET(64); break; case USB_SPEED_LOW: slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS); max_packets = MAX_PACKET(8); break; case USB_SPEED_WIRELESS: xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); return -EINVAL; break; default: /* Speed was set earlier, this shouldn't happen. */ return -EINVAL; } /* Find the root hub port this device is under */ port_num = xhci_find_real_port_number(xhci, udev); if (!port_num) return -EINVAL; slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num)); /* Set the port number in the virtual_device to the faked port number */ for (top_dev = udev; top_dev->parent && top_dev->parent->parent; top_dev = top_dev->parent) /* Found device below root hub */; dev->fake_port = top_dev->portnum; dev->real_port = port_num; xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num); xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port); /* Find the right bandwidth table that this device will be a part of. * If this is a full speed device attached directly to a root port (or a * decendent of one), it counts as a primary bandwidth domain, not a * secondary bandwidth domain under a TT. An xhci_tt_info structure * will never be created for the HS root hub. */ if (!udev->tt || !udev->tt->hub->parent) { dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table; } else { struct xhci_root_port_bw_info *rh_bw; struct xhci_tt_bw_info *tt_bw; rh_bw = &xhci->rh_bw[port_num - 1]; /* Find the right TT. */ list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) { if (tt_bw->slot_id != udev->tt->hub->slot_id) continue; if (!dev->udev->tt->multi || (udev->tt->multi && tt_bw->ttport == dev->udev->ttport)) { dev->bw_table = &tt_bw->bw_table; dev->tt_info = tt_bw; break; } } if (!dev->tt_info) xhci_warn(xhci, "WARN: Didn't find a matching TT\n"); } /* Is this a LS/FS device under an external HS hub? 
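* If so, record the TT hub's slot ID and downstream port number in the slot context so the controller can schedule split transactions through the hub's Transaction Translator, and set DEV_MTT when the hub has one TT per port.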
*/ if (udev->tt && udev->tt->hub->parent) { slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id | (udev->ttport << 8)); if (udev->tt->multi) slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); } xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); /* Step 4 - ring already allocated */ /* Step 5 */ ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP)); /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */ ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) | max_packets); ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma | dev->eps[0].ring->cycle_state); /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ return 0; } /* * Convert interval expressed as 2^(bInterval - 1) == interval into * straight exponent value 2^n == interval. * */ static unsigned int xhci_parse_exponent_interval(struct usb_device *udev, struct usb_host_endpoint *ep) { unsigned int interval; interval = clamp_val(ep->desc.bInterval, 1, 16) - 1; if (interval != ep->desc.bInterval - 1) dev_warn(&udev->dev, "ep %#x - rounding interval to %d %sframes\n", ep->desc.bEndpointAddress, 1 << interval, udev->speed == USB_SPEED_FULL ? "" : "micro"); if (udev->speed == USB_SPEED_FULL) { /* * Full speed isoc endpoints specify interval in frames, * not microframes. We are using microframes everywhere, * so adjust accordingly. */ interval += 3; /* 1 frame = 2^3 uframes */ } return interval; } /* * Convert bInterval expressed in microframes (in 1-255 range) to exponent of * microframes, rounded down to nearest power of 2. */ static unsigned int xhci_microframes_to_exponent(struct usb_device *udev, struct usb_host_endpoint *ep, unsigned int desc_interval, unsigned int min_exponent, unsigned int max_exponent) { unsigned int interval; interval = fls(desc_interval) - 1; interval = clamp_val(interval, min_exponent, max_exponent); if ((1 << interval) != desc_interval) dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n", ep->desc.bEndpointAddress, 1 << interval, desc_interval); return interval; } static unsigned int xhci_parse_microframe_interval(struct usb_device *udev, struct usb_host_endpoint *ep) { if (ep->desc.bInterval == 0) return 0; return xhci_microframes_to_exponent(udev, ep, ep->desc.bInterval, 0, 15); } static unsigned int xhci_parse_frame_interval(struct usb_device *udev, struct usb_host_endpoint *ep) { return xhci_microframes_to_exponent(udev, ep, ep->desc.bInterval * 8, 3, 10); } /* Return the polling or NAK interval. * * The polling interval is expressed in "microframes". If xHCI's Interval field * is set to N, it will service the endpoint every 2^(Interval)*125us. * * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval * is set to 0. 
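* For example, a high speed interrupt endpoint with bInterval = 4 is serviced every 2^(4-1) = 8 microframes (1 ms), so the Interval field here ends up as 3; a full speed isoc endpoint with bInterval = 1 (one frame) also ends up with Interval 3, since 1 frame = 8 microframes.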
*/ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev, struct usb_host_endpoint *ep) { unsigned int interval = 0; switch (udev->speed) { case USB_SPEED_HIGH: /* Max NAK rate */ if (usb_endpoint_xfer_control(&ep->desc) || usb_endpoint_xfer_bulk(&ep->desc)) { interval = xhci_parse_microframe_interval(udev, ep); break; } /* Fall through - SS and HS isoc/int have same decoding */ case USB_SPEED_SUPER: if (usb_endpoint_xfer_int(&ep->desc) || usb_endpoint_xfer_isoc(&ep->desc)) { interval = xhci_parse_exponent_interval(udev, ep); } break; case USB_SPEED_FULL: if (usb_endpoint_xfer_isoc(&ep->desc)) { interval = xhci_parse_exponent_interval(udev, ep); break; } /* * Fall through for interrupt endpoint interval decoding * since it uses the same rules as low speed interrupt * endpoints. */ case USB_SPEED_LOW: if (usb_endpoint_xfer_int(&ep->desc) || usb_endpoint_xfer_isoc(&ep->desc)) { interval = xhci_parse_frame_interval(udev, ep); } break; default: BUG(); } return EP_INTERVAL(interval); } /* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps. * High speed endpoint descriptors can define "the number of additional * transaction opportunities per microframe", but that goes in the Max Burst * endpoint context field. */ static u32 xhci_get_endpoint_mult(struct usb_device *udev, struct usb_host_endpoint *ep) { if (udev->speed != USB_SPEED_SUPER || !usb_endpoint_xfer_isoc(&ep->desc)) return 0; return ep->ss_ep_comp.bmAttributes; } static u32 xhci_get_endpoint_type(struct usb_device *udev, struct usb_host_endpoint *ep) { int in; u32 type; in = usb_endpoint_dir_in(&ep->desc); if (usb_endpoint_xfer_control(&ep->desc)) { type = EP_TYPE(CTRL_EP); } else if (usb_endpoint_xfer_bulk(&ep->desc)) { if (in) type = EP_TYPE(BULK_IN_EP); else type = EP_TYPE(BULK_OUT_EP); } else if (usb_endpoint_xfer_isoc(&ep->desc)) { if (in) type = EP_TYPE(ISOC_IN_EP); else type = EP_TYPE(ISOC_OUT_EP); } else if (usb_endpoint_xfer_int(&ep->desc)) { if (in) type = EP_TYPE(INT_IN_EP); else type = EP_TYPE(INT_OUT_EP); } else { type = 0; } return type; } /* Return the maximum endpoint service interval time (ESIT) payload. * Basically, this is the maxpacket size, multiplied by the burst size * and mult size. */ static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci, struct usb_device *udev, struct usb_host_endpoint *ep) { int max_burst; int max_packet; /* Only applies for interrupt or isochronous endpoints */ if (usb_endpoint_xfer_control(&ep->desc) || usb_endpoint_xfer_bulk(&ep->desc)) return 0; if (udev->speed == USB_SPEED_SUPER) return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval); max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)); max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11; /* A 0 in max burst means 1 transfer per ESIT */ return max_packet * (max_burst + 1); } /* Set up an endpoint with one ring segment. Do not allocate stream rings. * Drivers will have to call usb_alloc_streams() to do that. 
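* Note: the endpoint ring below is actually allocated with two segments (the xhci_ring_alloc(xhci, 2, 1, type, ...) call) and can be grown further at runtime by xhci_ring_expansion() if more TRBs are needed.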
*/ int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_device *udev, struct usb_host_endpoint *ep, gfp_t mem_flags) { unsigned int ep_index; struct xhci_ep_ctx *ep_ctx; struct xhci_ring *ep_ring; unsigned int max_packet; unsigned int max_burst; enum xhci_ring_type type; u32 max_esit_payload; u32 endpoint_type; ep_index = xhci_get_endpoint_index(&ep->desc); ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); endpoint_type = xhci_get_endpoint_type(udev, ep); if (!endpoint_type) return -EINVAL; ep_ctx->ep_info2 = cpu_to_le32(endpoint_type); type = usb_endpoint_type(&ep->desc); /* Set up the endpoint ring */ virt_dev->eps[ep_index].new_ring = xhci_ring_alloc(xhci, 2, 1, type, mem_flags); if (!virt_dev->eps[ep_index].new_ring) { /* Attempt to use the ring cache */ if (virt_dev->num_rings_cached == 0) return -ENOMEM; virt_dev->eps[ep_index].new_ring = virt_dev->ring_cache[virt_dev->num_rings_cached]; virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL; virt_dev->num_rings_cached--; xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring, 1, type); } virt_dev->eps[ep_index].skip = false; ep_ring = virt_dev->eps[ep_index].new_ring; ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state); ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep) | EP_MULT(xhci_get_endpoint_mult(udev, ep))); /* FIXME dig Mult and streams info out of ep companion desc */ /* Allow 3 retries for everything but isoc; * CErr shall be set to 0 for Isoch endpoints. */ if (!usb_endpoint_xfer_isoc(&ep->desc)) ep_ctx->ep_info2 |= cpu_to_le32(ERROR_COUNT(3)); else ep_ctx->ep_info2 |= cpu_to_le32(ERROR_COUNT(0)); /* Set the max packet size and max burst */ max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)); max_burst = 0; switch (udev->speed) { case USB_SPEED_SUPER: /* dig out max burst from ep companion desc */ max_burst = ep->ss_ep_comp.bMaxBurst; break; case USB_SPEED_HIGH: /* Some devices get this wrong */ if (usb_endpoint_xfer_bulk(&ep->desc)) max_packet = 512; /* bits 11:12 specify the number of additional transaction * opportunities per microframe (USB 2.0, section 9.6.6) */ if (usb_endpoint_xfer_isoc(&ep->desc) || usb_endpoint_xfer_int(&ep->desc)) { max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11; } break; case USB_SPEED_FULL: case USB_SPEED_LOW: break; default: BUG(); } ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) | MAX_BURST(max_burst)); max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep); ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload)); /* * XXX no idea how to calculate the average TRB buffer length for bulk * endpoints, as the driver gives us no clue how big each scatter gather * list entry (or buffer) is going to be. * * For isochronous and interrupt endpoints, we set it to the max * available, until we have new API in the USB core to allow drivers to * declare how much bandwidth they actually need. * * Normally, it would be calculated by taking the total of the buffer * lengths in the TD and then dividing by the number of TRBs in a TD, * including link TRBs, No-op TRBs, and Event data TRBs. Since we don't * use Event Data TRBs, and we don't chain in a link TRB on short * transfers, we're basically dividing by 1. * * xHCI 1.0 specification indicates that the Average TRB Length should * be set to 8 for control endpoints. 
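* (The code below applies the value 8 only on 1.0 hosts, i.e. hci_version == 0x100; on older hosts control endpoints fall through and use the max ESIT payload like every other endpoint type.)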
*/ if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100) ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8)); else ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload)); /* FIXME Debug endpoint context */ return 0; } void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep) { unsigned int ep_index; struct xhci_ep_ctx *ep_ctx; ep_index = xhci_get_endpoint_index(&ep->desc); ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); ep_ctx->ep_info = 0; ep_ctx->ep_info2 = 0; ep_ctx->deq = 0; ep_ctx->tx_info = 0; /* Don't free the endpoint ring until the set interface or configuration * request succeeds. */ } void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info) { bw_info->ep_interval = 0; bw_info->mult = 0; bw_info->num_packets = 0; bw_info->max_packet_size = 0; bw_info->type = 0; bw_info->max_esit_payload = 0; } void xhci_update_bw_info(struct xhci_hcd *xhci, struct xhci_container_ctx *in_ctx, struct xhci_input_control_ctx *ctrl_ctx, struct xhci_virt_device *virt_dev) { struct xhci_bw_info *bw_info; struct xhci_ep_ctx *ep_ctx; unsigned int ep_type; int i; for (i = 1; i < 31; ++i) { bw_info = &virt_dev->eps[i].bw_info; /* We can't tell what endpoint type is being dropped, but * unconditionally clearing the bandwidth info for non-periodic * endpoints should be harmless because the info will never be * set in the first place. */ if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) { /* Dropped endpoint */ xhci_clear_endpoint_bw_info(bw_info); continue; } if (EP_IS_ADDED(ctrl_ctx, i)) { ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i); ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2)); /* Ignore non-periodic endpoints */ if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP && ep_type != ISOC_IN_EP && ep_type != INT_IN_EP) continue; /* Added or changed endpoint */ bw_info->ep_interval = CTX_TO_EP_INTERVAL( le32_to_cpu(ep_ctx->ep_info)); /* Number of packets and mult are zero-based in the * input context, but we want one-based for the * interval table. */ bw_info->mult = CTX_TO_EP_MULT( le32_to_cpu(ep_ctx->ep_info)) + 1; bw_info->num_packets = CTX_TO_MAX_BURST( le32_to_cpu(ep_ctx->ep_info2)) + 1; bw_info->max_packet_size = MAX_PACKET_DECODED( le32_to_cpu(ep_ctx->ep_info2)); bw_info->type = ep_type; bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD( le32_to_cpu(ep_ctx->tx_info)); } } } /* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy. * Useful when you want to change one particular aspect of the endpoint and then * issue a configure endpoint command. */ void xhci_endpoint_copy(struct xhci_hcd *xhci, struct xhci_container_ctx *in_ctx, struct xhci_container_ctx *out_ctx, unsigned int ep_index) { struct xhci_ep_ctx *out_ep_ctx; struct xhci_ep_ctx *in_ep_ctx; out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); in_ep_ctx->ep_info = out_ep_ctx->ep_info; in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2; in_ep_ctx->deq = out_ep_ctx->deq; in_ep_ctx->tx_info = out_ep_ctx->tx_info; } /* Copy output xhci_slot_ctx to the input xhci_slot_ctx. * Useful when you want to change one particular aspect of the endpoint and then * issue a configure endpoint command. Only the context entries field matters, * but we'll copy the whole thing anyway. 
*/ void xhci_slot_copy(struct xhci_hcd *xhci, struct xhci_container_ctx *in_ctx, struct xhci_container_ctx *out_ctx) { struct xhci_slot_ctx *in_slot_ctx; struct xhci_slot_ctx *out_slot_ctx; in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx); in_slot_ctx->dev_info = out_slot_ctx->dev_info; in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2; in_slot_ctx->tt_info = out_slot_ctx->tt_info; in_slot_ctx->dev_state = out_slot_ctx->dev_state; } /* Set up the scratchpad buffer array and scratchpad buffers, if needed. */ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) { int i; struct device *dev = xhci_to_hcd(xhci)->self.controller; int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Allocating %d scratchpad buffers", num_sp); if (!num_sp) return 0; xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags); if (!xhci->scratchpad) goto fail_sp; xhci->scratchpad->sp_array = dma_alloc_coherent(dev, num_sp * sizeof(u64), &xhci->scratchpad->sp_dma, flags); if (!xhci->scratchpad->sp_array) goto fail_sp2; xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags); if (!xhci->scratchpad->sp_buffers) goto fail_sp3; xhci->scratchpad->sp_dma_buffers = kzalloc(sizeof(dma_addr_t) * num_sp, flags); if (!xhci->scratchpad->sp_dma_buffers) goto fail_sp4; xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma); for (i = 0; i < num_sp; i++) { dma_addr_t dma; void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma, flags); if (!buf) goto fail_sp5; xhci->scratchpad->sp_array[i] = dma; xhci->scratchpad->sp_buffers[i] = buf; xhci->scratchpad->sp_dma_buffers[i] = dma; } return 0; fail_sp5: for (i = i - 1; i >= 0; i--) { dma_free_coherent(dev, xhci->page_size, xhci->scratchpad->sp_buffers[i], xhci->scratchpad->sp_dma_buffers[i]); } kfree(xhci->scratchpad->sp_dma_buffers); fail_sp4: kfree(xhci->scratchpad->sp_buffers); fail_sp3: dma_free_coherent(dev, num_sp * sizeof(u64), xhci->scratchpad->sp_array, xhci->scratchpad->sp_dma); fail_sp2: kfree(xhci->scratchpad); xhci->scratchpad = NULL; fail_sp: return -ENOMEM; } static void scratchpad_free(struct xhci_hcd *xhci) { int num_sp; int i; struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); if (!xhci->scratchpad) return; num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); for (i = 0; i < num_sp; i++) { dma_free_coherent(&pdev->dev, xhci->page_size, xhci->scratchpad->sp_buffers[i], xhci->scratchpad->sp_dma_buffers[i]); } kfree(xhci->scratchpad->sp_dma_buffers); kfree(xhci->scratchpad->sp_buffers); dma_free_coherent(&pdev->dev, num_sp * sizeof(u64), xhci->scratchpad->sp_array, xhci->scratchpad->sp_dma); kfree(xhci->scratchpad); xhci->scratchpad = NULL; } struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, bool allocate_in_ctx, bool allocate_completion, gfp_t mem_flags) { struct xhci_command *command; command = kzalloc(sizeof(*command), mem_flags); if (!command) return NULL; if (allocate_in_ctx) { command->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags); if (!command->in_ctx) { kfree(command); return NULL; } } if (allocate_completion) { command->completion = kzalloc(sizeof(struct completion), mem_flags); if (!command->completion) { xhci_free_container_ctx(xhci, command->in_ctx); kfree(command); return NULL; } init_completion(command->completion); } command->status = 0; INIT_LIST_HEAD(&command->cmd_list); return command; } void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv) { if 
(urb_priv) { kfree(urb_priv->td[0]); kfree(urb_priv); } } void xhci_free_command(struct xhci_hcd *xhci, struct xhci_command *command) { xhci_free_container_ctx(xhci, command->in_ctx); kfree(command->completion); kfree(command); } void xhci_mem_cleanup(struct xhci_hcd *xhci) { struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); struct xhci_cd *cur_cd, *next_cd; int size; int i, j, num_ports; /* Free the Event Ring Segment Table and the actual Event Ring */ size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); if (xhci->erst.entries) dma_free_coherent(&pdev->dev, size, xhci->erst.entries, xhci->erst.erst_dma_addr); xhci->erst.entries = NULL; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST"); if (xhci->event_ring) xhci_ring_free(xhci, xhci->event_ring); xhci->event_ring = NULL; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring"); if (xhci->lpm_command) xhci_free_command(xhci, xhci->lpm_command); xhci->cmd_ring_reserved_trbs = 0; if (xhci->cmd_ring) xhci_ring_free(xhci, xhci->cmd_ring); xhci->cmd_ring = NULL; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring"); list_for_each_entry_safe(cur_cd, next_cd, &xhci->cancel_cmd_list, cancel_cmd_list) { list_del(&cur_cd->cancel_cmd_list); kfree(cur_cd); } num_ports = HCS_MAX_PORTS(xhci->hcs_params1); for (i = 0; i < num_ports && xhci->rh_bw; i++) { struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table; for (j = 0; j < XHCI_MAX_INTERVAL; j++) { struct list_head *ep = &bwt->interval_bw[j].endpoints; while (!list_empty(ep)) list_del_init(ep->next); } } for (i = 1; i < MAX_HC_SLOTS; ++i) xhci_free_virt_device(xhci, i); if (xhci->segment_pool) dma_pool_destroy(xhci->segment_pool); xhci->segment_pool = NULL; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool"); if (xhci->device_pool) dma_pool_destroy(xhci->device_pool); xhci->device_pool = NULL; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool"); if (xhci->small_streams_pool) dma_pool_destroy(xhci->small_streams_pool); xhci->small_streams_pool = NULL; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed small stream array pool"); if (xhci->medium_streams_pool) dma_pool_destroy(xhci->medium_streams_pool); xhci->medium_streams_pool = NULL; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed medium stream array pool"); if (xhci->dcbaa) dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa), xhci->dcbaa, xhci->dcbaa->dma); xhci->dcbaa = NULL; scratchpad_free(xhci); if (!xhci->rh_bw) goto no_bw; for (i = 0; i < num_ports; i++) { struct xhci_tt_bw_info *tt, *n; list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) { list_del(&tt->tt_list); kfree(tt); } } no_bw: xhci->num_usb2_ports = 0; xhci->num_usb3_ports = 0; xhci->num_active_eps = 0; kfree(xhci->usb2_ports); kfree(xhci->usb3_ports); kfree(xhci->port_array); kfree(xhci->rh_bw); kfree(xhci->ext_caps); xhci->page_size = 0; xhci->page_shift = 0; xhci->bus_state[0].bus_suspended = 0; xhci->bus_state[1].bus_suspended = 0; } static int xhci_test_trb_in_td(struct xhci_hcd *xhci, struct xhci_segment *input_seg, union xhci_trb *start_trb, union xhci_trb *end_trb, dma_addr_t input_dma, struct xhci_segment *result_seg, char *test_name, int test_number) { unsigned long long start_dma; unsigned long long end_dma; struct xhci_segment *seg; start_dma = xhci_trb_virt_to_dma(input_seg, start_trb); end_dma = xhci_trb_virt_to_dma(input_seg, end_trb); seg = trb_in_td(input_seg, start_trb, end_trb, input_dma); if (seg != result_seg) { xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n", 
test_name, test_number); xhci_warn(xhci, "Tested TRB math w/ seg %p and " "input DMA 0x%llx\n", input_seg, (unsigned long long) input_dma); xhci_warn(xhci, "starting TRB %p (0x%llx DMA), " "ending TRB %p (0x%llx DMA)\n", start_trb, start_dma, end_trb, end_dma); xhci_warn(xhci, "Expected seg %p, got seg %p\n", result_seg, seg); return -1; } return 0; } /* TRB math checks for xhci_trb_in_td(), using the command and event rings. */ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags) { struct { dma_addr_t input_dma; struct xhci_segment *result_seg; } simple_test_vector [] = { /* A zeroed DMA field should fail */ { 0, NULL }, /* One TRB before the ring start should fail */ { xhci->event_ring->first_seg->dma - 16, NULL }, /* One byte before the ring start should fail */ { xhci->event_ring->first_seg->dma - 1, NULL }, /* Starting TRB should succeed */ { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg }, /* Ending TRB should succeed */ { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16, xhci->event_ring->first_seg }, /* One byte after the ring end should fail */ { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL }, /* One TRB after the ring end should fail */ { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL }, /* An address of all ones should fail */ { (dma_addr_t) (~0), NULL }, }; struct { struct xhci_segment *input_seg; union xhci_trb *start_trb; union xhci_trb *end_trb; dma_addr_t input_dma; struct xhci_segment *result_seg; } complex_test_vector [] = { /* Test feeding a valid DMA address from a different ring */ { .input_seg = xhci->event_ring->first_seg, .start_trb = xhci->event_ring->first_seg->trbs, .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], .input_dma = xhci->cmd_ring->first_seg->dma, .result_seg = NULL, }, /* Test feeding a valid end TRB from a different ring */ { .input_seg = xhci->event_ring->first_seg, .start_trb = xhci->event_ring->first_seg->trbs, .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], .input_dma = xhci->cmd_ring->first_seg->dma, .result_seg = NULL, }, /* Test feeding a valid start and end TRB from a different ring */ { .input_seg = xhci->event_ring->first_seg, .start_trb = xhci->cmd_ring->first_seg->trbs, .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], .input_dma = xhci->cmd_ring->first_seg->dma, .result_seg = NULL, }, /* TRB in this ring, but after this TD */ { .input_seg = xhci->event_ring->first_seg, .start_trb = &xhci->event_ring->first_seg->trbs[0], .end_trb = &xhci->event_ring->first_seg->trbs[3], .input_dma = xhci->event_ring->first_seg->dma + 4*16, .result_seg = NULL, }, /* TRB in this ring, but before this TD */ { .input_seg = xhci->event_ring->first_seg, .start_trb = &xhci->event_ring->first_seg->trbs[3], .end_trb = &xhci->event_ring->first_seg->trbs[6], .input_dma = xhci->event_ring->first_seg->dma + 2*16, .result_seg = NULL, }, /* TRB in this ring, but after this wrapped TD */ { .input_seg = xhci->event_ring->first_seg, .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], .end_trb = &xhci->event_ring->first_seg->trbs[1], .input_dma = xhci->event_ring->first_seg->dma + 2*16, .result_seg = NULL, }, /* TRB in this ring, but before this wrapped TD */ { .input_seg = xhci->event_ring->first_seg, .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], .end_trb = &xhci->event_ring->first_seg->trbs[1], .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16, .result_seg = 
NULL, }, /* TRB not in this ring, and we have a wrapped TD */ { .input_seg = xhci->event_ring->first_seg, .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], .end_trb = &xhci->event_ring->first_seg->trbs[1], .input_dma = xhci->cmd_ring->first_seg->dma + 2*16, .result_seg = NULL, }, }; unsigned int num_tests; int i, ret; num_tests = ARRAY_SIZE(simple_test_vector); for (i = 0; i < num_tests; i++) { ret = xhci_test_trb_in_td(xhci, xhci->event_ring->first_seg, xhci->event_ring->first_seg->trbs, &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], simple_test_vector[i].input_dma, simple_test_vector[i].result_seg, "Simple", i); if (ret < 0) return ret; } num_tests = ARRAY_SIZE(complex_test_vector); for (i = 0; i < num_tests; i++) { ret = xhci_test_trb_in_td(xhci, complex_test_vector[i].input_seg, complex_test_vector[i].start_trb, complex_test_vector[i].end_trb, complex_test_vector[i].input_dma, complex_test_vector[i].result_seg, "Complex", i); if (ret < 0) return ret; } xhci_dbg(xhci, "TRB math tests passed.\n"); return 0; } static void xhci_set_hc_event_deq(struct xhci_hcd *xhci) { u64 temp; dma_addr_t deq; deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, xhci->event_ring->dequeue); if (deq == 0 && !in_interrupt()) xhci_warn(xhci, "WARN something wrong with SW event ring " "dequeue ptr.\n"); /* Update HC event ring dequeue pointer */ temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); temp &= ERST_PTR_MASK; /* Don't clear the EHB bit (which is RW1C) because * there might be more events to service. */ temp &= ~ERST_EHB; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Write event ring dequeue pointer, " "preserving EHB bit"); xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp, &xhci->ir_set->erst_dequeue); } static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, __le32 __iomem *addr, u8 major_revision, int max_caps) { u32 temp, port_offset, port_count; int i; if (major_revision > 0x03) { xhci_warn(xhci, "Ignoring unknown port speed, " "Ext Cap %p, revision = 0x%x\n", addr, major_revision); /* Ignoring port protocol we can't understand. FIXME */ return; } /* Port offset and count in the third dword, see section 7.2 */ temp = xhci_readl(xhci, addr + 2); port_offset = XHCI_EXT_PORT_OFF(temp); port_count = XHCI_EXT_PORT_COUNT(temp); xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Ext Cap %p, port offset = %u, " "count = %u, revision = 0x%x", addr, port_offset, port_count, major_revision); /* Port count includes the current port offset */ if (port_offset == 0 || (port_offset + port_count - 1) > num_ports) /* WTF? "Valid values are ‘1’ to MaxPorts" */ return; /* cache usb2 port capabilities */ if (major_revision < 0x03 && xhci->num_ext_caps < max_caps) xhci->ext_caps[xhci->num_ext_caps++] = temp; /* Check the host's USB2 LPM capability */ if ((xhci->hci_version == 0x96) && (major_revision != 0x03) && (temp & XHCI_L1C)) { xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xHCI 0.96: support USB2 software lpm"); xhci->sw_lpm_support = 1; } if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) { xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xHCI 1.0: support USB2 software lpm"); xhci->sw_lpm_support = 1; if (temp & XHCI_HLC) { xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xHCI 1.0: support USB2 hardware lpm"); xhci->hw_lpm_support = 1; } } port_offset--; for (i = port_offset; i < (port_offset + port_count); i++) { /* Duplicate entry. Ignore the port if the revisions differ. 
*/ if (xhci->port_array[i] != 0) { xhci_warn(xhci, "Duplicate port entry, Ext Cap %p," " port %u\n", addr, i); xhci_warn(xhci, "Port was marked as USB %u, " "duplicated as USB %u\n", xhci->port_array[i], major_revision); /* Only adjust the roothub port counts if we haven't * found a similar duplicate. */ if (xhci->port_array[i] != major_revision && xhci->port_array[i] != DUPLICATE_ENTRY) { if (xhci->port_array[i] == 0x03) xhci->num_usb3_ports--; else xhci->num_usb2_ports--; xhci->port_array[i] = DUPLICATE_ENTRY; } /* FIXME: Should we disable the port? */ continue; } xhci->port_array[i] = major_revision; if (major_revision == 0x03) xhci->num_usb3_ports++; else xhci->num_usb2_ports++; } /* FIXME: Should we disable ports not in the Extended Capabilities? */ } /* * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that * specify what speeds each port is supposed to be. We can't count on the port * speed bits in the PORTSC register being correct until a device is connected, * but we need to set up the two fake roothubs with the correct number of USB * 3.0 and USB 2.0 ports at host controller initialization time. */ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) { __le32 __iomem *addr, *tmp_addr; u32 offset, tmp_offset; unsigned int num_ports; int i, j, port_index; int cap_count = 0; addr = &xhci->cap_regs->hcc_params; offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr)); if (offset == 0) { xhci_err(xhci, "No Extended Capability registers, " "unable to set up roothub.\n"); return -ENODEV; } num_ports = HCS_MAX_PORTS(xhci->hcs_params1); xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags); if (!xhci->port_array) return -ENOMEM; xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags); if (!xhci->rh_bw) return -ENOMEM; for (i = 0; i < num_ports; i++) { struct xhci_interval_bw_table *bw_table; INIT_LIST_HEAD(&xhci->rh_bw[i].tts); bw_table = &xhci->rh_bw[i].bw_table; for (j = 0; j < XHCI_MAX_INTERVAL; j++) INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints); } /* * For whatever reason, the first capability offset is from the * capability register base, not from the HCCPARAMS register. * See section 5.3.6 for offset calculation. */ addr = &xhci->cap_regs->hc_capbase + offset; tmp_addr = addr; tmp_offset = offset; /* count extended protocol capability entries for later caching */ do { u32 cap_id; cap_id = xhci_readl(xhci, tmp_addr); if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL) cap_count++; tmp_offset = XHCI_EXT_CAPS_NEXT(cap_id); tmp_addr += tmp_offset; } while (tmp_offset); xhci->ext_caps = kzalloc(sizeof(*xhci->ext_caps) * cap_count, flags); if (!xhci->ext_caps) return -ENOMEM; while (1) { u32 cap_id; cap_id = xhci_readl(xhci, addr); if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL) xhci_add_in_port(xhci, num_ports, addr, (u8) XHCI_EXT_PORT_MAJOR(cap_id), cap_count); offset = XHCI_EXT_CAPS_NEXT(cap_id); if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports) == num_ports) break; /* * Once you're into the Extended Capabilities, the offset is * always relative to the register holding the offset. */ addr += offset; } if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) { xhci_warn(xhci, "No ports on the roothubs?\n"); return -ENODEV; } xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Found %u USB 2.0 ports and %u USB 3.0 ports.", xhci->num_usb2_ports, xhci->num_usb3_ports); /* Place limits on the number of roothub ports so that the hub * descriptors aren't longer than the USB core will allocate. 
*/ if (xhci->num_usb3_ports > 15) { xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Limiting USB 3.0 roothub ports to 15."); xhci->num_usb3_ports = 15; } if (xhci->num_usb2_ports > USB_MAXCHILDREN) { xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Limiting USB 2.0 roothub ports to %u.", USB_MAXCHILDREN); xhci->num_usb2_ports = USB_MAXCHILDREN; } /* * Note we could have all USB 3.0 ports, or all USB 2.0 ports. * Not sure how the USB core will handle a hub with no ports... */ if (xhci->num_usb2_ports) { xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)* xhci->num_usb2_ports, flags); if (!xhci->usb2_ports) return -ENOMEM; port_index = 0; for (i = 0; i < num_ports; i++) { if (xhci->port_array[i] == 0x03 || xhci->port_array[i] == 0 || xhci->port_array[i] == DUPLICATE_ENTRY) continue; xhci->usb2_ports[port_index] = &xhci->op_regs->port_status_base + NUM_PORT_REGS*i; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "USB 2.0 port at index %u, " "addr = %p", i, xhci->usb2_ports[port_index]); port_index++; if (port_index == xhci->num_usb2_ports) break; } } if (xhci->num_usb3_ports) { xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)* xhci->num_usb3_ports, flags); if (!xhci->usb3_ports) return -ENOMEM; port_index = 0; for (i = 0; i < num_ports; i++) if (xhci->port_array[i] == 0x03) { xhci->usb3_ports[port_index] = &xhci->op_regs->port_status_base + NUM_PORT_REGS*i; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "USB 3.0 port at index %u, " "addr = %p", i, xhci->usb3_ports[port_index]); port_index++; if (port_index == xhci->num_usb3_ports) break; } } return 0; } int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) { dma_addr_t dma; struct device *dev = xhci_to_hcd(xhci)->self.controller; unsigned int val, val2; u64 val_64; struct xhci_segment *seg; u32 page_size, temp; int i; INIT_LIST_HEAD(&xhci->cancel_cmd_list); page_size = xhci_readl(xhci, &xhci->op_regs->page_size); xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Supported page size register = 0x%x", page_size); for (i = 0; i < 16; i++) { if ((0x1 & page_size) != 0) break; page_size = page_size >> 1; } if (i < 16) xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Supported page size of %iK", (1 << (i+12)) / 1024); else xhci_warn(xhci, "WARN: no supported page size\n"); /* Use 4K pages, since that's common and the minimum the HC supports */ xhci->page_shift = 12; xhci->page_size = 1 << xhci->page_shift; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "HCD page size set to %iK", xhci->page_size / 1024); /* * Program the Number of Device Slots Enabled field in the CONFIG * register with the max value of slots the HC can handle. */ val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1)); xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// xHC can handle at most %d device slots.", val); val2 = xhci_readl(xhci, &xhci->op_regs->config_reg); val |= (val2 & ~HCS_SLOTS_MASK); xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Setting Max device slots reg = 0x%x.", val); xhci_writel(xhci, val, &xhci->op_regs->config_reg); /* * Section 5.4.8 - doorbell array must be * "physically contiguous and 64-byte (cache line) aligned". */ xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma, GFP_KERNEL); if (!xhci->dcbaa) goto fail; memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa)); xhci->dcbaa->dma = dma; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Device context base array address = 0x%llx (DMA), %p (virt)", (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr); /* * Initialize the ring segment pool. 
The ring must be a contiguous * structure comprised of TRBs. The TRBs must be 16 byte aligned, * however, the command ring segment needs 64-byte aligned segments, * so we pick the greater alignment need. */ xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, TRB_SEGMENT_SIZE, 64, xhci->page_size); /* See Table 46 and Note on Figure 55 */ xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, 2112, 64, xhci->page_size); if (!xhci->segment_pool || !xhci->device_pool) goto fail; /* Linear stream context arrays don't have any boundary restrictions, * and only need to be 16-byte aligned. */ xhci->small_streams_pool = dma_pool_create("xHCI 256 byte stream ctx arrays", dev, SMALL_STREAM_ARRAY_SIZE, 16, 0); xhci->medium_streams_pool = dma_pool_create("xHCI 1KB stream ctx arrays", dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0); /* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE * will be allocated with dma_alloc_coherent() */ if (!xhci->small_streams_pool || !xhci->medium_streams_pool) goto fail; /* Set up the command ring to have one segments for now. */ xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags); if (!xhci->cmd_ring) goto fail; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Allocated command ring at %p", xhci->cmd_ring); xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx", (unsigned long long)xhci->cmd_ring->first_seg->dma); /* Set the address in the Command Ring Control register */ val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | xhci->cmd_ring->cycle_state; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Setting command ring address to 0x%x", val); xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); xhci_dbg_cmd_ptrs(xhci); xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags); if (!xhci->lpm_command) goto fail; /* Reserve one command ring TRB for disabling LPM. * Since the USB core grabs the shared usb_bus bandwidth mutex before * disabling LPM, we only need to reserve one TRB for all devices. */ xhci->cmd_ring_reserved_trbs++; val = xhci_readl(xhci, &xhci->cap_regs->db_off); val &= DBOFF_MASK; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Doorbell array is located at offset 0x%x" " from cap regs base addr", val); xhci->dba = (void __iomem *) xhci->cap_regs + val; xhci_dbg_regs(xhci); xhci_print_run_regs(xhci); /* Set ir_set to interrupt register set 0 */ xhci->ir_set = &xhci->run_regs->ir_set[0]; /* * Event ring setup: Allocate a normal ring, but also setup * the event ring segment table (ERST). Section 4.9.3. 
*/ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring"); xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT, flags); if (!xhci->event_ring) goto fail; if (xhci_check_trb_in_td_math(xhci, flags) < 0) goto fail; xhci->erst.entries = dma_alloc_coherent(dev, sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma, GFP_KERNEL); if (!xhci->erst.entries) goto fail; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocated event ring segment table at 0x%llx", (unsigned long long)dma); memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS); xhci->erst.num_entries = ERST_NUM_SEGS; xhci->erst.erst_dma_addr = dma; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx", xhci->erst.num_entries, xhci->erst.entries, (unsigned long long)xhci->erst.erst_dma_addr); /* set ring base address and size for each segment table entry */ for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) { struct xhci_erst_entry *entry = &xhci->erst.entries[val]; entry->seg_addr = cpu_to_le64(seg->dma); entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT); entry->rsvd = 0; seg = seg->next; } /* set ERST count with the number of entries in the segment table */ val = xhci_readl(xhci, &xhci->ir_set->erst_size); val &= ERST_SIZE_MASK; val |= ERST_NUM_SEGS; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Write ERST size = %i to ir_set 0 (some bits preserved)", val); xhci_writel(xhci, val, &xhci->ir_set->erst_size); xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Set ERST entries to point to event ring."); /* set the segment table base address */ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Set ERST base address for ir_set 0 = 0x%llx", (unsigned long long)xhci->erst.erst_dma_addr); val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base); val_64 &= ERST_PTR_MASK; val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK); xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base); /* Set the event ring dequeue address */ xhci_set_hc_event_deq(xhci); xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Wrote ERST address to ir_set 0."); xhci_print_ir_set(xhci, 0); /* * XXX: Might need to set the Interrupter Moderation Register to * something other than the default (~1ms minimum between interrupts). * See section 5.5.1.2. */ init_completion(&xhci->addr_dev); for (i = 0; i < MAX_HC_SLOTS; ++i) xhci->devs[i] = NULL; for (i = 0; i < USB_MAXCHILDREN; ++i) { xhci->bus_state[0].resume_done[i] = 0; xhci->bus_state[1].resume_done[i] = 0; /* Only the USB 2.0 completions will ever be used. */ init_completion(&xhci->bus_state[1].rexit_done[i]); } if (scratchpad_alloc(xhci, flags)) goto fail; if (xhci_setup_port_arrays(xhci, flags)) goto fail; /* Enable USB 3.0 device notifications for function remote wake, which * is necessary for allowing USB 3.0 devices to do remote wakeup from * U3 (device suspend). */ temp = xhci_readl(xhci, &xhci->op_regs->dev_notification); temp &= ~DEV_NOTE_MASK; temp |= DEV_NOTE_FWAKE; xhci_writel(xhci, temp, &xhci->op_regs->dev_notification); return 0; fail: xhci_warn(xhci, "Couldn't initialize memory\n"); xhci_halt(xhci); xhci_reset(xhci); xhci_mem_cleanup(xhci); return -ENOMEM; }
ULL-ETSII-SistemasEmpotrados/Mirasberry
drivers/usb/host/xhci-mem.c
C
gpl-2.0
74,811
#content{ height:auto!important; height:120px; min-height:120px; font-size:16px; margin:0 auto 20px auto; width:100%; }
#maintable{ text-align:center; font-size:14px; width:100%; margin:10px auto; }
#maintable tr{ height:19px; }
.tbheader{ font-family:Arial, Helvetica, sans-serif; font-size:12px; height:16px; background-color:#1a5cc8; color:white; }
.tbheader a{ color:white; }
.contype{ font-size:14px; margin:10px auto; text-align:center; }
.ctpublic{ color:red; }
.ctprivate{ color:green; }
.cspend{ color:green; }
.csend{ color:gray; }
.csrun{ color:red; }
#content h4{ font-weight:normal; margin:3px auto 0; text-align:center; color:#1a5cc8; font-size:14px; }
#content h2{ margin:5px auto; text-align:center; font-size:18px; }
#content #errormsg{ font-family:Arial, Helvetica, sans-serif; font-size:13px; font-weight:bold; width:100%; color:red; text-align:center; margin:5px auto; padding:0; }
/* Forms related to the WEB Contest */
#content #mkcon{ width:68%; margin:20px auto 0 200px; }
#content #mkcon p{ margin:10px auto; }
#content #mkcon .timeinfo input{ width:40px; }
#content #mkcon .timeinfo span{ border:#b7cbff 1px dashed; }
#content #mkcon #webconname{ width:414px; margin-left:11px; }
#content #mkcon #webyear{ margin-left:36px; }
#content #mkcon #weblday{ margin-left:21px; }
#content #contesttype{ margin-left:16px; border:none; }
#content #contesttype2{ margin-left:16px; border:none; }
#content #mkcon #password{ margin-left:33px; width:152px; }
#content #mkpro{ width:50%; margin:20px auto 20px 310px; }
#content #mkpro h1{ font-size:16px; }
#content #mkpro p{ text-align:center; }
#content #mkpro #webspid{ margin-left:0px; width:80px; }
#content #mknotice{ margin:auto; width:100%; height:100px; }
fangtanchen/Learning
1_Algorithm/hdu/problem/1019 Least Common Multiple/Problem - 1019_files/webinhead.css
CSS
gpl-2.0
1,921
/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "%s: " fmt "\n", __func__ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/wait.h> #include <linux/cdev.h> #include <linux/ktime.h> #include <linux/debugfs.h> #include <linux/usb/ccid_bridge.h> #define CCID_CLASS_DECRIPTOR_TYPE 0x21 #define CCID_NOTIFY_SLOT_CHANGE 0x50 #define CCID_NOTIFY_HARDWARE_ERROR 0x51 #define CCID_ABORT_REQ 0x1 #define CCID_GET_CLK_FREQ_REQ 0x2 #define CCID_GET_DATA_RATES 0x3 #define CCID_BRIDGE_MSG_SZ 512 #define CCID_BRIDGE_OPEN_TIMEOUT 500 /* msec */ #define CCID_CONTROL_TIMEOUT 500 /* msec */ #define CCID_BRIDGE_MSG_TIMEOUT 1000 /* msec */ static unsigned ccid_bulk_msg_timeout = CCID_BRIDGE_MSG_TIMEOUT; module_param_named(bulk_msg_timeout, ccid_bulk_msg_timeout, uint, 0644); MODULE_PARM_DESC(bulk_msg_timeout, "Bulk message timeout (msecs)"); struct ccid_bridge { struct usb_device *udev; struct usb_interface *intf; unsigned int in_pipe; unsigned int out_pipe; unsigned int int_pipe; struct urb *inturb; struct urb *readurb; struct urb *writeurb; bool opened; bool events_supported; bool is_suspended; struct mutex open_mutex; struct mutex write_mutex; struct mutex read_mutex; struct mutex event_mutex; int write_result; int read_result; int event_result; wait_queue_head_t open_wq; wait_queue_head_t write_wq; wait_queue_head_t read_wq; wait_queue_head_t event_wq; struct usb_ccid_event cur_event; void *intbuf; dev_t chrdev; struct cdev cdev; struct class *class; struct device *device; struct dentry *dbg_root; unsigned n_write; unsigned n_read; unsigned n_write_timeout; unsigned n_read_timeout; unsigned long write_max_time; unsigned long read_max_time; }; static struct ccid_bridge *__ccid_bridge_dev; static void ccid_bridge_out_cb(struct urb *urb) { struct ccid_bridge *ccid = urb->context; if (urb->dev->state == USB_STATE_NOTATTACHED) ccid->write_result = -ENODEV; else ccid->write_result = urb->status ? : urb->actual_length; pr_debug("write result = %d", ccid->write_result); wake_up(&ccid->write_wq); } static void ccid_bridge_in_cb(struct urb *urb) { struct ccid_bridge *ccid = urb->context; if (urb->dev->state == USB_STATE_NOTATTACHED) ccid->read_result = -ENODEV; else ccid->read_result = urb->status ? : urb->actual_length; pr_debug("read result = %d", ccid->read_result); wake_up(&ccid->read_wq); } static void ccid_bridge_int_cb(struct urb *urb) { struct ccid_bridge *ccid = urb->context; u8 *msg_type; bool wakeup = true; if (urb->dev->state == USB_STATE_NOTATTACHED || (urb->status && urb->status != -ENOENT)) { ccid->event_result = -ENODEV; wakeup = true; goto out; } /* * Don't wakeup the event ioctl process during suspend. * The suspend state is not visible to user space. * we wake up the process after resume to send RESUME * event if the device supports remote wakeup. 
*/ if (urb->status == -ENOENT && !urb->actual_length) { ccid->event_result = -ENOENT; wakeup = false; goto out; } ccid->event_result = 0; msg_type = urb->transfer_buffer; switch (*msg_type) { case CCID_NOTIFY_SLOT_CHANGE: pr_debug("NOTIFY_SLOT_CHANGE event arrived"); ccid->cur_event.event = USB_CCID_NOTIFY_SLOT_CHANGE_EVENT; ccid->cur_event.u.notify.slot_icc_state = *(++msg_type); break; case CCID_NOTIFY_HARDWARE_ERROR: pr_debug("NOTIFY_HARDWARE_ERROR event arrived"); ccid->cur_event.event = USB_CCID_HARDWARE_ERROR_EVENT; ccid->cur_event.u.error.slot = *(++msg_type); ccid->cur_event.u.error.seq = *(++msg_type); ccid->cur_event.u.error.error_code = *(++msg_type); break; default: pr_err("UNKNOWN event arrived\n"); ccid->event_result = -EINVAL; } out: pr_debug("returning %d", ccid->event_result); if (wakeup) wake_up(&ccid->event_wq); } static int ccid_bridge_submit_inturb(struct ccid_bridge *ccid) { int ret = 0; /* * Don't resume the bus to submit an interrupt URB. * We submit the URB in resume path. This is important. * Because the device will be in suspend state during * multiple system suspend/resume cycles. The user space * process comes here during system resume after it is * unfrozen. */ if (!ccid->int_pipe || ccid->is_suspended) goto out; ret = usb_autopm_get_interface(ccid->intf); if (ret < 0) { pr_debug("fail to get autopm with %d\n", ret); goto out; } ret = usb_submit_urb(ccid->inturb, GFP_KERNEL); if (ret < 0) pr_err("fail to submit int urb with %d\n", ret); usb_autopm_put_interface(ccid->intf); out: pr_debug("returning %d", ret); return ret; } static int ccid_bridge_get_event(struct ccid_bridge *ccid) { int ret = 0; /* * The first event returned after the device resume * will be RESUME event. This event is set by * the resume. */ if (ccid->cur_event.event) goto out; ccid->event_result = -EINPROGRESS; ret = ccid_bridge_submit_inturb(ccid); if (ret < 0) goto out; /* * Wait for the notification on interrupt endpoint * or remote wakeup event from the resume. The * int urb completion handler and resume callback * take care of setting the current event. 
*/ mutex_unlock(&ccid->event_mutex); ret = wait_event_interruptible(ccid->event_wq, (ccid->event_result != -EINPROGRESS)); mutex_lock(&ccid->event_mutex); if (ret == -ERESTARTSYS) /* interrupted */ usb_kill_urb(ccid->inturb); else ret = ccid->event_result; out: pr_debug("returning %d", ret); return ret; } static int ccid_bridge_open(struct inode *ip, struct file *fp) { struct ccid_bridge *ccid = container_of(ip->i_cdev, struct ccid_bridge, cdev); int ret; pr_debug("called"); mutex_lock(&ccid->open_mutex); if (ccid->opened) { ret = -EBUSY; goto out; } mutex_unlock(&ccid->open_mutex); ret = wait_event_interruptible_timeout(ccid->open_wq, ccid->intf != NULL, msecs_to_jiffies( CCID_BRIDGE_OPEN_TIMEOUT)); mutex_lock(&ccid->open_mutex); if (ret != -ERESTARTSYS && ccid->intf) { fp->private_data = ccid; ccid->opened = true; ret = 0; } else if (!ret) { /* timed out */ ret = -ENODEV; } out: mutex_unlock(&ccid->open_mutex); pr_debug("returning %d", ret); return ret; } static ssize_t ccid_bridge_write(struct file *fp, const char __user *ubuf, size_t count, loff_t *pos) { struct ccid_bridge *ccid = fp->private_data; int ret; char *kbuf; ktime_t start_t, delta_t; pr_debug("called with %zu", count); if (!ccid->intf) { pr_debug("intf is not active"); return -ENODEV; } mutex_lock(&ccid->write_mutex); ccid->n_write++; start_t = ktime_get(); if (!count || count > CCID_BRIDGE_MSG_SZ) { pr_err("invalid count"); ret = -EINVAL; goto out; } kbuf = kmalloc(count, GFP_KERNEL); if (!kbuf) { pr_err("fail to allocate memory"); ret = -ENOMEM; goto out; } ret = copy_from_user(kbuf, ubuf, count); if (ret) { pr_err("fail to copy user buf"); ret = -EFAULT; goto free_kbuf; } ret = usb_autopm_get_interface(ccid->intf); if (ret) { pr_err("fail to get autopm with %d", ret); goto free_kbuf; } ccid->write_result = 0; usb_fill_bulk_urb(ccid->writeurb, ccid->udev, ccid->out_pipe, kbuf, count, ccid_bridge_out_cb, ccid); ret = usb_submit_urb(ccid->writeurb, GFP_KERNEL); if (ret < 0) { pr_err("urb submit fail with %d", ret); goto put_pm; } ret = wait_event_interruptible_timeout(ccid->write_wq, ccid->write_result != 0, msecs_to_jiffies(ccid_bulk_msg_timeout)); if (!ret || ret == -ERESTARTSYS) { /* timedout or interrupted */ usb_kill_urb(ccid->writeurb); if (!ret) { ccid->n_write_timeout++; ret = -ETIMEDOUT; } } else { ret = ccid->write_result; } if (ret >= 0) { delta_t = ktime_sub(ktime_get(), start_t); if (ktime_to_ms(delta_t) > ccid->write_max_time) ccid->write_max_time = ktime_to_ms(delta_t); } pr_debug("returning %d", ret); put_pm: if (ret != -ENODEV) usb_autopm_put_interface(ccid->intf); free_kbuf: kfree(kbuf); out: mutex_unlock(&ccid->write_mutex); return ret; } static ssize_t ccid_bridge_read(struct file *fp, char __user *ubuf, size_t count, loff_t *pos) { struct ccid_bridge *ccid = fp->private_data; int ret; char *kbuf; ktime_t start_t, delta_t; pr_debug("called with %zu", count); if (!ccid->intf) { pr_debug("intf is not active"); return -ENODEV; } mutex_lock(&ccid->read_mutex); ccid->n_read++; start_t = ktime_get(); if (!count || count > CCID_BRIDGE_MSG_SZ) { pr_err("invalid count"); ret = -EINVAL; goto out; } kbuf = kmalloc(count, GFP_KERNEL); if (!kbuf) { pr_err("fail to allocate memory"); ret = -ENOMEM; goto out; } ret = usb_autopm_get_interface(ccid->intf); if (ret) { pr_err("fail to get autopm with %d", ret); goto free_kbuf; } ccid->read_result = 0; usb_fill_bulk_urb(ccid->readurb, ccid->udev, ccid->in_pipe, kbuf, count, ccid_bridge_in_cb, ccid); ret = usb_submit_urb(ccid->readurb, GFP_KERNEL); if (ret < 0) { pr_err("urb 
submit fail with %d", ret); if (ret != -ENODEV) usb_autopm_put_interface(ccid->intf); goto free_kbuf; } ret = wait_event_interruptible_timeout(ccid->read_wq, ccid->read_result != 0, msecs_to_jiffies(ccid_bulk_msg_timeout)); if (!ret || ret == -ERESTARTSYS) { /* timedout or interrupted */ usb_kill_urb(ccid->readurb); if (!ret) { ccid->n_read_timeout++; ret = -ETIMEDOUT; } } else { ret = ccid->read_result; } if (ret > 0) { if (copy_to_user(ubuf, kbuf, ret)) ret = -EFAULT; delta_t = ktime_sub(ktime_get(), start_t); if (ktime_to_ms(delta_t) > ccid->read_max_time) ccid->read_max_time = ktime_to_ms(delta_t); } usb_autopm_put_interface(ccid->intf); pr_debug("returning %d", ret); free_kbuf: kfree(kbuf); out: mutex_unlock(&ccid->read_mutex); return ret; } static long ccid_bridge_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) { struct ccid_bridge *ccid = fp->private_data; char *buf; struct usb_ccid_data data; struct usb_ccid_abort abort; struct usb_descriptor_header *header; int ret; struct usb_device *udev = ccid->udev; __u8 intf = ccid->intf->cur_altsetting->desc.bInterfaceNumber; __u8 breq = 0; if (!ccid->intf) { pr_debug("intf is not active"); return -ENODEV; } mutex_lock(&ccid->event_mutex); switch (cmd) { case USB_CCID_GET_CLASS_DESC: pr_debug("GET_CLASS_DESC ioctl called"); ret = copy_from_user(&data, (void __user *)arg, sizeof(data)); if (ret) { ret = -EFAULT; break; } ret = __usb_get_extra_descriptor(udev->rawdescriptors[0], le16_to_cpu(udev->config[0].desc.wTotalLength), CCID_CLASS_DECRIPTOR_TYPE, (void **) &buf); if (ret) { ret = -ENOENT; break; } header = (struct usb_descriptor_header *) buf; if (data.length != header->bLength) { ret = -EINVAL; break; } ret = copy_to_user((void __user *)data.data, buf, data.length); if (ret) ret = -EFAULT; break; case USB_CCID_GET_CLOCK_FREQUENCIES: pr_debug("GET_CLOCK_FREQUENCIES ioctl called"); breq = CCID_GET_CLK_FREQ_REQ; /* fall through */ case USB_CCID_GET_DATA_RATES: if (!breq) { pr_debug("GET_DATA_RATES ioctl called"); breq = CCID_GET_DATA_RATES; } ret = copy_from_user(&data, (void __user *)arg, sizeof(data)); if (ret) { ret = -EFAULT; break; } buf = kmalloc(data.length, GFP_KERNEL); if (!buf) { ret = -ENOMEM; break; } ret = usb_autopm_get_interface(ccid->intf); if (ret < 0) { pr_debug("fail to get autopm with %d", ret); break; } ret = usb_control_msg(ccid->udev, usb_rcvctrlpipe(ccid->udev, 0), breq, (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE), 0, intf, buf, data.length, CCID_CONTROL_TIMEOUT); usb_autopm_put_interface(ccid->intf); if (ret == data.length) { ret = copy_to_user((void __user *)data.data, buf, data.length); if (ret) ret = -EFAULT; } else { if (ret > 0) ret = -EPIPE; } kfree(buf); break; case USB_CCID_ABORT: pr_debug("ABORT ioctl called"); breq = CCID_ABORT_REQ; ret = copy_from_user(&abort, (void __user *)arg, sizeof(abort)); if (ret) { ret = -EFAULT; break; } ret = usb_autopm_get_interface(ccid->intf); if (ret < 0) { pr_debug("fail to get autopm with %d", ret); break; } ret = usb_control_msg(ccid->udev, usb_sndctrlpipe(ccid->udev, 0), breq, (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE), (abort.seq << 8) | abort.slot, intf, NULL, 0, CCID_CONTROL_TIMEOUT); if (ret < 0) pr_err("abort request failed with err %d\n", ret); usb_autopm_put_interface(ccid->intf); break; case USB_CCID_GET_EVENT: pr_debug("GET_EVENT ioctl called"); if (!ccid->events_supported) { ret = -ENOENT; break; } ret = ccid_bridge_get_event(ccid); if (ret == 0) { ret = copy_to_user((void __user *)arg, &ccid->cur_event, sizeof(ccid->cur_event)); 
if (ret) ret = -EFAULT; } ccid->cur_event.event = 0; break; default: pr_err("UNKNOWN ioctl called"); ret = -EINVAL; break; } mutex_unlock(&ccid->event_mutex); pr_debug("returning %d", ret); return ret; } static void ccid_bridge_reset_stats(struct ccid_bridge *ccid) { ccid->n_write = 0; ccid->n_read = 0; ccid->n_write_timeout = 0; ccid->n_read_timeout = 0; ccid->write_max_time = 0; ccid->read_max_time = 0; } static int ccid_bridge_release(struct inode *ip, struct file *fp) { struct ccid_bridge *ccid = fp->private_data; pr_debug("called"); mutex_lock(&ccid->open_mutex); if (ccid->intf == NULL) { ccid->opened = false; mutex_unlock(&ccid->open_mutex); goto done; } mutex_unlock(&ccid->open_mutex); usb_kill_urb(ccid->writeurb); usb_kill_urb(ccid->readurb); if (ccid->int_pipe) usb_kill_urb(ccid->inturb); ccid->event_result = -EIO; wake_up(&ccid->event_wq); mutex_lock(&ccid->open_mutex); ccid->opened = false; mutex_unlock(&ccid->open_mutex); done: ccid_bridge_reset_stats(ccid); return 0; } static const struct file_operations ccid_bridge_fops = { .owner = THIS_MODULE, .open = ccid_bridge_open, .write = ccid_bridge_write, .read = ccid_bridge_read, .unlocked_ioctl = ccid_bridge_ioctl, .release = ccid_bridge_release, }; static int ccid_bridge_suspend(struct usb_interface *intf, pm_message_t message) { struct ccid_bridge *ccid = usb_get_intfdata(intf); int ret = 0; pr_debug("called"); if (!ccid->opened) goto out; mutex_lock(&ccid->event_mutex); if (ccid->int_pipe) { usb_kill_urb(ccid->inturb); if (ccid->event_result != -ENOENT) { ret = -EBUSY; goto rel_mutex; } } ccid->is_suspended = true; rel_mutex: mutex_unlock(&ccid->event_mutex); out: pr_debug("returning %d", ret); return ret; } static int ccid_bridge_resume(struct usb_interface *intf) { struct ccid_bridge *ccid = usb_get_intfdata(intf); int ret; pr_debug("called"); if (!ccid->opened) goto out; mutex_lock(&ccid->event_mutex); ccid->is_suspended = false; if (device_can_wakeup(&ccid->udev->dev)) { ccid->event_result = 0; ccid->cur_event.event = USB_CCID_RESUME_EVENT; wake_up(&ccid->event_wq); } else if (ccid->int_pipe) { ccid->event_result = -EINPROGRESS; ret = usb_submit_urb(ccid->inturb, GFP_KERNEL); if (ret < 0) pr_debug("fail to submit inturb with %d\n", ret); } mutex_unlock(&ccid->event_mutex); out: return 0; } static int ccid_bridge_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct ccid_bridge *ccid = __ccid_bridge_dev; struct usb_host_interface *intf_desc; struct usb_endpoint_descriptor *ep_desc; struct usb_host_endpoint *ep; __u8 epin_addr = 0, epout_addr = 0, epint_addr = 0; int i, ret; intf_desc = intf->cur_altsetting; if (intf_desc->desc.bNumEndpoints > 3) return -ENODEV; for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) { ep_desc = &intf_desc->endpoint[i].desc; if (usb_endpoint_is_bulk_in(ep_desc)) epin_addr = ep_desc->bEndpointAddress; else if (usb_endpoint_is_bulk_out(ep_desc)) epout_addr = ep_desc->bEndpointAddress; else if (usb_endpoint_is_int_in(ep_desc)) epint_addr = ep_desc->bEndpointAddress; else return -ENODEV; } if (!epin_addr || !epout_addr) return -ENODEV; ccid->udev = usb_get_dev(interface_to_usbdev(intf)); ccid->in_pipe = usb_rcvbulkpipe(ccid->udev, epin_addr); ccid->out_pipe = usb_sndbulkpipe(ccid->udev, epout_addr); if (epint_addr) ccid->int_pipe = usb_rcvbulkpipe(ccid->udev, epint_addr); ccid->writeurb = usb_alloc_urb(0, GFP_KERNEL); if (!ccid->writeurb) { pr_err("fail to allocate write urb"); ret = -ENOMEM; goto put_udev; } ccid->readurb = usb_alloc_urb(0, GFP_KERNEL); if (!ccid->readurb) { 
pr_err("fail to allocate read urb"); ret = -ENOMEM; goto free_writeurb; } if (ccid->int_pipe) { pr_debug("interrupt endpoint is present"); ep = usb_pipe_endpoint(ccid->udev, ccid->int_pipe); ccid->inturb = usb_alloc_urb(0, GFP_KERNEL); if (!ccid->inturb) { pr_err("fail to allocate int urb"); ret = -ENOMEM; goto free_readurb; } ccid->intbuf = kmalloc(usb_endpoint_maxp(&ep->desc), GFP_KERNEL); if (!ccid->intbuf) { pr_err("fail to allocated int buf"); ret = -ENOMEM; goto free_inturb; } usb_fill_int_urb(ccid->inturb, ccid->udev, usb_rcvintpipe(ccid->udev, epint_addr), ccid->intbuf, usb_endpoint_maxp(&ep->desc), ccid_bridge_int_cb, ccid, ep->desc.bInterval); } if (ccid->int_pipe || device_can_wakeup(&ccid->udev->dev)) { pr_debug("event support is present"); ccid->events_supported = true; } usb_set_intfdata(intf, ccid); usb_enable_autosuspend(ccid->udev); mutex_lock(&ccid->open_mutex); ccid->intf = intf; wake_up(&ccid->open_wq); mutex_unlock(&ccid->open_mutex); pr_info("success"); return 0; free_inturb: if (ccid->int_pipe) usb_free_urb(ccid->inturb); free_readurb: usb_free_urb(ccid->readurb); free_writeurb: usb_free_urb(ccid->writeurb); put_udev: usb_put_dev(ccid->udev); return ret; } static void ccid_bridge_disconnect(struct usb_interface *intf) { struct ccid_bridge *ccid = usb_get_intfdata(intf); pr_debug("called"); usb_kill_urb(ccid->writeurb); usb_kill_urb(ccid->readurb); if (ccid->int_pipe) usb_kill_urb(ccid->inturb); ccid->event_result = -ENODEV; wake_up(&ccid->event_wq); /* * This would synchronize any ongoing read/write/ioctl. * After acquiring the mutex, we can safely set * intf to NULL. */ mutex_lock(&ccid->open_mutex); mutex_lock(&ccid->write_mutex); mutex_lock(&ccid->read_mutex); mutex_lock(&ccid->event_mutex); usb_free_urb(ccid->writeurb); usb_free_urb(ccid->readurb); if (ccid->int_pipe) { usb_free_urb(ccid->inturb); kfree(ccid->intbuf); ccid->int_pipe = 0; } ccid->intf = NULL; usb_put_dev(ccid->udev); mutex_unlock(&ccid->event_mutex); mutex_unlock(&ccid->read_mutex); mutex_unlock(&ccid->write_mutex); mutex_unlock(&ccid->open_mutex); } static const struct usb_device_id ccid_bridge_ids[] = { { USB_INTERFACE_INFO(USB_CLASS_CSCID, 0, 0) }, {} /* terminating entry */ }; MODULE_DEVICE_TABLE(usb, ccid_bridge_ids); static struct usb_driver ccid_bridge_driver = { .name = "ccid_bridge", .probe = ccid_bridge_probe, .disconnect = ccid_bridge_disconnect, .suspend = ccid_bridge_suspend, .resume = ccid_bridge_resume, .id_table = ccid_bridge_ids, .supports_autosuspend = 1, }; static int ccid_bridge_stats_show(struct seq_file *s, void *unused) { struct ccid_bridge *ccid = s->private; seq_printf(s, "ccid_bridge: %s\n", ccid->intf ? "connected" : "disconnected"); seq_printf(s, "ccid_bridge: %s\n", ccid->opened ? 
"opened" : "closed"); seq_printf(s, "total writes: %u\n", ccid->n_write); seq_printf(s, "total reads: %u\n", ccid->n_write); seq_printf(s, "write/read timeout val: %u\n", ccid_bulk_msg_timeout); seq_printf(s, "write_timeout: %u\n", ccid->n_write_timeout); seq_printf(s, "read_timeout: %u\n", ccid->n_read_timeout); seq_printf(s, "write_max_time (msec): %lu\n", ccid->write_max_time); seq_printf(s, "read_max_time: (msec): %lu\n", ccid->read_max_time); return 0; } static int ccid_bridge_stats_open(struct inode *inode, struct file *file) { return single_open(file, ccid_bridge_stats_show, inode->i_private); } static ssize_t ccid_bridge_stats_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct seq_file *s = file->private_data; struct ccid_bridge *ccid = s->private; ccid_bridge_reset_stats(ccid); return count; } const struct file_operations ccid_bridge_stats_ops = { .open = ccid_bridge_stats_open, .read = seq_read, .write = ccid_bridge_stats_write, .llseek = seq_lseek, .release = single_release, }; static int ccid_bridge_debugfs_init(struct ccid_bridge *ccid) { struct dentry *dir; int ret = 0; dir = debugfs_create_dir("ccid_bridge", NULL); if (!dir || IS_ERR(dir)) { ret = -ENODEV; goto out; } ccid->dbg_root = dir; dir = debugfs_create_file("stats", 0644, ccid->dbg_root, ccid, &ccid_bridge_stats_ops); if (!dir) { debugfs_remove_recursive(ccid->dbg_root); ccid->dbg_root = NULL; ret = -ENODEV; } out: return ret; } static int __init ccid_bridge_init(void) { int ret; struct ccid_bridge *ccid; ccid = kzalloc(sizeof(*ccid), GFP_KERNEL); if (!ccid) { pr_err("Fail to allocate ccid"); ret = -ENOMEM; goto out; } __ccid_bridge_dev = ccid; mutex_init(&ccid->open_mutex); mutex_init(&ccid->write_mutex); mutex_init(&ccid->read_mutex); mutex_init(&ccid->event_mutex); init_waitqueue_head(&ccid->open_wq); init_waitqueue_head(&ccid->write_wq); init_waitqueue_head(&ccid->read_wq); init_waitqueue_head(&ccid->event_wq); ret = usb_register(&ccid_bridge_driver); if (ret < 0) { pr_err("Fail to register ccid usb driver with %d", ret); goto free_ccid; } ret = alloc_chrdev_region(&ccid->chrdev, 0, 1, "ccid_bridge"); if (ret < 0) { pr_err("Fail to allocate ccid char dev region with %d", ret); goto unreg_driver; } ccid->class = class_create(THIS_MODULE, "ccid_bridge"); if (IS_ERR(ccid->class)) { ret = PTR_ERR(ccid->class); pr_err("Fail to create ccid class with %d", ret); goto unreg_chrdev; } cdev_init(&ccid->cdev, &ccid_bridge_fops); ccid->cdev.owner = THIS_MODULE; ret = cdev_add(&ccid->cdev, ccid->chrdev, 1); if (ret < 0) { pr_err("Fail to add ccid cdev with %d", ret); goto destroy_class; } ccid->device = device_create(ccid->class, NULL, ccid->chrdev, NULL, "ccid_bridge"); if (IS_ERR(ccid->device)) { ret = PTR_ERR(ccid->device); pr_err("Fail to create ccid device with %d", ret); goto del_cdev; } ccid_bridge_debugfs_init(ccid); pr_info("success"); return 0; del_cdev: cdev_del(&ccid->cdev); destroy_class: class_destroy(ccid->class); unreg_chrdev: unregister_chrdev_region(ccid->chrdev, 1); unreg_driver: usb_deregister(&ccid_bridge_driver); free_ccid: mutex_destroy(&ccid->open_mutex); mutex_destroy(&ccid->write_mutex); mutex_destroy(&ccid->read_mutex); mutex_destroy(&ccid->event_mutex); kfree(ccid); __ccid_bridge_dev = NULL; out: return ret; } static void __exit ccid_bridge_exit(void) { struct ccid_bridge *ccid = __ccid_bridge_dev; pr_debug("called"); debugfs_remove_recursive(ccid->dbg_root); device_destroy(ccid->class, ccid->chrdev); cdev_del(&ccid->cdev); class_destroy(ccid->class); 
unregister_chrdev_region(ccid->chrdev, 1); usb_deregister(&ccid_bridge_driver); mutex_destroy(&ccid->open_mutex); mutex_destroy(&ccid->write_mutex); mutex_destroy(&ccid->read_mutex); mutex_destroy(&ccid->event_mutex); kfree(ccid); __ccid_bridge_dev = NULL; } module_init(ccid_bridge_init); module_exit(ccid_bridge_exit); MODULE_DESCRIPTION("USB CCID bridge driver"); MODULE_LICENSE("GPL v2");
TeamWin/android_kernel_samsung_serranoveltexx
drivers/usb/class/ccid_bridge.c
C
gpl-2.0
24,197
/* * PCI support for Xilinx plbv46_pci soft-core which can be used on * Xilinx Virtex ML410 / ML510 boards. * * Copyright 2009 Roderick Colenbrander * Copyright 2009 Secret Lab Technologies Ltd. * * The pci bridge fixup code was copied from ppc4xx_pci.c and was written * by Benjamin Herrenschmidt. * Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp. * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/ioport.h> #include <linux/of.h> #include <linux/pci.h> #include <asm/io.h> #define XPLB_PCI_ADDR 0x10c #define XPLB_PCI_DATA 0x110 #define XPLB_PCI_BUS 0x114 #define PCI_HOST_ENABLE_CMD (PCI_COMMAND_SERR | PCI_COMMAND_PARITY | \ PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY) static struct of_device_id xilinx_pci_match[] = { { .compatible = "xlnx,plbv46-pci-1.03.a", }, {} }; /** * xilinx_pci_fixup_bridge - Block Xilinx PHB configuration. */ static void xilinx_pci_fixup_bridge(struct pci_dev *dev) { struct pci_controller *hose; int i; if (dev->devfn || dev->bus->self) return; hose = pci_bus_to_host(dev->bus); if (!hose) return; if (!of_match_node(xilinx_pci_match, hose->dn)) return; /* Hide the PCI host BARs from the kernel as their content doesn't * fit well in the resource management */ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { dev->resource[i].start = 0; dev->resource[i].end = 0; dev->resource[i].flags = 0; } dev_info(&dev->dev, "Hiding Xilinx plb-pci host bridge resources %s\n", pci_name(dev)); } DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, xilinx_pci_fixup_bridge); #ifdef DEBUG /** * xilinx_pci_exclude_device - Don't do config access for non-root bus * * This is a hack. Config access to any bus other than bus 0 does not * currently work on the ML510 so we prevent it here. */ static int xilinx_pci_exclude_device(struct pci_controller *hose, u_char bus, u8 devfn) { return (bus != 0); } /** * xilinx_early_pci_scan - List pci config space for available devices * * List pci devices in very early phase. 
*/ void __init xilinx_early_pci_scan(struct pci_controller *hose) { u32 bus = 0; u32 val, dev, func, offset; /* Currently we have only 2 device connected - up-to 32 devices */ for (dev = 0; dev < 2; dev++) { /* List only first function number - up-to 8 functions */ for (func = 0; func < 1; func++) { printk(KERN_INFO "%02x:%02x:%02x", bus, dev, func); /* read the first 64 standardized bytes */ /* Up-to 192 bytes can be list of capabilities */ for (offset = 0; offset < 64; offset += 4) { early_read_config_dword(hose, bus, PCI_DEVFN(dev, func), offset, &val); if (offset == 0 && val == 0xFFFFFFFF) { printk(KERN_CONT "\nABSENT"); break; } if (!(offset % 0x10)) printk(KERN_CONT "\n%04x: ", offset); printk(KERN_CONT "%08x ", val); } printk(KERN_INFO "\n"); } } } #else void __init xilinx_early_pci_scan(struct pci_controller *hose) { } #endif /** * xilinx_pci_init - Find and register a Xilinx PCI host bridge */ void __init xilinx_pci_init(void) { struct pci_controller *hose; struct resource r; void __iomem *pci_reg; struct device_node *pci_node; pci_node = of_find_matching_node(NULL, xilinx_pci_match); if (!pci_node) return; if (of_address_to_resource(pci_node, 0, &r)) { pr_err("xilinx-pci: cannot resolve base address\n"); return; } hose = pcibios_alloc_controller(pci_node); if (!hose) { pr_err("xilinx-pci: pcibios_alloc_controller() failed\n"); return; } /* Setup config space */ setup_indirect_pci(hose, r.start + XPLB_PCI_ADDR, r.start + XPLB_PCI_DATA, INDIRECT_TYPE_SET_CFG_TYPE); /* According to the xilinx plbv46_pci documentation the soft-core starts * a self-init when the bus master enable bit is set. Without this bit * set the pci bus can't be scanned. */ early_write_config_word(hose, 0, 0, PCI_COMMAND, PCI_HOST_ENABLE_CMD); /* Set the max latency timer to 255 */ early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0xff); /* Set the max bus number to 255, and bus/subbus no's to 0 */ pci_reg = of_iomap(pci_node, 0); out_be32(pci_reg + XPLB_PCI_BUS, 0x000000ff); iounmap(pci_reg); /* Register the host bridge with the linux kernel! */ pci_process_bridge_OF_ranges(hose, pci_node, INDIRECT_TYPE_SET_CFG_TYPE); pr_info("xilinx-pci: Registered PCI host bridge\n"); xilinx_early_pci_scan(hose); }
sminki/android_kernel_sony_u8500
arch/microblaze/pci/xilinx_pci.c
C
gpl-2.0
4,549
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <core/object.h>
#include <core/ramht.h>

#include <subdev/bar.h>

static u32
nouveau_ramht_hash(struct nouveau_ramht *ramht, int chid, u32 handle)
{
	u32 hash = 0;

	while (handle) {
		hash ^= (handle & ((1 << ramht->bits) - 1));
		handle >>= ramht->bits;
	}

	hash ^= chid << (ramht->bits - 4);
	hash = hash << 3;
	return hash;
}

int
nouveau_ramht_insert(struct nouveau_ramht *ramht, int chid,
		     u32 handle, u32 context)
{
	struct nouveau_bar *bar = nouveau_bar(ramht);
	u32 co, ho;

	co = ho = nouveau_ramht_hash(ramht, chid, handle);
	do {
		if (!nv_ro32(ramht, co + 4)) {
			nv_wo32(ramht, co + 0, handle);
			nv_wo32(ramht, co + 4, context);
			if (bar)
				bar->flush(bar);
			return co;
		}

		co += 8;
		if (co >= nv_gpuobj(ramht)->size)
			co = 0;
	} while (co != ho);

	return -ENOMEM;
}

void
nouveau_ramht_remove(struct nouveau_ramht *ramht, int cookie)
{
	struct nouveau_bar *bar = nouveau_bar(ramht);

	nv_wo32(ramht, cookie + 0, 0x00000000);
	nv_wo32(ramht, cookie + 4, 0x00000000);
	if (bar)
		bar->flush(bar);
}

static struct nouveau_oclass
nouveau_ramht_oclass = {
	.handle = 0x0000abcd,
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = NULL,
		.dtor = _nouveau_gpuobj_dtor,
		.init = _nouveau_gpuobj_init,
		.fini = _nouveau_gpuobj_fini,
		.rd32 = _nouveau_gpuobj_rd32,
		.wr32 = _nouveau_gpuobj_wr32,
	},
};

int
nouveau_ramht_new(struct nouveau_object *parent, struct nouveau_object *pargpu,
		  u32 size, u32 align, struct nouveau_ramht **pramht)
{
	struct nouveau_ramht *ramht;
	int ret;

	ret = nouveau_gpuobj_create(parent, parent->engine ?
				    parent->engine : parent, /* <nv50 ramht */
				    &nouveau_ramht_oclass, 0, pargpu, size,
				    align, NVOBJ_FLAG_ZERO_ALLOC, &ramht);
	*pramht = ramht;
	if (ret)
		return ret;

	ramht->bits = order_base_2(nv_gpuobj(ramht)->size >> 3);
	return 0;
}
krzycz/prd
drivers/gpu/drm/nouveau/core/core/ramht.c
C
gpl-2.0
2,960
/* * leds-lp3944.c - driver for National Semiconductor LP3944 Funlight Chip * * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ /* * I2C driver for National Semiconductor LP3944 Funlight Chip * http://www.national.com/pf/LP/LP3944.html * * This helper chip can drive up to 8 leds, with two programmable DIM modes; * it could even be used as a gpio expander but this driver assumes it is used * as a led controller. * * The DIM modes are used to set _blink_ patterns for leds, the pattern is * specified supplying two parameters: * - period: from 0s to 1.6s * - duty cycle: percentage of the period the led is on, from 0 to 100 * * LP3944 can be found on Motorola A910 smartphone, where it drives the rgb * leds, the camera flash light and the displays backlights. */ #include <linux/module.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/leds.h> #include <linux/mutex.h> #include <linux/workqueue.h> #include <linux/leds-lp3944.h> /* Read Only Registers */ #define LP3944_REG_INPUT1 0x00 /* LEDs 0-7 InputRegister (Read Only) */ #define LP3944_REG_REGISTER1 0x01 /* None (Read Only) */ #define LP3944_REG_PSC0 0x02 /* Frequency Prescaler 0 (R/W) */ #define LP3944_REG_PWM0 0x03 /* PWM Register 0 (R/W) */ #define LP3944_REG_PSC1 0x04 /* Frequency Prescaler 1 (R/W) */ #define LP3944_REG_PWM1 0x05 /* PWM Register 1 (R/W) */ #define LP3944_REG_LS0 0x06 /* LEDs 0-3 Selector (R/W) */ #define LP3944_REG_LS1 0x07 /* LEDs 4-7 Selector (R/W) */ /* These registers are not used to control leds in LP3944, they can store * arbitrary values which the chip will ignore. */ #define LP3944_REG_REGISTER8 0x08 #define LP3944_REG_REGISTER9 0x09 #define LP3944_DIM0 0 #define LP3944_DIM1 1 /* period in ms */ #define LP3944_PERIOD_MIN 0 #define LP3944_PERIOD_MAX 1600 /* duty cycle is a percentage */ #define LP3944_DUTY_CYCLE_MIN 0 #define LP3944_DUTY_CYCLE_MAX 100 #define ldev_to_led(c) container_of(c, struct lp3944_led_data, ldev) /* Saved data */ struct lp3944_led_data { u8 id; enum lp3944_type type; enum lp3944_status status; struct led_classdev ldev; struct i2c_client *client; struct work_struct work; }; struct lp3944_data { struct mutex lock; struct i2c_client *client; struct lp3944_led_data leds[LP3944_LEDS_MAX]; }; static int lp3944_reg_read(struct i2c_client *client, u8 reg, u8 *value) { int tmp; tmp = i2c_smbus_read_byte_data(client, reg); if (tmp < 0) return tmp; *value = tmp; return 0; } static int lp3944_reg_write(struct i2c_client *client, u8 reg, u8 value) { return i2c_smbus_write_byte_data(client, reg, value); } /** * Set the period for DIM status * * @client: the i2c client * @dim: either LP3944_DIM0 or LP3944_DIM1 * @period: period of a blink, that is a on/off cycle, expressed in ms. 
*/ static int lp3944_dim_set_period(struct i2c_client *client, u8 dim, u16 period) { u8 psc_reg; u8 psc_value; int err; if (dim == LP3944_DIM0) psc_reg = LP3944_REG_PSC0; else if (dim == LP3944_DIM1) psc_reg = LP3944_REG_PSC1; else return -EINVAL; /* Convert period to Prescaler value */ if (period > LP3944_PERIOD_MAX) return -EINVAL; psc_value = (period * 255) / LP3944_PERIOD_MAX; err = lp3944_reg_write(client, psc_reg, psc_value); return err; } /** * Set the duty cycle for DIM status * * @client: the i2c client * @dim: either LP3944_DIM0 or LP3944_DIM1 * @duty_cycle: percentage of a period during which a led is ON */ static int lp3944_dim_set_dutycycle(struct i2c_client *client, u8 dim, u8 duty_cycle) { u8 pwm_reg; u8 pwm_value; int err; if (dim == LP3944_DIM0) pwm_reg = LP3944_REG_PWM0; else if (dim == LP3944_DIM1) pwm_reg = LP3944_REG_PWM1; else return -EINVAL; /* Convert duty cycle to PWM value */ if (duty_cycle > LP3944_DUTY_CYCLE_MAX) return -EINVAL; pwm_value = (duty_cycle * 255) / LP3944_DUTY_CYCLE_MAX; err = lp3944_reg_write(client, pwm_reg, pwm_value); return err; } /** * Set the led status * * @led: a lp3944_led_data structure * @status: one of LP3944_LED_STATUS_OFF * LP3944_LED_STATUS_ON * LP3944_LED_STATUS_DIM0 * LP3944_LED_STATUS_DIM1 */ static int lp3944_led_set(struct lp3944_led_data *led, u8 status) { struct lp3944_data *data = i2c_get_clientdata(led->client); u8 id = led->id; u8 reg; u8 val = 0; int err; dev_dbg(&led->client->dev, "%s: %s, status before normalization:%d\n", __func__, led->ldev.name, status); switch (id) { case LP3944_LED0: case LP3944_LED1: case LP3944_LED2: case LP3944_LED3: reg = LP3944_REG_LS0; break; case LP3944_LED4: case LP3944_LED5: case LP3944_LED6: case LP3944_LED7: id -= LP3944_LED4; reg = LP3944_REG_LS1; break; default: return -EINVAL; } if (status > LP3944_LED_STATUS_DIM1) return -EINVAL; /* invert only 0 and 1, leave unchanged the other values, * remember we are abusing status to set blink patterns */ if (led->type == LP3944_LED_TYPE_LED_INVERTED && status < 2) status = 1 - status; mutex_lock(&data->lock); lp3944_reg_read(led->client, reg, &val); val &= ~(LP3944_LED_STATUS_MASK << (id << 1)); val |= (status << (id << 1)); dev_dbg(&led->client->dev, "%s: %s, reg:%d id:%d status:%d val:%#x\n", __func__, led->ldev.name, reg, id, status, val); /* set led status */ err = lp3944_reg_write(led->client, reg, val); mutex_unlock(&data->lock); return err; } static int lp3944_led_set_blink(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { struct lp3944_led_data *led = ldev_to_led(led_cdev); u16 period; u8 duty_cycle; int err; /* units are in ms */ if (*delay_on + *delay_off > LP3944_PERIOD_MAX) return -EINVAL; if (*delay_on == 0 && *delay_off == 0) { /* Special case: the leds subsystem requires a default user * friendly blink pattern for the LED. Let's blink the led * slowly (1Hz). */ *delay_on = 500; *delay_off = 500; } period = (*delay_on) + (*delay_off); /* duty_cycle is the percentage of period during which the led is ON */ duty_cycle = 100 * (*delay_on) / period; /* invert duty cycle for inverted leds, this has the same effect of * swapping delay_on and delay_off */ if (led->type == LP3944_LED_TYPE_LED_INVERTED) duty_cycle = 100 - duty_cycle; /* NOTE: using always the first DIM mode, this means that all leds * will have the same blinking pattern. * * We could find a way later to have two leds blinking in hardware * with different patterns at the same time, falling back to software * control for the other ones. 
*/ err = lp3944_dim_set_period(led->client, LP3944_DIM0, period); if (err) return err; err = lp3944_dim_set_dutycycle(led->client, LP3944_DIM0, duty_cycle); if (err) return err; dev_dbg(&led->client->dev, "%s: OK hardware accelerated blink!\n", __func__); led->status = LP3944_LED_STATUS_DIM0; schedule_work(&led->work); return 0; } static void lp3944_led_set_brightness(struct led_classdev *led_cdev, enum led_brightness brightness) { struct lp3944_led_data *led = ldev_to_led(led_cdev); dev_dbg(&led->client->dev, "%s: %s, %d\n", __func__, led_cdev->name, brightness); led->status = !!brightness; schedule_work(&led->work); } static void lp3944_led_work(struct work_struct *work) { struct lp3944_led_data *led; led = container_of(work, struct lp3944_led_data, work); lp3944_led_set(led, led->status); } static int lp3944_configure(struct i2c_client *client, struct lp3944_data *data, struct lp3944_platform_data *pdata) { int i, err = 0; for (i = 0; i < pdata->leds_size; i++) { struct lp3944_led *pled = &pdata->leds[i]; struct lp3944_led_data *led = &data->leds[i]; led->client = client; led->id = i; switch (pled->type) { case LP3944_LED_TYPE_LED: case LP3944_LED_TYPE_LED_INVERTED: led->type = pled->type; led->status = pled->status; led->ldev.name = pled->name; led->ldev.max_brightness = 1; led->ldev.brightness_set = lp3944_led_set_brightness; led->ldev.blink_set = lp3944_led_set_blink; led->ldev.flags = LED_CORE_SUSPENDRESUME; INIT_WORK(&led->work, lp3944_led_work); err = led_classdev_register(&client->dev, &led->ldev); if (err < 0) { dev_err(&client->dev, "couldn't register LED %s\n", led->ldev.name); goto exit; } /* to expose the default value to userspace */ led->ldev.brightness = (enum led_brightness) led->status; /* Set the default led status */ err = lp3944_led_set(led, led->status); if (err < 0) { dev_err(&client->dev, "%s couldn't set STATUS %d\n", led->ldev.name, led->status); goto exit; } break; case LP3944_LED_TYPE_NONE: default: break; } } return 0; exit: if (i > 0) for (i = i - 1; i >= 0; i--) switch (pdata->leds[i].type) { case LP3944_LED_TYPE_LED: case LP3944_LED_TYPE_LED_INVERTED: led_classdev_unregister(&data->leds[i].ldev); cancel_work_sync(&data->leds[i].work); break; case LP3944_LED_TYPE_NONE: default: break; } return err; } static int lp3944_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct lp3944_platform_data *lp3944_pdata = dev_get_platdata(&client->dev); struct lp3944_data *data; int err; if (lp3944_pdata == NULL) { dev_err(&client->dev, "no platform data\n"); return -EINVAL; } /* Let's see whether this adapter can support what we need. 
*/ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { dev_err(&client->dev, "insufficient functionality!\n"); return -ENODEV; } data = devm_kzalloc(&client->dev, sizeof(struct lp3944_data), GFP_KERNEL); if (!data) return -ENOMEM; data->client = client; i2c_set_clientdata(client, data); mutex_init(&data->lock); err = lp3944_configure(client, data, lp3944_pdata); if (err < 0) return err; dev_info(&client->dev, "lp3944 enabled\n"); return 0; } static int lp3944_remove(struct i2c_client *client) { struct lp3944_platform_data *pdata = dev_get_platdata(&client->dev); struct lp3944_data *data = i2c_get_clientdata(client); int i; for (i = 0; i < pdata->leds_size; i++) switch (data->leds[i].type) { case LP3944_LED_TYPE_LED: case LP3944_LED_TYPE_LED_INVERTED: led_classdev_unregister(&data->leds[i].ldev); cancel_work_sync(&data->leds[i].work); break; case LP3944_LED_TYPE_NONE: default: break; } return 0; } /* lp3944 i2c driver struct */ static const struct i2c_device_id lp3944_id[] = { {"lp3944", 0}, {} }; MODULE_DEVICE_TABLE(i2c, lp3944_id); static struct i2c_driver lp3944_driver = { .driver = { .name = "lp3944", }, .probe = lp3944_probe, .remove = lp3944_remove, .id_table = lp3944_id, }; module_i2c_driver(lp3944_driver); MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>"); MODULE_DESCRIPTION("LP3944 Fun Light Chip"); MODULE_LICENSE("GPL");
RafaelRMachado/linux
drivers/leds/leds-lp3944.c
C
gpl-2.0
11,188
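The lp3944 code above reduces to a little fixed-point arithmetic: a blink period is scaled into the 8-bit PSC prescaler, a duty cycle into the 8-bit PWM register, and each LED's 2-bit status is shifted into the shared LS0/LS1 registers. A minimal standalone sketch of that arithmetic follows (plain user-space C, not kernel code; the 1600 ms period limit, the 0x03 status mask and the DIM0 value of 2 are assumptions taken from the LP3944 datasheet and driver header, which are not shown here):

#include <stdio.h>
#include <stdint.h>

#define PERIOD_MAX_MS   1600	/* assumed LP3944_PERIOD_MAX */
#define DUTY_CYCLE_MAX  100	/* assumed LP3944_DUTY_CYCLE_MAX */
#define LED_STATUS_MASK 0x03	/* assumed LP3944_LED_STATUS_MASK */

/* Period (ms) -> PSC register value, mirroring lp3944_dim_set_period() */
static int period_to_psc(unsigned int period, uint8_t *psc)
{
	if (period > PERIOD_MAX_MS)
		return -1;
	*psc = (period * 255) / PERIOD_MAX_MS;
	return 0;
}

/* Duty cycle (%) -> PWM register value, mirroring lp3944_dim_set_dutycycle() */
static int duty_to_pwm(unsigned int duty, uint8_t *pwm)
{
	if (duty > DUTY_CYCLE_MAX)
		return -1;
	*pwm = (duty * 255) / DUTY_CYCLE_MAX;
	return 0;
}

/* Shift a 2-bit status for LED 'id' (0..3) into an LS register image,
 * the same mask-and-or done in lp3944_led_set(). */
static uint8_t pack_led_status(uint8_t reg, unsigned int id, uint8_t status)
{
	reg &= ~(LED_STATUS_MASK << (id << 1));
	reg |= status << (id << 1);
	return reg;
}

int main(void)
{
	uint8_t psc, pwm;

	/* The driver's default blink: 500 ms on + 500 ms off, i.e. 1 Hz, 50% duty */
	if (!period_to_psc(1000, &psc) && !duty_to_pwm(50, &pwm))
		printf("PSC=0x%02x PWM=0x%02x\n", psc, pwm);

	/* LED2 switched to DIM0 (assumed status value 2) in a cleared LS0 */
	printf("LS0=0x%02x\n", pack_led_status(0x00, 2, 2));
	return 0;
}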
/* Copyright (c) 2011, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/types.h> #include <linux/list.h> #include <linux/ioctl.h> #include <linux/spinlock.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/version.h> #include "mdp4_wfd_writeback_util.h" #include "msm_fb.h" static int writeback_on(struct platform_device *pdev) { return 0; } static int writeback_off(struct platform_device *pdev) { return 0; } static int writeback_probe(struct platform_device *pdev) { struct msm_fb_data_type *mfd; struct platform_device *mdp_dev = NULL; struct msm_fb_panel_data *pdata = NULL; int rc = 0; WRITEBACK_MSG_ERR("Inside writeback_probe\n"); mfd = platform_get_drvdata(pdev); if (!mfd) return -ENODEV; if (mfd->key != MFD_KEY) return -EINVAL; mdp_dev = platform_device_alloc("mdp", pdev->id); if (!mdp_dev) return -ENOMEM; /* * link to the latest pdev */ mfd->pdev = mdp_dev; mfd->dest = DISPLAY_LCD; if (platform_device_add_data (mdp_dev, pdev->dev.platform_data, sizeof(struct msm_fb_panel_data))) { pr_err("writeback_probe: " "platform_device_add_data failed!\n"); platform_device_put(mdp_dev); return -ENOMEM; } pdata = (struct msm_fb_panel_data *)mdp_dev->dev.platform_data; pdata->on = writeback_on; pdata->off = writeback_off; pdata->next = pdev; /* * get/set panel specific fb info */ mfd->panel_info = pdata->panel_info; mfd->fb_imgType = MDP_RGB_565; platform_set_drvdata(mdp_dev, mfd); mfd->writeback_sdev.name = "wfd"; rc = switch_dev_register(&mfd->writeback_sdev); if (rc) { pr_err("Failed to setup switch dev for writeback panel"); return rc; } rc = platform_device_add(mdp_dev); if (rc) { WRITEBACK_MSG_ERR("failed to add device"); platform_device_put(mdp_dev); return rc; } return rc; } static int writeback_remove(struct platform_device *pdev) { struct msm_fb_data_type *mfd = platform_get_drvdata(pdev); switch_dev_unregister(&mfd->writeback_sdev); return 0; } static struct platform_driver writeback_driver = { .probe = writeback_probe, .remove = writeback_remove, .driver = { .name = "writeback", }, }; static int __init writeback_driver_init(void) { int rc = 0; WRITEBACK_MSG_ERR("Inside writeback_driver_init\n"); rc = platform_driver_register(&writeback_driver); return rc; } module_init(writeback_driver_init);
noobnl/android_kernel_samsung_d2-jb_2.5.1
drivers/video/msm/mdp4_wfd_writeback.c
C
gpl-2.0
2,836
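The probe above follows the msm_fb convention of stacking display devices: it allocates a new "mdp" platform device, copies the panel platform data onto it, overrides the on/off hooks with its own no-op versions, and points pdata->next back at the original device. The sketch below only models that chaining pattern in plain C; the structure and function names are made up for illustration and are not the real msm_fb API:

#include <stdio.h>

/* Illustrative stand-in for msm_fb_panel_data: power hooks plus a link
 * to the next device in the display chain. */
struct demo_panel_data {
	const char *name;
	int (*on)(struct demo_panel_data *pd);
	int (*off)(struct demo_panel_data *pd);
	struct demo_panel_data *next;
};

static int demo_on(struct demo_panel_data *pd)
{
	printf("%s: powered on\n", pd->name);
	return 0;
}

static int demo_off(struct demo_panel_data *pd)
{
	printf("%s: powered off\n", pd->name);
	return 0;
}

/* Walk the chain front to back, calling each device's on() hook and
 * stopping at the first error, a rough model of how the hooks get used. */
static int chain_on(struct demo_panel_data *pd)
{
	for (; pd; pd = pd->next) {
		int err = pd->on ? pd->on(pd) : 0;
		if (err)
			return err;
	}
	return 0;
}

int main(void)
{
	struct demo_panel_data writeback = {
		.name = "writeback", .on = demo_on, .off = demo_off,
	};
	struct demo_panel_data mdp = {
		.name = "mdp", .on = demo_on, .off = demo_off,
		.next = &writeback,	/* like pdata->next = pdev in the probe */
	};

	return chain_on(&mdp);
}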
/* * linux/drivers/video/stifb.c - * Low level Frame buffer driver for HP workstations with * STI (standard text interface) video firmware. * * Copyright (C) 2001-2006 Helge Deller <deller@gmx.de> * Portions Copyright (C) 2001 Thomas Bogendoerfer <tsbogend@alpha.franken.de> * * Based on: * - linux/drivers/video/artistfb.c -- Artist frame buffer driver * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> * - based on skeletonfb, which was * Created 28 Dec 1997 by Geert Uytterhoeven * - HP Xhp cfb-based X11 window driver for XFree86 * (c)Copyright 1992 Hewlett-Packard Co. * * * The following graphics display devices (NGLE family) are supported by this driver: * * HPA4070A known as "HCRX", a 1280x1024 color device with 8 planes * HPA4071A known as "HCRX24", a 1280x1024 color device with 24 planes, * optionally available with a hardware accelerator as HPA4071A_Z * HPA1659A known as "CRX", a 1280x1024 color device with 8 planes * HPA1439A known as "CRX24", a 1280x1024 color device with 24 planes, * optionally available with a hardware accelerator. * HPA1924A known as "GRX", a 1280x1024 grayscale device with 8 planes * HPA2269A known as "Dual CRX", a 1280x1024 color device with 8 planes, * implements support for two displays on a single graphics card. * HP710C internal graphics support optionally available on the HP9000s710 SPU, * supports 1280x1024 color displays with 8 planes. * HP710G same as HP710C, 1280x1024 grayscale only * HP710L same as HP710C, 1024x768 color only * HP712 internal graphics support on HP9000s712 SPU, supports 640x480, * 1024x768 or 1280x1024 color displays on 8 planes (Artist) * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ /* TODO: * - 1bpp mode is completely untested * - add support for h/w acceleration * - add hardware cursor * - automatically disable double buffering (e.g. 
on RDI precisionbook laptop) */ /* on supported graphic devices you may: * #define FALLBACK_TO_1BPP to fall back to 1 bpp, or * #undef FALLBACK_TO_1BPP to reject support for unsupported cards */ #undef FALLBACK_TO_1BPP #undef DEBUG_STIFB_REGS /* debug sti register accesses */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/ioport.h> #include <asm/grfioctl.h> /* for HP-UX compatibility */ #include <asm/uaccess.h> #include "sticore.h" /* REGION_BASE(fb_info, index) returns the virtual address for region <index> */ #define REGION_BASE(fb_info, index) \ F_EXTEND(fb_info->sti->glob_cfg->region_ptrs[index]) #define NGLEDEVDEPROM_CRT_REGION 1 #define NR_PALETTE 256 typedef struct { __s32 video_config_reg; __s32 misc_video_start; __s32 horiz_timing_fmt; __s32 serr_timing_fmt; __s32 vert_timing_fmt; __s32 horiz_state; __s32 vert_state; __s32 vtg_state_elements; __s32 pipeline_delay; __s32 misc_video_end; } video_setup_t; typedef struct { __s16 sizeof_ngle_data; __s16 x_size_visible; /* visible screen dim in pixels */ __s16 y_size_visible; __s16 pad2[15]; __s16 cursor_pipeline_delay; __s16 video_interleaves; __s32 pad3[11]; } ngle_rom_t; struct stifb_info { struct fb_info info; unsigned int id; ngle_rom_t ngle_rom; struct sti_struct *sti; int deviceSpecificConfig; u32 pseudo_palette[16]; }; static int __initdata stifb_bpp_pref[MAX_STI_ROMS]; /* ------------------- chipset specific functions -------------------------- */ /* offsets to graphic-chip internal registers */ #define REG_1 0x000118 #define REG_2 0x000480 #define REG_3 0x0004a0 #define REG_4 0x000600 #define REG_6 0x000800 #define REG_8 0x000820 #define REG_9 0x000a04 #define REG_10 0x018000 #define REG_11 0x018004 #define REG_12 0x01800c #define REG_13 0x018018 #define REG_14 0x01801c #define REG_15 0x200000 #define REG_15b0 0x200000 #define REG_16b1 0x200005 #define REG_16b3 0x200007 #define REG_21 0x200218 #define REG_22 0x0005a0 #define REG_23 0x0005c0 #define REG_26 0x200118 #define REG_27 0x200308 #define REG_32 0x21003c #define REG_33 0x210040 #define REG_34 0x200008 #define REG_35 0x018010 #define REG_38 0x210020 #define REG_39 0x210120 #define REG_40 0x210130 #define REG_42 0x210028 #define REG_43 0x21002c #define REG_44 0x210030 #define REG_45 0x210034 #define READ_BYTE(fb,reg) gsc_readb((fb)->info.fix.mmio_start + (reg)) #define READ_WORD(fb,reg) gsc_readl((fb)->info.fix.mmio_start + (reg)) #ifndef DEBUG_STIFB_REGS # define DEBUG_OFF() # define DEBUG_ON() # define WRITE_BYTE(value,fb,reg) gsc_writeb((value),(fb)->info.fix.mmio_start + (reg)) # define WRITE_WORD(value,fb,reg) gsc_writel((value),(fb)->info.fix.mmio_start + (reg)) #else static int debug_on = 1; # define DEBUG_OFF() debug_on=0 # define DEBUG_ON() debug_on=1 # define WRITE_BYTE(value,fb,reg) do { if (debug_on) \ printk(KERN_DEBUG "%30s: WRITE_BYTE(0x%06x) = 0x%02x (old=0x%02x)\n", \ __func__, reg, value, READ_BYTE(fb,reg)); \ gsc_writeb((value),(fb)->info.fix.mmio_start + (reg)); } while (0) # define WRITE_WORD(value,fb,reg) do { if (debug_on) \ printk(KERN_DEBUG "%30s: WRITE_WORD(0x%06x) = 0x%08x (old=0x%08x)\n", \ __func__, reg, value, READ_WORD(fb,reg)); \ gsc_writel((value),(fb)->info.fix.mmio_start + (reg)); } while (0) #endif /* DEBUG_STIFB_REGS */ #define ENABLE 1 /* for enabling/disabling screen */ #define DISABLE 0 #define NGLE_LOCK(fb_info) do { } while (0) #define 
NGLE_UNLOCK(fb_info) do { } while (0) static void SETUP_HW(struct stifb_info *fb) { char stat; do { stat = READ_BYTE(fb, REG_15b0); if (!stat) stat = READ_BYTE(fb, REG_15b0); } while (stat); } static void SETUP_FB(struct stifb_info *fb) { unsigned int reg10_value = 0; SETUP_HW(fb); switch (fb->id) { case CRT_ID_VISUALIZE_EG: case S9000_ID_ARTIST: case S9000_ID_A1659A: reg10_value = 0x13601000; break; case S9000_ID_A1439A: if (fb->info.var.bits_per_pixel == 32) reg10_value = 0xBBA0A000; else reg10_value = 0x13601000; break; case S9000_ID_HCRX: if (fb->info.var.bits_per_pixel == 32) reg10_value = 0xBBA0A000; else reg10_value = 0x13602000; break; case S9000_ID_TIMBER: case CRX24_OVERLAY_PLANES: reg10_value = 0x13602000; break; } if (reg10_value) WRITE_WORD(reg10_value, fb, REG_10); WRITE_WORD(0x83000300, fb, REG_14); SETUP_HW(fb); WRITE_BYTE(1, fb, REG_16b1); } static void START_IMAGE_COLORMAP_ACCESS(struct stifb_info *fb) { SETUP_HW(fb); WRITE_WORD(0xBBE0F000, fb, REG_10); WRITE_WORD(0x03000300, fb, REG_14); WRITE_WORD(~0, fb, REG_13); } static void WRITE_IMAGE_COLOR(struct stifb_info *fb, int index, int color) { SETUP_HW(fb); WRITE_WORD(((0x100+index)<<2), fb, REG_3); WRITE_WORD(color, fb, REG_4); } static void FINISH_IMAGE_COLORMAP_ACCESS(struct stifb_info *fb) { WRITE_WORD(0x400, fb, REG_2); if (fb->info.var.bits_per_pixel == 32) { WRITE_WORD(0x83000100, fb, REG_1); } else { if (fb->id == S9000_ID_ARTIST || fb->id == CRT_ID_VISUALIZE_EG) WRITE_WORD(0x80000100, fb, REG_26); else WRITE_WORD(0x80000100, fb, REG_1); } SETUP_FB(fb); } static void SETUP_RAMDAC(struct stifb_info *fb) { SETUP_HW(fb); WRITE_WORD(0x04000000, fb, 0x1020); WRITE_WORD(0xff000000, fb, 0x1028); } static void CRX24_SETUP_RAMDAC(struct stifb_info *fb) { SETUP_HW(fb); WRITE_WORD(0x04000000, fb, 0x1000); WRITE_WORD(0x02000000, fb, 0x1004); WRITE_WORD(0xff000000, fb, 0x1008); WRITE_WORD(0x05000000, fb, 0x1000); WRITE_WORD(0x02000000, fb, 0x1004); WRITE_WORD(0x03000000, fb, 0x1008); } #if 0 static void HCRX_SETUP_RAMDAC(struct stifb_info *fb) { WRITE_WORD(0xffffffff, fb, REG_32); } #endif static void CRX24_SET_OVLY_MASK(struct stifb_info *fb) { SETUP_HW(fb); WRITE_WORD(0x13a02000, fb, REG_11); WRITE_WORD(0x03000300, fb, REG_14); WRITE_WORD(0x000017f0, fb, REG_3); WRITE_WORD(0xffffffff, fb, REG_13); WRITE_WORD(0xffffffff, fb, REG_22); WRITE_WORD(0x00000000, fb, REG_23); } static void ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable) { unsigned int value = enable ? 0x43000000 : 0x03000000; SETUP_HW(fb); WRITE_WORD(0x06000000, fb, 0x1030); WRITE_WORD(value, fb, 0x1038); } static void CRX24_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable) { unsigned int value = enable ? 
0x10000000 : 0x30000000; SETUP_HW(fb); WRITE_WORD(0x01000000, fb, 0x1000); WRITE_WORD(0x02000000, fb, 0x1004); WRITE_WORD(value, fb, 0x1008); } static void ARTIST_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable) { u32 DregsMiscVideo = REG_21; u32 DregsMiscCtl = REG_27; SETUP_HW(fb); if (enable) { WRITE_WORD(READ_WORD(fb, DregsMiscVideo) | 0x0A000000, fb, DregsMiscVideo); WRITE_WORD(READ_WORD(fb, DregsMiscCtl) | 0x00800000, fb, DregsMiscCtl); } else { WRITE_WORD(READ_WORD(fb, DregsMiscVideo) & ~0x0A000000, fb, DregsMiscVideo); WRITE_WORD(READ_WORD(fb, DregsMiscCtl) & ~0x00800000, fb, DregsMiscCtl); } } #define GET_ROMTABLE_INDEX(fb) \ (READ_BYTE(fb, REG_16b3) - 1) #define HYPER_CONFIG_PLANES_24 0x00000100 #define IS_24_DEVICE(fb) \ (fb->deviceSpecificConfig & HYPER_CONFIG_PLANES_24) #define IS_888_DEVICE(fb) \ (!(IS_24_DEVICE(fb))) #define GET_FIFO_SLOTS(fb, cnt, numslots) \ { while (cnt < numslots) \ cnt = READ_WORD(fb, REG_34); \ cnt -= numslots; \ } #define IndexedDcd 0 /* Pixel data is indexed (pseudo) color */ #define Otc04 2 /* Pixels in each longword transfer (4) */ #define Otc32 5 /* Pixels in each longword transfer (32) */ #define Ots08 3 /* Each pixel is size (8)d transfer (1) */ #define OtsIndirect 6 /* Each bit goes through FG/BG color(8) */ #define AddrLong 5 /* FB address is Long aligned (pixel) */ #define BINovly 0x2 /* 8 bit overlay */ #define BINapp0I 0x0 /* Application Buffer 0, Indexed */ #define BINapp1I 0x1 /* Application Buffer 1, Indexed */ #define BINapp0F8 0xa /* Application Buffer 0, Fractional 8-8-8 */ #define BINattr 0xd /* Attribute Bitmap */ #define RopSrc 0x3 #define BitmapExtent08 3 /* Each write hits ( 8) bits in depth */ #define BitmapExtent32 5 /* Each write hits (32) bits in depth */ #define DataDynamic 0 /* Data register reloaded by direct access */ #define MaskDynamic 1 /* Mask register reloaded by direct access */ #define MaskOtc 0 /* Mask contains Object Count valid bits */ #define MaskAddrOffset(offset) (offset) #define StaticReg(en) (en) #define BGx(en) (en) #define FGx(en) (en) #define BAJustPoint(offset) (offset) #define BAIndexBase(base) (base) #define BA(F,C,S,A,J,B,I) \ (((F)<<31)|((C)<<27)|((S)<<24)|((A)<<21)|((J)<<16)|((B)<<12)|(I)) #define IBOvals(R,M,X,S,D,L,B,F) \ (((R)<<8)|((M)<<16)|((X)<<24)|((S)<<29)|((D)<<28)|((L)<<31)|((B)<<1)|(F)) #define NGLE_QUICK_SET_IMAGE_BITMAP_OP(fb, val) \ WRITE_WORD(val, fb, REG_14) #define NGLE_QUICK_SET_DST_BM_ACCESS(fb, val) \ WRITE_WORD(val, fb, REG_11) #define NGLE_QUICK_SET_CTL_PLN_REG(fb, val) \ WRITE_WORD(val, fb, REG_12) #define NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, plnmsk32) \ WRITE_WORD(plnmsk32, fb, REG_13) #define NGLE_REALLY_SET_IMAGE_FG_COLOR(fb, fg32) \ WRITE_WORD(fg32, fb, REG_35) #define NGLE_SET_TRANSFERDATA(fb, val) \ WRITE_WORD(val, fb, REG_8) #define NGLE_SET_DSTXY(fb, val) \ WRITE_WORD(val, fb, REG_6) #define NGLE_LONG_FB_ADDRESS(fbaddrbase, x, y) ( \ (u32) (fbaddrbase) + \ ( (unsigned int) ( (y) << 13 ) | \ (unsigned int) ( (x) << 2 ) ) \ ) #define NGLE_BINC_SET_DSTADDR(fb, addr) \ WRITE_WORD(addr, fb, REG_3) #define NGLE_BINC_SET_SRCADDR(fb, addr) \ WRITE_WORD(addr, fb, REG_2) #define NGLE_BINC_SET_DSTMASK(fb, mask) \ WRITE_WORD(mask, fb, REG_22) #define NGLE_BINC_WRITE32(fb, data32) \ WRITE_WORD(data32, fb, REG_23) #define START_COLORMAPLOAD(fb, cmapBltCtlData32) \ WRITE_WORD((cmapBltCtlData32), fb, REG_38) #define SET_LENXY_START_RECFILL(fb, lenxy) \ WRITE_WORD(lenxy, fb, REG_9) static void HYPER_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable) { u32 
DregsHypMiscVideo = REG_33; unsigned int value; SETUP_HW(fb); value = READ_WORD(fb, DregsHypMiscVideo); if (enable) value |= 0x0A000000; else value &= ~0x0A000000; WRITE_WORD(value, fb, DregsHypMiscVideo); } /* BufferNumbers used by SETUP_ATTR_ACCESS() */ #define BUFF0_CMAP0 0x00001e02 #define BUFF1_CMAP0 0x02001e02 #define BUFF1_CMAP3 0x0c001e02 #define ARTIST_CMAP0 0x00000102 #define HYPER_CMAP8 0x00000100 #define HYPER_CMAP24 0x00000800 static void SETUP_ATTR_ACCESS(struct stifb_info *fb, unsigned BufferNumber) { SETUP_HW(fb); WRITE_WORD(0x2EA0D000, fb, REG_11); WRITE_WORD(0x23000302, fb, REG_14); WRITE_WORD(BufferNumber, fb, REG_12); WRITE_WORD(0xffffffff, fb, REG_8); } static void SET_ATTR_SIZE(struct stifb_info *fb, int width, int height) { /* REG_6 seems to have special values when run on a RDI precisionbook parisc laptop (INTERNAL_EG_DX1024 or INTERNAL_EG_X1024). The values are: 0x2f0: internal (LCD) & external display enabled 0x2a0: external display only 0x000: zero on standard artist graphic cards */ WRITE_WORD(0x00000000, fb, REG_6); WRITE_WORD((width<<16) | height, fb, REG_9); WRITE_WORD(0x05000000, fb, REG_6); WRITE_WORD(0x00040001, fb, REG_9); } static void FINISH_ATTR_ACCESS(struct stifb_info *fb) { SETUP_HW(fb); WRITE_WORD(0x00000000, fb, REG_12); } static void elkSetupPlanes(struct stifb_info *fb) { SETUP_RAMDAC(fb); SETUP_FB(fb); } static void ngleSetupAttrPlanes(struct stifb_info *fb, int BufferNumber) { SETUP_ATTR_ACCESS(fb, BufferNumber); SET_ATTR_SIZE(fb, fb->info.var.xres, fb->info.var.yres); FINISH_ATTR_ACCESS(fb); SETUP_FB(fb); } static void rattlerSetupPlanes(struct stifb_info *fb) { int saved_id, y; /* Write RAMDAC pixel read mask register so all overlay * planes are display-enabled. (CRX24 uses Bt462 pixel * read mask register for overlay planes, not image planes). */ CRX24_SETUP_RAMDAC(fb); /* change fb->id temporarily to fool SETUP_FB() */ saved_id = fb->id; fb->id = CRX24_OVERLAY_PLANES; SETUP_FB(fb); fb->id = saved_id; for (y = 0; y < fb->info.var.yres; ++y) memset(fb->info.screen_base + y * fb->info.fix.line_length, 0xff, fb->info.var.xres * fb->info.var.bits_per_pixel/8); CRX24_SET_OVLY_MASK(fb); SETUP_FB(fb); } #define HYPER_CMAP_TYPE 0 #define NGLE_CMAP_INDEXED0_TYPE 0 #define NGLE_CMAP_OVERLAY_TYPE 3 /* typedef of LUT (Colormap) BLT Control Register */ typedef union /* Note assumption that fields are packed left-to-right */ { u32 all; struct { unsigned enable : 1; unsigned waitBlank : 1; unsigned reserved1 : 4; unsigned lutOffset : 10; /* Within destination LUT */ unsigned lutType : 2; /* Cursor, image, overlay */ unsigned reserved2 : 4; unsigned length : 10; } fields; } NgleLutBltCtl; #if 0 static NgleLutBltCtl setNgleLutBltCtl(struct stifb_info *fb, int offsetWithinLut, int length) { NgleLutBltCtl lutBltCtl; /* set enable, zero reserved fields */ lutBltCtl.all = 0x80000000; lutBltCtl.fields.length = length; switch (fb->id) { case S9000_ID_A1439A: /* CRX24 */ if (fb->var.bits_per_pixel == 8) { lutBltCtl.fields.lutType = NGLE_CMAP_OVERLAY_TYPE; lutBltCtl.fields.lutOffset = 0; } else { lutBltCtl.fields.lutType = NGLE_CMAP_INDEXED0_TYPE; lutBltCtl.fields.lutOffset = 0 * 256; } break; case S9000_ID_ARTIST: lutBltCtl.fields.lutType = NGLE_CMAP_INDEXED0_TYPE; lutBltCtl.fields.lutOffset = 0 * 256; break; default: lutBltCtl.fields.lutType = NGLE_CMAP_INDEXED0_TYPE; lutBltCtl.fields.lutOffset = 0; break; } /* Offset points to start of LUT. 
Adjust for within LUT */ lutBltCtl.fields.lutOffset += offsetWithinLut; return lutBltCtl; } #endif static NgleLutBltCtl setHyperLutBltCtl(struct stifb_info *fb, int offsetWithinLut, int length) { NgleLutBltCtl lutBltCtl; /* set enable, zero reserved fields */ lutBltCtl.all = 0x80000000; lutBltCtl.fields.length = length; lutBltCtl.fields.lutType = HYPER_CMAP_TYPE; /* Expect lutIndex to be 0 or 1 for image cmaps, 2 or 3 for overlay cmaps */ if (fb->info.var.bits_per_pixel == 8) lutBltCtl.fields.lutOffset = 2 * 256; else lutBltCtl.fields.lutOffset = 0 * 256; /* Offset points to start of LUT. Adjust for within LUT */ lutBltCtl.fields.lutOffset += offsetWithinLut; return lutBltCtl; } static void hyperUndoITE(struct stifb_info *fb) { int nFreeFifoSlots = 0; u32 fbAddr; NGLE_LOCK(fb); GET_FIFO_SLOTS(fb, nFreeFifoSlots, 1); WRITE_WORD(0xffffffff, fb, REG_32); /* Write overlay transparency mask so only entry 255 is transparent */ /* Hardware setup for full-depth write to "magic" location */ GET_FIFO_SLOTS(fb, nFreeFifoSlots, 7); NGLE_QUICK_SET_DST_BM_ACCESS(fb, BA(IndexedDcd, Otc04, Ots08, AddrLong, BAJustPoint(0), BINovly, BAIndexBase(0))); NGLE_QUICK_SET_IMAGE_BITMAP_OP(fb, IBOvals(RopSrc, MaskAddrOffset(0), BitmapExtent08, StaticReg(0), DataDynamic, MaskOtc, BGx(0), FGx(0))); /* Now prepare to write to the "magic" location */ fbAddr = NGLE_LONG_FB_ADDRESS(0, 1532, 0); NGLE_BINC_SET_DSTADDR(fb, fbAddr); NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xffffff); NGLE_BINC_SET_DSTMASK(fb, 0xffffffff); /* Finally, write a zero to clear the mask */ NGLE_BINC_WRITE32(fb, 0); NGLE_UNLOCK(fb); } static void ngleDepth8_ClearImagePlanes(struct stifb_info *fb) { /* FIXME! */ } static void ngleDepth24_ClearImagePlanes(struct stifb_info *fb) { /* FIXME! */ } static void ngleResetAttrPlanes(struct stifb_info *fb, unsigned int ctlPlaneReg) { int nFreeFifoSlots = 0; u32 packed_dst; u32 packed_len; NGLE_LOCK(fb); GET_FIFO_SLOTS(fb, nFreeFifoSlots, 4); NGLE_QUICK_SET_DST_BM_ACCESS(fb, BA(IndexedDcd, Otc32, OtsIndirect, AddrLong, BAJustPoint(0), BINattr, BAIndexBase(0))); NGLE_QUICK_SET_CTL_PLN_REG(fb, ctlPlaneReg); NGLE_SET_TRANSFERDATA(fb, 0xffffffff); NGLE_QUICK_SET_IMAGE_BITMAP_OP(fb, IBOvals(RopSrc, MaskAddrOffset(0), BitmapExtent08, StaticReg(1), DataDynamic, MaskOtc, BGx(0), FGx(0))); packed_dst = 0; packed_len = (fb->info.var.xres << 16) | fb->info.var.yres; GET_FIFO_SLOTS(fb, nFreeFifoSlots, 2); NGLE_SET_DSTXY(fb, packed_dst); SET_LENXY_START_RECFILL(fb, packed_len); /* * In order to work around an ELK hardware problem (Buffy doesn't * always flush it's buffers when writing to the attribute * planes), at least 4 pixels must be written to the attribute * planes starting at (X == 1280) and (Y != to the last Y written * by BIF): */ if (fb->id == S9000_ID_A1659A) { /* ELK_DEVICE_ID */ /* It's safe to use scanline zero: */ packed_dst = (1280 << 16); GET_FIFO_SLOTS(fb, nFreeFifoSlots, 2); NGLE_SET_DSTXY(fb, packed_dst); packed_len = (4 << 16) | 1; SET_LENXY_START_RECFILL(fb, packed_len); } /* ELK Hardware Kludge */ /**** Finally, set the Control Plane Register back to zero: ****/ GET_FIFO_SLOTS(fb, nFreeFifoSlots, 1); NGLE_QUICK_SET_CTL_PLN_REG(fb, 0); NGLE_UNLOCK(fb); } static void ngleClearOverlayPlanes(struct stifb_info *fb, int mask, int data) { int nFreeFifoSlots = 0; u32 packed_dst; u32 packed_len; NGLE_LOCK(fb); /* Hardware setup */ GET_FIFO_SLOTS(fb, nFreeFifoSlots, 8); NGLE_QUICK_SET_DST_BM_ACCESS(fb, BA(IndexedDcd, Otc04, Ots08, AddrLong, BAJustPoint(0), BINovly, BAIndexBase(0))); NGLE_SET_TRANSFERDATA(fb, 
0xffffffff); /* Write foreground color */ NGLE_REALLY_SET_IMAGE_FG_COLOR(fb, data); NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, mask); packed_dst = 0; packed_len = (fb->info.var.xres << 16) | fb->info.var.yres; NGLE_SET_DSTXY(fb, packed_dst); /* Write zeroes to overlay planes */ NGLE_QUICK_SET_IMAGE_BITMAP_OP(fb, IBOvals(RopSrc, MaskAddrOffset(0), BitmapExtent08, StaticReg(0), DataDynamic, MaskOtc, BGx(0), FGx(0))); SET_LENXY_START_RECFILL(fb, packed_len); NGLE_UNLOCK(fb); } static void hyperResetPlanes(struct stifb_info *fb, int enable) { unsigned int controlPlaneReg; NGLE_LOCK(fb); if (IS_24_DEVICE(fb)) if (fb->info.var.bits_per_pixel == 32) controlPlaneReg = 0x04000F00; else controlPlaneReg = 0x00000F00; /* 0x00000800 should be enough, but lets clear all 4 bits */ else controlPlaneReg = 0x00000F00; /* 0x00000100 should be enough, but lets clear all 4 bits */ switch (enable) { case ENABLE: /* clear screen */ if (IS_24_DEVICE(fb)) ngleDepth24_ClearImagePlanes(fb); else ngleDepth8_ClearImagePlanes(fb); /* Paint attribute planes for default case. * On Hyperdrive, this means all windows using overlay cmap 0. */ ngleResetAttrPlanes(fb, controlPlaneReg); /* clear overlay planes */ ngleClearOverlayPlanes(fb, 0xff, 255); /************************************************** ** Also need to counteract ITE settings **************************************************/ hyperUndoITE(fb); break; case DISABLE: /* clear screen */ if (IS_24_DEVICE(fb)) ngleDepth24_ClearImagePlanes(fb); else ngleDepth8_ClearImagePlanes(fb); ngleResetAttrPlanes(fb, controlPlaneReg); ngleClearOverlayPlanes(fb, 0xff, 0); break; case -1: /* RESET */ hyperUndoITE(fb); ngleResetAttrPlanes(fb, controlPlaneReg); break; } NGLE_UNLOCK(fb); } /* Return pointer to in-memory structure holding ELK device-dependent ROM values. */ static void ngleGetDeviceRomData(struct stifb_info *fb) { #if 0 XXX: FIXME: !!! 
int *pBytePerLongDevDepData;/* data byte == LSB */ int *pRomTable; NgleDevRomData *pPackedDevRomData; int sizePackedDevRomData = sizeof(*pPackedDevRomData); char *pCard8; int i; char *mapOrigin = NULL; int romTableIdx; pPackedDevRomData = fb->ngle_rom; SETUP_HW(fb); if (fb->id == S9000_ID_ARTIST) { pPackedDevRomData->cursor_pipeline_delay = 4; pPackedDevRomData->video_interleaves = 4; } else { /* Get pointer to unpacked byte/long data in ROM */ pBytePerLongDevDepData = fb->sti->regions[NGLEDEVDEPROM_CRT_REGION]; /* Tomcat supports several resolutions: 1280x1024, 1024x768, 640x480 */ if (fb->id == S9000_ID_TOMCAT) { /* jump to the correct ROM table */ GET_ROMTABLE_INDEX(romTableIdx); while (romTableIdx > 0) { pCard8 = (Card8 *) pPackedDevRomData; pRomTable = pBytePerLongDevDepData; /* Pack every fourth byte from ROM into structure */ for (i = 0; i < sizePackedDevRomData; i++) { *pCard8++ = (Card8) (*pRomTable++); } pBytePerLongDevDepData = (Card32 *) ((Card8 *) pBytePerLongDevDepData + pPackedDevRomData->sizeof_ngle_data); romTableIdx--; } } pCard8 = (Card8 *) pPackedDevRomData; /* Pack every fourth byte from ROM into structure */ for (i = 0; i < sizePackedDevRomData; i++) { *pCard8++ = (Card8) (*pBytePerLongDevDepData++); } } SETUP_FB(fb); #endif } #define HYPERBOWL_MODE_FOR_8_OVER_88_LUT0_NO_TRANSPARENCIES 4 #define HYPERBOWL_MODE01_8_24_LUT0_TRANSPARENT_LUT1_OPAQUE 8 #define HYPERBOWL_MODE01_8_24_LUT0_OPAQUE_LUT1_OPAQUE 10 #define HYPERBOWL_MODE2_8_24 15 /* HCRX specific boot-time initialization */ static void __init SETUP_HCRX(struct stifb_info *fb) { int hyperbowl; int nFreeFifoSlots = 0; if (fb->id != S9000_ID_HCRX) return; /* Initialize Hyperbowl registers */ GET_FIFO_SLOTS(fb, nFreeFifoSlots, 7); if (IS_24_DEVICE(fb)) { hyperbowl = (fb->info.var.bits_per_pixel == 32) ? 
HYPERBOWL_MODE01_8_24_LUT0_TRANSPARENT_LUT1_OPAQUE : HYPERBOWL_MODE01_8_24_LUT0_OPAQUE_LUT1_OPAQUE; /* First write to Hyperbowl must happen twice (bug) */ WRITE_WORD(hyperbowl, fb, REG_40); WRITE_WORD(hyperbowl, fb, REG_40); WRITE_WORD(HYPERBOWL_MODE2_8_24, fb, REG_39); WRITE_WORD(0x014c0148, fb, REG_42); /* Set lut 0 to be the direct color */ WRITE_WORD(0x404c4048, fb, REG_43); WRITE_WORD(0x034c0348, fb, REG_44); WRITE_WORD(0x444c4448, fb, REG_45); } else { hyperbowl = HYPERBOWL_MODE_FOR_8_OVER_88_LUT0_NO_TRANSPARENCIES; /* First write to Hyperbowl must happen twice (bug) */ WRITE_WORD(hyperbowl, fb, REG_40); WRITE_WORD(hyperbowl, fb, REG_40); WRITE_WORD(0x00000000, fb, REG_42); WRITE_WORD(0x00000000, fb, REG_43); WRITE_WORD(0x00000000, fb, REG_44); WRITE_WORD(0x444c4048, fb, REG_45); } } /* ------------------- driver specific functions --------------------------- */ static int stifb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info) { struct stifb_info *fb = (struct stifb_info *) info; u32 color; if (regno >= NR_PALETTE) return 1; red >>= 8; green >>= 8; blue >>= 8; DEBUG_OFF(); START_IMAGE_COLORMAP_ACCESS(fb); if (unlikely(fb->info.var.grayscale)) { /* gray = 0.30*R + 0.59*G + 0.11*B */ color = ((red * 77) + (green * 151) + (blue * 28)) >> 8; } else { color = ((red << 16) | (green << 8) | (blue)); } if (fb->info.fix.visual == FB_VISUAL_DIRECTCOLOR) { struct fb_var_screeninfo *var = &fb->info.var; if (regno < 16) ((u32 *)fb->info.pseudo_palette)[regno] = regno << var->red.offset | regno << var->green.offset | regno << var->blue.offset; } WRITE_IMAGE_COLOR(fb, regno, color); if (fb->id == S9000_ID_HCRX) { NgleLutBltCtl lutBltCtl; lutBltCtl = setHyperLutBltCtl(fb, 0, /* Offset w/i LUT */ 256); /* Load entire LUT */ NGLE_BINC_SET_SRCADDR(fb, NGLE_LONG_FB_ADDRESS(0, 0x100, 0)); /* 0x100 is same as used in WRITE_IMAGE_COLOR() */ START_COLORMAPLOAD(fb, lutBltCtl.all); SETUP_FB(fb); } else { /* cleanup colormap hardware */ FINISH_IMAGE_COLORMAP_ACCESS(fb); } DEBUG_ON(); return 0; } static int stifb_blank(int blank_mode, struct fb_info *info) { struct stifb_info *fb = (struct stifb_info *) info; int enable = (blank_mode == 0) ? ENABLE : DISABLE; switch (fb->id) { case S9000_ID_A1439A: CRX24_ENABLE_DISABLE_DISPLAY(fb, enable); break; case CRT_ID_VISUALIZE_EG: case S9000_ID_ARTIST: ARTIST_ENABLE_DISABLE_DISPLAY(fb, enable); break; case S9000_ID_HCRX: HYPER_ENABLE_DISABLE_DISPLAY(fb, enable); break; case S9000_ID_A1659A: /* fall through */ case S9000_ID_TIMBER: case CRX24_OVERLAY_PLANES: default: ENABLE_DISABLE_DISPLAY(fb, enable); break; } SETUP_FB(fb); return 0; } static void __init stifb_init_display(struct stifb_info *fb) { int id = fb->id; SETUP_FB(fb); /* HCRX specific initialization */ SETUP_HCRX(fb); /* if (id == S9000_ID_HCRX) hyperInitSprite(fb); else ngleInitSprite(fb); */ /* Initialize the image planes. */ switch (id) { case S9000_ID_HCRX: hyperResetPlanes(fb, ENABLE); break; case S9000_ID_A1439A: rattlerSetupPlanes(fb); break; case S9000_ID_A1659A: case S9000_ID_ARTIST: case CRT_ID_VISUALIZE_EG: elkSetupPlanes(fb); break; } /* Clear attribute planes on non HCRX devices. 
*/ switch (id) { case S9000_ID_A1659A: case S9000_ID_A1439A: if (fb->info.var.bits_per_pixel == 32) ngleSetupAttrPlanes(fb, BUFF1_CMAP3); else { ngleSetupAttrPlanes(fb, BUFF1_CMAP0); } if (id == S9000_ID_A1439A) ngleClearOverlayPlanes(fb, 0xff, 0); break; case S9000_ID_ARTIST: case CRT_ID_VISUALIZE_EG: if (fb->info.var.bits_per_pixel == 32) ngleSetupAttrPlanes(fb, BUFF1_CMAP3); else { ngleSetupAttrPlanes(fb, ARTIST_CMAP0); } break; } stifb_blank(0, (struct fb_info *)fb); /* 0=enable screen */ SETUP_FB(fb); } /* ------------ Interfaces to hardware functions ------------ */ static struct fb_ops stifb_ops = { .owner = THIS_MODULE, .fb_setcolreg = stifb_setcolreg, .fb_blank = stifb_blank, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, }; /* * Initialization */ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) { struct fb_fix_screeninfo *fix; struct fb_var_screeninfo *var; struct stifb_info *fb; struct fb_info *info; unsigned long sti_rom_address; char *dev_name; int bpp, xres, yres; fb = kzalloc(sizeof(*fb), GFP_ATOMIC); if (!fb) { printk(KERN_ERR "stifb: Could not allocate stifb structure\n"); return -ENODEV; } info = &fb->info; /* set struct to a known state */ fix = &info->fix; var = &info->var; fb->sti = sti; dev_name = sti->sti_data->inq_outptr.dev_name; /* store upper 32bits of the graphics id */ fb->id = fb->sti->graphics_id[0]; /* only supported cards are allowed */ switch (fb->id) { case CRT_ID_VISUALIZE_EG: /* Visualize cards can run either in "double buffer" or "standard" mode. Depending on the mode, the card reports a different device name, e.g. "INTERNAL_EG_DX1024" in double buffer mode and "INTERNAL_EG_X1024" in standard mode. Since this driver only supports standard mode, we check if the device name contains the string "DX" and tell the user how to reconfigure the card. */ if (strstr(dev_name, "DX")) { printk(KERN_WARNING "WARNING: stifb framebuffer driver does not support '%s' in double-buffer mode.\n" "WARNING: Please disable the double-buffer mode in IPL menu (the PARISC-BIOS).\n", dev_name); goto out_err0; } /* fall through */ case S9000_ID_ARTIST: case S9000_ID_HCRX: case S9000_ID_TIMBER: case S9000_ID_A1659A: case S9000_ID_A1439A: break; default: printk(KERN_WARNING "stifb: '%s' (id: 0x%08x) not supported.\n", dev_name, fb->id); goto out_err0; } /* default to 8 bpp on most graphic chips */ bpp = 8; xres = sti_onscreen_x(fb->sti); yres = sti_onscreen_y(fb->sti); ngleGetDeviceRomData(fb); /* get (virtual) io region base addr */ fix->mmio_start = REGION_BASE(fb,2); fix->mmio_len = 0x400000; /* Reject any device not in the NGLE family */ switch (fb->id) { case S9000_ID_A1659A: /* CRX/A1659A */ break; case S9000_ID_ELM: /* GRX, grayscale but otherwise same as A1659A */ var->grayscale = 1; fb->id = S9000_ID_A1659A; break; case S9000_ID_TIMBER: /* HP9000/710 Any (may be a grayscale device) */ if (strstr(dev_name, "GRAYSCALE") || strstr(dev_name, "Grayscale") || strstr(dev_name, "grayscale")) var->grayscale = 1; break; case S9000_ID_TOMCAT: /* Dual CRX, otherwise behaves like a CRX */ /* FIXME: TomCat supports two heads: * fb.iobase = REGION_BASE(fb_info,3); * fb.screen_base = ioremap_nocache(REGION_BASE(fb_info,2),xxx); * for now we only support the left one !
*/ xres = fb->ngle_rom.x_size_visible; yres = fb->ngle_rom.y_size_visible; fb->id = S9000_ID_A1659A; break; case S9000_ID_A1439A: /* CRX24/A1439A */ bpp = 32; break; case S9000_ID_HCRX: /* Hyperdrive/HCRX */ memset(&fb->ngle_rom, 0, sizeof(fb->ngle_rom)); if ((fb->sti->regions_phys[0] & 0xfc000000) == (fb->sti->regions_phys[2] & 0xfc000000)) sti_rom_address = F_EXTEND(fb->sti->regions_phys[0]); else sti_rom_address = F_EXTEND(fb->sti->regions_phys[1]); fb->deviceSpecificConfig = gsc_readl(sti_rom_address); if (IS_24_DEVICE(fb)) { if (bpp_pref == 8 || bpp_pref == 32) bpp = bpp_pref; else bpp = 32; } else bpp = 8; READ_WORD(fb, REG_15); SETUP_HW(fb); break; case CRT_ID_VISUALIZE_EG: case S9000_ID_ARTIST: /* Artist */ break; default: #ifdef FALLBACK_TO_1BPP printk(KERN_WARNING "stifb: Unsupported graphics card (id=0x%08x) " "- now trying 1bpp mode instead\n", fb->id); bpp = 1; /* default to 1 bpp */ break; #else printk(KERN_WARNING "stifb: Unsupported graphics card (id=0x%08x) " "- skipping.\n", fb->id); goto out_err0; #endif } /* get framebuffer physical and virtual base addr & len (64bit ready) */ fix->smem_start = F_EXTEND(fb->sti->regions_phys[1]); fix->smem_len = fb->sti->regions[1].region_desc.length * 4096; fix->line_length = (fb->sti->glob_cfg->total_x * bpp) / 8; if (!fix->line_length) fix->line_length = 2048; /* default */ /* limit fbsize to max visible screen size */ if (fix->smem_len > yres*fix->line_length) fix->smem_len = yres*fix->line_length; fix->accel = FB_ACCEL_NONE; switch (bpp) { case 1: fix->type = FB_TYPE_PLANES; /* well, sort of */ fix->visual = FB_VISUAL_MONO10; var->red.length = var->green.length = var->blue.length = 1; break; case 8: fix->type = FB_TYPE_PACKED_PIXELS; fix->visual = FB_VISUAL_PSEUDOCOLOR; var->red.length = var->green.length = var->blue.length = 8; break; case 32: fix->type = FB_TYPE_PACKED_PIXELS; fix->visual = FB_VISUAL_DIRECTCOLOR; var->red.length = var->green.length = var->blue.length = var->transp.length = 8; var->blue.offset = 0; var->green.offset = 8; var->red.offset = 16; var->transp.offset = 24; break; default: break; } var->xres = var->xres_virtual = xres; var->yres = var->yres_virtual = yres; var->bits_per_pixel = bpp; strcpy(fix->id, "stifb"); info->fbops = &stifb_ops; info->screen_base = ioremap_nocache(REGION_BASE(fb,1), fix->smem_len); info->screen_size = fix->smem_len; info->flags = FBINFO_DEFAULT; info->pseudo_palette = &fb->pseudo_palette; /* This has to be done !!! 
*/ if (fb_alloc_cmap(&info->cmap, NR_PALETTE, 0)) goto out_err1; stifb_init_display(fb); if (!request_mem_region(fix->smem_start, fix->smem_len, "stifb fb")) { printk(KERN_ERR "stifb: cannot reserve fb region 0x%04lx-0x%04lx\n", fix->smem_start, fix->smem_start+fix->smem_len); goto out_err2; } if (!request_mem_region(fix->mmio_start, fix->mmio_len, "stifb mmio")) { printk(KERN_ERR "stifb: cannot reserve sti mmio region 0x%04lx-0x%04lx\n", fix->mmio_start, fix->mmio_start+fix->mmio_len); goto out_err3; } if (register_framebuffer(&fb->info) < 0) goto out_err4; sti->info = info; /* save for unregister_framebuffer() */ printk(KERN_INFO "fb%d: %s %dx%d-%d frame buffer device, %s, id: %04x, mmio: 0x%04lx\n", fb->info.node, fix->id, var->xres, var->yres, var->bits_per_pixel, dev_name, fb->id, fix->mmio_start); return 0; out_err4: release_mem_region(fix->mmio_start, fix->mmio_len); out_err3: release_mem_region(fix->smem_start, fix->smem_len); out_err2: fb_dealloc_cmap(&info->cmap); out_err1: iounmap(info->screen_base); out_err0: kfree(fb); return -ENXIO; } static int stifb_disabled __initdata; int __init stifb_setup(char *options); static int __init stifb_init(void) { struct sti_struct *sti; struct sti_struct *def_sti; int i; #ifndef MODULE char *option = NULL; if (fb_get_options("stifb", &option)) return -ENODEV; stifb_setup(option); #endif if (stifb_disabled) { printk(KERN_INFO "stifb: disabled by \"stifb=off\" kernel parameter\n"); return -ENXIO; } def_sti = sti_get_rom(0); if (def_sti) { for (i = 1; i <= MAX_STI_ROMS; i++) { sti = sti_get_rom(i); if (!sti) break; if (sti == def_sti) { stifb_init_fb(sti, stifb_bpp_pref[i - 1]); break; } } } for (i = 1; i <= MAX_STI_ROMS; i++) { sti = sti_get_rom(i); if (!sti) break; if (sti == def_sti) continue; stifb_init_fb(sti, stifb_bpp_pref[i - 1]); } return 0; } /* * Cleanup */ static void __exit stifb_cleanup(void) { struct sti_struct *sti; int i; for (i = 1; i <= MAX_STI_ROMS; i++) { sti = sti_get_rom(i); if (!sti) break; if (sti->info) { struct fb_info *info = sti->info; unregister_framebuffer(sti->info); release_mem_region(info->fix.mmio_start, info->fix.mmio_len); release_mem_region(info->fix.smem_start, info->fix.smem_len); if (info->screen_base) iounmap(info->screen_base); fb_dealloc_cmap(&info->cmap); framebuffer_release(info); } sti->info = NULL; } } int __init stifb_setup(char *options) { int i; if (!options || !*options) return 1; if (strncmp(options, "off", 3) == 0) { stifb_disabled = 1; options += 3; } if (strncmp(options, "bpp", 3) == 0) { options += 3; for (i = 0; i < MAX_STI_ROMS; i++) { if (*options++ != ':') break; stifb_bpp_pref[i] = simple_strtoul(options, &options, 10); } } return 1; } __setup("stifb=", stifb_setup); module_init(stifb_init); module_exit(stifb_cleanup); MODULE_AUTHOR("Helge Deller <deller@gmx.de>, Thomas Bogendoerfer <tsbogend@alpha.franken.de>"); MODULE_DESCRIPTION("Framebuffer driver for HP's NGLE series graphics cards in HP PARISC machines"); MODULE_LICENSE("GPL v2");
VRToxin-AOSP/android_kernel_lge_bullhead
drivers/video/stifb.c
C
gpl-2.0
36,808
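Two bits of arithmetic in the stifb driver above are easy to verify in isolation: stifb_setcolreg() packs 8-bit R/G/B into a 24-bit word, or folds it to the 77/151/28 integer approximation of 0.30*R + 0.59*G + 0.11*B on grayscale heads, and NGLE_LONG_FB_ADDRESS() turns an (x, y) pixel coordinate into a word-aligned address with a fixed 8192-byte line stride. A small standalone sketch of just that math (user-space C, illustrative only):

#include <stdio.h>
#include <stdint.h>

/* 8-bit R/G/B -> 24-bit value as written by WRITE_IMAGE_COLOR() */
static uint32_t pack_rgb(uint8_t r, uint8_t g, uint8_t b)
{
	return ((uint32_t)r << 16) | ((uint32_t)g << 8) | b;
}

/* Integer approximation of gray = 0.30*R + 0.59*G + 0.11*B used by
 * stifb_setcolreg(): weights 77/256, 151/256 and 28/256. */
static uint8_t to_gray(uint8_t r, uint8_t g, uint8_t b)
{
	return (r * 77 + g * 151 + b * 28) >> 8;
}

/* NGLE_LONG_FB_ADDRESS(): 4 bytes per pixel, 1 << 13 bytes per scanline */
static uint32_t fb_address(uint32_t base, unsigned int x, unsigned int y)
{
	return base + ((y << 13) | (x << 2));
}

int main(void)
{
	printf("white = 0x%06x\n", pack_rgb(255, 255, 255));	/* 0xffffff */
	printf("gray  = %u\n", to_gray(255, 255, 255));		/* 255 */
	/* The "magic" pixel hyperUndoITE() writes to: (1532, 0) -> 0x17f0 */
	printf("addr  = 0x%08x\n", fb_address(0, 1532, 0));
	return 0;
}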
/* * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * Maintained at www.Open-FCoE.org */ /* * RPORT GENERAL INFO * * This file contains all processing regarding fc_rports. It contains the * rport state machine and does all rport interaction with the transport class. * There should be no other places in libfc that interact directly with the * transport class in regards to adding and deleting rports. * * fc_rport's represent N_Port's within the fabric. */ /* * RPORT LOCKING * * The rport should never hold the rport mutex and then attempt to acquire * either the lport or disc mutexes. The rport's mutex is considered lesser * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for * more comments on the hierarchy. * * The locking strategy is similar to the lport's strategy. The lock protects * the rport's states and is held and released by the entry points to the rport * block. All _enter_* functions correspond to rport states and expect the rport * mutex to be locked before calling them. This means that rports only handle * one request or response at a time, since they're not critical for the I/O * path this potential over-use of the mutex is acceptable. 
*/ #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/rcupdate.h> #include <linux/timer.h> #include <linux/workqueue.h> #include <linux/export.h> #include <asm/unaligned.h> #include <scsi/libfc.h> #include <scsi/fc_encode.h> #include "fc_libfc.h" static struct workqueue_struct *rport_event_queue; static void fc_rport_enter_flogi(struct fc_rport_priv *); static void fc_rport_enter_plogi(struct fc_rport_priv *); static void fc_rport_enter_prli(struct fc_rport_priv *); static void fc_rport_enter_rtv(struct fc_rport_priv *); static void fc_rport_enter_ready(struct fc_rport_priv *); static void fc_rport_enter_logo(struct fc_rport_priv *); static void fc_rport_enter_adisc(struct fc_rport_priv *); static void fc_rport_recv_plogi_req(struct fc_lport *, struct fc_frame *); static void fc_rport_recv_prli_req(struct fc_rport_priv *, struct fc_frame *); static void fc_rport_recv_prlo_req(struct fc_rport_priv *, struct fc_frame *); static void fc_rport_recv_logo_req(struct fc_lport *, struct fc_frame *); static void fc_rport_timeout(struct work_struct *); static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *); static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *); static void fc_rport_work(struct work_struct *); static const char *fc_rport_state_names[] = { [RPORT_ST_INIT] = "Init", [RPORT_ST_FLOGI] = "FLOGI", [RPORT_ST_PLOGI_WAIT] = "PLOGI_WAIT", [RPORT_ST_PLOGI] = "PLOGI", [RPORT_ST_PRLI] = "PRLI", [RPORT_ST_RTV] = "RTV", [RPORT_ST_READY] = "Ready", [RPORT_ST_ADISC] = "ADISC", [RPORT_ST_DELETE] = "Delete", }; /** * fc_rport_lookup() - Lookup a remote port by port_id * @lport: The local port to lookup the remote port on * @port_id: The remote port ID to look up * * The caller must hold either disc_mutex or rcu_read_lock(). */ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport, u32 port_id) { struct fc_rport_priv *rdata; list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) if (rdata->ids.port_id == port_id) return rdata; return NULL; } /** * fc_rport_create() - Create a new remote port * @lport: The local port this remote port will be associated with * @ids: The identifiers for the new remote port * * The remote port will start in the INIT state. * * Locking note: must be called with the disc_mutex held. 
*/ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id) { struct fc_rport_priv *rdata; rdata = lport->tt.rport_lookup(lport, port_id); if (rdata) return rdata; rdata = kzalloc(sizeof(*rdata) + lport->rport_priv_size, GFP_KERNEL); if (!rdata) return NULL; rdata->ids.node_name = -1; rdata->ids.port_name = -1; rdata->ids.port_id = port_id; rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN; kref_init(&rdata->kref); mutex_init(&rdata->rp_mutex); rdata->local_port = lport; rdata->rp_state = RPORT_ST_INIT; rdata->event = RPORT_EV_NONE; rdata->flags = FC_RP_FLAGS_REC_SUPPORTED; rdata->e_d_tov = lport->e_d_tov; rdata->r_a_tov = lport->r_a_tov; rdata->maxframe_size = FC_MIN_MAX_PAYLOAD; INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout); INIT_WORK(&rdata->event_work, fc_rport_work); if (port_id != FC_FID_DIR_SERV) { rdata->lld_event_callback = lport->tt.rport_event_callback; list_add_rcu(&rdata->peers, &lport->disc.rports); } return rdata; } /** * fc_rport_destroy() - Free a remote port after last reference is released * @kref: The remote port's kref */ static void fc_rport_destroy(struct kref *kref) { struct fc_rport_priv *rdata; rdata = container_of(kref, struct fc_rport_priv, kref); kfree_rcu(rdata, rcu); } /** * fc_rport_state() - Return a string identifying the remote port's state * @rdata: The remote port */ static const char *fc_rport_state(struct fc_rport_priv *rdata) { const char *cp; cp = fc_rport_state_names[rdata->rp_state]; if (!cp) cp = "Unknown"; return cp; } /** * fc_set_rport_loss_tmo() - Set the remote port loss timeout * @rport: The remote port that gets a new timeout value * @timeout: The new timeout value (in seconds) */ void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout) { if (timeout) rport->dev_loss_tmo = timeout; else rport->dev_loss_tmo = 1; } EXPORT_SYMBOL(fc_set_rport_loss_tmo); /** * fc_plogi_get_maxframe() - Get the maximum payload from the common service * parameters in a FLOGI frame * @flp: The FLOGI or PLOGI payload * @maxval: The maximum frame size upper limit; this may be less than what * is in the service parameters */ static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp, unsigned int maxval) { unsigned int mfs; /* * Get max payload from the common service parameters and the * class 3 receive data field size. 
*/ mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK; if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval) maxval = mfs; mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs); if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval) maxval = mfs; return maxval; } /** * fc_rport_state_enter() - Change the state of a remote port * @rdata: The remote port whose state should change * @new: The new state * * Locking Note: Called with the rport lock held */ static void fc_rport_state_enter(struct fc_rport_priv *rdata, enum fc_rport_state new) { if (rdata->rp_state != new) rdata->retries = 0; rdata->rp_state = new; } /** * fc_rport_work() - Handler for remote port events in the rport_event_queue * @work: Handle to the remote port being dequeued */ static void fc_rport_work(struct work_struct *work) { u32 port_id; struct fc_rport_priv *rdata = container_of(work, struct fc_rport_priv, event_work); struct fc_rport_libfc_priv *rpriv; enum fc_rport_event event; struct fc_lport *lport = rdata->local_port; struct fc_rport_operations *rport_ops; struct fc_rport_identifiers ids; struct fc_rport *rport; struct fc4_prov *prov; u8 type; mutex_lock(&rdata->rp_mutex); event = rdata->event; rport_ops = rdata->ops; rport = rdata->rport; FC_RPORT_DBG(rdata, "work event %u\n", event); switch (event) { case RPORT_EV_READY: ids = rdata->ids; rdata->event = RPORT_EV_NONE; rdata->major_retries = 0; kref_get(&rdata->kref); mutex_unlock(&rdata->rp_mutex); if (!rport) rport = fc_remote_port_add(lport->host, 0, &ids); if (!rport) { FC_RPORT_DBG(rdata, "Failed to add the rport\n"); lport->tt.rport_logoff(rdata); kref_put(&rdata->kref, lport->tt.rport_destroy); return; } mutex_lock(&rdata->rp_mutex); if (rdata->rport) FC_RPORT_DBG(rdata, "rport already allocated\n"); rdata->rport = rport; rport->maxframe_size = rdata->maxframe_size; rport->supported_classes = rdata->supported_classes; rpriv = rport->dd_data; rpriv->local_port = lport; rpriv->rp_state = rdata->rp_state; rpriv->flags = rdata->flags; rpriv->e_d_tov = rdata->e_d_tov; rpriv->r_a_tov = rdata->r_a_tov; mutex_unlock(&rdata->rp_mutex); if (rport_ops && rport_ops->event_callback) { FC_RPORT_DBG(rdata, "callback ev %d\n", event); rport_ops->event_callback(lport, rdata, event); } if (rdata->lld_event_callback) { FC_RPORT_DBG(rdata, "lld callback ev %d\n", event); rdata->lld_event_callback(lport, rdata, event); } kref_put(&rdata->kref, lport->tt.rport_destroy); break; case RPORT_EV_FAILED: case RPORT_EV_LOGO: case RPORT_EV_STOP: if (rdata->prli_count) { mutex_lock(&fc_prov_mutex); for (type = 1; type < FC_FC4_PROV_SIZE; type++) { prov = fc_passive_prov[type]; if (prov && prov->prlo) prov->prlo(rdata); } mutex_unlock(&fc_prov_mutex); } port_id = rdata->ids.port_id; mutex_unlock(&rdata->rp_mutex); if (rport_ops && rport_ops->event_callback) { FC_RPORT_DBG(rdata, "callback ev %d\n", event); rport_ops->event_callback(lport, rdata, event); } if (rdata->lld_event_callback) { FC_RPORT_DBG(rdata, "lld callback ev %d\n", event); rdata->lld_event_callback(lport, rdata, event); } cancel_delayed_work_sync(&rdata->retry_work); /* * Reset any outstanding exchanges before freeing rport. 
*/ lport->tt.exch_mgr_reset(lport, 0, port_id); lport->tt.exch_mgr_reset(lport, port_id, 0); if (rport) { rpriv = rport->dd_data; rpriv->rp_state = RPORT_ST_DELETE; mutex_lock(&rdata->rp_mutex); rdata->rport = NULL; mutex_unlock(&rdata->rp_mutex); fc_remote_port_delete(rport); } mutex_lock(&lport->disc.disc_mutex); mutex_lock(&rdata->rp_mutex); if (rdata->rp_state == RPORT_ST_DELETE) { if (port_id == FC_FID_DIR_SERV) { rdata->event = RPORT_EV_NONE; mutex_unlock(&rdata->rp_mutex); kref_put(&rdata->kref, lport->tt.rport_destroy); } else if ((rdata->flags & FC_RP_STARTED) && rdata->major_retries < lport->max_rport_retry_count) { rdata->major_retries++; rdata->event = RPORT_EV_NONE; FC_RPORT_DBG(rdata, "work restart\n"); fc_rport_enter_flogi(rdata); mutex_unlock(&rdata->rp_mutex); } else { FC_RPORT_DBG(rdata, "work delete\n"); list_del_rcu(&rdata->peers); mutex_unlock(&rdata->rp_mutex); kref_put(&rdata->kref, lport->tt.rport_destroy); } } else { /* * Re-open for events. Reissue READY event if ready. */ rdata->event = RPORT_EV_NONE; if (rdata->rp_state == RPORT_ST_READY) fc_rport_enter_ready(rdata); mutex_unlock(&rdata->rp_mutex); } mutex_unlock(&lport->disc.disc_mutex); break; default: mutex_unlock(&rdata->rp_mutex); break; } } /** * fc_rport_login() - Start the remote port login state machine * @rdata: The remote port to be logged in to * * Locking Note: Called without the rport lock held. This * function will hold the rport lock, call an _enter_* * function and then unlock the rport. * * This indicates the intent to be logged into the remote port. * If it appears we are already logged in, ADISC is used to verify * the setup. */ static int fc_rport_login(struct fc_rport_priv *rdata) { mutex_lock(&rdata->rp_mutex); rdata->flags |= FC_RP_STARTED; switch (rdata->rp_state) { case RPORT_ST_READY: FC_RPORT_DBG(rdata, "ADISC port\n"); fc_rport_enter_adisc(rdata); break; case RPORT_ST_DELETE: FC_RPORT_DBG(rdata, "Restart deleted port\n"); break; default: FC_RPORT_DBG(rdata, "Login to port\n"); fc_rport_enter_flogi(rdata); break; } mutex_unlock(&rdata->rp_mutex); return 0; } /** * fc_rport_enter_delete() - Schedule a remote port to be deleted * @rdata: The remote port to be deleted * @event: The event to report as the reason for deletion * * Locking Note: Called with the rport lock held. * * Allow state change into DELETE only once. * * Call queue_work only if there's no event already pending. * Set the new event so that the old pending event will not occur. * Since we have the mutex, even if fc_rport_work() is already started, * it'll see the new event. */ static void fc_rport_enter_delete(struct fc_rport_priv *rdata, enum fc_rport_event event) { if (rdata->rp_state == RPORT_ST_DELETE) return; FC_RPORT_DBG(rdata, "Delete port\n"); fc_rport_state_enter(rdata, RPORT_ST_DELETE); if (rdata->event == RPORT_EV_NONE) queue_work(rport_event_queue, &rdata->event_work); rdata->event = event; } /** * fc_rport_logoff() - Logoff and remove a remote port * @rdata: The remote port to be logged off of * * Locking Note: Called without the rport lock held. This * function will hold the rport lock, call an _enter_* * function and then unlock the rport. */ static int fc_rport_logoff(struct fc_rport_priv *rdata) { mutex_lock(&rdata->rp_mutex); FC_RPORT_DBG(rdata, "Remove port\n"); rdata->flags &= ~FC_RP_STARTED; if (rdata->rp_state == RPORT_ST_DELETE) { FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n"); goto out; } fc_rport_enter_logo(rdata); /* * Change the state to Delete so that we discard * the response. 
*/ fc_rport_enter_delete(rdata, RPORT_EV_STOP); out: mutex_unlock(&rdata->rp_mutex); return 0; } /** * fc_rport_enter_ready() - Transition to the RPORT_ST_READY state * @rdata: The remote port that is ready * * Locking Note: The rport lock is expected to be held before calling * this routine. */ static void fc_rport_enter_ready(struct fc_rport_priv *rdata) { fc_rport_state_enter(rdata, RPORT_ST_READY); FC_RPORT_DBG(rdata, "Port is Ready\n"); if (rdata->event == RPORT_EV_NONE) queue_work(rport_event_queue, &rdata->event_work); rdata->event = RPORT_EV_READY; } /** * fc_rport_timeout() - Handler for the retry_work timer * @work: Handle to the remote port that has timed out * * Locking Note: Called without the rport lock held. This * function will hold the rport lock, call an _enter_* * function and then unlock the rport. */ static void fc_rport_timeout(struct work_struct *work) { struct fc_rport_priv *rdata = container_of(work, struct fc_rport_priv, retry_work.work); mutex_lock(&rdata->rp_mutex); switch (rdata->rp_state) { case RPORT_ST_FLOGI: fc_rport_enter_flogi(rdata); break; case RPORT_ST_PLOGI: fc_rport_enter_plogi(rdata); break; case RPORT_ST_PRLI: fc_rport_enter_prli(rdata); break; case RPORT_ST_RTV: fc_rport_enter_rtv(rdata); break; case RPORT_ST_ADISC: fc_rport_enter_adisc(rdata); break; case RPORT_ST_PLOGI_WAIT: case RPORT_ST_READY: case RPORT_ST_INIT: case RPORT_ST_DELETE: break; } mutex_unlock(&rdata->rp_mutex); } /** * fc_rport_error() - Error handler, called once retries have been exhausted * @rdata: The remote port the error is happened on * @fp: The error code encapsulated in a frame pointer * * Locking Note: The rport lock is expected to be held before * calling this routine */ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp) { FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n", IS_ERR(fp) ? -PTR_ERR(fp) : 0, fc_rport_state(rdata), rdata->retries); switch (rdata->rp_state) { case RPORT_ST_FLOGI: case RPORT_ST_PLOGI: rdata->flags &= ~FC_RP_STARTED; fc_rport_enter_delete(rdata, RPORT_EV_FAILED); break; case RPORT_ST_RTV: fc_rport_enter_ready(rdata); break; case RPORT_ST_PRLI: case RPORT_ST_ADISC: fc_rport_enter_logo(rdata); break; case RPORT_ST_PLOGI_WAIT: case RPORT_ST_DELETE: case RPORT_ST_READY: case RPORT_ST_INIT: break; } } /** * fc_rport_error_retry() - Handler for remote port state retries * @rdata: The remote port whose state is to be retried * @fp: The error code encapsulated in a frame pointer * * If the error was an exchange timeout retry immediately, * otherwise wait for E_D_TOV. * * Locking Note: The rport lock is expected to be held before * calling this routine */ static void fc_rport_error_retry(struct fc_rport_priv *rdata, struct fc_frame *fp) { unsigned long delay = msecs_to_jiffies(FC_DEF_E_D_TOV); /* make sure this isn't an FC_EX_CLOSED error, never retry those */ if (PTR_ERR(fp) == -FC_EX_CLOSED) goto out; if (rdata->retries < rdata->local_port->max_rport_retry_count) { FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n", PTR_ERR(fp), fc_rport_state(rdata)); rdata->retries++; /* no additional delay on exchange timeouts */ if (PTR_ERR(fp) == -FC_EX_TIMEOUT) delay = 0; schedule_delayed_work(&rdata->retry_work, delay); return; } out: fc_rport_error(rdata, fp); } /** * fc_rport_login_complete() - Handle parameters and completion of p-mp login. * @rdata: The remote port which we logged into or which logged into us. 
* @fp: The FLOGI or PLOGI request or response frame * * Returns non-zero error if a problem is detected with the frame. * Does not free the frame. * * This is only used in point-to-multipoint mode for FIP currently. */ static int fc_rport_login_complete(struct fc_rport_priv *rdata, struct fc_frame *fp) { struct fc_lport *lport = rdata->local_port; struct fc_els_flogi *flogi; unsigned int e_d_tov; u16 csp_flags; flogi = fc_frame_payload_get(fp, sizeof(*flogi)); if (!flogi) return -EINVAL; csp_flags = ntohs(flogi->fl_csp.sp_features); if (fc_frame_payload_op(fp) == ELS_FLOGI) { if (csp_flags & FC_SP_FT_FPORT) { FC_RPORT_DBG(rdata, "Fabric bit set in FLOGI\n"); return -EINVAL; } } else { /* * E_D_TOV is not valid on an incoming FLOGI request. */ e_d_tov = ntohl(flogi->fl_csp.sp_e_d_tov); if (csp_flags & FC_SP_FT_EDTR) e_d_tov /= 1000000; if (e_d_tov > rdata->e_d_tov) rdata->e_d_tov = e_d_tov; } rdata->maxframe_size = fc_plogi_get_maxframe(flogi, lport->mfs); return 0; } /** * fc_rport_flogi_resp() - Handle response to FLOGI request for p-mp mode * @sp: The sequence that the FLOGI was on * @fp: The FLOGI response frame * @rp_arg: The remote port that received the FLOGI response */ static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, void *rp_arg) { struct fc_rport_priv *rdata = rp_arg; struct fc_lport *lport = rdata->local_port; struct fc_els_flogi *flogi; unsigned int r_a_tov; FC_RPORT_DBG(rdata, "Received a FLOGI %s\n", fc_els_resp_type(fp)); if (fp == ERR_PTR(-FC_EX_CLOSED)) goto put; mutex_lock(&rdata->rp_mutex); if (rdata->rp_state != RPORT_ST_FLOGI) { FC_RPORT_DBG(rdata, "Received a FLOGI response, but in state " "%s\n", fc_rport_state(rdata)); if (IS_ERR(fp)) goto err; goto out; } if (IS_ERR(fp)) { fc_rport_error(rdata, fp); goto err; } if (fc_frame_payload_op(fp) != ELS_LS_ACC) goto bad; if (fc_rport_login_complete(rdata, fp)) goto bad; flogi = fc_frame_payload_get(fp, sizeof(*flogi)); if (!flogi) goto bad; r_a_tov = ntohl(flogi->fl_csp.sp_r_a_tov); if (r_a_tov > rdata->r_a_tov) rdata->r_a_tov = r_a_tov; if (rdata->ids.port_name < lport->wwpn) fc_rport_enter_plogi(rdata); else fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT); out: fc_frame_free(fp); err: mutex_unlock(&rdata->rp_mutex); put: kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); return; bad: FC_RPORT_DBG(rdata, "Bad FLOGI response\n"); fc_rport_error_retry(rdata, fp); goto out; } /** * fc_rport_enter_flogi() - Send a FLOGI request to the remote port for p-mp * @rdata: The remote port to send a FLOGI to * * Locking Note: The rport lock is expected to be held before calling * this routine. 
*/ static void fc_rport_enter_flogi(struct fc_rport_priv *rdata) { struct fc_lport *lport = rdata->local_port; struct fc_frame *fp; if (!lport->point_to_multipoint) return fc_rport_enter_plogi(rdata); FC_RPORT_DBG(rdata, "Entered FLOGI state from %s state\n", fc_rport_state(rdata)); fc_rport_state_enter(rdata, RPORT_ST_FLOGI); fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); if (!fp) return fc_rport_error_retry(rdata, fp); if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_FLOGI, fc_rport_flogi_resp, rdata, 2 * lport->r_a_tov)) fc_rport_error_retry(rdata, NULL); else kref_get(&rdata->kref); } /** * fc_rport_recv_flogi_req() - Handle Fabric Login (FLOGI) request in p-mp mode * @lport: The local port that received the FLOGI request * @rx_fp: The FLOGI request frame */ static void fc_rport_recv_flogi_req(struct fc_lport *lport, struct fc_frame *rx_fp) { struct fc_disc *disc; struct fc_els_flogi *flp; struct fc_rport_priv *rdata; struct fc_frame *fp = rx_fp; struct fc_seq_els_data rjt_data; u32 sid; sid = fc_frame_sid(fp); FC_RPORT_ID_DBG(lport, sid, "Received FLOGI request\n"); disc = &lport->disc; mutex_lock(&disc->disc_mutex); if (!lport->point_to_multipoint) { rjt_data.reason = ELS_RJT_UNSUP; rjt_data.explan = ELS_EXPL_NONE; goto reject; } flp = fc_frame_payload_get(fp, sizeof(*flp)); if (!flp) { rjt_data.reason = ELS_RJT_LOGIC; rjt_data.explan = ELS_EXPL_INV_LEN; goto reject; } rdata = lport->tt.rport_lookup(lport, sid); if (!rdata) { rjt_data.reason = ELS_RJT_FIP; rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR; goto reject; } mutex_lock(&rdata->rp_mutex); FC_RPORT_DBG(rdata, "Received FLOGI in %s state\n", fc_rport_state(rdata)); switch (rdata->rp_state) { case RPORT_ST_INIT: /* * If the FLOGI request was received while the RPORT is still in the * INIT state (it has not transitioned to FLOGI because either the * fc_rport timeout function didn't trigger or this end hasn't * received a beacon from the other end yet), allow the RPORT state * machine to continue; otherwise fall through, which causes a * reject response to be sent. * NOTE: FIP state such as VNMP_UP or VNMP_CLAIM is not checked here * because if the FIP state were not one of those, the RPORT wouldn't * have been created and 'rport_lookup' would have failed anyway. */ if (lport->point_to_multipoint) break; case RPORT_ST_DELETE: mutex_unlock(&rdata->rp_mutex); rjt_data.reason = ELS_RJT_FIP; rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR; goto reject; case RPORT_ST_FLOGI: case RPORT_ST_PLOGI_WAIT: case RPORT_ST_PLOGI: break; case RPORT_ST_PRLI: case RPORT_ST_RTV: case RPORT_ST_READY: case RPORT_ST_ADISC: /* * Set the remote port to be deleted and to then restart. * This queues work to be sure exchanges are reset.
*/ fc_rport_enter_delete(rdata, RPORT_EV_LOGO); mutex_unlock(&rdata->rp_mutex); rjt_data.reason = ELS_RJT_BUSY; rjt_data.explan = ELS_EXPL_NONE; goto reject; } if (fc_rport_login_complete(rdata, fp)) { mutex_unlock(&rdata->rp_mutex); rjt_data.reason = ELS_RJT_LOGIC; rjt_data.explan = ELS_EXPL_NONE; goto reject; } fp = fc_frame_alloc(lport, sizeof(*flp)); if (!fp) goto out; fc_flogi_fill(lport, fp); flp = fc_frame_payload_get(fp, sizeof(*flp)); flp->fl_cmd = ELS_LS_ACC; fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); lport->tt.frame_send(lport, fp); if (rdata->ids.port_name < lport->wwpn) fc_rport_enter_plogi(rdata); else fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT); out: mutex_unlock(&rdata->rp_mutex); mutex_unlock(&disc->disc_mutex); fc_frame_free(rx_fp); return; reject: mutex_unlock(&disc->disc_mutex); lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data); fc_frame_free(rx_fp); } /** * fc_rport_plogi_resp() - Handler for ELS PLOGI responses * @sp: The sequence the PLOGI is on * @fp: The PLOGI response frame * @rdata_arg: The remote port that sent the PLOGI response * * Locking Note: This function will be called without the rport lock * held, but it will lock, call an _enter_* function or fc_rport_error * and then unlock the rport. */ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, void *rdata_arg) { struct fc_rport_priv *rdata = rdata_arg; struct fc_lport *lport = rdata->local_port; struct fc_els_flogi *plp = NULL; u16 csp_seq; u16 cssp_seq; u8 op; mutex_lock(&rdata->rp_mutex); FC_RPORT_DBG(rdata, "Received a PLOGI %s\n", fc_els_resp_type(fp)); if (rdata->rp_state != RPORT_ST_PLOGI) { FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state " "%s\n", fc_rport_state(rdata)); if (IS_ERR(fp)) goto err; goto out; } if (IS_ERR(fp)) { fc_rport_error_retry(rdata, fp); goto err; } op = fc_frame_payload_op(fp); if (op == ELS_LS_ACC && (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) { rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn); rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn); /* save plogi response sp_features for further reference */ rdata->sp_features = ntohs(plp->fl_csp.sp_features); if (lport->point_to_multipoint) fc_rport_login_complete(rdata, fp); csp_seq = ntohs(plp->fl_csp.sp_tot_seq); cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq); if (cssp_seq < csp_seq) csp_seq = cssp_seq; rdata->max_seq = csp_seq; rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs); fc_rport_enter_prli(rdata); } else fc_rport_error_retry(rdata, fp); out: fc_frame_free(fp); err: mutex_unlock(&rdata->rp_mutex); kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); } /** * fc_rport_enter_plogi() - Send Port Login (PLOGI) request * @rdata: The remote port to send a PLOGI to * * Locking Note: The rport lock is expected to be held before calling * this routine. 
*/ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata) { struct fc_lport *lport = rdata->local_port; struct fc_frame *fp; FC_RPORT_DBG(rdata, "Port entered PLOGI state from %s state\n", fc_rport_state(rdata)); fc_rport_state_enter(rdata, RPORT_ST_PLOGI); rdata->maxframe_size = FC_MIN_MAX_PAYLOAD; fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); if (!fp) { FC_RPORT_DBG(rdata, "%s frame alloc failed\n", __func__); fc_rport_error_retry(rdata, fp); return; } rdata->e_d_tov = lport->e_d_tov; if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI, fc_rport_plogi_resp, rdata, 2 * lport->r_a_tov)) fc_rport_error_retry(rdata, NULL); else kref_get(&rdata->kref); } /** * fc_rport_prli_resp() - Process Login (PRLI) response handler * @sp: The sequence the PRLI response was on * @fp: The PRLI response frame * @rdata_arg: The remote port that sent the PRLI response * * Locking Note: This function will be called without the rport lock * held, but it will lock, call an _enter_* function or fc_rport_error * and then unlock the rport. */ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, void *rdata_arg) { struct fc_rport_priv *rdata = rdata_arg; struct { struct fc_els_prli prli; struct fc_els_spp spp; } *pp; struct fc_els_spp temp_spp; struct fc4_prov *prov; u32 roles = FC_RPORT_ROLE_UNKNOWN; u32 fcp_parm = 0; u8 op; u8 resp_code = 0; mutex_lock(&rdata->rp_mutex); FC_RPORT_DBG(rdata, "Received a PRLI %s\n", fc_els_resp_type(fp)); if (rdata->rp_state != RPORT_ST_PRLI) { FC_RPORT_DBG(rdata, "Received a PRLI response, but in state " "%s\n", fc_rport_state(rdata)); if (IS_ERR(fp)) goto err; goto out; } if (IS_ERR(fp)) { fc_rport_error_retry(rdata, fp); goto err; } /* reinitialize remote port roles */ rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN; op = fc_frame_payload_op(fp); if (op == ELS_LS_ACC) { pp = fc_frame_payload_get(fp, sizeof(*pp)); if (!pp) goto out; resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK); FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x\n", pp->spp.spp_flags); rdata->spp_type = pp->spp.spp_type; if (resp_code != FC_SPP_RESP_ACK) { if (resp_code == FC_SPP_RESP_CONF) fc_rport_error(rdata, fp); else fc_rport_error_retry(rdata, fp); goto out; } if (pp->prli.prli_spp_len < sizeof(pp->spp)) goto out; fcp_parm = ntohl(pp->spp.spp_params); if (fcp_parm & FCP_SPPF_RETRY) rdata->flags |= FC_RP_FLAGS_RETRY; if (fcp_parm & FCP_SPPF_CONF_COMPL) rdata->flags |= FC_RP_FLAGS_CONF_REQ; prov = fc_passive_prov[FC_TYPE_FCP]; if (prov) { memset(&temp_spp, 0, sizeof(temp_spp)); prov->prli(rdata, pp->prli.prli_spp_len, &pp->spp, &temp_spp); } rdata->supported_classes = FC_COS_CLASS3; if (fcp_parm & FCP_SPPF_INIT_FCN) roles |= FC_RPORT_ROLE_FCP_INITIATOR; if (fcp_parm & FCP_SPPF_TARG_FCN) roles |= FC_RPORT_ROLE_FCP_TARGET; rdata->ids.roles = roles; fc_rport_enter_rtv(rdata); } else { FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n"); fc_rport_error_retry(rdata, fp); } out: fc_frame_free(fp); err: mutex_unlock(&rdata->rp_mutex); kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); } /** * fc_rport_enter_prli() - Send Process Login (PRLI) request * @rdata: The remote port to send the PRLI request to * * Locking Note: The rport lock is expected to be held before calling * this routine. 
*/ static void fc_rport_enter_prli(struct fc_rport_priv *rdata) { struct fc_lport *lport = rdata->local_port; struct { struct fc_els_prli prli; struct fc_els_spp spp; } *pp; struct fc_frame *fp; struct fc4_prov *prov; /* * If the rport is one of the well known addresses * we skip PRLI and RTV and go straight to READY. */ if (rdata->ids.port_id >= FC_FID_DOM_MGR) { fc_rport_enter_ready(rdata); return; } FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n", fc_rport_state(rdata)); fc_rport_state_enter(rdata, RPORT_ST_PRLI); fp = fc_frame_alloc(lport, sizeof(*pp)); if (!fp) { fc_rport_error_retry(rdata, fp); return; } fc_prli_fill(lport, fp); prov = fc_passive_prov[FC_TYPE_FCP]; if (prov) { pp = fc_frame_payload_get(fp, sizeof(*pp)); prov->prli(rdata, sizeof(pp->spp), NULL, &pp->spp); } fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rdata->ids.port_id, fc_host_port_id(lport->host), FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); if (!lport->tt.exch_seq_send(lport, fp, fc_rport_prli_resp, NULL, rdata, 2 * lport->r_a_tov)) fc_rport_error_retry(rdata, NULL); else kref_get(&rdata->kref); } /** * fc_rport_els_rtv_resp() - Handler for Request Timeout Value (RTV) responses * @sp: The sequence the RTV was on * @fp: The RTV response frame * @rdata_arg: The remote port that sent the RTV response * * Many targets don't seem to support this. * * Locking Note: This function will be called without the rport lock * held, but it will lock, call an _enter_* function or fc_rport_error * and then unlock the rport. */ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, void *rdata_arg) { struct fc_rport_priv *rdata = rdata_arg; u8 op; mutex_lock(&rdata->rp_mutex); FC_RPORT_DBG(rdata, "Received a RTV %s\n", fc_els_resp_type(fp)); if (rdata->rp_state != RPORT_ST_RTV) { FC_RPORT_DBG(rdata, "Received a RTV response, but in state " "%s\n", fc_rport_state(rdata)); if (IS_ERR(fp)) goto err; goto out; } if (IS_ERR(fp)) { fc_rport_error(rdata, fp); goto err; } op = fc_frame_payload_op(fp); if (op == ELS_LS_ACC) { struct fc_els_rtv_acc *rtv; u32 toq; u32 tov; rtv = fc_frame_payload_get(fp, sizeof(*rtv)); if (rtv) { toq = ntohl(rtv->rtv_toq); tov = ntohl(rtv->rtv_r_a_tov); if (tov == 0) tov = 1; rdata->r_a_tov = tov; tov = ntohl(rtv->rtv_e_d_tov); if (toq & FC_ELS_RTV_EDRES) tov /= 1000000; if (tov == 0) tov = 1; rdata->e_d_tov = tov; } } fc_rport_enter_ready(rdata); out: fc_frame_free(fp); err: mutex_unlock(&rdata->rp_mutex); kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); } /** * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request * @rdata: The remote port to send the RTV request to * * Locking Note: The rport lock is expected to be held before calling * this routine. 
*/ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata) { struct fc_frame *fp; struct fc_lport *lport = rdata->local_port; FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n", fc_rport_state(rdata)); fc_rport_state_enter(rdata, RPORT_ST_RTV); fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv)); if (!fp) { fc_rport_error_retry(rdata, fp); return; } if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV, fc_rport_rtv_resp, rdata, 2 * lport->r_a_tov)) fc_rport_error_retry(rdata, NULL); else kref_get(&rdata->kref); } /** * fc_rport_logo_resp() - Handler for logout (LOGO) responses * @sp: The sequence the LOGO was on * @fp: The LOGO response frame * @lport_arg: The local port */ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, void *lport_arg) { struct fc_lport *lport = lport_arg; FC_RPORT_ID_DBG(lport, fc_seq_exch(sp)->did, "Received a LOGO %s\n", fc_els_resp_type(fp)); if (IS_ERR(fp)) return; fc_frame_free(fp); } /** * fc_rport_enter_logo() - Send a logout (LOGO) request * @rdata: The remote port to send the LOGO request to * * Locking Note: The rport lock is expected to be held before calling * this routine. */ static void fc_rport_enter_logo(struct fc_rport_priv *rdata) { struct fc_lport *lport = rdata->local_port; struct fc_frame *fp; FC_RPORT_DBG(rdata, "Port sending LOGO from %s state\n", fc_rport_state(rdata)); fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo)); if (!fp) return; (void)lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO, fc_rport_logo_resp, lport, 0); } /** * fc_rport_els_adisc_resp() - Handler for Address Discovery (ADISC) responses * @sp: The sequence the ADISC response was on * @fp: The ADISC response frame * @rdata_arg: The remote port that sent the ADISC response * * Locking Note: This function will be called without the rport lock * held, but it will lock, call an _enter_* function or fc_rport_error * and then unlock the rport. */ static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp, void *rdata_arg) { struct fc_rport_priv *rdata = rdata_arg; struct fc_els_adisc *adisc; u8 op; mutex_lock(&rdata->rp_mutex); FC_RPORT_DBG(rdata, "Received a ADISC response\n"); if (rdata->rp_state != RPORT_ST_ADISC) { FC_RPORT_DBG(rdata, "Received a ADISC resp but in state %s\n", fc_rport_state(rdata)); if (IS_ERR(fp)) goto err; goto out; } if (IS_ERR(fp)) { fc_rport_error(rdata, fp); goto err; } /* * If address verification failed. Consider us logged out of the rport. * Since the rport is still in discovery, we want to be * logged in, so go to PLOGI state. Otherwise, go back to READY. */ op = fc_frame_payload_op(fp); adisc = fc_frame_payload_get(fp, sizeof(*adisc)); if (op != ELS_LS_ACC || !adisc || ntoh24(adisc->adisc_port_id) != rdata->ids.port_id || get_unaligned_be64(&adisc->adisc_wwpn) != rdata->ids.port_name || get_unaligned_be64(&adisc->adisc_wwnn) != rdata->ids.node_name) { FC_RPORT_DBG(rdata, "ADISC error or mismatch\n"); fc_rport_enter_flogi(rdata); } else { FC_RPORT_DBG(rdata, "ADISC OK\n"); fc_rport_enter_ready(rdata); } out: fc_frame_free(fp); err: mutex_unlock(&rdata->rp_mutex); kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); } /** * fc_rport_enter_adisc() - Send Address Discover (ADISC) request * @rdata: The remote port to send the ADISC request to * * Locking Note: The rport lock is expected to be held before calling * this routine. 
*/ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata) { struct fc_lport *lport = rdata->local_port; struct fc_frame *fp; FC_RPORT_DBG(rdata, "sending ADISC from %s state\n", fc_rport_state(rdata)); fc_rport_state_enter(rdata, RPORT_ST_ADISC); fp = fc_frame_alloc(lport, sizeof(struct fc_els_adisc)); if (!fp) { fc_rport_error_retry(rdata, fp); return; } if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC, fc_rport_adisc_resp, rdata, 2 * lport->r_a_tov)) fc_rport_error_retry(rdata, NULL); else kref_get(&rdata->kref); } /** * fc_rport_recv_adisc_req() - Handler for Address Discovery (ADISC) requests * @rdata: The remote port that sent the ADISC request * @in_fp: The ADISC request frame * * Locking Note: Called with the lport and rport locks held. */ static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata, struct fc_frame *in_fp) { struct fc_lport *lport = rdata->local_port; struct fc_frame *fp; struct fc_els_adisc *adisc; struct fc_seq_els_data rjt_data; FC_RPORT_DBG(rdata, "Received ADISC request\n"); adisc = fc_frame_payload_get(in_fp, sizeof(*adisc)); if (!adisc) { rjt_data.reason = ELS_RJT_PROT; rjt_data.explan = ELS_EXPL_INV_LEN; lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data); goto drop; } fp = fc_frame_alloc(lport, sizeof(*adisc)); if (!fp) goto drop; fc_adisc_fill(lport, fp); adisc = fc_frame_payload_get(fp, sizeof(*adisc)); adisc->adisc_cmd = ELS_LS_ACC; fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0); lport->tt.frame_send(lport, fp); drop: fc_frame_free(in_fp); } /** * fc_rport_recv_rls_req() - Handle received Read Link Status request * @rdata: The remote port that sent the RLS request * @rx_fp: The PRLI request frame * * Locking Note: The rport lock is expected to be held before calling * this function. */ static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata, struct fc_frame *rx_fp) { struct fc_lport *lport = rdata->local_port; struct fc_frame *fp; struct fc_els_rls *rls; struct fc_els_rls_resp *rsp; struct fc_els_lesb *lesb; struct fc_seq_els_data rjt_data; struct fc_host_statistics *hst; FC_RPORT_DBG(rdata, "Received RLS request while in state %s\n", fc_rport_state(rdata)); rls = fc_frame_payload_get(rx_fp, sizeof(*rls)); if (!rls) { rjt_data.reason = ELS_RJT_PROT; rjt_data.explan = ELS_EXPL_INV_LEN; goto out_rjt; } fp = fc_frame_alloc(lport, sizeof(*rsp)); if (!fp) { rjt_data.reason = ELS_RJT_UNAB; rjt_data.explan = ELS_EXPL_INSUF_RES; goto out_rjt; } rsp = fc_frame_payload_get(fp, sizeof(*rsp)); memset(rsp, 0, sizeof(*rsp)); rsp->rls_cmd = ELS_LS_ACC; lesb = &rsp->rls_lesb; if (lport->tt.get_lesb) { /* get LESB from LLD if it supports it */ lport->tt.get_lesb(lport, lesb); } else { fc_get_host_stats(lport->host); hst = &lport->host_stats; lesb->lesb_link_fail = htonl(hst->link_failure_count); lesb->lesb_sync_loss = htonl(hst->loss_of_sync_count); lesb->lesb_sig_loss = htonl(hst->loss_of_signal_count); lesb->lesb_prim_err = htonl(hst->prim_seq_protocol_err_count); lesb->lesb_inv_word = htonl(hst->invalid_tx_word_count); lesb->lesb_inv_crc = htonl(hst->invalid_crc_count); } fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); lport->tt.frame_send(lport, fp); goto out; out_rjt: lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data); out: fc_frame_free(rx_fp); } /** * fc_rport_recv_els_req() - Handler for validated ELS requests * @lport: The local port that received the ELS request * @fp: The ELS request frame * * Handle incoming ELS requests that require port login. * The ELS opcode has already been validated by the caller. 
* * Locking Note: Called with the lport lock held. */ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp) { struct fc_rport_priv *rdata; struct fc_seq_els_data els_data; mutex_lock(&lport->disc.disc_mutex); rdata = lport->tt.rport_lookup(lport, fc_frame_sid(fp)); if (!rdata) { mutex_unlock(&lport->disc.disc_mutex); goto reject; } mutex_lock(&rdata->rp_mutex); mutex_unlock(&lport->disc.disc_mutex); switch (rdata->rp_state) { case RPORT_ST_PRLI: case RPORT_ST_RTV: case RPORT_ST_READY: case RPORT_ST_ADISC: break; default: mutex_unlock(&rdata->rp_mutex); goto reject; } switch (fc_frame_payload_op(fp)) { case ELS_PRLI: fc_rport_recv_prli_req(rdata, fp); break; case ELS_PRLO: fc_rport_recv_prlo_req(rdata, fp); break; case ELS_ADISC: fc_rport_recv_adisc_req(rdata, fp); break; case ELS_RRQ: lport->tt.seq_els_rsp_send(fp, ELS_RRQ, NULL); fc_frame_free(fp); break; case ELS_REC: lport->tt.seq_els_rsp_send(fp, ELS_REC, NULL); fc_frame_free(fp); break; case ELS_RLS: fc_rport_recv_rls_req(rdata, fp); break; default: fc_frame_free(fp); /* can't happen */ break; } mutex_unlock(&rdata->rp_mutex); return; reject: els_data.reason = ELS_RJT_UNAB; els_data.explan = ELS_EXPL_PLOGI_REQD; lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &els_data); fc_frame_free(fp); } /** * fc_rport_recv_req() - Handler for requests * @lport: The local port that received the request * @fp: The request frame * * Locking Note: Called with the lport lock held. */ static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp) { struct fc_seq_els_data els_data; /* * Handle FLOGI, PLOGI and LOGO requests separately, since they * don't require prior login. * Check for unsupported opcodes first and reject them. * For some ops, it would be incorrect to reject with "PLOGI required". */ switch (fc_frame_payload_op(fp)) { case ELS_FLOGI: fc_rport_recv_flogi_req(lport, fp); break; case ELS_PLOGI: fc_rport_recv_plogi_req(lport, fp); break; case ELS_LOGO: fc_rport_recv_logo_req(lport, fp); break; case ELS_PRLI: case ELS_PRLO: case ELS_ADISC: case ELS_RRQ: case ELS_REC: case ELS_RLS: fc_rport_recv_els_req(lport, fp); break; default: els_data.reason = ELS_RJT_UNSUP; els_data.explan = ELS_EXPL_NONE; lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &els_data); fc_frame_free(fp); break; } } /** * fc_rport_recv_plogi_req() - Handler for Port Login (PLOGI) requests * @lport: The local port that received the PLOGI request * @rx_fp: The PLOGI request frame * * Locking Note: The rport lock is held before calling this function. 
*/ static void fc_rport_recv_plogi_req(struct fc_lport *lport, struct fc_frame *rx_fp) { struct fc_disc *disc; struct fc_rport_priv *rdata; struct fc_frame *fp = rx_fp; struct fc_els_flogi *pl; struct fc_seq_els_data rjt_data; u32 sid; sid = fc_frame_sid(fp); FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n"); pl = fc_frame_payload_get(fp, sizeof(*pl)); if (!pl) { FC_RPORT_ID_DBG(lport, sid, "Received PLOGI too short\n"); rjt_data.reason = ELS_RJT_PROT; rjt_data.explan = ELS_EXPL_INV_LEN; goto reject; } disc = &lport->disc; mutex_lock(&disc->disc_mutex); rdata = lport->tt.rport_create(lport, sid); if (!rdata) { mutex_unlock(&disc->disc_mutex); rjt_data.reason = ELS_RJT_UNAB; rjt_data.explan = ELS_EXPL_INSUF_RES; goto reject; } mutex_lock(&rdata->rp_mutex); mutex_unlock(&disc->disc_mutex); rdata->ids.port_name = get_unaligned_be64(&pl->fl_wwpn); rdata->ids.node_name = get_unaligned_be64(&pl->fl_wwnn); /* * If the rport was just created, possibly due to the incoming PLOGI, * set the state appropriately and accept the PLOGI. * * If we had also sent a PLOGI, and if the received PLOGI is from a * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason * "command already in progress". * * XXX TBD: If the session was ready before, the PLOGI should result in * all outstanding exchanges being reset. */ switch (rdata->rp_state) { case RPORT_ST_INIT: FC_RPORT_DBG(rdata, "Received PLOGI in INIT state\n"); break; case RPORT_ST_PLOGI_WAIT: FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI_WAIT state\n"); break; case RPORT_ST_PLOGI: FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state\n"); if (rdata->ids.port_name < lport->wwpn) { mutex_unlock(&rdata->rp_mutex); rjt_data.reason = ELS_RJT_INPROG; rjt_data.explan = ELS_EXPL_NONE; goto reject; } break; case RPORT_ST_PRLI: case RPORT_ST_RTV: case RPORT_ST_READY: case RPORT_ST_ADISC: FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d " "- ignored for now\n", rdata->rp_state); /* XXX TBD - should reset */ break; case RPORT_ST_FLOGI: case RPORT_ST_DELETE: FC_RPORT_DBG(rdata, "Received PLOGI in state %s - send busy\n", fc_rport_state(rdata)); mutex_unlock(&rdata->rp_mutex); rjt_data.reason = ELS_RJT_BUSY; rjt_data.explan = ELS_EXPL_NONE; goto reject; } /* * Get session payload size from incoming PLOGI. */ rdata->maxframe_size = fc_plogi_get_maxframe(pl, lport->mfs); /* * Send LS_ACC. If this fails, the originator should retry. */ fp = fc_frame_alloc(lport, sizeof(*pl)); if (!fp) goto out; fc_plogi_fill(lport, fp, ELS_LS_ACC); fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); lport->tt.frame_send(lport, fp); fc_rport_enter_prli(rdata); out: mutex_unlock(&rdata->rp_mutex); fc_frame_free(rx_fp); return; reject: lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data); fc_frame_free(fp); } /** * fc_rport_recv_prli_req() - Handler for process login (PRLI) requests * @rdata: The remote port that sent the PRLI request * @rx_fp: The PRLI request frame * * Locking Note: The rport lock is exected to be held before calling * this function. 
*/ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, struct fc_frame *rx_fp) { struct fc_lport *lport = rdata->local_port; struct fc_frame *fp; struct { struct fc_els_prli prli; struct fc_els_spp spp; } *pp; struct fc_els_spp *rspp; /* request service param page */ struct fc_els_spp *spp; /* response spp */ unsigned int len; unsigned int plen; enum fc_els_spp_resp resp; enum fc_els_spp_resp passive; struct fc_seq_els_data rjt_data; struct fc4_prov *prov; FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n", fc_rport_state(rdata)); len = fr_len(rx_fp) - sizeof(struct fc_frame_header); pp = fc_frame_payload_get(rx_fp, sizeof(*pp)); if (!pp) goto reject_len; plen = ntohs(pp->prli.prli_len); if ((plen % 4) != 0 || plen > len || plen < 16) goto reject_len; if (plen < len) len = plen; plen = pp->prli.prli_spp_len; if ((plen % 4) != 0 || plen < sizeof(*spp) || plen > len || len < sizeof(*pp) || plen < 12) goto reject_len; rspp = &pp->spp; fp = fc_frame_alloc(lport, len); if (!fp) { rjt_data.reason = ELS_RJT_UNAB; rjt_data.explan = ELS_EXPL_INSUF_RES; goto reject; } pp = fc_frame_payload_get(fp, len); WARN_ON(!pp); memset(pp, 0, len); pp->prli.prli_cmd = ELS_LS_ACC; pp->prli.prli_spp_len = plen; pp->prli.prli_len = htons(len); len -= sizeof(struct fc_els_prli); /* * Go through all the service parameter pages and build * response. If plen indicates longer SPP than standard, * use that. The entire response has been pre-cleared above. */ spp = &pp->spp; mutex_lock(&fc_prov_mutex); while (len >= plen) { rdata->spp_type = rspp->spp_type; spp->spp_type = rspp->spp_type; spp->spp_type_ext = rspp->spp_type_ext; resp = 0; if (rspp->spp_type < FC_FC4_PROV_SIZE) { prov = fc_active_prov[rspp->spp_type]; if (prov) resp = prov->prli(rdata, plen, rspp, spp); prov = fc_passive_prov[rspp->spp_type]; if (prov) { passive = prov->prli(rdata, plen, rspp, spp); if (!resp || passive == FC_SPP_RESP_ACK) resp = passive; } } if (!resp) { if (spp->spp_flags & FC_SPP_EST_IMG_PAIR) resp |= FC_SPP_RESP_CONF; else resp |= FC_SPP_RESP_INVL; } spp->spp_flags |= resp; len -= plen; rspp = (struct fc_els_spp *)((char *)rspp + plen); spp = (struct fc_els_spp *)((char *)spp + plen); } mutex_unlock(&fc_prov_mutex); /* * Send LS_ACC. If this fails, the originator should retry. */ fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); lport->tt.frame_send(lport, fp); switch (rdata->rp_state) { case RPORT_ST_PRLI: fc_rport_enter_ready(rdata); break; default: break; } goto drop; reject_len: rjt_data.reason = ELS_RJT_PROT; rjt_data.explan = ELS_EXPL_INV_LEN; reject: lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data); drop: fc_frame_free(rx_fp); } /** * fc_rport_recv_prlo_req() - Handler for process logout (PRLO) requests * @rdata: The remote port that sent the PRLO request * @rx_fp: The PRLO request frame * * Locking Note: The rport lock is exected to be held before calling * this function. 
*/ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, struct fc_frame *rx_fp) { struct fc_lport *lport = rdata->local_port; struct fc_frame *fp; struct { struct fc_els_prlo prlo; struct fc_els_spp spp; } *pp; struct fc_els_spp *rspp; /* request service param page */ struct fc_els_spp *spp; /* response spp */ unsigned int len; unsigned int plen; struct fc_seq_els_data rjt_data; FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n", fc_rport_state(rdata)); len = fr_len(rx_fp) - sizeof(struct fc_frame_header); pp = fc_frame_payload_get(rx_fp, sizeof(*pp)); if (!pp) goto reject_len; plen = ntohs(pp->prlo.prlo_len); if (plen != 20) goto reject_len; if (plen < len) len = plen; rspp = &pp->spp; fp = fc_frame_alloc(lport, len); if (!fp) { rjt_data.reason = ELS_RJT_UNAB; rjt_data.explan = ELS_EXPL_INSUF_RES; goto reject; } pp = fc_frame_payload_get(fp, len); WARN_ON(!pp); memset(pp, 0, len); pp->prlo.prlo_cmd = ELS_LS_ACC; pp->prlo.prlo_obs = 0x10; pp->prlo.prlo_len = htons(len); spp = &pp->spp; spp->spp_type = rspp->spp_type; spp->spp_type_ext = rspp->spp_type_ext; spp->spp_flags = FC_SPP_RESP_ACK; fc_rport_enter_delete(rdata, RPORT_EV_LOGO); fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); lport->tt.frame_send(lport, fp); goto drop; reject_len: rjt_data.reason = ELS_RJT_PROT; rjt_data.explan = ELS_EXPL_INV_LEN; reject: lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data); drop: fc_frame_free(rx_fp); } /** * fc_rport_recv_logo_req() - Handler for logout (LOGO) requests * @lport: The local port that received the LOGO request * @fp: The LOGO request frame * * Locking Note: The rport lock is exected to be held before calling * this function. */ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) { struct fc_rport_priv *rdata; u32 sid; lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL); sid = fc_frame_sid(fp); mutex_lock(&lport->disc.disc_mutex); rdata = lport->tt.rport_lookup(lport, sid); if (rdata) { mutex_lock(&rdata->rp_mutex); FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n", fc_rport_state(rdata)); fc_rport_enter_delete(rdata, RPORT_EV_LOGO); mutex_unlock(&rdata->rp_mutex); } else FC_RPORT_ID_DBG(lport, sid, "Received LOGO from non-logged-in port\n"); mutex_unlock(&lport->disc.disc_mutex); fc_frame_free(fp); } /** * fc_rport_flush_queue() - Flush the rport_event_queue */ static void fc_rport_flush_queue(void) { flush_workqueue(rport_event_queue); } /** * fc_rport_init() - Initialize the remote port layer for a local port * @lport: The local port to initialize the remote port layer for */ int fc_rport_init(struct fc_lport *lport) { if (!lport->tt.rport_lookup) lport->tt.rport_lookup = fc_rport_lookup; if (!lport->tt.rport_create) lport->tt.rport_create = fc_rport_create; if (!lport->tt.rport_login) lport->tt.rport_login = fc_rport_login; if (!lport->tt.rport_logoff) lport->tt.rport_logoff = fc_rport_logoff; if (!lport->tt.rport_recv_req) lport->tt.rport_recv_req = fc_rport_recv_req; if (!lport->tt.rport_flush_queue) lport->tt.rport_flush_queue = fc_rport_flush_queue; if (!lport->tt.rport_destroy) lport->tt.rport_destroy = fc_rport_destroy; return 0; } EXPORT_SYMBOL(fc_rport_init); /** * fc_rport_fcp_prli() - Handle incoming PRLI for the FCP initiator. * @rdata: remote port private * @spp_len: service parameter page length * @rspp: received service parameter page * @spp: response service parameter page * * Returns the value for the response code to be placed in spp_flags; * Returns 0 if not an initiator. 
*/ static int fc_rport_fcp_prli(struct fc_rport_priv *rdata, u32 spp_len, const struct fc_els_spp *rspp, struct fc_els_spp *spp) { struct fc_lport *lport = rdata->local_port; u32 fcp_parm; fcp_parm = ntohl(rspp->spp_params); rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN; if (fcp_parm & FCP_SPPF_INIT_FCN) rdata->ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR; if (fcp_parm & FCP_SPPF_TARG_FCN) rdata->ids.roles |= FC_RPORT_ROLE_FCP_TARGET; if (fcp_parm & FCP_SPPF_RETRY) rdata->flags |= FC_RP_FLAGS_RETRY; rdata->supported_classes = FC_COS_CLASS3; if (!(lport->service_params & FCP_SPPF_INIT_FCN)) return 0; spp->spp_flags |= rspp->spp_flags & FC_SPP_EST_IMG_PAIR; /* * OR in our service parameters with other providers (target), if any. */ fcp_parm = ntohl(spp->spp_params); spp->spp_params = htonl(fcp_parm | lport->service_params); return FC_SPP_RESP_ACK; } /* * FC-4 provider ops for FCP initiator. */ struct fc4_prov fc_rport_fcp_init = { .prli = fc_rport_fcp_prli, }; /** * fc_rport_t0_prli() - Handle incoming PRLI parameters for type 0 * @rdata: remote port private * @spp_len: service parameter page length * @rspp: received service parameter page * @spp: response service parameter page */ static int fc_rport_t0_prli(struct fc_rport_priv *rdata, u32 spp_len, const struct fc_els_spp *rspp, struct fc_els_spp *spp) { if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR) return FC_SPP_RESP_INVL; return FC_SPP_RESP_ACK; } /* * FC-4 provider ops for type 0 service parameters. * * This handles the special case of type 0 which is always successful * but doesn't do anything otherwise. */ struct fc4_prov fc_rport_t0_prov = { .prli = fc_rport_t0_prli, }; /** * fc_setup_rport() - Initialize the rport_event_queue */ int fc_setup_rport(void) { rport_event_queue = create_singlethread_workqueue("fc_rport_eq"); if (!rport_event_queue) return -ENOMEM; return 0; } /** * fc_destroy_rport() - Destroy the rport_event_queue */ void fc_destroy_rport(void) { destroy_workqueue(rport_event_queue); } /** * fc_rport_terminate_io() - Stop all outstanding I/O on a remote port * @rport: The remote port whose I/O should be terminated */ void fc_rport_terminate_io(struct fc_rport *rport) { struct fc_rport_libfc_priv *rpriv = rport->dd_data; struct fc_lport *lport = rpriv->local_port; lport->tt.exch_mgr_reset(lport, 0, rport->port_id); lport->tt.exch_mgr_reset(lport, rport->port_id, 0); } EXPORT_SYMBOL(fc_rport_terminate_io);
pavian/LITMUS_RT_WITH_HMP
drivers/scsi/libfc/fc_rport.c
C
gpl-2.0
55,276
/* * HID driver for some microsoft "special" devices * * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina * Copyright (c) 2008 Jiri Slaby */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #include <linux/device.h> #include <linux/input.h> #include <linux/hid.h> #include <linux/module.h> #include "hid-ids.h" #define MS_HIDINPUT 0x01 #define MS_ERGONOMY 0x02 #define MS_PRESENTER 0x04 #define MS_RDESC 0x08 #define MS_NOGET 0x10 #define MS_DUPLICATE_USAGES 0x20 #define MS_RDESC_3K 0x40 static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); /* * Microsoft Wireless Desktop Receiver (Model 1028) has * 'Usage Min/Max' where it ought to have 'Physical Min/Max' */ if ((quirks & MS_RDESC) && *rsize == 571 && rdesc[557] == 0x19 && rdesc[559] == 0x29) { hid_info(hdev, "fixing up Microsoft Wireless Receiver Model 1028 report descriptor\n"); rdesc[557] = 0x35; rdesc[559] = 0x45; } /* the same as above (s/usage/physical/) */ if ((quirks & MS_RDESC_3K) && *rsize == 106 && rdesc[94] == 0x19 && rdesc[95] == 0x00 && rdesc[96] == 0x29 && rdesc[97] == 0xff) { rdesc[94] = 0x35; rdesc[96] = 0x45; } return rdesc; } #define ms_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ EV_KEY, (c)) static int ms_ergonomy_kb_quirk(struct hid_input *hi, struct hid_usage *usage, unsigned long **bit, int *max) { struct input_dev *input = hi->input; switch (usage->hid & HID_USAGE) { case 0xfd06: ms_map_key_clear(KEY_CHAT); break; case 0xfd07: ms_map_key_clear(KEY_PHONE); break; case 0xff05: set_bit(EV_REP, input->evbit); ms_map_key_clear(KEY_F13); set_bit(KEY_F14, input->keybit); set_bit(KEY_F15, input->keybit); set_bit(KEY_F16, input->keybit); set_bit(KEY_F17, input->keybit); set_bit(KEY_F18, input->keybit); default: return 0; } return 1; } static int ms_presenter_8k_quirk(struct hid_input *hi, struct hid_usage *usage, unsigned long **bit, int *max) { set_bit(EV_REP, hi->input->evbit); switch (usage->hid & HID_USAGE) { case 0xfd08: ms_map_key_clear(KEY_FORWARD); break; case 0xfd09: ms_map_key_clear(KEY_BACK); break; case 0xfd0b: ms_map_key_clear(KEY_PLAYPAUSE); break; case 0xfd0e: ms_map_key_clear(KEY_CLOSE); break; case 0xfd0f: ms_map_key_clear(KEY_PLAY); break; default: return 0; } return 1; } static int ms_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); if ((usage->hid & HID_USAGE_PAGE) != HID_UP_MSVENDOR) return 0; if (quirks & MS_ERGONOMY) { int ret = ms_ergonomy_kb_quirk(hi, usage, bit, max); if (ret) return ret; } if ((quirks & MS_PRESENTER) && ms_presenter_8k_quirk(hi, usage, bit, max)) return 1; return 0; } static int ms_input_mapped(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); if (quirks & MS_DUPLICATE_USAGES) clear_bit(usage->code, *bit); return 0; } static int ms_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { 
unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput || !usage->type) return 0; /* Handling MS keyboards special buttons */ if (quirks & MS_ERGONOMY && usage->hid == (HID_UP_MSVENDOR | 0xff05)) { struct input_dev *input = field->hidinput->input; static unsigned int last_key = 0; unsigned int key = 0; switch (value) { case 0x01: key = KEY_F14; break; case 0x02: key = KEY_F15; break; case 0x04: key = KEY_F16; break; case 0x08: key = KEY_F17; break; case 0x10: key = KEY_F18; break; } if (key) { input_event(input, usage->type, key, 1); last_key = key; } else input_event(input, usage->type, last_key, 0); return 1; } return 0; } static int ms_probe(struct hid_device *hdev, const struct hid_device_id *id) { unsigned long quirks = id->driver_data; int ret; hid_set_drvdata(hdev, (void *)quirks); if (quirks & MS_NOGET) hdev->quirks |= HID_QUIRK_NOGET; ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); goto err_free; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT | ((quirks & MS_HIDINPUT) ? HID_CONNECT_HIDINPUT_FORCE : 0)); if (ret) { hid_err(hdev, "hw start failed\n"); goto err_free; } return 0; err_free: return ret; } static const struct hid_device_id ms_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV), .driver_data = MS_HIDINPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K), .driver_data = MS_ERGONOMY }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP), .driver_data = MS_ERGONOMY }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K), .driver_data = MS_ERGONOMY | MS_RDESC }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB), .driver_data = MS_PRESENTER }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K), .driver_data = MS_ERGONOMY | MS_RDESC_3K }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0), .driver_data = MS_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500), .driver_data = MS_DUPLICATE_USAGES }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT), .driver_data = MS_PRESENTER }, { } }; MODULE_DEVICE_TABLE(hid, ms_devices); static struct hid_driver ms_driver = { .name = "microsoft", .id_table = ms_devices, .report_fixup = ms_report_fixup, .input_mapping = ms_input_mapping, .input_mapped = ms_input_mapped, .event = ms_event, .probe = ms_probe, }; module_hid_driver(ms_driver); MODULE_LICENSE("GPL");
voodik/android_kernel_hardkernel_odroidxu3
drivers/hid/hid-microsoft.c
C
gpl-2.0
6,400
/* * Copyright 2010 Analog Devices Inc. * Copyright (C) 2008 Jonathan Cameron * * Licensed under the GPL-2 or later. * * ad7476_ring.c */ #include <linux/interrupt.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/spi/spi.h> #include "../iio.h" #include "../ring_generic.h" #include "../ring_sw.h" #include "../trigger.h" #include "../sysfs.h" #include "ad7476.h" int ad7476_scan_from_ring(struct ad7476_state *st) { struct iio_ring_buffer *ring = st->indio_dev->ring; int ret; u8 *ring_data; ring_data = kmalloc(ring->access->get_bytes_per_datum(ring), GFP_KERNEL); if (ring_data == NULL) { ret = -ENOMEM; goto error_ret; } ret = ring->access->read_last(ring, ring_data); if (ret) goto error_free_ring_data; ret = (ring_data[0] << 8) | ring_data[1]; error_free_ring_data: kfree(ring_data); error_ret: return ret; } /** * ad7476_ring_preenable() - setup the parameters of the ring before enabling * * The complex nature of the setting of the number of bytes per datum is due * to this driver currently ensuring that the timestamp is stored at an 8 * byte boundary. **/ static int ad7476_ring_preenable(struct iio_dev *indio_dev) { struct ad7476_state *st = indio_dev->dev_data; struct iio_ring_buffer *ring = indio_dev->ring; st->d_size = ring->scan_count * st->chip_info->channel[0].scan_type.storagebits / 8; if (ring->scan_timestamp) { st->d_size += sizeof(s64); if (st->d_size % sizeof(s64)) st->d_size += sizeof(s64) - (st->d_size % sizeof(s64)); } if (indio_dev->ring->access->set_bytes_per_datum) indio_dev->ring->access->set_bytes_per_datum(indio_dev->ring, st->d_size); return 0; } static irqreturn_t ad7476_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->private_data; struct ad7476_state *st = iio_dev_get_devdata(indio_dev); s64 time_ns; __u8 *rxbuf; int b_sent; rxbuf = kzalloc(st->d_size, GFP_KERNEL); if (rxbuf == NULL) return -ENOMEM; b_sent = spi_read(st->spi, rxbuf, st->chip_info->channel[0].scan_type.storagebits / 8); if (b_sent < 0) goto done; time_ns = iio_get_time_ns(); if (indio_dev->ring->scan_timestamp) memcpy(rxbuf + st->d_size - sizeof(s64), &time_ns, sizeof(time_ns)); indio_dev->ring->access->store_to(indio_dev->ring, rxbuf, time_ns); done: iio_trigger_notify_done(indio_dev->trig); kfree(rxbuf); return IRQ_HANDLED; } static const struct iio_ring_setup_ops ad7476_ring_setup_ops = { .preenable = &ad7476_ring_preenable, .postenable = &iio_triggered_ring_postenable, .predisable = &iio_triggered_ring_predisable, }; int ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev) { struct ad7476_state *st = indio_dev->dev_data; int ret = 0; indio_dev->ring = iio_sw_rb_allocate(indio_dev); if (!indio_dev->ring) { ret = -ENOMEM; goto error_ret; } /* Effectively select the ring buffer implementation */ indio_dev->ring->access = &ring_sw_access_funcs; indio_dev->pollfunc = iio_alloc_pollfunc(NULL, &ad7476_trigger_handler, IRQF_ONESHOT, indio_dev, "%s_consumer%d", spi_get_device_id(st->spi)->name, indio_dev->id); if (indio_dev->pollfunc == NULL) { ret = -ENOMEM; goto error_deallocate_sw_rb; } /* Ring buffer functions - here trigger setup related */ indio_dev->ring->setup_ops = &ad7476_ring_setup_ops; indio_dev->ring->scan_timestamp = true; /* Flag that polled ring buffering is possible */ indio_dev->modes |= INDIO_RING_TRIGGERED; return 0; error_deallocate_sw_rb: iio_sw_rb_free(indio_dev->ring); error_ret: return ret; } void ad7476_ring_cleanup(struct iio_dev *indio_dev) { /* ensure that
the trigger has been detached */ if (indio_dev->trig) { iio_put_trigger(indio_dev->trig); iio_trigger_dettach_poll_func(indio_dev->trig, indio_dev->pollfunc); } iio_dealloc_pollfunc(indio_dev->pollfunc); iio_sw_rb_free(indio_dev->ring); }
akshay4/android_old_kernel_htc_pico
drivers/staging/iio/adc/ad7476_ring.c
C
gpl-2.0
4,023
/* * security/tomoyo/group.c * * Copyright (C) 2005-2010 NTT DATA CORPORATION */ #include <linux/slab.h> #include "common.h" static bool tomoyo_same_path_group(const struct tomoyo_acl_head *a, const struct tomoyo_acl_head *b) { return container_of(a, struct tomoyo_path_group, head)->member_name == container_of(b, struct tomoyo_path_group, head)->member_name; } static bool tomoyo_same_number_group(const struct tomoyo_acl_head *a, const struct tomoyo_acl_head *b) { return !memcmp(&container_of(a, struct tomoyo_number_group, head) ->number, &container_of(b, struct tomoyo_number_group, head) ->number, sizeof(container_of(a, struct tomoyo_number_group, head) ->number)); } /** * tomoyo_write_group - Write "struct tomoyo_path_group"/"struct tomoyo_number_group" list. * * @data: String to parse. * @is_delete: True if it is a delete request. * @type: Type of this group. * * Returns 0 on success, negative value otherwise. */ int tomoyo_write_group(char *data, const bool is_delete, const u8 type) { struct tomoyo_group *group; struct list_head *member; char *w[2]; int error = -EINVAL; if (!tomoyo_tokenize(data, w, sizeof(w)) || !w[1][0]) return -EINVAL; group = tomoyo_get_group(w[0], type); if (!group) return -ENOMEM; member = &group->member_list; if (type == TOMOYO_PATH_GROUP) { struct tomoyo_path_group e = { }; e.member_name = tomoyo_get_name(w[1]); if (!e.member_name) { error = -ENOMEM; goto out; } error = tomoyo_update_policy(&e.head, sizeof(e), is_delete, member, tomoyo_same_path_group); tomoyo_put_name(e.member_name); } else if (type == TOMOYO_NUMBER_GROUP) { struct tomoyo_number_group e = { }; if (w[1][0] == '@' || !tomoyo_parse_number_union(w[1], &e.number) || e.number.values[0] > e.number.values[1]) goto out; error = tomoyo_update_policy(&e.head, sizeof(e), is_delete, member, tomoyo_same_number_group); /* * tomoyo_put_number_union() is not needed because * w[1][0] != '@'. */ } out: tomoyo_put_group(group); return error; } /** * tomoyo_path_matches_group - Check whether the given pathname matches members of the given pathname group. * * @pathname: The name of pathname. * @group: Pointer to "struct tomoyo_path_group". * * Returns matched member's pathname if @pathname matches pathnames in @group, * NULL otherwise. * * Caller holds tomoyo_read_lock(). */ const struct tomoyo_path_info * tomoyo_path_matches_group(const struct tomoyo_path_info *pathname, const struct tomoyo_group *group) { struct tomoyo_path_group *member; list_for_each_entry_rcu(member, &group->member_list, head.list) { if (member->head.is_deleted) continue; if (!tomoyo_path_matches_pattern(pathname, member->member_name)) continue; return member->member_name; } return NULL; } /** * tomoyo_number_matches_group - Check whether the given number matches members of the given number group. * * @min: Min number. * @max: Max number. * @group: Pointer to "struct tomoyo_number_group". * * Returns true if @min and @max partially overlaps @group, false otherwise. * * Caller holds tomoyo_read_lock(). */ bool tomoyo_number_matches_group(const unsigned long min, const unsigned long max, const struct tomoyo_group *group) { struct tomoyo_number_group *member; bool matched = false; list_for_each_entry_rcu(member, &group->member_list, head.list) { if (member->head.is_deleted) continue; if (min > member->number.values[1] || max < member->number.values[0]) continue; matched = true; break; } return matched; }
1N4148/kernel_golden
security/tomoyo/group.c
C
gpl-2.0
3,682
/* * Copyright (C) 1997 Wu Ching Chen * 2.1.x update (C) 1998 Krzysztof G. Baranowski * 2.5.x update (C) 2002 Red Hat * 2.6.x update (C) 2004 Red Hat * * Marcelo Tosatti <marcelo@conectiva.com.br> : SMP fixes * * Wu Ching Chen : NULL pointer fixes 2000/06/02 * support atp876 chip * enable 32 bit fifo transfer * support cdrom & remove device run ultra speed * fix disconnect bug 2000/12/21 * support atp880 chip lvd u160 2001/05/15 * fix prd table bug 2001/09/12 (7.1) * * atp885 support add by ACARD Hao Ping Lian 2005/01/05 */ #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/proc_fs.h> #include <linux/spinlock.h> #include <linux/pci.h> #include <linux/blkdev.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <asm/io.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include "atp870u.h" static struct scsi_host_template atp870u_template; static void send_s870(struct atp_unit *dev,unsigned char c); static void is885(struct atp_unit *dev, unsigned int wkport,unsigned char c); static void tscam_885(void); static irqreturn_t atp870u_intr_handle(int irq, void *dev_id) { unsigned long flags; unsigned short int tmpcip, id; unsigned char i, j, c, target_id, lun,cmdp; unsigned char *prd; struct scsi_cmnd *workreq; unsigned int workport, tmport, tmport1; unsigned long adrcnt, k; #ifdef ED_DBGP unsigned long l; #endif int errstus; struct Scsi_Host *host = dev_id; struct atp_unit *dev = (struct atp_unit *)&host->hostdata; for (c = 0; c < 2; c++) { tmport = dev->ioport[c] + 0x1f; j = inb(tmport); if ((j & 0x80) != 0) { goto ch_sel; } dev->in_int[c] = 0; } return IRQ_NONE; ch_sel: #ifdef ED_DBGP printk("atp870u_intr_handle enter\n"); #endif dev->in_int[c] = 1; cmdp = inb(dev->ioport[c] + 0x10); workport = dev->ioport[c]; if (dev->working[c] != 0) { if (dev->dev_id == ATP885_DEVID) { tmport1 = workport + 0x16; if ((inb(tmport1) & 0x80) == 0) outb((inb(tmport1) | 0x80), tmport1); } tmpcip = dev->pciport[c]; if ((inb(tmpcip) & 0x08) != 0) { tmpcip += 0x2; for (k=0; k < 1000; k++) { if ((inb(tmpcip) & 0x08) == 0) { goto stop_dma; } if ((inb(tmpcip) & 0x01) == 0) { goto stop_dma; } } } stop_dma: tmpcip = dev->pciport[c]; outb(0x00, tmpcip); tmport -= 0x08; i = inb(tmport); if (dev->dev_id == ATP885_DEVID) { tmpcip += 2; outb(0x06, tmpcip); tmpcip -= 2; } tmport -= 0x02; target_id = inb(tmport); tmport += 0x02; /* * Remap wide devices onto id numbers */ if ((target_id & 0x40) != 0) { target_id = (target_id & 0x07) | 0x08; } else { target_id &= 0x07; } if ((j & 0x40) != 0) { if (dev->last_cmd[c] == 0xff) { dev->last_cmd[c] = target_id; } dev->last_cmd[c] |= 0x40; } if (dev->dev_id == ATP885_DEVID) dev->r1f[c][target_id] |= j; #ifdef ED_DBGP printk("atp870u_intr_handle status = %x\n",i); #endif if (i == 0x85) { if ((dev->last_cmd[c] & 0xf0) != 0x40) { dev->last_cmd[c] = 0xff; } if (dev->dev_id == ATP885_DEVID) { tmport -= 0x05; adrcnt = 0; ((unsigned char *) &adrcnt)[2] = inb(tmport++); ((unsigned char *) &adrcnt)[1] = inb(tmport++); ((unsigned char *) &adrcnt)[0] = inb(tmport); if (dev->id[c][target_id].last_len != adrcnt) { k = dev->id[c][target_id].last_len; k -= adrcnt; dev->id[c][target_id].tran_len = k; dev->id[c][target_id].last_len = adrcnt; } #ifdef ED_DBGP printk("tmport = %x dev->id[c][target_id].last_len = %d dev->id[c][target_id].tran_len = 
%d\n",tmport,dev->id[c][target_id].last_len,dev->id[c][target_id].tran_len); #endif } /* * Flip wide */ if (dev->wide_id[c] != 0) { tmport = workport + 0x1b; outb(0x01, tmport); while ((inb(tmport) & 0x01) != 0x01) { outb(0x01, tmport); } } /* * Issue more commands */ spin_lock_irqsave(dev->host->host_lock, flags); if (((dev->quhd[c] != dev->quend[c]) || (dev->last_cmd[c] != 0xff)) && (dev->in_snd[c] == 0)) { #ifdef ED_DBGP printk("Call sent_s870\n"); #endif send_s870(dev,c); } spin_unlock_irqrestore(dev->host->host_lock, flags); /* * Done */ dev->in_int[c] = 0; #ifdef ED_DBGP printk("Status 0x85 return\n"); #endif goto handled; } if (i == 0x40) { dev->last_cmd[c] |= 0x40; dev->in_int[c] = 0; goto handled; } if (i == 0x21) { if ((dev->last_cmd[c] & 0xf0) != 0x40) { dev->last_cmd[c] = 0xff; } tmport -= 0x05; adrcnt = 0; ((unsigned char *) &adrcnt)[2] = inb(tmport++); ((unsigned char *) &adrcnt)[1] = inb(tmport++); ((unsigned char *) &adrcnt)[0] = inb(tmport); k = dev->id[c][target_id].last_len; k -= adrcnt; dev->id[c][target_id].tran_len = k; dev->id[c][target_id].last_len = adrcnt; tmport -= 0x04; outb(0x41, tmport); tmport += 0x08; outb(0x08, tmport); dev->in_int[c] = 0; goto handled; } if (dev->dev_id == ATP885_DEVID) { if ((i == 0x4c) || (i == 0x4d) || (i == 0x8c) || (i == 0x8d)) { if ((i == 0x4c) || (i == 0x8c)) i=0x48; else i=0x49; } } if ((i == 0x80) || (i == 0x8f)) { #ifdef ED_DBGP printk(KERN_DEBUG "Device reselect\n"); #endif lun = 0; tmport -= 0x07; if (cmdp == 0x44 || i==0x80) { tmport += 0x0d; lun = inb(tmport) & 0x07; } else { if ((dev->last_cmd[c] & 0xf0) != 0x40) { dev->last_cmd[c] = 0xff; } if (cmdp == 0x41) { #ifdef ED_DBGP printk("cmdp = 0x41\n"); #endif tmport += 0x02; adrcnt = 0; ((unsigned char *) &adrcnt)[2] = inb(tmport++); ((unsigned char *) &adrcnt)[1] = inb(tmport++); ((unsigned char *) &adrcnt)[0] = inb(tmport); k = dev->id[c][target_id].last_len; k -= adrcnt; dev->id[c][target_id].tran_len = k; dev->id[c][target_id].last_len = adrcnt; tmport += 0x04; outb(0x08, tmport); dev->in_int[c] = 0; goto handled; } else { #ifdef ED_DBGP printk("cmdp != 0x41\n"); #endif outb(0x46, tmport); dev->id[c][target_id].dirct = 0x00; tmport += 0x02; outb(0x00, tmport++); outb(0x00, tmport++); outb(0x00, tmport++); tmport += 0x03; outb(0x08, tmport); dev->in_int[c] = 0; goto handled; } } if (dev->last_cmd[c] != 0xff) { dev->last_cmd[c] |= 0x40; } if (dev->dev_id == ATP885_DEVID) { j = inb(dev->baseport + 0x29) & 0xfe; outb(j, dev->baseport + 0x29); tmport = workport + 0x16; } else { tmport = workport + 0x10; outb(0x45, tmport); tmport += 0x06; } target_id = inb(tmport); /* * Remap wide identifiers */ if ((target_id & 0x10) != 0) { target_id = (target_id & 0x07) | 0x08; } else { target_id &= 0x07; } if (dev->dev_id == ATP885_DEVID) { tmport = workport + 0x10; outb(0x45, tmport); } workreq = dev->id[c][target_id].curr_req; #ifdef ED_DBGP scmd_printk(KERN_DEBUG, workreq, "CDB"); for (l = 0; l < workreq->cmd_len; l++) printk(KERN_DEBUG " %x",workreq->cmnd[l]); printk("\n"); #endif tmport = workport + 0x0f; outb(lun, tmport); tmport += 0x02; outb(dev->id[c][target_id].devsp, tmport++); adrcnt = dev->id[c][target_id].tran_len; k = dev->id[c][target_id].last_len; outb(((unsigned char *) &k)[2], tmport++); outb(((unsigned char *) &k)[1], tmport++); outb(((unsigned char *) &k)[0], tmport++); #ifdef ED_DBGP printk("k %x, k[0] 0x%x k[1] 0x%x k[2] 0x%x\n", k, inb(tmport-1), inb(tmport-2), inb(tmport-3)); #endif /* Remap wide */ j = target_id; if (target_id > 7) { j = (j & 0x07) | 0x40; } /* Add 
direction */ j |= dev->id[c][target_id].dirct; outb(j, tmport++); outb(0x80,tmport); /* enable 32 bit fifo transfer */ if (dev->dev_id == ATP885_DEVID) { tmpcip = dev->pciport[c] + 1; i=inb(tmpcip) & 0xf3; //j=workreq->cmnd[0]; if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) { i |= 0x0c; } outb(i,tmpcip); } else if ((dev->dev_id == ATP880_DEVID1) || (dev->dev_id == ATP880_DEVID2) ) { tmport = workport - 0x05; if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) { outb((unsigned char) ((inb(tmport) & 0x3f) | 0xc0), tmport); } else { outb((unsigned char) (inb(tmport) & 0x3f), tmport); } } else { tmport = workport + 0x3a; if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) { outb((unsigned char) ((inb(tmport) & 0xf3) | 0x08), tmport); } else { outb((unsigned char) (inb(tmport) & 0xf3), tmport); } } tmport = workport + 0x1b; j = 0; id = 1; id = id << target_id; /* * Is this a wide device */ if ((id & dev->wide_id[c]) != 0) { j |= 0x01; } outb(j, tmport); while ((inb(tmport) & 0x01) != j) { outb(j,tmport); } if (dev->id[c][target_id].last_len == 0) { tmport = workport + 0x18; outb(0x08, tmport); dev->in_int[c] = 0; #ifdef ED_DBGP printk("dev->id[c][target_id].last_len = 0\n"); #endif goto handled; } #ifdef ED_DBGP printk("target_id = %d adrcnt = %d\n",target_id,adrcnt); #endif prd = dev->id[c][target_id].prd_pos; while (adrcnt != 0) { id = ((unsigned short int *)prd)[2]; if (id == 0) { k = 0x10000; } else { k = id; } if (k > adrcnt) { ((unsigned short int *)prd)[2] = (unsigned short int) (k - adrcnt); ((unsigned long *)prd)[0] += adrcnt; adrcnt = 0; dev->id[c][target_id].prd_pos = prd; } else { adrcnt -= k; dev->id[c][target_id].prdaddr += 0x08; prd += 0x08; if (adrcnt == 0) { dev->id[c][target_id].prd_pos = prd; } } } tmpcip = dev->pciport[c] + 0x04; outl(dev->id[c][target_id].prdaddr, tmpcip); #ifdef ED_DBGP printk("dev->id[%d][%d].prdaddr 0x%8x\n", c, target_id, dev->id[c][target_id].prdaddr); #endif if (dev->dev_id == ATP885_DEVID) { tmpcip -= 0x04; } else { tmpcip -= 0x02; outb(0x06, tmpcip); outb(0x00, tmpcip); tmpcip -= 0x02; } tmport = workport + 0x18; /* * Check transfer direction */ if (dev->id[c][target_id].dirct != 0) { outb(0x08, tmport); outb(0x01, tmpcip); dev->in_int[c] = 0; #ifdef ED_DBGP printk("status 0x80 return dirct != 0\n"); #endif goto handled; } outb(0x08, tmport); outb(0x09, tmpcip); dev->in_int[c] = 0; #ifdef ED_DBGP printk("status 0x80 return dirct = 0\n"); #endif goto handled; } /* * Current scsi request on this target */ workreq = dev->id[c][target_id].curr_req; if (i == 0x42) { if ((dev->last_cmd[c] & 0xf0) != 0x40) { dev->last_cmd[c] = 0xff; } errstus = 0x02; workreq->result = errstus; goto go_42; } if (i == 0x16) { if ((dev->last_cmd[c] & 0xf0) != 0x40) { dev->last_cmd[c] = 0xff; } errstus = 0; tmport -= 0x08; errstus = inb(tmport); if (((dev->r1f[c][target_id] & 0x10) != 0)&&(dev->dev_id==ATP885_DEVID)) { printk(KERN_WARNING "AEC67162 CRC ERROR !\n"); errstus = 0x02; } workreq->result = errstus; go_42: if (dev->dev_id == ATP885_DEVID) { j = inb(dev->baseport + 0x29) | 0x01; outb(j, dev->baseport + 0x29); } /* * Complete the command */ scsi_dma_unmap(workreq); spin_lock_irqsave(dev->host->host_lock, flags); (*workreq->scsi_done) (workreq); #ifdef ED_DBGP printk("workreq->scsi_done\n"); #endif /* * Clear it off the queue */ dev->id[c][target_id].curr_req 
= NULL; dev->working[c]--; spin_unlock_irqrestore(dev->host->host_lock, flags); /* * Take it back wide */ if (dev->wide_id[c] != 0) { tmport = workport + 0x1b; outb(0x01, tmport); while ((inb(tmport) & 0x01) != 0x01) { outb(0x01, tmport); } } /* * If there is stuff to send and nothing going then send it */ spin_lock_irqsave(dev->host->host_lock, flags); if (((dev->last_cmd[c] != 0xff) || (dev->quhd[c] != dev->quend[c])) && (dev->in_snd[c] == 0)) { #ifdef ED_DBGP printk("Call sent_s870(scsi_done)\n"); #endif send_s870(dev,c); } spin_unlock_irqrestore(dev->host->host_lock, flags); dev->in_int[c] = 0; goto handled; } if ((dev->last_cmd[c] & 0xf0) != 0x40) { dev->last_cmd[c] = 0xff; } if (i == 0x4f) { i = 0x89; } i &= 0x0f; if (i == 0x09) { tmpcip += 4; outl(dev->id[c][target_id].prdaddr, tmpcip); tmpcip = tmpcip - 2; outb(0x06, tmpcip); outb(0x00, tmpcip); tmpcip = tmpcip - 2; tmport = workport + 0x10; outb(0x41, tmport); if (dev->dev_id == ATP885_DEVID) { tmport += 2; k = dev->id[c][target_id].last_len; outb((unsigned char) (((unsigned char *) (&k))[2]), tmport++); outb((unsigned char) (((unsigned char *) (&k))[1]), tmport++); outb((unsigned char) (((unsigned char *) (&k))[0]), tmport); dev->id[c][target_id].dirct = 0x00; tmport += 0x04; } else { dev->id[c][target_id].dirct = 0x00; tmport += 0x08; } outb(0x08, tmport); outb(0x09, tmpcip); dev->in_int[c] = 0; goto handled; } if (i == 0x08) { tmpcip += 4; outl(dev->id[c][target_id].prdaddr, tmpcip); tmpcip = tmpcip - 2; outb(0x06, tmpcip); outb(0x00, tmpcip); tmpcip = tmpcip - 2; tmport = workport + 0x10; outb(0x41, tmport); if (dev->dev_id == ATP885_DEVID) { tmport += 2; k = dev->id[c][target_id].last_len; outb((unsigned char) (((unsigned char *) (&k))[2]), tmport++); outb((unsigned char) (((unsigned char *) (&k))[1]), tmport++); outb((unsigned char) (((unsigned char *) (&k))[0]), tmport++); } else { tmport += 5; } outb((unsigned char) (inb(tmport) | 0x20), tmport); dev->id[c][target_id].dirct = 0x20; tmport += 0x03; outb(0x08, tmport); outb(0x01, tmpcip); dev->in_int[c] = 0; goto handled; } tmport -= 0x07; if (i == 0x0a) { outb(0x30, tmport); } else { outb(0x46, tmport); } dev->id[c][target_id].dirct = 0x00; tmport += 0x02; outb(0x00, tmport++); outb(0x00, tmport++); outb(0x00, tmport++); tmport += 0x03; outb(0x08, tmport); dev->in_int[c] = 0; goto handled; } else { // tmport = workport + 0x17; // inb(tmport); // dev->working[c] = 0; dev->in_int[c] = 0; goto handled; } handled: #ifdef ED_DBGP printk("atp870u_intr_handle exit\n"); #endif return IRQ_HANDLED; } /** * atp870u_queuecommand - Queue SCSI command * @req_p: request block * @done: completion function * * Queue a command to the ATP queue. Called with the host lock held. 
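 *
 * The command is appended to the per-channel circular queue (quereq[]
 * indexed by quend); if the chip's request port reads idle and neither the
 * interrupt handler nor send_s870() is currently active on that channel,
 * send_s870() is invoked immediately to start it.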
*/ static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p, void (*done) (struct scsi_cmnd *)) { unsigned char c; unsigned int tmport,m; struct atp_unit *dev; struct Scsi_Host *host; c = scmd_channel(req_p); req_p->sense_buffer[0]=0; scsi_set_resid(req_p, 0); if (scmd_channel(req_p) > 1) { req_p->result = 0x00040000; done(req_p); #ifdef ED_DBGP printk("atp870u_queuecommand : req_p->device->channel > 1\n"); #endif return 0; } host = req_p->device->host; dev = (struct atp_unit *)&host->hostdata; m = 1; m = m << scmd_id(req_p); /* * Fake a timeout for missing targets */ if ((m & dev->active_id[c]) == 0) { req_p->result = 0x00040000; done(req_p); return 0; } if (done) { req_p->scsi_done = done; } else { #ifdef ED_DBGP printk( "atp870u_queuecommand: done can't be NULL\n"); #endif req_p->result = 0; done(req_p); return 0; } /* * Count new command */ dev->quend[c]++; if (dev->quend[c] >= qcnt) { dev->quend[c] = 0; } /* * Check queue state */ if (dev->quhd[c] == dev->quend[c]) { if (dev->quend[c] == 0) { dev->quend[c] = qcnt; } #ifdef ED_DBGP printk("atp870u_queuecommand : dev->quhd[c] == dev->quend[c]\n"); #endif dev->quend[c]--; req_p->result = 0x00020000; done(req_p); return 0; } dev->quereq[c][dev->quend[c]] = req_p; tmport = dev->ioport[c] + 0x1c; #ifdef ED_DBGP printk("dev->ioport[c] = %x inb(tmport) = %x dev->in_int[%d] = %d dev->in_snd[%d] = %d\n",dev->ioport[c],inb(tmport),c,dev->in_int[c],c,dev->in_snd[c]); #endif if ((inb(tmport) == 0) && (dev->in_int[c] == 0) && (dev->in_snd[c] == 0)) { #ifdef ED_DBGP printk("Call sent_s870(atp870u_queuecommand)\n"); #endif send_s870(dev,c); } #ifdef ED_DBGP printk("atp870u_queuecommand : exit\n"); #endif return 0; } static DEF_SCSI_QCMD(atp870u_queuecommand) /** * send_s870 - send a command to the controller * @host: host * * On entry there is work queued to be done. We move some of that work to the * controller itself. * * Caller holds the host lock. 
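 *
 * (The arguments are actually the atp_unit state and the channel number to
 * service; the @host line above is stale.)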
*/ static void send_s870(struct atp_unit *dev,unsigned char c) { unsigned int tmport; struct scsi_cmnd *workreq; unsigned int i;//,k; unsigned char j, target_id; unsigned char *prd; unsigned short int tmpcip, w; unsigned long l, bttl = 0; unsigned int workport; unsigned long sg_count; if (dev->in_snd[c] != 0) { #ifdef ED_DBGP printk("cmnd in_snd\n"); #endif return; } #ifdef ED_DBGP printk("Sent_s870 enter\n"); #endif dev->in_snd[c] = 1; if ((dev->last_cmd[c] != 0xff) && ((dev->last_cmd[c] & 0x40) != 0)) { dev->last_cmd[c] &= 0x0f; workreq = dev->id[c][dev->last_cmd[c]].curr_req; if (workreq != NULL) { /* check NULL pointer */ goto cmd_subp; } dev->last_cmd[c] = 0xff; if (dev->quhd[c] == dev->quend[c]) { dev->in_snd[c] = 0; return ; } } if ((dev->last_cmd[c] != 0xff) && (dev->working[c] != 0)) { dev->in_snd[c] = 0; return ; } dev->working[c]++; j = dev->quhd[c]; dev->quhd[c]++; if (dev->quhd[c] >= qcnt) { dev->quhd[c] = 0; } workreq = dev->quereq[c][dev->quhd[c]]; if (dev->id[c][scmd_id(workreq)].curr_req == NULL) { dev->id[c][scmd_id(workreq)].curr_req = workreq; dev->last_cmd[c] = scmd_id(workreq); goto cmd_subp; } dev->quhd[c] = j; dev->working[c]--; dev->in_snd[c] = 0; return; cmd_subp: workport = dev->ioport[c]; tmport = workport + 0x1f; if ((inb(tmport) & 0xb0) != 0) { goto abortsnd; } tmport = workport + 0x1c; if (inb(tmport) == 0) { goto oktosend; } abortsnd: #ifdef ED_DBGP printk("Abort to Send\n"); #endif dev->last_cmd[c] |= 0x40; dev->in_snd[c] = 0; return; oktosend: #ifdef ED_DBGP printk("OK to Send\n"); scmd_printk(KERN_DEBUG, workreq, "CDB"); for(i=0;i<workreq->cmd_len;i++) { printk(" %x",workreq->cmnd[i]); } printk("\n"); #endif l = scsi_bufflen(workreq); if (dev->dev_id == ATP885_DEVID) { j = inb(dev->baseport + 0x29) & 0xfe; outb(j, dev->baseport + 0x29); dev->r1f[c][scmd_id(workreq)] = 0; } if (workreq->cmnd[0] == READ_CAPACITY) { if (l > 8) l = 8; } if (workreq->cmnd[0] == 0x00) { l = 0; } tmport = workport + 0x1b; j = 0; target_id = scmd_id(workreq); /* * Wide ? 
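 * The per-channel wide_id bitmap decides whether bit 0 is set in the byte
 * written to the target-select register below.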
*/ w = 1; w = w << target_id; if ((w & dev->wide_id[c]) != 0) { j |= 0x01; } outb(j, tmport); while ((inb(tmport) & 0x01) != j) { outb(j,tmport); #ifdef ED_DBGP printk("send_s870 while loop 1\n"); #endif } /* * Write the command */ tmport = workport; outb(workreq->cmd_len, tmport++); outb(0x2c, tmport++); if (dev->dev_id == ATP885_DEVID) { outb(0x7f, tmport++); } else { outb(0xcf, tmport++); } for (i = 0; i < workreq->cmd_len; i++) { outb(workreq->cmnd[i], tmport++); } tmport = workport + 0x0f; outb(workreq->device->lun, tmport); tmport += 0x02; /* * Write the target */ outb(dev->id[c][target_id].devsp, tmport++); #ifdef ED_DBGP printk("dev->id[%d][%d].devsp = %2x\n",c,target_id,dev->id[c][target_id].devsp); #endif sg_count = scsi_dma_map(workreq); /* * Write transfer size */ outb((unsigned char) (((unsigned char *) (&l))[2]), tmport++); outb((unsigned char) (((unsigned char *) (&l))[1]), tmport++); outb((unsigned char) (((unsigned char *) (&l))[0]), tmport++); j = target_id; dev->id[c][j].last_len = l; dev->id[c][j].tran_len = 0; #ifdef ED_DBGP printk("dev->id[%2d][%2d].last_len = %d\n",c,j,dev->id[c][j].last_len); #endif /* * Flip the wide bits */ if ((j & 0x08) != 0) { j = (j & 0x07) | 0x40; } /* * Check transfer direction */ if (workreq->sc_data_direction == DMA_TO_DEVICE) { outb((unsigned char) (j | 0x20), tmport++); } else { outb(j, tmport++); } outb((unsigned char) (inb(tmport) | 0x80), tmport); outb(0x80, tmport); tmport = workport + 0x1c; dev->id[c][target_id].dirct = 0; if (l == 0) { if (inb(tmport) == 0) { tmport = workport + 0x18; #ifdef ED_DBGP printk("change SCSI_CMD_REG 0x08\n"); #endif outb(0x08, tmport); } else { dev->last_cmd[c] |= 0x40; } dev->in_snd[c] = 0; return; } tmpcip = dev->pciport[c]; prd = dev->id[c][target_id].prd_table; dev->id[c][target_id].prd_pos = prd; /* * Now write the request list. Either as scatter/gather or as * a linear chain. */ if (l) { struct scatterlist *sgpnt; i = 0; scsi_for_each_sg(workreq, sgpnt, sg_count, j) { bttl = sg_dma_address(sgpnt); l=sg_dma_len(sgpnt); #ifdef ED_DBGP printk("1. bttl %x, l %x\n",bttl, l); #endif while (l > 0x10000) { (((u16 *) (prd))[i + 3]) = 0x0000; (((u16 *) (prd))[i + 2]) = 0x0000; (((u32 *) (prd))[i >> 1]) = cpu_to_le32(bttl); l -= 0x10000; bttl += 0x10000; i += 0x04; } (((u32 *) (prd))[i >> 1]) = cpu_to_le32(bttl); (((u16 *) (prd))[i + 2]) = cpu_to_le16(l); (((u16 *) (prd))[i + 3]) = 0; i += 0x04; } (((u16 *) (prd))[i - 1]) = cpu_to_le16(0x8000); #ifdef ED_DBGP printk("prd %4x %4x %4x %4x\n",(((unsigned short int *)prd)[0]),(((unsigned short int *)prd)[1]),(((unsigned short int *)prd)[2]),(((unsigned short int *)prd)[3])); printk("2. 
bttl %x, l %x\n",bttl, l); #endif } tmpcip += 4; #ifdef ED_DBGP printk("send_s870: prdaddr_2 0x%8x tmpcip %x target_id %d\n", dev->id[c][target_id].prdaddr,tmpcip,target_id); #endif dev->id[c][target_id].prdaddr = dev->id[c][target_id].prd_bus; outl(dev->id[c][target_id].prdaddr, tmpcip); tmpcip = tmpcip - 2; outb(0x06, tmpcip); outb(0x00, tmpcip); if (dev->dev_id == ATP885_DEVID) { tmpcip--; j=inb(tmpcip) & 0xf3; if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) { j |= 0x0c; } outb(j,tmpcip); tmpcip--; } else if ((dev->dev_id == ATP880_DEVID1) || (dev->dev_id == ATP880_DEVID2)) { tmpcip =tmpcip -2; tmport = workport - 0x05; if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) { outb((unsigned char) ((inb(tmport) & 0x3f) | 0xc0), tmport); } else { outb((unsigned char) (inb(tmport) & 0x3f), tmport); } } else { tmpcip =tmpcip -2; tmport = workport + 0x3a; if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) { outb((inb(tmport) & 0xf3) | 0x08, tmport); } else { outb(inb(tmport) & 0xf3, tmport); } } tmport = workport + 0x1c; if(workreq->sc_data_direction == DMA_TO_DEVICE) { dev->id[c][target_id].dirct = 0x20; if (inb(tmport) == 0) { tmport = workport + 0x18; outb(0x08, tmport); outb(0x01, tmpcip); #ifdef ED_DBGP printk( "start DMA(to target)\n"); #endif } else { dev->last_cmd[c] |= 0x40; } dev->in_snd[c] = 0; return; } if (inb(tmport) == 0) { tmport = workport + 0x18; outb(0x08, tmport); outb(0x09, tmpcip); #ifdef ED_DBGP printk( "start DMA(to host)\n"); #endif } else { dev->last_cmd[c] |= 0x40; } dev->in_snd[c] = 0; return; } static unsigned char fun_scam(struct atp_unit *dev, unsigned short int *val) { unsigned int tmport; unsigned short int i, k; unsigned char j; tmport = dev->ioport[0] + 0x1c; outw(*val, tmport); FUN_D7: for (i = 0; i < 10; i++) { /* stable >= bus settle delay(400 ns) */ k = inw(tmport); j = (unsigned char) (k >> 8); if ((k & 0x8000) != 0) { /* DB7 all release? */ goto FUN_D7; } } *val |= 0x4000; /* assert DB6 */ outw(*val, tmport); *val &= 0xdfff; /* assert DB5 */ outw(*val, tmport); FUN_D5: for (i = 0; i < 10; i++) { /* stable >= bus settle delay(400 ns) */ if ((inw(tmport) & 0x2000) != 0) { /* DB5 all release? */ goto FUN_D5; } } *val |= 0x8000; /* no DB4-0, assert DB7 */ *val &= 0xe0ff; outw(*val, tmport); *val &= 0xbfff; /* release DB6 */ outw(*val, tmport); FUN_D6: for (i = 0; i < 10; i++) { /* stable >= bus settle delay(400 ns) */ if ((inw(tmport) & 0x4000) != 0) { /* DB6 all release? */ goto FUN_D6; } } return j; } static void tscam(struct Scsi_Host *host) { unsigned int tmport; unsigned char i, j, k; unsigned long n; unsigned short int m, assignid_map, val; unsigned char mbuf[33], quintet[2]; struct atp_unit *dev = (struct atp_unit *)&host->hostdata; static unsigned char g2q_tab[8] = { 0x38, 0x31, 0x32, 0x2b, 0x34, 0x2d, 0x2e, 0x27 }; /* I can't believe we need this before we've even done anything. Remove it * and see if anyone bitches. 
for (i = 0; i < 0x10; i++) { udelay(0xffff); } */ tmport = dev->ioport[0] + 1; outb(0x08, tmport++); outb(0x7f, tmport); tmport = dev->ioport[0] + 0x11; outb(0x20, tmport); if ((dev->scam_on & 0x40) == 0) { return; } m = 1; m <<= dev->host_id[0]; j = 16; if (dev->chip_ver < 4) { m |= 0xff00; j = 8; } assignid_map = m; tmport = dev->ioport[0] + 0x02; outb(0x02, tmport++); /* 2*2=4ms,3EH 2/32*3E=3.9ms */ outb(0, tmport++); outb(0, tmport++); outb(0, tmport++); outb(0, tmport++); outb(0, tmport++); outb(0, tmport++); for (i = 0; i < j; i++) { m = 1; m = m << i; if ((m & assignid_map) != 0) { continue; } tmport = dev->ioport[0] + 0x0f; outb(0, tmport++); tmport += 0x02; outb(0, tmport++); outb(0, tmport++); outb(0, tmport++); if (i > 7) { k = (i & 0x07) | 0x40; } else { k = i; } outb(k, tmport++); tmport = dev->ioport[0] + 0x1b; if (dev->chip_ver == 4) { outb(0x01, tmport); } else { outb(0x00, tmport); } wait_rdyok: tmport = dev->ioport[0] + 0x18; outb(0x09, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; k = inb(tmport); if (k != 0x16) { if ((k == 0x85) || (k == 0x42)) { continue; } tmport = dev->ioport[0] + 0x10; outb(0x41, tmport); goto wait_rdyok; } assignid_map |= m; } tmport = dev->ioport[0] + 0x02; outb(0x7f, tmport); tmport = dev->ioport[0] + 0x1b; outb(0x02, tmport); outb(0, 0x80); val = 0x0080; /* bsy */ tmport = dev->ioport[0] + 0x1c; outw(val, tmport); val |= 0x0040; /* sel */ outw(val, tmport); val |= 0x0004; /* msg */ outw(val, tmport); inb(0x80); /* 2 deskew delay(45ns*2=90ns) */ val &= 0x007f; /* no bsy */ outw(val, tmport); mdelay(128); val &= 0x00fb; /* after 1ms no msg */ outw(val, tmport); wait_nomsg: if ((inb(tmport) & 0x04) != 0) { goto wait_nomsg; } outb(1, 0x80); udelay(100); for (n = 0; n < 0x30000; n++) { if ((inb(tmport) & 0x80) != 0) { /* bsy ? */ goto wait_io; } } goto TCM_SYNC; wait_io: for (n = 0; n < 0x30000; n++) { if ((inb(tmport) & 0x81) == 0x0081) { goto wait_io1; } } goto TCM_SYNC; wait_io1: inb(0x80); val |= 0x8003; /* io,cd,db7 */ outw(val, tmport); inb(0x80); val &= 0x00bf; /* no sel */ outw(val, tmport); outb(2, 0x80); TCM_SYNC: udelay(0x800); if ((inb(tmport) & 0x80) == 0x00) { /* bsy ? */ outw(0, tmport--); outb(0, tmport); tmport = dev->ioport[0] + 0x15; outb(0, tmport); tmport += 0x03; outb(0x09, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0) cpu_relax(); tmport -= 0x08; inb(tmport); return; } val &= 0x00ff; /* synchronization */ val |= 0x3f00; fun_scam(dev, &val); outb(3, 0x80); val &= 0x00ff; /* isolation */ val |= 0x2000; fun_scam(dev, &val); outb(4, 0x80); i = 8; j = 0; TCM_ID: if ((inw(tmport) & 0x2000) == 0) { goto TCM_ID; } outb(5, 0x80); val &= 0x00ff; /* get ID_STRING */ val |= 0x2000; k = fun_scam(dev, &val); if ((k & 0x03) == 0) { goto TCM_5; } mbuf[j] <<= 0x01; mbuf[j] &= 0xfe; if ((k & 0x02) != 0) { mbuf[j] |= 0x01; } i--; if (i > 0) { goto TCM_ID; } j++; i = 8; goto TCM_ID; TCM_5: /* isolation complete.. */ /* mbuf[32]=0; printk(" \n%x %x %x %s\n ",assignid_map,mbuf[0],mbuf[1],&mbuf[2]); */ i = 15; j = mbuf[0]; if ((j & 0x20) != 0) { /* bit5=1:ID up to 7 */ i = 7; } if ((j & 0x06) == 0) { /* IDvalid? 
*/ goto G2Q5; } k = mbuf[1]; small_id: m = 1; m <<= k; if ((m & assignid_map) == 0) { goto G2Q_QUIN; } if (k > 0) { k--; goto small_id; } G2Q5: /* srch from max acceptable ID# */ k = i; /* max acceptable ID# */ G2Q_LP: m = 1; m <<= k; if ((m & assignid_map) == 0) { goto G2Q_QUIN; } if (k > 0) { k--; goto G2Q_LP; } G2Q_QUIN: /* k=binID#, */ assignid_map |= m; if (k < 8) { quintet[0] = 0x38; /* 1st dft ID<8 */ } else { quintet[0] = 0x31; /* 1st ID>=8 */ } k &= 0x07; quintet[1] = g2q_tab[k]; val &= 0x00ff; /* AssignID 1stQuintet,AH=001xxxxx */ m = quintet[0] << 8; val |= m; fun_scam(dev, &val); val &= 0x00ff; /* AssignID 2ndQuintet,AH=001xxxxx */ m = quintet[1] << 8; val |= m; fun_scam(dev, &val); goto TCM_SYNC; } static void is870(struct atp_unit *dev, unsigned int wkport) { unsigned int tmport; unsigned char i, j, k, rmb, n; unsigned short int m; static unsigned char mbuf[512]; static unsigned char satn[9] = { 0, 0, 0, 0, 0, 0, 0, 6, 6 }; static unsigned char inqd[9] = { 0x12, 0, 0, 0, 0x24, 0, 0, 0x24, 6 }; static unsigned char synn[6] = { 0x80, 1, 3, 1, 0x19, 0x0e }; static unsigned char synu[6] = { 0x80, 1, 3, 1, 0x0c, 0x0e }; static unsigned char synw[6] = { 0x80, 1, 3, 1, 0x0c, 0x07 }; static unsigned char wide[6] = { 0x80, 1, 2, 3, 1, 0 }; tmport = wkport + 0x3a; outb((unsigned char) (inb(tmport) | 0x10), tmport); for (i = 0; i < 16; i++) { if ((dev->chip_ver != 4) && (i > 7)) { break; } m = 1; m = m << i; if ((m & dev->active_id[0]) != 0) { continue; } if (i == dev->host_id[0]) { printk(KERN_INFO " ID: %2d Host Adapter\n", dev->host_id[0]); continue; } tmport = wkport + 0x1b; if (dev->chip_ver == 4) { outb(0x01, tmport); } else { outb(0x00, tmport); } tmport = wkport + 1; outb(0x08, tmport++); outb(0x7f, tmport++); outb(satn[0], tmport++); outb(satn[1], tmport++); outb(satn[2], tmport++); outb(satn[3], tmport++); outb(satn[4], tmport++); outb(satn[5], tmport++); tmport += 0x06; outb(0, tmport); tmport += 0x02; outb(dev->id[0][i].devsp, tmport++); outb(0, tmport++); outb(satn[6], tmport++); outb(satn[7], tmport++); j = i; if ((j & 0x08) != 0) { j = (j & 0x07) | 0x40; } outb(j, tmport); tmport += 0x03; outb(satn[8], tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; if (inb(tmport) != 0x11 && inb(tmport) != 0x8e) continue; while (inb(tmport) != 0x8e) cpu_relax(); dev->active_id[0] |= m; tmport = wkport + 0x10; outb(0x30, tmport); tmport = wkport + 0x04; outb(0x00, tmport); phase_cmd: tmport = wkport + 0x18; outb(0x08, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; j = inb(tmport); if (j != 0x16) { tmport = wkport + 0x10; outb(0x41, tmport); goto phase_cmd; } sel_ok: tmport = wkport + 3; outb(inqd[0], tmport++); outb(inqd[1], tmport++); outb(inqd[2], tmport++); outb(inqd[3], tmport++); outb(inqd[4], tmport++); outb(inqd[5], tmport); tmport += 0x07; outb(0, tmport); tmport += 0x02; outb(dev->id[0][i].devsp, tmport++); outb(0, tmport++); outb(inqd[6], tmport++); outb(inqd[7], tmport++); tmport += 0x03; outb(inqd[8], tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; if (inb(tmport) != 0x11 && inb(tmport) != 0x8e) continue; while (inb(tmport) != 0x8e) cpu_relax(); tmport = wkport + 0x1b; if (dev->chip_ver == 4) outb(0x00, tmport); tmport = wkport + 0x18; outb(0x08, tmport); tmport += 0x07; j = 0; rd_inq_data: k = inb(tmport); if ((k & 0x01) != 0) { tmport -= 0x06; mbuf[j++] = inb(tmport); tmport += 0x06; goto rd_inq_data; } if ((k & 0x80) == 0) { goto rd_inq_data; } 
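	/*
	 * The INQUIRY payload has been drained from the data FIFO above; now
	 * read the completion code.  0x16 means the command finished cleanly,
	 * anything else goes through the cleanup sequence and may retry via
	 * sel_ok.
	 */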
tmport -= 0x08; j = inb(tmport); if (j == 0x16) { goto inq_ok; } tmport = wkport + 0x10; outb(0x46, tmport); tmport += 0x02; outb(0, tmport++); outb(0, tmport++); outb(0, tmport++); tmport += 0x03; outb(0x08, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; if (inb(tmport) != 0x16) { goto sel_ok; } inq_ok: mbuf[36] = 0; printk(KERN_INFO " ID: %2d %s\n", i, &mbuf[8]); dev->id[0][i].devtype = mbuf[0]; rmb = mbuf[1]; n = mbuf[7]; if (dev->chip_ver != 4) { goto not_wide; } if ((mbuf[7] & 0x60) == 0) { goto not_wide; } if ((dev->global_map[0] & 0x20) == 0) { goto not_wide; } tmport = wkport + 0x1b; outb(0x01, tmport); tmport = wkport + 3; outb(satn[0], tmport++); outb(satn[1], tmport++); outb(satn[2], tmport++); outb(satn[3], tmport++); outb(satn[4], tmport++); outb(satn[5], tmport++); tmport += 0x06; outb(0, tmport); tmport += 0x02; outb(dev->id[0][i].devsp, tmport++); outb(0, tmport++); outb(satn[6], tmport++); outb(satn[7], tmport++); tmport += 0x03; outb(satn[8], tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; if (inb(tmport) != 0x11 && inb(tmport) != 0x8e) continue; while (inb(tmport) != 0x8e) cpu_relax(); try_wide: j = 0; tmport = wkport + 0x14; outb(0x05, tmport); tmport += 0x04; outb(0x20, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0) { if ((inb(tmport) & 0x01) != 0) { tmport -= 0x06; outb(wide[j++], tmport); tmport += 0x06; } } tmport -= 0x08; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); j = inb(tmport) & 0x0f; if (j == 0x0f) { goto widep_in; } if (j == 0x0a) { goto widep_cmd; } if (j == 0x0e) { goto try_wide; } continue; widep_out: tmport = wkport + 0x18; outb(0x20, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0) { if ((inb(tmport) & 0x01) != 0) { tmport -= 0x06; outb(0, tmport); tmport += 0x06; } } tmport -= 0x08; j = inb(tmport) & 0x0f; if (j == 0x0f) { goto widep_in; } if (j == 0x0a) { goto widep_cmd; } if (j == 0x0e) { goto widep_out; } continue; widep_in: tmport = wkport + 0x14; outb(0xff, tmport); tmport += 0x04; outb(0x20, tmport); tmport += 0x07; k = 0; widep_in1: j = inb(tmport); if ((j & 0x01) != 0) { tmport -= 0x06; mbuf[k++] = inb(tmport); tmport += 0x06; goto widep_in1; } if ((j & 0x80) == 0x00) { goto widep_in1; } tmport -= 0x08; j = inb(tmport) & 0x0f; if (j == 0x0f) { goto widep_in; } if (j == 0x0a) { goto widep_cmd; } if (j == 0x0e) { goto widep_out; } continue; widep_cmd: tmport = wkport + 0x10; outb(0x30, tmport); tmport = wkport + 0x14; outb(0x00, tmport); tmport += 0x04; outb(0x08, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; j = inb(tmport); if (j != 0x16) { if (j == 0x4e) { goto widep_out; } continue; } if (mbuf[0] != 0x01) { goto not_wide; } if (mbuf[1] != 0x02) { goto not_wide; } if (mbuf[2] != 0x03) { goto not_wide; } if (mbuf[3] != 0x01) { goto not_wide; } m = 1; m = m << i; dev->wide_id[0] |= m; not_wide: if ((dev->id[0][i].devtype == 0x00) || (dev->id[0][i].devtype == 0x07) || ((dev->id[0][i].devtype == 0x05) && ((n & 0x10) != 0))) { goto set_sync; } continue; set_sync: tmport = wkport + 0x1b; j = 0; if ((m & dev->wide_id[0]) != 0) { j |= 0x01; } outb(j, tmport); tmport = wkport + 3; outb(satn[0], tmport++); outb(satn[1], tmport++); outb(satn[2], tmport++); outb(satn[3], tmport++); outb(satn[4], tmport++); outb(satn[5], tmport++); tmport += 0x06; outb(0, tmport); tmport += 0x02; outb(dev->id[0][i].devsp, tmport++); outb(0, tmport++); outb(satn[6], tmport++); outb(satn[7], tmport++); tmport += 0x03; 
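	/*
	 * Writing satn[8] starts the selection-with-ATN; the poll that follows
	 * waits for bit 7 of the status port and expects 0x11 or 0x8e before
	 * the synchronous negotiation messages are exchanged.
	 */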
outb(satn[8], tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; if (inb(tmport) != 0x11 && inb(tmport) != 0x8e) continue; while (inb(tmport) != 0x8e) cpu_relax(); try_sync: j = 0; tmport = wkport + 0x14; outb(0x06, tmport); tmport += 0x04; outb(0x20, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0) { if ((inb(tmport) & 0x01) != 0) { tmport -= 0x06; if ((m & dev->wide_id[0]) != 0) { outb(synw[j++], tmport); } else { if ((m & dev->ultra_map[0]) != 0) { outb(synu[j++], tmport); } else { outb(synn[j++], tmport); } } tmport += 0x06; } } tmport -= 0x08; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); j = inb(tmport) & 0x0f; if (j == 0x0f) { goto phase_ins; } if (j == 0x0a) { goto phase_cmds; } if (j == 0x0e) { goto try_sync; } continue; phase_outs: tmport = wkport + 0x18; outb(0x20, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) { if ((inb(tmport) & 0x01) != 0x00) { tmport -= 0x06; outb(0x00, tmport); tmport += 0x06; } } tmport -= 0x08; j = inb(tmport); if (j == 0x85) { goto tar_dcons; } j &= 0x0f; if (j == 0x0f) { goto phase_ins; } if (j == 0x0a) { goto phase_cmds; } if (j == 0x0e) { goto phase_outs; } continue; phase_ins: tmport = wkport + 0x14; outb(0xff, tmport); tmport += 0x04; outb(0x20, tmport); tmport += 0x07; k = 0; phase_ins1: j = inb(tmport); if ((j & 0x01) != 0x00) { tmport -= 0x06; mbuf[k++] = inb(tmport); tmport += 0x06; goto phase_ins1; } if ((j & 0x80) == 0x00) { goto phase_ins1; } tmport -= 0x08; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); j = inb(tmport); if (j == 0x85) { goto tar_dcons; } j &= 0x0f; if (j == 0x0f) { goto phase_ins; } if (j == 0x0a) { goto phase_cmds; } if (j == 0x0e) { goto phase_outs; } continue; phase_cmds: tmport = wkport + 0x10; outb(0x30, tmport); tar_dcons: tmport = wkport + 0x14; outb(0x00, tmport); tmport += 0x04; outb(0x08, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; j = inb(tmport); if (j != 0x16) { continue; } if (mbuf[0] != 0x01) { continue; } if (mbuf[1] != 0x03) { continue; } if (mbuf[4] == 0x00) { continue; } if (mbuf[3] > 0x64) { continue; } if (mbuf[4] > 0x0c) { mbuf[4] = 0x0c; } dev->id[0][i].devsp = mbuf[4]; if ((mbuf[3] < 0x0d) && (rmb == 0)) { j = 0xa0; goto set_syn_ok; } if (mbuf[3] < 0x1a) { j = 0x20; goto set_syn_ok; } if (mbuf[3] < 0x33) { j = 0x40; goto set_syn_ok; } if (mbuf[3] < 0x4c) { j = 0x50; goto set_syn_ok; } j = 0x60; set_syn_ok: dev->id[0][i].devsp = (dev->id[0][i].devsp & 0x0f) | j; } tmport = wkport + 0x3a; outb((unsigned char) (inb(tmport) & 0xef), tmport); } static void is880(struct atp_unit *dev, unsigned int wkport) { unsigned int tmport; unsigned char i, j, k, rmb, n, lvdmode; unsigned short int m; static unsigned char mbuf[512]; static unsigned char satn[9] = { 0, 0, 0, 0, 0, 0, 0, 6, 6 }; static unsigned char inqd[9] = { 0x12, 0, 0, 0, 0x24, 0, 0, 0x24, 6 }; static unsigned char synn[6] = { 0x80, 1, 3, 1, 0x19, 0x0e }; unsigned char synu[6] = { 0x80, 1, 3, 1, 0x0a, 0x0e }; static unsigned char synw[6] = { 0x80, 1, 3, 1, 0x19, 0x0e }; unsigned char synuw[6] = { 0x80, 1, 3, 1, 0x0a, 0x0e }; static unsigned char wide[6] = { 0x80, 1, 2, 3, 1, 0 }; static unsigned char u3[9] = { 0x80, 1, 6, 4, 0x09, 00, 0x0e, 0x01, 0x02 }; lvdmode = inb(wkport + 0x3f) & 0x40; for (i = 0; i < 16; i++) { m = 1; m = m << i; if ((m & dev->active_id[0]) != 0) { continue; } if (i == dev->host_id[0]) { printk(KERN_INFO " ID: %2d Host Adapter\n", dev->host_id[0]); continue; } tmport = wkport + 0x5b; outb(0x01, tmport); tmport 
= wkport + 0x41; outb(0x08, tmport++); outb(0x7f, tmport++); outb(satn[0], tmport++); outb(satn[1], tmport++); outb(satn[2], tmport++); outb(satn[3], tmport++); outb(satn[4], tmport++); outb(satn[5], tmport++); tmport += 0x06; outb(0, tmport); tmport += 0x02; outb(dev->id[0][i].devsp, tmport++); outb(0, tmport++); outb(satn[6], tmport++); outb(satn[7], tmport++); j = i; if ((j & 0x08) != 0) { j = (j & 0x07) | 0x40; } outb(j, tmport); tmport += 0x03; outb(satn[8], tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; if (inb(tmport) != 0x11 && inb(tmport) != 0x8e) continue; while (inb(tmport) != 0x8e) cpu_relax(); dev->active_id[0] |= m; tmport = wkport + 0x50; outb(0x30, tmport); tmport = wkport + 0x54; outb(0x00, tmport); phase_cmd: tmport = wkport + 0x58; outb(0x08, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; j = inb(tmport); if (j != 0x16) { tmport = wkport + 0x50; outb(0x41, tmport); goto phase_cmd; } sel_ok: tmport = wkport + 0x43; outb(inqd[0], tmport++); outb(inqd[1], tmport++); outb(inqd[2], tmport++); outb(inqd[3], tmport++); outb(inqd[4], tmport++); outb(inqd[5], tmport); tmport += 0x07; outb(0, tmport); tmport += 0x02; outb(dev->id[0][i].devsp, tmport++); outb(0, tmport++); outb(inqd[6], tmport++); outb(inqd[7], tmport++); tmport += 0x03; outb(inqd[8], tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; if (inb(tmport) != 0x11 && inb(tmport) != 0x8e) continue; while (inb(tmport) != 0x8e) cpu_relax(); tmport = wkport + 0x5b; outb(0x00, tmport); tmport = wkport + 0x58; outb(0x08, tmport); tmport += 0x07; j = 0; rd_inq_data: k = inb(tmport); if ((k & 0x01) != 0) { tmport -= 0x06; mbuf[j++] = inb(tmport); tmport += 0x06; goto rd_inq_data; } if ((k & 0x80) == 0) { goto rd_inq_data; } tmport -= 0x08; j = inb(tmport); if (j == 0x16) { goto inq_ok; } tmport = wkport + 0x50; outb(0x46, tmport); tmport += 0x02; outb(0, tmport++); outb(0, tmport++); outb(0, tmport++); tmport += 0x03; outb(0x08, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; if (inb(tmport) != 0x16) goto sel_ok; inq_ok: mbuf[36] = 0; printk(KERN_INFO " ID: %2d %s\n", i, &mbuf[8]); dev->id[0][i].devtype = mbuf[0]; rmb = mbuf[1]; n = mbuf[7]; if ((mbuf[7] & 0x60) == 0) { goto not_wide; } if ((i < 8) && ((dev->global_map[0] & 0x20) == 0)) { goto not_wide; } if (lvdmode == 0) { goto chg_wide; } if (dev->sp[0][i] != 0x04) // force u2 { goto chg_wide; } tmport = wkport + 0x5b; outb(0x01, tmport); tmport = wkport + 0x43; outb(satn[0], tmport++); outb(satn[1], tmport++); outb(satn[2], tmport++); outb(satn[3], tmport++); outb(satn[4], tmport++); outb(satn[5], tmport++); tmport += 0x06; outb(0, tmport); tmport += 0x02; outb(dev->id[0][i].devsp, tmport++); outb(0, tmport++); outb(satn[6], tmport++); outb(satn[7], tmport++); tmport += 0x03; outb(satn[8], tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; if (inb(tmport) != 0x11 && inb(tmport) != 0x8e) continue; while (inb(tmport) != 0x8e) cpu_relax(); try_u3: j = 0; tmport = wkport + 0x54; outb(0x09, tmport); tmport += 0x04; outb(0x20, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0) { if ((inb(tmport) & 0x01) != 0) { tmport -= 0x06; outb(u3[j++], tmport); tmport += 0x06; } } tmport -= 0x08; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); j = inb(tmport) & 0x0f; if (j == 0x0f) { goto u3p_in; } if (j == 0x0a) { goto u3p_cmd; } if (j == 0x0e) { goto try_u3; } continue; 
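	/*
	 * The u3p_* labels below drive what appears to be the Ultra3 (PPR)
	 * negotiation: u3p_out drains a message-out phase, u3p_in collects the
	 * target's reply into mbuf[], and u3p_cmd completes the exchange and
	 * checks the reply before marking the target wide with devsp = 0xce.
	 */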
u3p_out: tmport = wkport + 0x58; outb(0x20, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0) { if ((inb(tmport) & 0x01) != 0) { tmport -= 0x06; outb(0, tmport); tmport += 0x06; } } tmport -= 0x08; j = inb(tmport) & 0x0f; if (j == 0x0f) { goto u3p_in; } if (j == 0x0a) { goto u3p_cmd; } if (j == 0x0e) { goto u3p_out; } continue; u3p_in: tmport = wkport + 0x54; outb(0x09, tmport); tmport += 0x04; outb(0x20, tmport); tmport += 0x07; k = 0; u3p_in1: j = inb(tmport); if ((j & 0x01) != 0) { tmport -= 0x06; mbuf[k++] = inb(tmport); tmport += 0x06; goto u3p_in1; } if ((j & 0x80) == 0x00) { goto u3p_in1; } tmport -= 0x08; j = inb(tmport) & 0x0f; if (j == 0x0f) { goto u3p_in; } if (j == 0x0a) { goto u3p_cmd; } if (j == 0x0e) { goto u3p_out; } continue; u3p_cmd: tmport = wkport + 0x50; outb(0x30, tmport); tmport = wkport + 0x54; outb(0x00, tmport); tmport += 0x04; outb(0x08, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; j = inb(tmport); if (j != 0x16) { if (j == 0x4e) { goto u3p_out; } continue; } if (mbuf[0] != 0x01) { goto chg_wide; } if (mbuf[1] != 0x06) { goto chg_wide; } if (mbuf[2] != 0x04) { goto chg_wide; } if (mbuf[3] == 0x09) { m = 1; m = m << i; dev->wide_id[0] |= m; dev->id[0][i].devsp = 0xce; continue; } chg_wide: tmport = wkport + 0x5b; outb(0x01, tmport); tmport = wkport + 0x43; outb(satn[0], tmport++); outb(satn[1], tmport++); outb(satn[2], tmport++); outb(satn[3], tmport++); outb(satn[4], tmport++); outb(satn[5], tmport++); tmport += 0x06; outb(0, tmport); tmport += 0x02; outb(dev->id[0][i].devsp, tmport++); outb(0, tmport++); outb(satn[6], tmport++); outb(satn[7], tmport++); tmport += 0x03; outb(satn[8], tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; if (inb(tmport) != 0x11 && inb(tmport) != 0x8e) continue; while (inb(tmport) != 0x8e) cpu_relax(); try_wide: j = 0; tmport = wkport + 0x54; outb(0x05, tmport); tmport += 0x04; outb(0x20, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0) { if ((inb(tmport) & 0x01) != 0) { tmport -= 0x06; outb(wide[j++], tmport); tmport += 0x06; } } tmport -= 0x08; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); j = inb(tmport) & 0x0f; if (j == 0x0f) { goto widep_in; } if (j == 0x0a) { goto widep_cmd; } if (j == 0x0e) { goto try_wide; } continue; widep_out: tmport = wkport + 0x58; outb(0x20, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0) { if ((inb(tmport) & 0x01) != 0) { tmport -= 0x06; outb(0, tmport); tmport += 0x06; } } tmport -= 0x08; j = inb(tmport) & 0x0f; if (j == 0x0f) { goto widep_in; } if (j == 0x0a) { goto widep_cmd; } if (j == 0x0e) { goto widep_out; } continue; widep_in: tmport = wkport + 0x54; outb(0xff, tmport); tmport += 0x04; outb(0x20, tmport); tmport += 0x07; k = 0; widep_in1: j = inb(tmport); if ((j & 0x01) != 0) { tmport -= 0x06; mbuf[k++] = inb(tmport); tmport += 0x06; goto widep_in1; } if ((j & 0x80) == 0x00) { goto widep_in1; } tmport -= 0x08; j = inb(tmport) & 0x0f; if (j == 0x0f) { goto widep_in; } if (j == 0x0a) { goto widep_cmd; } if (j == 0x0e) { goto widep_out; } continue; widep_cmd: tmport = wkport + 0x50; outb(0x30, tmport); tmport = wkport + 0x54; outb(0x00, tmport); tmport += 0x04; outb(0x08, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; j = inb(tmport); if (j != 0x16) { if (j == 0x4e) { goto widep_out; } continue; } if (mbuf[0] != 0x01) { goto not_wide; } if (mbuf[1] != 0x02) { goto not_wide; } if (mbuf[2] != 0x03) { goto not_wide; } if (mbuf[3] != 0x01) { 
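				/* reply is not the expected 01 02 03 01 wide-transfer acceptance */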
goto not_wide; } m = 1; m = m << i; dev->wide_id[0] |= m; not_wide: if ((dev->id[0][i].devtype == 0x00) || (dev->id[0][i].devtype == 0x07) || ((dev->id[0][i].devtype == 0x05) && ((n & 0x10) != 0))) { m = 1; m = m << i; if ((dev->async[0] & m) != 0) { goto set_sync; } } continue; set_sync: if (dev->sp[0][i] == 0x02) { synu[4] = 0x0c; synuw[4] = 0x0c; } else { if (dev->sp[0][i] >= 0x03) { synu[4] = 0x0a; synuw[4] = 0x0a; } } tmport = wkport + 0x5b; j = 0; if ((m & dev->wide_id[0]) != 0) { j |= 0x01; } outb(j, tmport); tmport = wkport + 0x43; outb(satn[0], tmport++); outb(satn[1], tmport++); outb(satn[2], tmport++); outb(satn[3], tmport++); outb(satn[4], tmport++); outb(satn[5], tmport++); tmport += 0x06; outb(0, tmport); tmport += 0x02; outb(dev->id[0][i].devsp, tmport++); outb(0, tmport++); outb(satn[6], tmport++); outb(satn[7], tmport++); tmport += 0x03; outb(satn[8], tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; if ((inb(tmport) != 0x11) && (inb(tmport) != 0x8e)) { continue; } while (inb(tmport) != 0x8e) cpu_relax(); try_sync: j = 0; tmport = wkport + 0x54; outb(0x06, tmport); tmport += 0x04; outb(0x20, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0) { if ((inb(tmport) & 0x01) != 0) { tmport -= 0x06; if ((m & dev->wide_id[0]) != 0) { if ((m & dev->ultra_map[0]) != 0) { outb(synuw[j++], tmport); } else { outb(synw[j++], tmport); } } else { if ((m & dev->ultra_map[0]) != 0) { outb(synu[j++], tmport); } else { outb(synn[j++], tmport); } } tmport += 0x06; } } tmport -= 0x08; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); j = inb(tmport) & 0x0f; if (j == 0x0f) { goto phase_ins; } if (j == 0x0a) { goto phase_cmds; } if (j == 0x0e) { goto try_sync; } continue; phase_outs: tmport = wkport + 0x58; outb(0x20, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) { if ((inb(tmport) & 0x01) != 0x00) { tmport -= 0x06; outb(0x00, tmport); tmport += 0x06; } } tmport -= 0x08; j = inb(tmport); if (j == 0x85) { goto tar_dcons; } j &= 0x0f; if (j == 0x0f) { goto phase_ins; } if (j == 0x0a) { goto phase_cmds; } if (j == 0x0e) { goto phase_outs; } continue; phase_ins: tmport = wkport + 0x54; outb(0x06, tmport); tmport += 0x04; outb(0x20, tmport); tmport += 0x07; k = 0; phase_ins1: j = inb(tmport); if ((j & 0x01) != 0x00) { tmport -= 0x06; mbuf[k++] = inb(tmport); tmport += 0x06; goto phase_ins1; } if ((j & 0x80) == 0x00) { goto phase_ins1; } tmport -= 0x08; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); j = inb(tmport); if (j == 0x85) { goto tar_dcons; } j &= 0x0f; if (j == 0x0f) { goto phase_ins; } if (j == 0x0a) { goto phase_cmds; } if (j == 0x0e) { goto phase_outs; } continue; phase_cmds: tmport = wkport + 0x50; outb(0x30, tmport); tar_dcons: tmport = wkport + 0x54; outb(0x00, tmport); tmport += 0x04; outb(0x08, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; j = inb(tmport); if (j != 0x16) { continue; } if (mbuf[0] != 0x01) { continue; } if (mbuf[1] != 0x03) { continue; } if (mbuf[4] == 0x00) { continue; } if (mbuf[3] > 0x64) { continue; } if (mbuf[4] > 0x0e) { mbuf[4] = 0x0e; } dev->id[0][i].devsp = mbuf[4]; if (mbuf[3] < 0x0c) { j = 0xb0; goto set_syn_ok; } if ((mbuf[3] < 0x0d) && (rmb == 0)) { j = 0xa0; goto set_syn_ok; } if (mbuf[3] < 0x1a) { j = 0x20; goto set_syn_ok; } if (mbuf[3] < 0x33) { j = 0x40; goto set_syn_ok; } if (mbuf[3] < 0x4c) { j = 0x50; goto set_syn_ok; } j = 0x60; set_syn_ok: dev->id[0][i].devsp = (dev->id[0][i].devsp & 0x0f) | j; } } static void atp870u_free_tables(struct 
Scsi_Host *host) { struct atp_unit *atp_dev = (struct atp_unit *)&host->hostdata; int j, k; for (j=0; j < 2; j++) { for (k = 0; k < 16; k++) { if (!atp_dev->id[j][k].prd_table) continue; pci_free_consistent(atp_dev->pdev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prd_bus); atp_dev->id[j][k].prd_table = NULL; } } } static int atp870u_init_tables(struct Scsi_Host *host) { struct atp_unit *atp_dev = (struct atp_unit *)&host->hostdata; int c,k; for(c=0;c < 2;c++) { for(k=0;k<16;k++) { atp_dev->id[c][k].prd_table = pci_alloc_consistent(atp_dev->pdev, 1024, &(atp_dev->id[c][k].prd_bus)); if (!atp_dev->id[c][k].prd_table) { printk("atp870u_init_tables fail\n"); atp870u_free_tables(host); return -ENOMEM; } atp_dev->id[c][k].prdaddr = atp_dev->id[c][k].prd_bus; atp_dev->id[c][k].devsp=0x20; atp_dev->id[c][k].devtype = 0x7f; atp_dev->id[c][k].curr_req = NULL; } atp_dev->active_id[c] = 0; atp_dev->wide_id[c] = 0; atp_dev->host_id[c] = 0x07; atp_dev->quhd[c] = 0; atp_dev->quend[c] = 0; atp_dev->last_cmd[c] = 0xff; atp_dev->in_snd[c] = 0; atp_dev->in_int[c] = 0; for (k = 0; k < qcnt; k++) { atp_dev->quereq[c][k] = NULL; } for (k = 0; k < 16; k++) { atp_dev->id[c][k].curr_req = NULL; atp_dev->sp[c][k] = 0x04; } } return 0; } /* return non-zero on detection */ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { unsigned char k, m, c; unsigned long flags; unsigned int base_io, tmport, error,n; unsigned char host_id; struct Scsi_Host *shpnt = NULL; struct atp_unit *atpdev, *p; unsigned char setupdata[2][16]; int count = 0; atpdev = kzalloc(sizeof(*atpdev), GFP_KERNEL); if (!atpdev) return -ENOMEM; if (pci_enable_device(pdev)) goto err_eio; if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { printk(KERN_INFO "atp870u: use 32bit DMA mask.\n"); } else { printk(KERN_ERR "atp870u: DMA mask required but not available.\n"); goto err_eio; } /* * It's probably easier to weed out some revisions like * this than via the PCI device table */ if (ent->device == PCI_DEVICE_ID_ARTOP_AEC7610) { atpdev->chip_ver = pdev->revision; if (atpdev->chip_ver < 2) goto err_eio; } switch (ent->device) { case PCI_DEVICE_ID_ARTOP_AEC7612UW: case PCI_DEVICE_ID_ARTOP_AEC7612SUW: case ATP880_DEVID1: case ATP880_DEVID2: case ATP885_DEVID: atpdev->chip_ver = 0x04; default: break; } base_io = pci_resource_start(pdev, 0); base_io &= 0xfffffff8; if ((ent->device == ATP880_DEVID1)||(ent->device == ATP880_DEVID2)) { atpdev->chip_ver = pdev->revision; pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);//JCC082803 host_id = inb(base_io + 0x39); host_id >>= 0x04; printk(KERN_INFO " ACARD AEC-67160 PCI Ultra3 LVD Host Adapter: %d" " IO:%x, IRQ:%d.\n", count, base_io, pdev->irq); atpdev->ioport[0] = base_io + 0x40; atpdev->pciport[0] = base_io + 0x28; atpdev->dev_id = ent->device; atpdev->host_id[0] = host_id; tmport = base_io + 0x22; atpdev->scam_on = inb(tmport); tmport += 0x13; atpdev->global_map[0] = inb(tmport); tmport += 0x07; atpdev->ultra_map[0] = inw(tmport); n = 0x3f09; next_fblk_880: if (n >= 0x4000) goto flash_ok_880; m = 0; outw(n, base_io + 0x34); n += 0x0002; if (inb(base_io + 0x30) == 0xff) goto flash_ok_880; atpdev->sp[0][m++] = inb(base_io + 0x30); atpdev->sp[0][m++] = inb(base_io + 0x31); atpdev->sp[0][m++] = inb(base_io + 0x32); atpdev->sp[0][m++] = inb(base_io + 0x33); outw(n, base_io + 0x34); n += 0x0002; atpdev->sp[0][m++] = inb(base_io + 0x30); atpdev->sp[0][m++] = inb(base_io + 0x31); atpdev->sp[0][m++] = inb(base_io + 0x32); atpdev->sp[0][m++] = inb(base_io + 0x33); outw(n, base_io + 
0x34); n += 0x0002; atpdev->sp[0][m++] = inb(base_io + 0x30); atpdev->sp[0][m++] = inb(base_io + 0x31); atpdev->sp[0][m++] = inb(base_io + 0x32); atpdev->sp[0][m++] = inb(base_io + 0x33); outw(n, base_io + 0x34); n += 0x0002; atpdev->sp[0][m++] = inb(base_io + 0x30); atpdev->sp[0][m++] = inb(base_io + 0x31); atpdev->sp[0][m++] = inb(base_io + 0x32); atpdev->sp[0][m++] = inb(base_io + 0x33); n += 0x0018; goto next_fblk_880; flash_ok_880: outw(0, base_io + 0x34); atpdev->ultra_map[0] = 0; atpdev->async[0] = 0; for (k = 0; k < 16; k++) { n = 1; n = n << k; if (atpdev->sp[0][k] > 1) { atpdev->ultra_map[0] |= n; } else { if (atpdev->sp[0][k] == 0) atpdev->async[0] |= n; } } atpdev->async[0] = ~(atpdev->async[0]); outb(atpdev->global_map[0], base_io + 0x35); shpnt = scsi_host_alloc(&atp870u_template, sizeof(struct atp_unit)); if (!shpnt) goto err_nomem; p = (struct atp_unit *)&shpnt->hostdata; atpdev->host = shpnt; atpdev->pdev = pdev; pci_set_drvdata(pdev, p); memcpy(p, atpdev, sizeof(*atpdev)); if (atp870u_init_tables(shpnt) < 0) { printk(KERN_ERR "Unable to allocate tables for Acard controller\n"); goto unregister; } if (request_irq(pdev->irq, atp870u_intr_handle, IRQF_SHARED, "atp880i", shpnt)) { printk(KERN_ERR "Unable to allocate IRQ%d for Acard controller.\n", pdev->irq); goto free_tables; } spin_lock_irqsave(shpnt->host_lock, flags); tmport = base_io + 0x38; k = inb(tmport) & 0x80; outb(k, tmport); tmport += 0x03; outb(0x20, tmport); mdelay(32); outb(0, tmport); mdelay(32); tmport = base_io + 0x5b; inb(tmport); tmport -= 0x04; inb(tmport); tmport = base_io + 0x40; outb((host_id | 0x08), tmport); tmport += 0x18; outb(0, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0) mdelay(1); tmport -= 0x08; inb(tmport); tmport = base_io + 0x41; outb(8, tmport++); outb(0x7f, tmport); tmport = base_io + 0x51; outb(0x20, tmport); tscam(shpnt); is880(p, base_io); tmport = base_io + 0x38; outb(0xb0, tmport); shpnt->max_id = 16; shpnt->this_id = host_id; shpnt->unique_id = base_io; shpnt->io_port = base_io; shpnt->n_io_port = 0x60; /* Number of bytes of I/O space used */ shpnt->irq = pdev->irq; } else if (ent->device == ATP885_DEVID) { printk(KERN_INFO " ACARD AEC-67162 PCI Ultra3 LVD Host Adapter: IO:%x, IRQ:%d.\n" , base_io, pdev->irq); atpdev->pdev = pdev; atpdev->dev_id = ent->device; atpdev->baseport = base_io; atpdev->ioport[0] = base_io + 0x80; atpdev->ioport[1] = base_io + 0xc0; atpdev->pciport[0] = base_io + 0x40; atpdev->pciport[1] = base_io + 0x50; shpnt = scsi_host_alloc(&atp870u_template, sizeof(struct atp_unit)); if (!shpnt) goto err_nomem; p = (struct atp_unit *)&shpnt->hostdata; atpdev->host = shpnt; atpdev->pdev = pdev; pci_set_drvdata(pdev, p); memcpy(p, atpdev, sizeof(struct atp_unit)); if (atp870u_init_tables(shpnt) < 0) goto unregister; #ifdef ED_DBGP printk("request_irq() shpnt %p hostdata %p\n", shpnt, p); #endif if (request_irq(pdev->irq, atp870u_intr_handle, IRQF_SHARED, "atp870u", shpnt)) { printk(KERN_ERR "Unable to allocate IRQ for Acard controller.\n"); goto free_tables; } spin_lock_irqsave(shpnt->host_lock, flags); c=inb(base_io + 0x29); outb((c | 0x04),base_io + 0x29); n=0x1f80; next_fblk_885: if (n >= 0x2000) { goto flash_ok_885; } outw(n,base_io + 0x3c); if (inl(base_io + 0x38) == 0xffffffff) { goto flash_ok_885; } for (m=0; m < 2; m++) { p->global_map[m]= 0; for (k=0; k < 4; k++) { outw(n++,base_io + 0x3c); ((unsigned long *)&setupdata[m][0])[k]=inl(base_io + 0x38); } for (k=0; k < 4; k++) { outw(n++,base_io + 0x3c); ((unsigned long *)&p->sp[m][0])[k]=inl(base_io 
+ 0x38); } n += 8; } goto next_fblk_885; flash_ok_885: #ifdef ED_DBGP printk( "Flash Read OK\n"); #endif c=inb(base_io + 0x29); outb((c & 0xfb),base_io + 0x29); for (c=0;c < 2;c++) { p->ultra_map[c]=0; p->async[c] = 0; for (k=0; k < 16; k++) { n=1; n = n << k; if (p->sp[c][k] > 1) { p->ultra_map[c] |= n; } else { if (p->sp[c][k] == 0) { p->async[c] |= n; } } } p->async[c] = ~(p->async[c]); if (p->global_map[c] == 0) { k=setupdata[c][1]; if ((k & 0x40) != 0) p->global_map[c] |= 0x20; k &= 0x07; p->global_map[c] |= k; if ((setupdata[c][2] & 0x04) != 0) p->global_map[c] |= 0x08; p->host_id[c] = setupdata[c][0] & 0x07; } } k = inb(base_io + 0x28) & 0x8f; k |= 0x10; outb(k, base_io + 0x28); outb(0x80, base_io + 0x41); outb(0x80, base_io + 0x51); mdelay(100); outb(0, base_io + 0x41); outb(0, base_io + 0x51); mdelay(1000); inb(base_io + 0x9b); inb(base_io + 0x97); inb(base_io + 0xdb); inb(base_io + 0xd7); tmport = base_io + 0x80; k=p->host_id[0]; if (k > 7) k = (k & 0x07) | 0x40; k |= 0x08; outb(k, tmport); tmport += 0x18; outb(0, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0) cpu_relax(); tmport -= 0x08; inb(tmport); tmport = base_io + 0x81; outb(8, tmport++); outb(0x7f, tmport); tmport = base_io + 0x91; outb(0x20, tmport); tmport = base_io + 0xc0; k=p->host_id[1]; if (k > 7) k = (k & 0x07) | 0x40; k |= 0x08; outb(k, tmport); tmport += 0x18; outb(0, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0) cpu_relax(); tmport -= 0x08; inb(tmport); tmport = base_io + 0xc1; outb(8, tmport++); outb(0x7f, tmport); tmport = base_io + 0xd1; outb(0x20, tmport); tscam_885(); printk(KERN_INFO " Scanning Channel A SCSI Device ...\n"); is885(p, base_io + 0x80, 0); printk(KERN_INFO " Scanning Channel B SCSI Device ...\n"); is885(p, base_io + 0xc0, 1); k = inb(base_io + 0x28) & 0xcf; k |= 0xc0; outb(k, base_io + 0x28); k = inb(base_io + 0x1f) | 0x80; outb(k, base_io + 0x1f); k = inb(base_io + 0x29) | 0x01; outb(k, base_io + 0x29); #ifdef ED_DBGP //printk("atp885: atp_host[0] 0x%p\n", atp_host[0]); #endif shpnt->max_id = 16; shpnt->max_lun = (p->global_map[0] & 0x07) + 1; shpnt->max_channel = 1; shpnt->this_id = p->host_id[0]; shpnt->unique_id = base_io; shpnt->io_port = base_io; shpnt->n_io_port = 0xff; /* Number of bytes of I/O space used */ shpnt->irq = pdev->irq; } else { error = pci_read_config_byte(pdev, 0x49, &host_id); printk(KERN_INFO " ACARD AEC-671X PCI Ultra/W SCSI-2/3 Host Adapter: %d " "IO:%x, IRQ:%d.\n", count, base_io, pdev->irq); atpdev->ioport[0] = base_io; atpdev->pciport[0] = base_io + 0x20; atpdev->dev_id = ent->device; host_id &= 0x07; atpdev->host_id[0] = host_id; tmport = base_io + 0x22; atpdev->scam_on = inb(tmport); tmport += 0x0b; atpdev->global_map[0] = inb(tmport++); atpdev->ultra_map[0] = inw(tmport); if (atpdev->ultra_map[0] == 0) { atpdev->scam_on = 0x00; atpdev->global_map[0] = 0x20; atpdev->ultra_map[0] = 0xffff; } shpnt = scsi_host_alloc(&atp870u_template, sizeof(struct atp_unit)); if (!shpnt) goto err_nomem; p = (struct atp_unit *)&shpnt->hostdata; atpdev->host = shpnt; atpdev->pdev = pdev; pci_set_drvdata(pdev, p); memcpy(p, atpdev, sizeof(*atpdev)); if (atp870u_init_tables(shpnt) < 0) goto unregister; if (request_irq(pdev->irq, atp870u_intr_handle, IRQF_SHARED, "atp870i", shpnt)) { printk(KERN_ERR "Unable to allocate IRQ%d for Acard controller.\n", pdev->irq); goto free_tables; } spin_lock_irqsave(shpnt->host_lock, flags); if (atpdev->chip_ver > 0x07) { /* check if atp876 chip then enable terminator */ tmport = base_io + 0x3e; outb(0x00, tmport); } tmport = 
base_io + 0x3a; k = (inb(tmport) & 0xf3) | 0x10; outb(k, tmport); outb((k & 0xdf), tmport); mdelay(32); outb(k, tmport); mdelay(32); tmport = base_io; outb((host_id | 0x08), tmport); tmport += 0x18; outb(0, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0) mdelay(1); tmport -= 0x08; inb(tmport); tmport = base_io + 1; outb(8, tmport++); outb(0x7f, tmport); tmport = base_io + 0x11; outb(0x20, tmport); tscam(shpnt); is870(p, base_io); tmport = base_io + 0x3a; outb((inb(tmport) & 0xef), tmport); tmport++; outb((inb(tmport) | 0x20), tmport); if (atpdev->chip_ver == 4) shpnt->max_id = 16; else shpnt->max_id = 8; shpnt->this_id = host_id; shpnt->unique_id = base_io; shpnt->io_port = base_io; shpnt->n_io_port = 0x40; /* Number of bytes of I/O space used */ shpnt->irq = pdev->irq; } spin_unlock_irqrestore(shpnt->host_lock, flags); if(ent->device==ATP885_DEVID) { if(!request_region(base_io, 0xff, "atp870u")) /* Register the IO ports that we use */ goto request_io_fail; } else if((ent->device==ATP880_DEVID1)||(ent->device==ATP880_DEVID2)) { if(!request_region(base_io, 0x60, "atp870u")) /* Register the IO ports that we use */ goto request_io_fail; } else { if(!request_region(base_io, 0x40, "atp870u")) /* Register the IO ports that we use */ goto request_io_fail; } count++; if (scsi_add_host(shpnt, &pdev->dev)) goto scsi_add_fail; scsi_scan_host(shpnt); #ifdef ED_DBGP printk("atp870u_prob : exit\n"); #endif return 0; scsi_add_fail: printk("atp870u_prob:scsi_add_fail\n"); if(ent->device==ATP885_DEVID) { release_region(base_io, 0xff); } else if((ent->device==ATP880_DEVID1)||(ent->device==ATP880_DEVID2)) { release_region(base_io, 0x60); } else { release_region(base_io, 0x40); } request_io_fail: printk("atp870u_prob:request_io_fail\n"); free_irq(pdev->irq, shpnt); free_tables: printk("atp870u_prob:free_table\n"); atp870u_free_tables(shpnt); unregister: printk("atp870u_prob:unregister\n"); scsi_host_put(shpnt); return -1; err_eio: kfree(atpdev); return -EIO; err_nomem: kfree(atpdev); return -ENOMEM; } /* The abort command does not leave the device in a clean state where it is available to be used again. Until this gets worked out, we will leave it commented out. 
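 * As wired up below, the handler only dumps the adapter registers and the
 * per-target outstanding commands to the log and then returns SUCCESS.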
*/ static int atp870u_abort(struct scsi_cmnd * SCpnt) { unsigned char j, k, c; struct scsi_cmnd *workrequ; unsigned int tmport; struct atp_unit *dev; struct Scsi_Host *host; host = SCpnt->device->host; dev = (struct atp_unit *)&host->hostdata; c = scmd_channel(SCpnt); printk(" atp870u: abort Channel = %x \n", c); printk("working=%x last_cmd=%x ", dev->working[c], dev->last_cmd[c]); printk(" quhdu=%x quendu=%x ", dev->quhd[c], dev->quend[c]); tmport = dev->ioport[c]; for (j = 0; j < 0x18; j++) { printk(" r%2x=%2x", j, inb(tmport++)); } tmport += 0x04; printk(" r1c=%2x", inb(tmport)); tmport += 0x03; printk(" r1f=%2x in_snd=%2x ", inb(tmport), dev->in_snd[c]); tmport= dev->pciport[c]; printk(" d00=%2x", inb(tmport)); tmport += 0x02; printk(" d02=%2x", inb(tmport)); for(j=0;j<16;j++) { if (dev->id[c][j].curr_req != NULL) { workrequ = dev->id[c][j].curr_req; printk("\n que cdb= "); for (k=0; k < workrequ->cmd_len; k++) { printk(" %2x ",workrequ->cmnd[k]); } printk(" last_lenu= %x ",(unsigned int)dev->id[c][j].last_len); } } return SUCCESS; } static const char *atp870u_info(struct Scsi_Host *notused) { static char buffer[128]; strcpy(buffer, "ACARD AEC-6710/6712/67160 PCI Ultra/W/LVD SCSI-3 Adapter Driver V2.6+ac "); return buffer; } #define BLS buffer + len + size static int atp870u_proc_info(struct Scsi_Host *HBAptr, char *buffer, char **start, off_t offset, int length, int inout) { static u8 buff[512]; int size = 0; int len = 0; off_t begin = 0; off_t pos = 0; if (inout) return -EINVAL; if (offset == 0) memset(buff, 0, sizeof(buff)); size += sprintf(BLS, "ACARD AEC-671X Driver Version: 2.6+ac\n"); len += size; pos = begin + len; size = 0; size += sprintf(BLS, "\n"); size += sprintf(BLS, "Adapter Configuration:\n"); size += sprintf(BLS, " Base IO: %#.4lx\n", HBAptr->io_port); size += sprintf(BLS, " IRQ: %d\n", HBAptr->irq); len += size; pos = begin + len; *start = buffer + (offset - begin); /* Start of wanted data */ len -= (offset - begin); /* Start slop */ if (len > length) { len = length; /* Ending slop */ } return (len); } static int atp870u_biosparam(struct scsi_device *disk, struct block_device *dev, sector_t capacity, int *ip) { int heads, sectors, cylinders; heads = 64; sectors = 32; cylinders = (unsigned long)capacity / (heads * sectors); if (cylinders > 1024) { heads = 255; sectors = 63; cylinders = (unsigned long)capacity / (heads * sectors); } ip[0] = heads; ip[1] = sectors; ip[2] = cylinders; return 0; } static void atp870u_remove (struct pci_dev *pdev) { struct atp_unit *devext = pci_get_drvdata(pdev); struct Scsi_Host *pshost = devext->host; scsi_remove_host(pshost); printk(KERN_INFO "free_irq : %d\n",pshost->irq); free_irq(pshost->irq, pshost); release_region(pshost->io_port, pshost->n_io_port); printk(KERN_INFO "atp870u_free_tables : %p\n",pshost); atp870u_free_tables(pshost); printk(KERN_INFO "scsi_host_put : %p\n",pshost); scsi_host_put(pshost); printk(KERN_INFO "pci_set_drvdata : %p\n",pdev); pci_set_drvdata(pdev, NULL); } MODULE_LICENSE("GPL"); static struct scsi_host_template atp870u_template = { .module = THIS_MODULE, .name = "atp870u" /* name */, .proc_name = "atp870u", .proc_info = atp870u_proc_info, .info = atp870u_info /* info */, .queuecommand = atp870u_queuecommand /* queuecommand */, .eh_abort_handler = atp870u_abort /* abort */, .bios_param = atp870u_biosparam /* biosparm */, .can_queue = qcnt /* can_queue */, .this_id = 7 /* SCSI ID */, .sg_tablesize = ATP870U_SCATTER /*SG_ALL*/ /*SG_NONE*/, .cmd_per_lun = ATP870U_CMDLUN /* commands per lun */, .use_clustering 
= ENABLE_CLUSTERING, .max_sectors = ATP870U_MAX_SECTORS, }; static struct pci_device_id atp870u_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP885_DEVID) }, { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP880_DEVID1) }, { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP880_DEVID2) }, { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7610) }, { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612UW) }, { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612U) }, { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612S) }, { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612D) }, { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612SUW) }, { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_8060) }, { 0, }, }; MODULE_DEVICE_TABLE(pci, atp870u_id_table); static struct pci_driver atp870u_driver = { .id_table = atp870u_id_table, .name = "atp870u", .probe = atp870u_probe, .remove = __devexit_p(atp870u_remove), }; static int __init atp870u_init(void) { #ifdef ED_DBGP printk("atp870u_init: Entry\n"); #endif return pci_register_driver(&atp870u_driver); } static void __exit atp870u_exit(void) { #ifdef ED_DBGP printk("atp870u_exit: Entry\n"); #endif pci_unregister_driver(&atp870u_driver); } static void tscam_885(void) { unsigned char i; for (i = 0; i < 0x2; i++) { mdelay(300); } return; } static void is885(struct atp_unit *dev, unsigned int wkport,unsigned char c) { unsigned int tmport; unsigned char i, j, k, rmb, n, lvdmode; unsigned short int m; static unsigned char mbuf[512]; static unsigned char satn[9] = {0, 0, 0, 0, 0, 0, 0, 6, 6}; static unsigned char inqd[9] = {0x12, 0, 0, 0, 0x24, 0, 0, 0x24, 6}; static unsigned char synn[6] = {0x80, 1, 3, 1, 0x19, 0x0e}; unsigned char synu[6] = {0x80, 1, 3, 1, 0x0a, 0x0e}; static unsigned char synw[6] = {0x80, 1, 3, 1, 0x19, 0x0e}; unsigned char synuw[6] = {0x80, 1, 3, 1, 0x0a, 0x0e}; static unsigned char wide[6] = {0x80, 1, 2, 3, 1, 0}; static unsigned char u3[9] = { 0x80,1,6,4,0x09,00,0x0e,0x01,0x02 }; lvdmode=inb(wkport + 0x1b) >> 7; for (i = 0; i < 16; i++) { m = 1; m = m << i; if ((m & dev->active_id[c]) != 0) { continue; } if (i == dev->host_id[c]) { printk(KERN_INFO " ID: %2d Host Adapter\n", dev->host_id[c]); continue; } tmport = wkport + 0x1b; outb(0x01, tmport); tmport = wkport + 0x01; outb(0x08, tmport++); outb(0x7f, tmport++); outb(satn[0], tmport++); outb(satn[1], tmport++); outb(satn[2], tmport++); outb(satn[3], tmport++); outb(satn[4], tmport++); outb(satn[5], tmport++); tmport += 0x06; outb(0, tmport); tmport += 0x02; outb(dev->id[c][i].devsp, tmport++); outb(0, tmport++); outb(satn[6], tmport++); outb(satn[7], tmport++); j = i; if ((j & 0x08) != 0) { j = (j & 0x07) | 0x40; } outb(j, tmport); tmport += 0x03; outb(satn[8], tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; if ((inb(tmport) != 0x11) && (inb(tmport) != 0x8e)) { continue; } while (inb(tmport) != 0x8e) cpu_relax(); dev->active_id[c] |= m; tmport = wkport + 0x10; outb(0x30, tmport); tmport = wkport + 0x14; outb(0x00, tmport); phase_cmd: tmport = wkport + 0x18; outb(0x08, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; j = inb(tmport); if (j != 0x16) { tmport = wkport + 0x10; outb(0x41, tmport); goto phase_cmd; } sel_ok: tmport = wkport + 0x03; outb(inqd[0], tmport++); outb(inqd[1], tmport++); outb(inqd[2], tmport++); outb(inqd[3], tmport++); outb(inqd[4], tmport++); outb(inqd[5], tmport); tmport += 0x07; outb(0, tmport); tmport += 0x02; outb(dev->id[c][i].devsp, tmport++); 
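	/*
	 * devsp holds the per-target speed/offset byte negotiated so far; the
	 * remaining inqd[] bytes and the status poll below issue the INQUIRY
	 * whose data is used to print the device name and to decide on wide
	 * and sync negotiation.
	 */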
outb(0, tmport++); outb(inqd[6], tmport++); outb(inqd[7], tmport++); tmport += 0x03; outb(inqd[8], tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; if ((inb(tmport) != 0x11) && (inb(tmport) != 0x8e)) { continue; } while (inb(tmport) != 0x8e) cpu_relax(); tmport = wkport + 0x1b; outb(0x00, tmport); tmport = wkport + 0x18; outb(0x08, tmport); tmport += 0x07; j = 0; rd_inq_data: k = inb(tmport); if ((k & 0x01) != 0) { tmport -= 0x06; mbuf[j++] = inb(tmport); tmport += 0x06; goto rd_inq_data; } if ((k & 0x80) == 0) { goto rd_inq_data; } tmport -= 0x08; j = inb(tmport); if (j == 0x16) { goto inq_ok; } tmport = wkport + 0x10; outb(0x46, tmport); tmport += 0x02; outb(0, tmport++); outb(0, tmport++); outb(0, tmport++); tmport += 0x03; outb(0x08, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; if (inb(tmport) != 0x16) { goto sel_ok; } inq_ok: mbuf[36] = 0; printk( KERN_INFO" ID: %2d %s\n", i, &mbuf[8]); dev->id[c][i].devtype = mbuf[0]; rmb = mbuf[1]; n = mbuf[7]; if ((mbuf[7] & 0x60) == 0) { goto not_wide; } if ((i < 8) && ((dev->global_map[c] & 0x20) == 0)) { goto not_wide; } if (lvdmode == 0) { goto chg_wide; } if (dev->sp[c][i] != 0x04) { // force u2 goto chg_wide; } tmport = wkport + 0x1b; outb(0x01, tmport); tmport = wkport + 0x03; outb(satn[0], tmport++); outb(satn[1], tmport++); outb(satn[2], tmport++); outb(satn[3], tmport++); outb(satn[4], tmport++); outb(satn[5], tmport++); tmport += 0x06; outb(0, tmport); tmport += 0x02; outb(dev->id[c][i].devsp, tmport++); outb(0, tmport++); outb(satn[6], tmport++); outb(satn[7], tmport++); tmport += 0x03; outb(satn[8], tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; if ((inb(tmport) != 0x11) && (inb(tmport) != 0x8e)) { continue; } while (inb(tmport) != 0x8e) cpu_relax(); try_u3: j = 0; tmport = wkport + 0x14; outb(0x09, tmport); tmport += 0x04; outb(0x20, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0) { if ((inb(tmport) & 0x01) != 0) { tmport -= 0x06; outb(u3[j++], tmport); tmport += 0x06; } cpu_relax(); } tmport -= 0x08; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); j = inb(tmport) & 0x0f; if (j == 0x0f) { goto u3p_in; } if (j == 0x0a) { goto u3p_cmd; } if (j == 0x0e) { goto try_u3; } continue; u3p_out: tmport = wkport + 0x18; outb(0x20, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0) { if ((inb(tmport) & 0x01) != 0) { tmport -= 0x06; outb(0, tmport); tmport += 0x06; } cpu_relax(); } tmport -= 0x08; j = inb(tmport) & 0x0f; if (j == 0x0f) { goto u3p_in; } if (j == 0x0a) { goto u3p_cmd; } if (j == 0x0e) { goto u3p_out; } continue; u3p_in: tmport = wkport + 0x14; outb(0x09, tmport); tmport += 0x04; outb(0x20, tmport); tmport += 0x07; k = 0; u3p_in1: j = inb(tmport); if ((j & 0x01) != 0) { tmport -= 0x06; mbuf[k++] = inb(tmport); tmport += 0x06; goto u3p_in1; } if ((j & 0x80) == 0x00) { goto u3p_in1; } tmport -= 0x08; j = inb(tmport) & 0x0f; if (j == 0x0f) { goto u3p_in; } if (j == 0x0a) { goto u3p_cmd; } if (j == 0x0e) { goto u3p_out; } continue; u3p_cmd: tmport = wkport + 0x10; outb(0x30, tmport); tmport = wkport + 0x14; outb(0x00, tmport); tmport += 0x04; outb(0x08, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00); tmport -= 0x08; j = inb(tmport); if (j != 0x16) { if (j == 0x4e) { goto u3p_out; } continue; } if (mbuf[0] != 0x01) { goto chg_wide; } if (mbuf[1] != 0x06) { goto chg_wide; } if (mbuf[2] != 0x04) { goto chg_wide; } if (mbuf[3] == 0x09) { m = 1; m = m << i; dev->wide_id[c] |= 
m; dev->id[c][i].devsp = 0xce; #ifdef ED_DBGP printk("dev->id[%2d][%2d].devsp = %2x\n",c,i,dev->id[c][i].devsp); #endif continue; } chg_wide: tmport = wkport + 0x1b; outb(0x01, tmport); tmport = wkport + 0x03; outb(satn[0], tmport++); outb(satn[1], tmport++); outb(satn[2], tmport++); outb(satn[3], tmport++); outb(satn[4], tmport++); outb(satn[5], tmport++); tmport += 0x06; outb(0, tmport); tmport += 0x02; outb(dev->id[c][i].devsp, tmport++); outb(0, tmport++); outb(satn[6], tmport++); outb(satn[7], tmport++); tmport += 0x03; outb(satn[8], tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; if ((inb(tmport) != 0x11) && (inb(tmport) != 0x8e)) { continue; } while (inb(tmport) != 0x8e) cpu_relax(); try_wide: j = 0; tmport = wkport + 0x14; outb(0x05, tmport); tmport += 0x04; outb(0x20, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0) { if ((inb(tmport) & 0x01) != 0) { tmport -= 0x06; outb(wide[j++], tmport); tmport += 0x06; } cpu_relax(); } tmport -= 0x08; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); j = inb(tmport) & 0x0f; if (j == 0x0f) { goto widep_in; } if (j == 0x0a) { goto widep_cmd; } if (j == 0x0e) { goto try_wide; } continue; widep_out: tmport = wkport + 0x18; outb(0x20, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0) { if ((inb(tmport) & 0x01) != 0) { tmport -= 0x06; outb(0, tmport); tmport += 0x06; } cpu_relax(); } tmport -= 0x08; j = inb(tmport) & 0x0f; if (j == 0x0f) { goto widep_in; } if (j == 0x0a) { goto widep_cmd; } if (j == 0x0e) { goto widep_out; } continue; widep_in: tmport = wkport + 0x14; outb(0xff, tmport); tmport += 0x04; outb(0x20, tmport); tmport += 0x07; k = 0; widep_in1: j = inb(tmport); if ((j & 0x01) != 0) { tmport -= 0x06; mbuf[k++] = inb(tmport); tmport += 0x06; goto widep_in1; } if ((j & 0x80) == 0x00) { goto widep_in1; } tmport -= 0x08; j = inb(tmport) & 0x0f; if (j == 0x0f) { goto widep_in; } if (j == 0x0a) { goto widep_cmd; } if (j == 0x0e) { goto widep_out; } continue; widep_cmd: tmport = wkport + 0x10; outb(0x30, tmport); tmport = wkport + 0x14; outb(0x00, tmport); tmport += 0x04; outb(0x08, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; j = inb(tmport); if (j != 0x16) { if (j == 0x4e) { goto widep_out; } continue; } if (mbuf[0] != 0x01) { goto not_wide; } if (mbuf[1] != 0x02) { goto not_wide; } if (mbuf[2] != 0x03) { goto not_wide; } if (mbuf[3] != 0x01) { goto not_wide; } m = 1; m = m << i; dev->wide_id[c] |= m; not_wide: if ((dev->id[c][i].devtype == 0x00) || (dev->id[c][i].devtype == 0x07) || ((dev->id[c][i].devtype == 0x05) && ((n & 0x10) != 0))) { m = 1; m = m << i; if ((dev->async[c] & m) != 0) { goto set_sync; } } continue; set_sync: if (dev->sp[c][i] == 0x02) { synu[4]=0x0c; synuw[4]=0x0c; } else { if (dev->sp[c][i] >= 0x03) { synu[4]=0x0a; synuw[4]=0x0a; } } tmport = wkport + 0x1b; j = 0; if ((m & dev->wide_id[c]) != 0) { j |= 0x01; } outb(j, tmport); tmport = wkport + 0x03; outb(satn[0], tmport++); outb(satn[1], tmport++); outb(satn[2], tmport++); outb(satn[3], tmport++); outb(satn[4], tmport++); outb(satn[5], tmport++); tmport += 0x06; outb(0, tmport); tmport += 0x02; outb(dev->id[c][i].devsp, tmport++); outb(0, tmport++); outb(satn[6], tmport++); outb(satn[7], tmport++); tmport += 0x03; outb(satn[8], tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; if ((inb(tmport) != 0x11) && (inb(tmport) != 0x8e)) { continue; } while (inb(tmport) != 0x8e) cpu_relax(); try_sync: j = 0; tmport = wkport + 0x14; 
outb(0x06, tmport); tmport += 0x04; outb(0x20, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0) { if ((inb(tmport) & 0x01) != 0) { tmport -= 0x06; if ((m & dev->wide_id[c]) != 0) { if ((m & dev->ultra_map[c]) != 0) { outb(synuw[j++], tmport); } else { outb(synw[j++], tmport); } } else { if ((m & dev->ultra_map[c]) != 0) { outb(synu[j++], tmport); } else { outb(synn[j++], tmport); } } tmport += 0x06; } } tmport -= 0x08; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); j = inb(tmport) & 0x0f; if (j == 0x0f) { goto phase_ins; } if (j == 0x0a) { goto phase_cmds; } if (j == 0x0e) { goto try_sync; } continue; phase_outs: tmport = wkport + 0x18; outb(0x20, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) { if ((inb(tmport) & 0x01) != 0x00) { tmport -= 0x06; outb(0x00, tmport); tmport += 0x06; } cpu_relax(); } tmport -= 0x08; j = inb(tmport); if (j == 0x85) { goto tar_dcons; } j &= 0x0f; if (j == 0x0f) { goto phase_ins; } if (j == 0x0a) { goto phase_cmds; } if (j == 0x0e) { goto phase_outs; } continue; phase_ins: tmport = wkport + 0x14; outb(0x06, tmport); tmport += 0x04; outb(0x20, tmport); tmport += 0x07; k = 0; phase_ins1: j = inb(tmport); if ((j & 0x01) != 0x00) { tmport -= 0x06; mbuf[k++] = inb(tmport); tmport += 0x06; goto phase_ins1; } if ((j & 0x80) == 0x00) { goto phase_ins1; } tmport -= 0x08; while ((inb(tmport) & 0x80) == 0x00); j = inb(tmport); if (j == 0x85) { goto tar_dcons; } j &= 0x0f; if (j == 0x0f) { goto phase_ins; } if (j == 0x0a) { goto phase_cmds; } if (j == 0x0e) { goto phase_outs; } continue; phase_cmds: tmport = wkport + 0x10; outb(0x30, tmport); tar_dcons: tmport = wkport + 0x14; outb(0x00, tmport); tmport += 0x04; outb(0x08, tmport); tmport += 0x07; while ((inb(tmport) & 0x80) == 0x00) cpu_relax(); tmport -= 0x08; j = inb(tmport); if (j != 0x16) { continue; } if (mbuf[0] != 0x01) { continue; } if (mbuf[1] != 0x03) { continue; } if (mbuf[4] == 0x00) { continue; } if (mbuf[3] > 0x64) { continue; } if (mbuf[4] > 0x0e) { mbuf[4] = 0x0e; } dev->id[c][i].devsp = mbuf[4]; if (mbuf[3] < 0x0c){ j = 0xb0; goto set_syn_ok; } if ((mbuf[3] < 0x0d) && (rmb == 0)) { j = 0xa0; goto set_syn_ok; } if (mbuf[3] < 0x1a) { j = 0x20; goto set_syn_ok; } if (mbuf[3] < 0x33) { j = 0x40; goto set_syn_ok; } if (mbuf[3] < 0x4c) { j = 0x50; goto set_syn_ok; } j = 0x60; set_syn_ok: dev->id[c][i].devsp = (dev->id[c][i].devsp & 0x0f) | j; #ifdef ED_DBGP printk("dev->id[%2d][%2d].devsp = %2x\n",c,i,dev->id[c][i].devsp); #endif } tmport = wkport + 0x16; outb(0x80, tmport); } module_init(atp870u_init); module_exit(atp870u_exit);
GustavoRD78/78Kernel-5.1.1-Xperia-Z1-14.6.A.0.368
drivers/scsi/atp870u.c
C
gpl-2.0
86,479
/* * Zoran ZR36060 basic configuration functions * * Copyright (C) 2002 Laurent Pinchart <laurent.pinchart@skynet.be> * * $Id: zr36060.c,v 1.1.2.22 2003/05/06 09:35:36 rbultje Exp $ * * ------------------------------------------------------------------------ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * ------------------------------------------------------------------------ */ #define ZR060_VERSION "v0.7" #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/types.h> #include <linux/wait.h> /* I/O commands, error codes */ #include <asm/io.h> /* headerfile of this module */ #include "zr36060.h" /* codec io API */ #include "videocodec.h" /* it doesn't make sense to have more than 20 or so, just to prevent some unwanted loops */ #define MAX_CODECS 20 /* amount of chips attached via this driver */ static int zr36060_codecs; static int low_bitrate; module_param(low_bitrate, bool, 0); MODULE_PARM_DESC(low_bitrate, "Buz compatibility option, halves bitrate"); /* debugging is available via module parameter */ static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0-4)"); #define dprintk(num, format, args...) \ do { \ if (debug >= num) \ printk(format, ##args); \ } while (0) /* ========================================================================= Local hardware I/O functions: read/write via codec layer (registers are located in the master device) ========================================================================= */ /* read and write functions */ static u8 zr36060_read (struct zr36060 *ptr, u16 reg) { u8 value = 0; // just in case something is wrong... if (ptr->codec->master_data->readreg) value = (ptr->codec->master_data->readreg(ptr->codec, reg)) & 0xff; else dprintk(1, KERN_ERR "%s: invalid I/O setup, nothing read!\n", ptr->name); //dprintk(4, "%s: reading from 0x%04x: %02x\n",ptr->name,reg,value); return value; } static void zr36060_write(struct zr36060 *ptr, u16 reg, u8 value) { //dprintk(4, "%s: writing 0x%02x to 0x%04x\n",ptr->name,value,reg); dprintk(4, "0x%02x @0x%04x\n", value, reg); // just in case something is wrong... 
if (ptr->codec->master_data->writereg) ptr->codec->master_data->writereg(ptr->codec, reg, value); else dprintk(1, KERN_ERR "%s: invalid I/O setup, nothing written!\n", ptr->name); } /* ========================================================================= Local helper function: status read ========================================================================= */ /* status is kept in datastructure */ static u8 zr36060_read_status (struct zr36060 *ptr) { ptr->status = zr36060_read(ptr, ZR060_CFSR); zr36060_read(ptr, 0); return ptr->status; } /* ========================================================================= Local helper function: scale factor read ========================================================================= */ /* scale factor is kept in datastructure */ static u16 zr36060_read_scalefactor (struct zr36060 *ptr) { ptr->scalefact = (zr36060_read(ptr, ZR060_SF_HI) << 8) | (zr36060_read(ptr, ZR060_SF_LO) & 0xFF); /* leave 0 selected for an eventually GO from master */ zr36060_read(ptr, 0); return ptr->scalefact; } /* ========================================================================= Local helper function: wait if codec is ready to proceed (end of processing) or time is over ========================================================================= */ static void zr36060_wait_end (struct zr36060 *ptr) { int i = 0; while (zr36060_read_status(ptr) & ZR060_CFSR_Busy) { udelay(1); if (i++ > 200000) { // 200ms, there is for sure something wrong!!! dprintk(1, "%s: timeout at wait_end (last status: 0x%02x)\n", ptr->name, ptr->status); break; } } } /* ========================================================================= Local helper function: basic test of "connectivity", writes/reads to/from memory the SOF marker ========================================================================= */ static int zr36060_basic_test (struct zr36060 *ptr) { if ((zr36060_read(ptr, ZR060_IDR_DEV) != 0x33) && (zr36060_read(ptr, ZR060_IDR_REV) != 0x01)) { dprintk(1, KERN_ERR "%s: attach failed, can't connect to jpeg processor!\n", ptr->name); return -ENXIO; } zr36060_wait_end(ptr); if (ptr->status & ZR060_CFSR_Busy) { dprintk(1, KERN_ERR "%s: attach failed, jpeg processor failed (end flag)!\n", ptr->name); return -EBUSY; } return 0; /* looks good! */ } /* ========================================================================= Local helper function: simple loop for pushing the init datasets ========================================================================= */ static int zr36060_pushit (struct zr36060 *ptr, u16 startreg, u16 len, const char *data) { int i = 0; dprintk(4, "%s: write data block to 0x%04x (len=%d)\n", ptr->name, startreg, len); while (i < len) { zr36060_write(ptr, startreg++, data[i++]); } return i; } /* ========================================================================= Basic datasets: jpeg baseline setup data (you find it on lots places in internet, or just extract it from any regular .jpg image...) Could be variable, but until it's not needed it they are just fixed to save memory. Otherwise expand zr36060 structure with arrays, push the values to it and initialize from there, as e.g. the linux zr36057/60 driver does it. 
========================================================================= */ static const char zr36060_dqt[0x86] = { 0xff, 0xdb, //Marker: DQT 0x00, 0x84, //Length: 2*65+2 0x00, //Pq,Tq first table 0x10, 0x0b, 0x0c, 0x0e, 0x0c, 0x0a, 0x10, 0x0e, 0x0d, 0x0e, 0x12, 0x11, 0x10, 0x13, 0x18, 0x28, 0x1a, 0x18, 0x16, 0x16, 0x18, 0x31, 0x23, 0x25, 0x1d, 0x28, 0x3a, 0x33, 0x3d, 0x3c, 0x39, 0x33, 0x38, 0x37, 0x40, 0x48, 0x5c, 0x4e, 0x40, 0x44, 0x57, 0x45, 0x37, 0x38, 0x50, 0x6d, 0x51, 0x57, 0x5f, 0x62, 0x67, 0x68, 0x67, 0x3e, 0x4d, 0x71, 0x79, 0x70, 0x64, 0x78, 0x5c, 0x65, 0x67, 0x63, 0x01, //Pq,Tq second table 0x11, 0x12, 0x12, 0x18, 0x15, 0x18, 0x2f, 0x1a, 0x1a, 0x2f, 0x63, 0x42, 0x38, 0x42, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63 }; static const char zr36060_dht[0x1a4] = { 0xff, 0xc4, //Marker: DHT 0x01, 0xa2, //Length: 2*AC, 2*DC 0x00, //DC first table 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x01, //DC second table 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x10, //AC first table 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03, 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7D, 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xA1, 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15, 0x52, 0xD1, 0xF0, 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0x11, //AC second table 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77, 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91, 0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0, 0x15, 0x62, 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25, 0xF1, 0x17, 0x18, 0x19, 0x1A, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 
0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA }; /* jpeg baseline setup, this is just fixed in this driver (YUV pictures) */ #define NO_OF_COMPONENTS 0x3 //Y,U,V #define BASELINE_PRECISION 0x8 //MCU size (?) static const char zr36060_tq[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's QT static const char zr36060_td[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's DC static const char zr36060_ta[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's AC /* horizontal 422 decimation setup (maybe we support 411 or so later, too) */ static const char zr36060_decimation_h[8] = { 2, 1, 1, 0, 0, 0, 0, 0 }; static const char zr36060_decimation_v[8] = { 1, 1, 1, 0, 0, 0, 0, 0 }; /* ========================================================================= Local helper functions: calculation and setup of parameter-dependent JPEG baseline segments (needed for compression only) ========================================================================= */ /* ------------------------------------------------------------------------- */ /* SOF (start of frame) segment depends on width, height and sampling ratio of each color component */ static int zr36060_set_sof (struct zr36060 *ptr) { char sof_data[34]; // max. size of register set int i; dprintk(3, "%s: write SOF (%dx%d, %d components)\n", ptr->name, ptr->width, ptr->height, NO_OF_COMPONENTS); sof_data[0] = 0xff; sof_data[1] = 0xc0; sof_data[2] = 0x00; sof_data[3] = (3 * NO_OF_COMPONENTS) + 8; sof_data[4] = BASELINE_PRECISION; // only '8' possible with zr36060 sof_data[5] = (ptr->height) >> 8; sof_data[6] = (ptr->height) & 0xff; sof_data[7] = (ptr->width) >> 8; sof_data[8] = (ptr->width) & 0xff; sof_data[9] = NO_OF_COMPONENTS; for (i = 0; i < NO_OF_COMPONENTS; i++) { sof_data[10 + (i * 3)] = i; // index identifier sof_data[11 + (i * 3)] = (ptr->h_samp_ratio[i] << 4) | (ptr->v_samp_ratio[i]); // sampling ratios sof_data[12 + (i * 3)] = zr36060_tq[i]; // Q table selection } return zr36060_pushit(ptr, ZR060_SOF_IDX, (3 * NO_OF_COMPONENTS) + 10, sof_data); } /* ------------------------------------------------------------------------- */ /* SOS (start of scan) segment depends on the used scan components of each color component */ static int zr36060_set_sos (struct zr36060 *ptr) { char sos_data[16]; // max. size of register set int i; dprintk(3, "%s: write SOS\n", ptr->name); sos_data[0] = 0xff; sos_data[1] = 0xda; sos_data[2] = 0x00; sos_data[3] = 2 + 1 + (2 * NO_OF_COMPONENTS) + 3; sos_data[4] = NO_OF_COMPONENTS; for (i = 0; i < NO_OF_COMPONENTS; i++) { sos_data[5 + (i * 2)] = i; // index sos_data[6 + (i * 2)] = (zr36060_td[i] << 4) | zr36060_ta[i]; // AC/DC tbl.sel. } sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 2] = 00; // scan start sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 3] = 0x3f; sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 4] = 00; return zr36060_pushit(ptr, ZR060_SOS_IDX, 4 + 1 + (2 * NO_OF_COMPONENTS) + 3, sos_data); } /* ------------------------------------------------------------------------- */ /* DRI (define restart interval) */ static int zr36060_set_dri (struct zr36060 *ptr) { char dri_data[6]; // max. 
size of register set dprintk(3, "%s: write DRI\n", ptr->name); dri_data[0] = 0xff; dri_data[1] = 0xdd; dri_data[2] = 0x00; dri_data[3] = 0x04; dri_data[4] = (ptr->dri) >> 8; dri_data[5] = (ptr->dri) & 0xff; return zr36060_pushit(ptr, ZR060_DRI_IDX, 6, dri_data); } /* ========================================================================= Setup function: Setup compression/decompression of Zoran's JPEG processor ( see also zoran 36060 manual ) ... sorry for the spaghetti code ... ========================================================================= */ static void zr36060_init (struct zr36060 *ptr) { int sum = 0; long bitcnt, tmp; if (ptr->mode == CODEC_DO_COMPRESSION) { dprintk(2, "%s: COMPRESSION SETUP\n", ptr->name); zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SyncRst); /* 060 communicates with 067 in master mode */ zr36060_write(ptr, ZR060_CIR, ZR060_CIR_CodeMstr); /* Compression with or without variable scale factor */ /*FIXME: What about ptr->bitrate_ctrl? */ zr36060_write(ptr, ZR060_CMR, ZR060_CMR_Comp | ZR060_CMR_Pass2 | ZR060_CMR_BRB); /* Must be zero */ zr36060_write(ptr, ZR060_MBZ, 0x00); zr36060_write(ptr, ZR060_TCR_HI, 0x00); zr36060_write(ptr, ZR060_TCR_LO, 0x00); /* Disable all IRQs - no DataErr means autoreset */ zr36060_write(ptr, ZR060_IMR, 0); /* volume control settings */ zr36060_write(ptr, ZR060_SF_HI, ptr->scalefact >> 8); zr36060_write(ptr, ZR060_SF_LO, ptr->scalefact & 0xff); zr36060_write(ptr, ZR060_AF_HI, 0xff); zr36060_write(ptr, ZR060_AF_M, 0xff); zr36060_write(ptr, ZR060_AF_LO, 0xff); /* setup the variable jpeg tables */ sum += zr36060_set_sof(ptr); sum += zr36060_set_sos(ptr); sum += zr36060_set_dri(ptr); /* setup the fixed jpeg tables - maybe variable, though - * (see table init section above) */ sum += zr36060_pushit(ptr, ZR060_DQT_IDX, sizeof(zr36060_dqt), zr36060_dqt); sum += zr36060_pushit(ptr, ZR060_DHT_IDX, sizeof(zr36060_dht), zr36060_dht); zr36060_write(ptr, ZR060_APP_IDX, 0xff); zr36060_write(ptr, ZR060_APP_IDX + 1, 0xe0 + ptr->app.appn); zr36060_write(ptr, ZR060_APP_IDX + 2, 0x00); zr36060_write(ptr, ZR060_APP_IDX + 3, ptr->app.len + 2); sum += zr36060_pushit(ptr, ZR060_APP_IDX + 4, 60, ptr->app.data) + 4; zr36060_write(ptr, ZR060_COM_IDX, 0xff); zr36060_write(ptr, ZR060_COM_IDX + 1, 0xfe); zr36060_write(ptr, ZR060_COM_IDX + 2, 0x00); zr36060_write(ptr, ZR060_COM_IDX + 3, ptr->com.len + 2); sum += zr36060_pushit(ptr, ZR060_COM_IDX + 4, 60, ptr->com.data) + 4; /* setup misc. 
data for compression (target code sizes) */ /* size of compressed code to reach without header data */ sum = ptr->real_code_vol - sum; bitcnt = sum << 3; /* need the size in bits */ tmp = bitcnt >> 16; dprintk(3, "%s: code: csize=%d, tot=%d, bit=%ld, highbits=%ld\n", ptr->name, sum, ptr->real_code_vol, bitcnt, tmp); zr36060_write(ptr, ZR060_TCV_NET_HI, tmp >> 8); zr36060_write(ptr, ZR060_TCV_NET_MH, tmp & 0xff); tmp = bitcnt & 0xffff; zr36060_write(ptr, ZR060_TCV_NET_ML, tmp >> 8); zr36060_write(ptr, ZR060_TCV_NET_LO, tmp & 0xff); bitcnt -= bitcnt >> 7; // bits without stuffing bitcnt -= ((bitcnt * 5) >> 6); // bits without eob tmp = bitcnt >> 16; dprintk(3, "%s: code: nettobit=%ld, highnettobits=%ld\n", ptr->name, bitcnt, tmp); zr36060_write(ptr, ZR060_TCV_DATA_HI, tmp >> 8); zr36060_write(ptr, ZR060_TCV_DATA_MH, tmp & 0xff); tmp = bitcnt & 0xffff; zr36060_write(ptr, ZR060_TCV_DATA_ML, tmp >> 8); zr36060_write(ptr, ZR060_TCV_DATA_LO, tmp & 0xff); /* JPEG markers to be included in the compressed stream */ zr36060_write(ptr, ZR060_MER, ZR060_MER_DQT | ZR060_MER_DHT | ((ptr->com.len > 0) ? ZR060_MER_Com : 0) | ((ptr->app.len > 0) ? ZR060_MER_App : 0)); /* Setup the Video Frontend */ /* Limit pixel range to 16..235 as per CCIR-601 */ zr36060_write(ptr, ZR060_VCR, ZR060_VCR_Range); } else { dprintk(2, "%s: EXPANSION SETUP\n", ptr->name); zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SyncRst); /* 060 communicates with 067 in master mode */ zr36060_write(ptr, ZR060_CIR, ZR060_CIR_CodeMstr); /* Decompression */ zr36060_write(ptr, ZR060_CMR, 0); /* Must be zero */ zr36060_write(ptr, ZR060_MBZ, 0x00); zr36060_write(ptr, ZR060_TCR_HI, 0x00); zr36060_write(ptr, ZR060_TCR_LO, 0x00); /* Disable all IRQs - no DataErr means autoreset */ zr36060_write(ptr, ZR060_IMR, 0); /* setup misc. data for expansion */ zr36060_write(ptr, ZR060_MER, 0); /* setup the fixed jpeg tables - maybe variable, though - * (see table init section above) */ zr36060_pushit(ptr, ZR060_DHT_IDX, sizeof(zr36060_dht), zr36060_dht); /* Setup the Video Frontend */ //zr36060_write(ptr, ZR060_VCR, ZR060_VCR_FIExt); //this doesn't seem right and doesn't work... zr36060_write(ptr, ZR060_VCR, ZR060_VCR_Range); } /* Load the tables */ zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SyncRst | ZR060_LOAD_Load); zr36060_wait_end(ptr); dprintk(2, "%s: Status after table preload: 0x%02x\n", ptr->name, ptr->status); if (ptr->status & ZR060_CFSR_Busy) { dprintk(1, KERN_ERR "%s: init aborted!\n", ptr->name); return; // something is wrong, its timed out!!!! 
} } /* ========================================================================= CODEC API FUNCTIONS this functions are accessed by the master via the API structure ========================================================================= */ /* set compression/expansion mode and launches codec - this should be the last call from the master before starting processing */ static int zr36060_set_mode (struct videocodec *codec, int mode) { struct zr36060 *ptr = (struct zr36060 *) codec->data; dprintk(2, "%s: set_mode %d call\n", ptr->name, mode); if ((mode != CODEC_DO_EXPANSION) && (mode != CODEC_DO_COMPRESSION)) return -EINVAL; ptr->mode = mode; zr36060_init(ptr); return 0; } /* set picture size (norm is ignored as the codec doesn't know about it) */ static int zr36060_set_video (struct videocodec *codec, struct tvnorm *norm, struct vfe_settings *cap, struct vfe_polarity *pol) { struct zr36060 *ptr = (struct zr36060 *) codec->data; u32 reg; int size; dprintk(2, "%s: set_video %d/%d-%dx%d (%%%d) call\n", ptr->name, cap->x, cap->y, cap->width, cap->height, cap->decimation); /* if () return -EINVAL; * trust the master driver that it knows what it does - so * we allow invalid startx/y and norm for now ... */ ptr->width = cap->width / (cap->decimation & 0xff); ptr->height = cap->height / (cap->decimation >> 8); zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SyncRst); /* Note that VSPol/HSPol bits in zr36060 have the opposite * meaning of their zr360x7 counterparts with the same names * N.b. for VSPol this is only true if FIVEdge = 0 (default, * left unchanged here - in accordance with datasheet). */ reg = (!pol->vsync_pol ? ZR060_VPR_VSPol : 0) | (!pol->hsync_pol ? ZR060_VPR_HSPol : 0) | (pol->field_pol ? ZR060_VPR_FIPol : 0) | (pol->blank_pol ? ZR060_VPR_BLPol : 0) | (pol->subimg_pol ? ZR060_VPR_SImgPol : 0) | (pol->poe_pol ? ZR060_VPR_PoePol : 0) | (pol->pvalid_pol ? ZR060_VPR_PValPol : 0) | (pol->vclk_pol ? ZR060_VPR_VCLKPol : 0); zr36060_write(ptr, ZR060_VPR, reg); reg = 0; switch (cap->decimation & 0xff) { default: case 1: break; case 2: reg |= ZR060_SR_HScale2; break; case 4: reg |= ZR060_SR_HScale4; break; } switch (cap->decimation >> 8) { default: case 1: break; case 2: reg |= ZR060_SR_VScale; break; } zr36060_write(ptr, ZR060_SR, reg); zr36060_write(ptr, ZR060_BCR_Y, 0x00); zr36060_write(ptr, ZR060_BCR_U, 0x80); zr36060_write(ptr, ZR060_BCR_V, 0x80); /* sync generator */ reg = norm->Ht - 1; /* Vtotal */ zr36060_write(ptr, ZR060_SGR_VTOTAL_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_SGR_VTOTAL_LO, (reg >> 0) & 0xff); reg = norm->Wt - 1; /* Htotal */ zr36060_write(ptr, ZR060_SGR_HTOTAL_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_SGR_HTOTAL_LO, (reg >> 0) & 0xff); reg = 6 - 1; /* VsyncSize */ zr36060_write(ptr, ZR060_SGR_VSYNC, reg); //reg = 30 - 1; /* HsyncSize */ ///*CP*/ reg = (zr->params.norm == 1 ? 
57 : 68); reg = 68; zr36060_write(ptr, ZR060_SGR_HSYNC, reg); reg = norm->VStart - 1; /* BVstart */ zr36060_write(ptr, ZR060_SGR_BVSTART, reg); reg += norm->Ha / 2; /* BVend */ zr36060_write(ptr, ZR060_SGR_BVEND_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_SGR_BVEND_LO, (reg >> 0) & 0xff); reg = norm->HStart - 1; /* BHstart */ zr36060_write(ptr, ZR060_SGR_BHSTART, reg); reg += norm->Wa; /* BHend */ zr36060_write(ptr, ZR060_SGR_BHEND_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_SGR_BHEND_LO, (reg >> 0) & 0xff); /* active area */ reg = cap->y + norm->VStart; /* Vstart */ zr36060_write(ptr, ZR060_AAR_VSTART_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_AAR_VSTART_LO, (reg >> 0) & 0xff); reg += cap->height; /* Vend */ zr36060_write(ptr, ZR060_AAR_VEND_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_AAR_VEND_LO, (reg >> 0) & 0xff); reg = cap->x + norm->HStart; /* Hstart */ zr36060_write(ptr, ZR060_AAR_HSTART_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_AAR_HSTART_LO, (reg >> 0) & 0xff); reg += cap->width; /* Hend */ zr36060_write(ptr, ZR060_AAR_HEND_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_AAR_HEND_LO, (reg >> 0) & 0xff); /* subimage area */ reg = norm->VStart - 4; /* SVstart */ zr36060_write(ptr, ZR060_SWR_VSTART_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_SWR_VSTART_LO, (reg >> 0) & 0xff); reg += norm->Ha / 2 + 8; /* SVend */ zr36060_write(ptr, ZR060_SWR_VEND_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_SWR_VEND_LO, (reg >> 0) & 0xff); reg = norm->HStart /*+ 64 */ - 4; /* SHstart */ zr36060_write(ptr, ZR060_SWR_HSTART_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_SWR_HSTART_LO, (reg >> 0) & 0xff); reg += norm->Wa + 8; /* SHend */ zr36060_write(ptr, ZR060_SWR_HEND_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_SWR_HEND_LO, (reg >> 0) & 0xff); size = ptr->width * ptr->height; /* Target compressed field size in bits: */ size = size * 16; /* uncompressed size in bits */ /* (Ronald) by default, quality = 100 is a compression * ratio 1:2. Setting low_bitrate (insmod option) sets * it to 1:4 (instead of 1:2, zr36060 max) as limit because the * buz can't handle more at decimation=1... Use low_bitrate if * you have a Buz, unless you know what you're doing */ size = size * cap->quality / (low_bitrate ? 400 : 200); /* Lower limit (arbitrary, 1 KB) */ if (size < 8192) size = 8192; /* Upper limit: 7/8 of the code buffers */ if (size > ptr->total_code_vol * 7) size = ptr->total_code_vol * 7; ptr->real_code_vol = size >> 3; /* in bytes */ /* the MBCVR is the *maximum* block volume, according to the * JPEG ISO specs, this shouldn't be used, since that allows * for the best encoding quality. 
So set it to it's max value */ reg = ptr->max_block_vol; zr36060_write(ptr, ZR060_MBCVR, reg); return 0; } /* additional control functions */ static int zr36060_control (struct videocodec *codec, int type, int size, void *data) { struct zr36060 *ptr = (struct zr36060 *) codec->data; int *ival = (int *) data; dprintk(2, "%s: control %d call with %d byte\n", ptr->name, type, size); switch (type) { case CODEC_G_STATUS: /* get last status */ if (size != sizeof(int)) return -EFAULT; zr36060_read_status(ptr); *ival = ptr->status; break; case CODEC_G_CODEC_MODE: if (size != sizeof(int)) return -EFAULT; *ival = CODEC_MODE_BJPG; break; case CODEC_S_CODEC_MODE: if (size != sizeof(int)) return -EFAULT; if (*ival != CODEC_MODE_BJPG) return -EINVAL; /* not needed, do nothing */ return 0; case CODEC_G_VFE: case CODEC_S_VFE: /* not needed, do nothing */ return 0; case CODEC_S_MMAP: /* not available, give an error */ return -ENXIO; case CODEC_G_JPEG_TDS_BYTE: /* get target volume in byte */ if (size != sizeof(int)) return -EFAULT; *ival = ptr->total_code_vol; break; case CODEC_S_JPEG_TDS_BYTE: /* get target volume in byte */ if (size != sizeof(int)) return -EFAULT; ptr->total_code_vol = *ival; ptr->real_code_vol = (ptr->total_code_vol * 6) >> 3; break; case CODEC_G_JPEG_SCALE: /* get scaling factor */ if (size != sizeof(int)) return -EFAULT; *ival = zr36060_read_scalefactor(ptr); break; case CODEC_S_JPEG_SCALE: /* set scaling factor */ if (size != sizeof(int)) return -EFAULT; ptr->scalefact = *ival; break; case CODEC_G_JPEG_APP_DATA: { /* get appn marker data */ struct jpeg_app_marker *app = data; if (size != sizeof(struct jpeg_app_marker)) return -EFAULT; *app = ptr->app; break; } case CODEC_S_JPEG_APP_DATA: { /* set appn marker data */ struct jpeg_app_marker *app = data; if (size != sizeof(struct jpeg_app_marker)) return -EFAULT; ptr->app = *app; break; } case CODEC_G_JPEG_COM_DATA: { /* get comment marker data */ struct jpeg_com_marker *com = data; if (size != sizeof(struct jpeg_com_marker)) return -EFAULT; *com = ptr->com; break; } case CODEC_S_JPEG_COM_DATA: { /* set comment marker data */ struct jpeg_com_marker *com = data; if (size != sizeof(struct jpeg_com_marker)) return -EFAULT; ptr->com = *com; break; } default: return -EINVAL; } return size; } /* ========================================================================= Exit and unregister function: Deinitializes Zoran's JPEG processor ========================================================================= */ static int zr36060_unset (struct videocodec *codec) { struct zr36060 *ptr = codec->data; if (ptr) { /* do wee need some codec deinit here, too ???? */ dprintk(1, "%s: finished codec #%d\n", ptr->name, ptr->num); kfree(ptr); codec->data = NULL; zr36060_codecs--; return 0; } return -EFAULT; } /* ========================================================================= Setup and registry function: Initializes Zoran's JPEG processor Also sets pixel size, average code size, mode (compr./decompr.) 
(the given size is determined by the processor with the video interface) ========================================================================= */ static int zr36060_setup (struct videocodec *codec) { struct zr36060 *ptr; int res; dprintk(2, "zr36060: initializing MJPEG subsystem #%d.\n", zr36060_codecs); if (zr36060_codecs == MAX_CODECS) { dprintk(1, KERN_ERR "zr36060: Can't attach more codecs!\n"); return -ENOSPC; } //mem structure init codec->data = ptr = kzalloc(sizeof(struct zr36060), GFP_KERNEL); if (NULL == ptr) { dprintk(1, KERN_ERR "zr36060: Can't get enough memory!\n"); return -ENOMEM; } snprintf(ptr->name, sizeof(ptr->name), "zr36060[%d]", zr36060_codecs); ptr->num = zr36060_codecs++; ptr->codec = codec; //testing res = zr36060_basic_test(ptr); if (res < 0) { zr36060_unset(codec); return res; } //final setup memcpy(ptr->h_samp_ratio, zr36060_decimation_h, 8); memcpy(ptr->v_samp_ratio, zr36060_decimation_v, 8); ptr->bitrate_ctrl = 0; /* 0 or 1 - fixed file size flag * (what is the difference?) */ ptr->mode = CODEC_DO_COMPRESSION; ptr->width = 384; ptr->height = 288; ptr->total_code_vol = 16000; /* CHECKME */ ptr->real_code_vol = (ptr->total_code_vol * 6) >> 3; ptr->max_block_vol = 240; /* CHECKME, was 120 is 240 */ ptr->scalefact = 0x100; ptr->dri = 1; /* CHECKME, was 8 is 1 */ /* by default, no COM or APP markers - app should set those */ ptr->com.len = 0; ptr->app.appn = 0; ptr->app.len = 0; zr36060_init(ptr); dprintk(1, KERN_INFO "%s: codec attached and running\n", ptr->name); return 0; } static const struct videocodec zr36060_codec = { .owner = THIS_MODULE, .name = "zr36060", .magic = 0L, // magic not used .flags = CODEC_FLAG_JPEG | CODEC_FLAG_HARDWARE | CODEC_FLAG_ENCODER | CODEC_FLAG_DECODER | CODEC_FLAG_VFE, .type = CODEC_TYPE_ZR36060, .setup = zr36060_setup, // functionality .unset = zr36060_unset, .set_mode = zr36060_set_mode, .set_video = zr36060_set_video, .control = zr36060_control, // others are not used }; /* ========================================================================= HOOK IN DRIVER AS KERNEL MODULE ========================================================================= */ static int __init zr36060_init_module (void) { //dprintk(1, "zr36060 driver %s\n",ZR060_VERSION); zr36060_codecs = 0; return videocodec_register(&zr36060_codec); } static void __exit zr36060_cleanup_module (void) { if (zr36060_codecs) { dprintk(1, "zr36060: something's wrong - %d codecs left somehow.\n", zr36060_codecs); } /* however, we can't just stay alive */ videocodec_unregister(&zr36060_codec); } module_init(zr36060_init_module); module_exit(zr36060_cleanup_module); MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@skynet.be>"); MODULE_DESCRIPTION("Driver module for ZR36060 jpeg processors " ZR060_VERSION); MODULE_LICENSE("GPL");
mzhou/lge-kernel-p880-cyanogenmod
drivers/media/video/zoran/zr36060.c
C
gpl-2.0
30,674
/*
 * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved.
 *
 * Description:
 * MPC832xE MDS board specific routines.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/initrd.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>

#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/time.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/ipic.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
#include <asm/qe.h>
#include <asm/qe_ic.h>

#include "mpc83xx.h"

#undef DEBUG
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

/* ************************************************************************
 *
 * Setup the architecture
 *
 */
static void __init mpc832x_sys_setup_arch(void)
{
	struct device_node *np;
	u8 __iomem *bcsr_regs = NULL;

	if (ppc_md.progress)
		ppc_md.progress("mpc832x_sys_setup_arch()", 0);

	/* Map BCSR area */
	np = of_find_node_by_name(NULL, "bcsr");
	if (np) {
		struct resource res;

		of_address_to_resource(np, 0, &res);
		bcsr_regs = ioremap(res.start, res.end - res.start +1);
		of_node_put(np);
	}

#ifdef CONFIG_PCI
	for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
		mpc83xx_add_bridge(np);
#endif

#ifdef CONFIG_QUICC_ENGINE
	qe_reset();

	if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) {
		par_io_init(np);
		of_node_put(np);

		for (np = NULL; (np = of_find_node_by_name(np, "ucc")) != NULL;)
			par_io_of_config(np);
	}

	if ((np = of_find_compatible_node(NULL, "network", "ucc_geth")) != NULL){
		/* Reset the Ethernet PHYs */
#define BCSR8_FETH_RST 0x50
		clrbits8(&bcsr_regs[8], BCSR8_FETH_RST);
		udelay(1000);
		setbits8(&bcsr_regs[8], BCSR8_FETH_RST);
		iounmap(bcsr_regs);
		of_node_put(np);
	}
#endif	/* CONFIG_QUICC_ENGINE */
}

static struct of_device_id mpc832x_ids[] = {
	{ .type = "soc", },
	{ .compatible = "soc", },
	{ .compatible = "simple-bus", },
	{ .type = "qe", },
	{ .compatible = "fsl,qe", },
	{},
};

static int __init mpc832x_declare_of_platform_devices(void)
{
	/* Publish the QE devices */
	of_platform_bus_probe(NULL, mpc832x_ids, NULL);

	return 0;
}
machine_device_initcall(mpc832x_mds, mpc832x_declare_of_platform_devices);

static void __init mpc832x_sys_init_IRQ(void)
{
	struct device_node *np;

	np = of_find_node_by_type(NULL, "ipic");
	if (!np)
		return;

	ipic_init(np, 0);

	/* Initialize the default interrupt mapping priorities,
	 * in case the boot rom changed something on us.
	 */
	ipic_set_default_priority();
	of_node_put(np);

#ifdef CONFIG_QUICC_ENGINE
	np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
	if (!np) {
		np = of_find_node_by_type(NULL, "qeic");
		if (!np)
			return;
	}
	qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic);
	of_node_put(np);
#endif	/* CONFIG_QUICC_ENGINE */
}

/*
 * Called very early, MMU is off, device-tree isn't unflattened
 */
static int __init mpc832x_sys_probe(void)
{
	unsigned long root = of_get_flat_dt_root();

	return of_flat_dt_is_compatible(root, "MPC832xMDS");
}

define_machine(mpc832x_mds) {
	.name		= "MPC832x MDS",
	.probe		= mpc832x_sys_probe,
	.setup_arch	= mpc832x_sys_setup_arch,
	.init_IRQ	= mpc832x_sys_init_IRQ,
	.get_irq	= ipic_get_irq,
	.restart	= mpc83xx_restart,
	.time_init	= mpc83xx_time_init,
	.calibrate_decr	= generic_calibrate_decr,
	.progress	= udbg_progress,
};
vaginessa/twrp_kernel
arch/powerpc/platforms/83xx/mpc832x_mds.c
C
gpl-2.0
3,981
/*
 * max9877.c -- amp driver for max9877
 *
 * Copyright (C) 2009 Samsung Electronics Co.Ltd
 * Author: Joonyoung Shim <jy0922.shim@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <sound/soc.h>
#include <sound/tlv.h>

#include "max9877.h"

static struct i2c_client *i2c;

static u8 max9877_regs[5] = { 0x40, 0x00, 0x00, 0x00, 0x49 };

static void max9877_write_regs(void)
{
	unsigned int i;
	u8 data[6];

	data[0] = MAX9877_INPUT_MODE;
	for (i = 0; i < ARRAY_SIZE(max9877_regs); i++)
		data[i + 1] = max9877_regs[i];

	if (i2c_master_send(i2c, data, 6) != 6)
		dev_err(&i2c->dev, "i2c write failed\n");
}

static int max9877_get_reg(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	unsigned int reg = mc->reg;
	unsigned int shift = mc->shift;
	unsigned int mask = mc->max;
	unsigned int invert = mc->invert;

	ucontrol->value.integer.value[0] = (max9877_regs[reg] >> shift) & mask;

	if (invert)
		ucontrol->value.integer.value[0] =
			mask - ucontrol->value.integer.value[0];

	return 0;
}

static int max9877_set_reg(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	unsigned int reg = mc->reg;
	unsigned int shift = mc->shift;
	unsigned int mask = mc->max;
	unsigned int invert = mc->invert;
	unsigned int val = (ucontrol->value.integer.value[0] & mask);

	if (invert)
		val = mask - val;

	if (((max9877_regs[reg] >> shift) & mask) == val)
		return 0;

	max9877_regs[reg] &= ~(mask << shift);
	max9877_regs[reg] |= val << shift;
	max9877_write_regs();

	return 1;
}

static int max9877_get_2reg(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	unsigned int reg = mc->reg;
	unsigned int reg2 = mc->rreg;
	unsigned int shift = mc->shift;
	unsigned int mask = mc->max;

	ucontrol->value.integer.value[0] = (max9877_regs[reg] >> shift) & mask;
	ucontrol->value.integer.value[1] = (max9877_regs[reg2] >> shift) & mask;

	return 0;
}

static int max9877_set_2reg(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	unsigned int reg = mc->reg;
	unsigned int reg2 = mc->rreg;
	unsigned int shift = mc->shift;
	unsigned int mask = mc->max;
	unsigned int val = (ucontrol->value.integer.value[0] & mask);
	unsigned int val2 = (ucontrol->value.integer.value[1] & mask);
	unsigned int change = 1;

	if (((max9877_regs[reg] >> shift) & mask) == val)
		change = 0;

	if (((max9877_regs[reg2] >> shift) & mask) == val2)
		change = 0;

	if (change) {
		max9877_regs[reg] &= ~(mask << shift);
		max9877_regs[reg] |= val << shift;
		max9877_regs[reg2] &= ~(mask << shift);
		max9877_regs[reg2] |= val2 << shift;
		max9877_write_regs();
	}

	return change;
}

static int max9877_get_out_mode(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	u8 value = max9877_regs[MAX9877_OUTPUT_MODE] & MAX9877_OUTMODE_MASK;

	if (value)
		value -= 1;

	ucontrol->value.integer.value[0] = value;
	return 0;
}

static int max9877_set_out_mode(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	u8 value = ucontrol->value.integer.value[0];

	value += 1;

	if ((max9877_regs[MAX9877_OUTPUT_MODE] & MAX9877_OUTMODE_MASK) == value)
		return 0;

	max9877_regs[MAX9877_OUTPUT_MODE] &= ~MAX9877_OUTMODE_MASK;
	max9877_regs[MAX9877_OUTPUT_MODE] |= value;
	max9877_write_regs();
	return 1;
}

static int max9877_get_osc_mode(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	u8 value = (max9877_regs[MAX9877_OUTPUT_MODE] & MAX9877_OSC_MASK);

	value = value >> MAX9877_OSC_OFFSET;
	ucontrol->value.integer.value[0] = value;
	return 0;
}

static int max9877_set_osc_mode(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	u8 value = ucontrol->value.integer.value[0];

	value = value << MAX9877_OSC_OFFSET;
	if ((max9877_regs[MAX9877_OUTPUT_MODE] & MAX9877_OSC_MASK) == value)
		return 0;

	max9877_regs[MAX9877_OUTPUT_MODE] &= ~MAX9877_OSC_MASK;
	max9877_regs[MAX9877_OUTPUT_MODE] |= value;
	max9877_write_regs();
	return 1;
}

static const unsigned int max9877_pgain_tlv[] = {
	TLV_DB_RANGE_HEAD(2),
	0, 1, TLV_DB_SCALE_ITEM(0, 900, 0),
	2, 2, TLV_DB_SCALE_ITEM(2000, 0, 0),
};

static const unsigned int max9877_output_tlv[] = {
	TLV_DB_RANGE_HEAD(4),
	0, 7, TLV_DB_SCALE_ITEM(-7900, 400, 1),
	8, 15, TLV_DB_SCALE_ITEM(-4700, 300, 0),
	16, 23, TLV_DB_SCALE_ITEM(-2300, 200, 0),
	24, 31, TLV_DB_SCALE_ITEM(-700, 100, 0),
};

static const char *max9877_out_mode[] = {
	"INA -> SPK",
	"INA -> HP",
	"INA -> SPK and HP",
	"INB -> SPK",
	"INB -> HP",
	"INB -> SPK and HP",
	"INA + INB -> SPK",
	"INA + INB -> HP",
	"INA + INB -> SPK and HP",
};

static const char *max9877_osc_mode[] = {
	"1176KHz",
	"1100KHz",
	"700KHz",
};

static const struct soc_enum max9877_enum[] = {
	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(max9877_out_mode), max9877_out_mode),
	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(max9877_osc_mode), max9877_osc_mode),
};

static const struct snd_kcontrol_new max9877_controls[] = {
	SOC_SINGLE_EXT_TLV("MAX9877 PGAINA Playback Volume",
			MAX9877_INPUT_MODE, 0, 2, 0,
			max9877_get_reg, max9877_set_reg, max9877_pgain_tlv),
	SOC_SINGLE_EXT_TLV("MAX9877 PGAINB Playback Volume",
			MAX9877_INPUT_MODE, 2, 2, 0,
			max9877_get_reg, max9877_set_reg, max9877_pgain_tlv),
	SOC_SINGLE_EXT_TLV("MAX9877 Amp Speaker Playback Volume",
			MAX9877_SPK_VOLUME, 0, 31, 0,
			max9877_get_reg, max9877_set_reg, max9877_output_tlv),
	SOC_DOUBLE_R_EXT_TLV("MAX9877 Amp HP Playback Volume",
			MAX9877_HPL_VOLUME, MAX9877_HPR_VOLUME, 0, 31, 0,
			max9877_get_2reg, max9877_set_2reg, max9877_output_tlv),
	SOC_SINGLE_EXT("MAX9877 INB Stereo Switch",
			MAX9877_INPUT_MODE, 4, 1, 1,
			max9877_get_reg, max9877_set_reg),
	SOC_SINGLE_EXT("MAX9877 INA Stereo Switch",
			MAX9877_INPUT_MODE, 5, 1, 1,
			max9877_get_reg, max9877_set_reg),
	SOC_SINGLE_EXT("MAX9877 Zero-crossing detection Switch",
			MAX9877_INPUT_MODE, 6, 1, 0,
			max9877_get_reg, max9877_set_reg),
	SOC_SINGLE_EXT("MAX9877 Bypass Mode Switch",
			MAX9877_OUTPUT_MODE, 6, 1, 0,
			max9877_get_reg, max9877_set_reg),
	SOC_SINGLE_EXT("MAX9877 Shutdown Mode Switch",
			MAX9877_OUTPUT_MODE, 7, 1, 1,
			max9877_get_reg, max9877_set_reg),
	SOC_ENUM_EXT("MAX9877 Output Mode", max9877_enum[0],
			max9877_get_out_mode, max9877_set_out_mode),
	SOC_ENUM_EXT("MAX9877 Oscillator Mode", max9877_enum[1],
			max9877_get_osc_mode, max9877_set_osc_mode),
};

/* This function is called from ASoC machine driver */
int max9877_add_controls(struct snd_soc_codec *codec)
{
	return snd_soc_add_controls(codec, max9877_controls,
			ARRAY_SIZE(max9877_controls));
}
EXPORT_SYMBOL_GPL(max9877_add_controls);

static int __devinit max9877_i2c_probe(struct i2c_client *client,
		const struct i2c_device_id *id)
{
	i2c = client;

	max9877_write_regs();

	return 0;
}

static __devexit int max9877_i2c_remove(struct i2c_client *client)
{
	i2c = NULL;

	return 0;
}

static const struct i2c_device_id max9877_i2c_id[] = {
	{ "max9877", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, max9877_i2c_id);

static struct i2c_driver max9877_i2c_driver = {
	.driver = {
		.name = "max9877",
		.owner = THIS_MODULE,
	},
	.probe = max9877_i2c_probe,
	.remove = __devexit_p(max9877_i2c_remove),
	.id_table = max9877_i2c_id,
};

static int __init max9877_init(void)
{
	return i2c_add_driver(&max9877_i2c_driver);
}
module_init(max9877_init);

static void __exit max9877_exit(void)
{
	i2c_del_driver(&max9877_i2c_driver);
}
module_exit(max9877_exit);

MODULE_DESCRIPTION("ASoC MAX9877 amp driver");
MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
MODULE_LICENSE("GPL");
antonizoon/linux_on_wince_htc
sound/soc/codecs/max9877.c
C
gpl-2.0
8,188
/*
 * linux/arch/arm/plat-omap/mux.c
 *
 * Utility to set the Omap MUX and PULL_DWN registers from a table in mux.h
 *
 * Copyright (C) 2003 - 2008 Nokia Corporation
 *
 * Written by Tony Lindgren
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>

#include <asm/system.h>

#include <plat/cpu.h>
#include <plat/mux.h>

#ifdef CONFIG_OMAP_MUX

static struct omap_mux_cfg *mux_cfg;

int __init omap_mux_register(struct omap_mux_cfg *arch_mux_cfg)
{
	if (!arch_mux_cfg || !arch_mux_cfg->pins || arch_mux_cfg->size == 0
			|| !arch_mux_cfg->cfg_reg) {
		printk(KERN_ERR "Invalid pin table\n");
		return -EINVAL;
	}

	mux_cfg = arch_mux_cfg;

	return 0;
}

/*
 * Sets the Omap MUX and PULL_DWN registers based on the table
 */
int __init_or_module omap_cfg_reg(const unsigned long index)
{
	struct pin_config *reg;

	if (!cpu_class_is_omap1()) {
		printk(KERN_ERR "mux: Broken omap_cfg_reg(%lu) entry\n",
				index);
		WARN_ON(1);
		return -EINVAL;
	}

	if (mux_cfg == NULL) {
		printk(KERN_ERR "Pin mux table not initialized\n");
		return -ENODEV;
	}

	if (index >= mux_cfg->size) {
		printk(KERN_ERR "Invalid pin mux index: %lu (%lu)\n",
		       index, mux_cfg->size);
		dump_stack();
		return -ENODEV;
	}

	reg = (struct pin_config *)&mux_cfg->pins[index];

	if (!mux_cfg->cfg_reg)
		return -ENODEV;

	return mux_cfg->cfg_reg(reg);
}
EXPORT_SYMBOL(omap_cfg_reg);
#else
#define omap_mux_init() do {} while(0)
#define omap_cfg_reg(x) do {} while(0)
#endif	/* CONFIG_OMAP_MUX */
sicknemesis/AK-OnePone
arch/arm/plat-omap/mux.c
C
gpl-2.0
2,270
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * Copyright Novell Inc 2010 * * Authors: Alexander Graf <agraf@suse.de> */ #include <asm/kvm.h> #include <asm/kvm_ppc.h> #include <asm/disassemble.h> #include <asm/kvm_book3s.h> #include <asm/kvm_fpu.h> #include <asm/reg.h> #include <asm/cacheflush.h> #include <asm/switch_to.h> #include <linux/vmalloc.h> /* #define DEBUG */ #ifdef DEBUG #define dprintk printk #else #define dprintk(...) do { } while(0); #endif #define OP_LFS 48 #define OP_LFSU 49 #define OP_LFD 50 #define OP_LFDU 51 #define OP_STFS 52 #define OP_STFSU 53 #define OP_STFD 54 #define OP_STFDU 55 #define OP_PSQ_L 56 #define OP_PSQ_LU 57 #define OP_PSQ_ST 60 #define OP_PSQ_STU 61 #define OP_31_LFSX 535 #define OP_31_LFSUX 567 #define OP_31_LFDX 599 #define OP_31_LFDUX 631 #define OP_31_STFSX 663 #define OP_31_STFSUX 695 #define OP_31_STFX 727 #define OP_31_STFUX 759 #define OP_31_LWIZX 887 #define OP_31_STFIWX 983 #define OP_59_FADDS 21 #define OP_59_FSUBS 20 #define OP_59_FSQRTS 22 #define OP_59_FDIVS 18 #define OP_59_FRES 24 #define OP_59_FMULS 25 #define OP_59_FRSQRTES 26 #define OP_59_FMSUBS 28 #define OP_59_FMADDS 29 #define OP_59_FNMSUBS 30 #define OP_59_FNMADDS 31 #define OP_63_FCMPU 0 #define OP_63_FCPSGN 8 #define OP_63_FRSP 12 #define OP_63_FCTIW 14 #define OP_63_FCTIWZ 15 #define OP_63_FDIV 18 #define OP_63_FADD 21 #define OP_63_FSQRT 22 #define OP_63_FSEL 23 #define OP_63_FRE 24 #define OP_63_FMUL 25 #define OP_63_FRSQRTE 26 #define OP_63_FMSUB 28 #define OP_63_FMADD 29 #define OP_63_FNMSUB 30 #define OP_63_FNMADD 31 #define OP_63_FCMPO 32 #define OP_63_MTFSB1 38 // XXX #define OP_63_FSUB 20 #define OP_63_FNEG 40 #define OP_63_MCRFS 64 #define OP_63_MTFSB0 70 #define OP_63_FMR 72 #define OP_63_MTFSFI 134 #define OP_63_FABS 264 #define OP_63_MFFS 583 #define OP_63_MTFSF 711 #define OP_4X_PS_CMPU0 0 #define OP_4X_PSQ_LX 6 #define OP_4XW_PSQ_STX 7 #define OP_4A_PS_SUM0 10 #define OP_4A_PS_SUM1 11 #define OP_4A_PS_MULS0 12 #define OP_4A_PS_MULS1 13 #define OP_4A_PS_MADDS0 14 #define OP_4A_PS_MADDS1 15 #define OP_4A_PS_DIV 18 #define OP_4A_PS_SUB 20 #define OP_4A_PS_ADD 21 #define OP_4A_PS_SEL 23 #define OP_4A_PS_RES 24 #define OP_4A_PS_MUL 25 #define OP_4A_PS_RSQRTE 26 #define OP_4A_PS_MSUB 28 #define OP_4A_PS_MADD 29 #define OP_4A_PS_NMSUB 30 #define OP_4A_PS_NMADD 31 #define OP_4X_PS_CMPO0 32 #define OP_4X_PSQ_LUX 38 #define OP_4XW_PSQ_STUX 39 #define OP_4X_PS_NEG 40 #define OP_4X_PS_CMPU1 64 #define OP_4X_PS_MR 72 #define OP_4X_PS_CMPO1 96 #define OP_4X_PS_NABS 136 #define OP_4X_PS_ABS 264 #define OP_4X_PS_MERGE00 528 #define OP_4X_PS_MERGE01 560 #define OP_4X_PS_MERGE10 592 #define OP_4X_PS_MERGE11 624 #define SCALAR_NONE 0 #define SCALAR_HIGH (1 << 0) #define SCALAR_LOW (1 << 1) #define SCALAR_NO_PS0 (1 << 2) #define SCALAR_NO_PS1 (1 << 3) #define GQR_ST_TYPE_MASK 0x00000007 #define GQR_ST_TYPE_SHIFT 0 #define GQR_ST_SCALE_MASK 0x00003f00 #define GQR_ST_SCALE_SHIFT 8 #define 
GQR_LD_TYPE_MASK 0x00070000 #define GQR_LD_TYPE_SHIFT 16 #define GQR_LD_SCALE_MASK 0x3f000000 #define GQR_LD_SCALE_SHIFT 24 #define GQR_QUANTIZE_FLOAT 0 #define GQR_QUANTIZE_U8 4 #define GQR_QUANTIZE_U16 5 #define GQR_QUANTIZE_S8 6 #define GQR_QUANTIZE_S16 7 #define FPU_LS_SINGLE 0 #define FPU_LS_DOUBLE 1 #define FPU_LS_SINGLE_LOW 2 static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt) { kvm_cvt_df(&vcpu->arch.fpr[rt], &vcpu->arch.qpr[rt]); } static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store) { u64 dsisr; struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared; shared->msr = kvmppc_set_field(shared->msr, 33, 36, 0); shared->msr = kvmppc_set_field(shared->msr, 42, 47, 0); shared->dar = eaddr; /* Page Fault */ dsisr = kvmppc_set_field(0, 33, 33, 1); if (is_store) shared->dsisr = kvmppc_set_field(dsisr, 38, 38, 1); kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE); } static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu, int rs, ulong addr, int ls_type) { int emulated = EMULATE_FAIL; int r; char tmp[8]; int len = sizeof(u32); if (ls_type == FPU_LS_DOUBLE) len = sizeof(u64); /* read from memory */ r = kvmppc_ld(vcpu, &addr, len, tmp, true); vcpu->arch.paddr_accessed = addr; if (r < 0) { kvmppc_inject_pf(vcpu, addr, false); goto done_load; } else if (r == EMULATE_DO_MMIO) { emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs, len, 1); goto done_load; } emulated = EMULATE_DONE; /* put in registers */ switch (ls_type) { case FPU_LS_SINGLE: kvm_cvt_fd((u32*)tmp, &vcpu->arch.fpr[rs]); vcpu->arch.qpr[rs] = *((u32*)tmp); break; case FPU_LS_DOUBLE: vcpu->arch.fpr[rs] = *((u64*)tmp); break; } dprintk(KERN_INFO "KVM: FPR_LD [0x%llx] at 0x%lx (%d)\n", *(u64*)tmp, addr, len); done_load: return emulated; } static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu, int rs, ulong addr, int ls_type) { int emulated = EMULATE_FAIL; int r; char tmp[8]; u64 val; int len; switch (ls_type) { case FPU_LS_SINGLE: kvm_cvt_df(&vcpu->arch.fpr[rs], (u32*)tmp); val = *((u32*)tmp); len = sizeof(u32); break; case FPU_LS_SINGLE_LOW: *((u32*)tmp) = vcpu->arch.fpr[rs]; val = vcpu->arch.fpr[rs] & 0xffffffff; len = sizeof(u32); break; case FPU_LS_DOUBLE: *((u64*)tmp) = vcpu->arch.fpr[rs]; val = vcpu->arch.fpr[rs]; len = sizeof(u64); break; default: val = 0; len = 0; } r = kvmppc_st(vcpu, &addr, len, tmp, true); vcpu->arch.paddr_accessed = addr; if (r < 0) { kvmppc_inject_pf(vcpu, addr, true); } else if (r == EMULATE_DO_MMIO) { emulated = kvmppc_handle_store(run, vcpu, val, len, 1); } else { emulated = EMULATE_DONE; } dprintk(KERN_INFO "KVM: FPR_ST [0x%llx] at 0x%lx (%d)\n", val, addr, len); return emulated; } static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu, int rs, ulong addr, bool w, int i) { int emulated = EMULATE_FAIL; int r; float one = 1.0; u32 tmp[2]; /* read from memory */ if (w) { r = kvmppc_ld(vcpu, &addr, sizeof(u32), tmp, true); memcpy(&tmp[1], &one, sizeof(u32)); } else { r = kvmppc_ld(vcpu, &addr, sizeof(u32) * 2, tmp, true); } vcpu->arch.paddr_accessed = addr; if (r < 0) { kvmppc_inject_pf(vcpu, addr, false); goto done_load; } else if ((r == EMULATE_DO_MMIO) && w) { emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs, 4, 1); vcpu->arch.qpr[rs] = tmp[1]; goto done_load; } else if (r == EMULATE_DO_MMIO) { emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FQPR | rs, 8, 1); goto done_load; } emulated = EMULATE_DONE; /* put in registers */ kvm_cvt_fd(&tmp[0], 
&vcpu->arch.fpr[rs]); vcpu->arch.qpr[rs] = tmp[1]; dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0], tmp[1], addr, w ? 4 : 8); done_load: return emulated; } static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu, int rs, ulong addr, bool w, int i) { int emulated = EMULATE_FAIL; int r; u32 tmp[2]; int len = w ? sizeof(u32) : sizeof(u64); kvm_cvt_df(&vcpu->arch.fpr[rs], &tmp[0]); tmp[1] = vcpu->arch.qpr[rs]; r = kvmppc_st(vcpu, &addr, len, tmp, true); vcpu->arch.paddr_accessed = addr; if (r < 0) { kvmppc_inject_pf(vcpu, addr, true); } else if ((r == EMULATE_DO_MMIO) && w) { emulated = kvmppc_handle_store(run, vcpu, tmp[0], 4, 1); } else if (r == EMULATE_DO_MMIO) { u64 val = ((u64)tmp[0] << 32) | tmp[1]; emulated = kvmppc_handle_store(run, vcpu, val, 8, 1); } else { emulated = EMULATE_DONE; } dprintk(KERN_INFO "KVM: PSQ_ST [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0], tmp[1], addr, len); return emulated; } /* * Cuts out inst bits with ordering according to spec. * That means the leftmost bit is zero. All given bits are included. */ static inline u32 inst_get_field(u32 inst, int msb, int lsb) { return kvmppc_get_field(inst, msb + 32, lsb + 32); } /* * Replaces inst bits with ordering according to spec. */ static inline u32 inst_set_field(u32 inst, int msb, int lsb, int value) { return kvmppc_set_field(inst, msb + 32, lsb + 32, value); } bool kvmppc_inst_is_paired_single(struct kvm_vcpu *vcpu, u32 inst) { if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)) return false; switch (get_op(inst)) { case OP_PSQ_L: case OP_PSQ_LU: case OP_PSQ_ST: case OP_PSQ_STU: case OP_LFS: case OP_LFSU: case OP_LFD: case OP_LFDU: case OP_STFS: case OP_STFSU: case OP_STFD: case OP_STFDU: return true; case 4: /* X form */ switch (inst_get_field(inst, 21, 30)) { case OP_4X_PS_CMPU0: case OP_4X_PSQ_LX: case OP_4X_PS_CMPO0: case OP_4X_PSQ_LUX: case OP_4X_PS_NEG: case OP_4X_PS_CMPU1: case OP_4X_PS_MR: case OP_4X_PS_CMPO1: case OP_4X_PS_NABS: case OP_4X_PS_ABS: case OP_4X_PS_MERGE00: case OP_4X_PS_MERGE01: case OP_4X_PS_MERGE10: case OP_4X_PS_MERGE11: return true; } /* XW form */ switch (inst_get_field(inst, 25, 30)) { case OP_4XW_PSQ_STX: case OP_4XW_PSQ_STUX: return true; } /* A form */ switch (inst_get_field(inst, 26, 30)) { case OP_4A_PS_SUM1: case OP_4A_PS_SUM0: case OP_4A_PS_MULS0: case OP_4A_PS_MULS1: case OP_4A_PS_MADDS0: case OP_4A_PS_MADDS1: case OP_4A_PS_DIV: case OP_4A_PS_SUB: case OP_4A_PS_ADD: case OP_4A_PS_SEL: case OP_4A_PS_RES: case OP_4A_PS_MUL: case OP_4A_PS_RSQRTE: case OP_4A_PS_MSUB: case OP_4A_PS_MADD: case OP_4A_PS_NMSUB: case OP_4A_PS_NMADD: return true; } break; case 59: switch (inst_get_field(inst, 21, 30)) { case OP_59_FADDS: case OP_59_FSUBS: case OP_59_FDIVS: case OP_59_FRES: case OP_59_FRSQRTES: return true; } switch (inst_get_field(inst, 26, 30)) { case OP_59_FMULS: case OP_59_FMSUBS: case OP_59_FMADDS: case OP_59_FNMSUBS: case OP_59_FNMADDS: return true; } break; case 63: switch (inst_get_field(inst, 21, 30)) { case OP_63_MTFSB0: case OP_63_MTFSB1: case OP_63_MTFSF: case OP_63_MTFSFI: case OP_63_MCRFS: case OP_63_MFFS: case OP_63_FCMPU: case OP_63_FCMPO: case OP_63_FNEG: case OP_63_FMR: case OP_63_FABS: case OP_63_FRSP: case OP_63_FDIV: case OP_63_FADD: case OP_63_FSUB: case OP_63_FCTIW: case OP_63_FCTIWZ: case OP_63_FRSQRTE: case OP_63_FCPSGN: return true; } switch (inst_get_field(inst, 26, 30)) { case OP_63_FMUL: case OP_63_FSEL: case OP_63_FMSUB: case OP_63_FMADD: case OP_63_FNMSUB: case OP_63_FNMADD: return true; } break; case 31: switch 
(inst_get_field(inst, 21, 30)) { case OP_31_LFSX: case OP_31_LFSUX: case OP_31_LFDX: case OP_31_LFDUX: case OP_31_STFSX: case OP_31_STFSUX: case OP_31_STFX: case OP_31_STFUX: case OP_31_STFIWX: return true; } break; } return false; } static int get_d_signext(u32 inst) { int d = inst & 0x8ff; if (d & 0x800) return -(d & 0x7ff); return (d & 0x7ff); } static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc, int reg_out, int reg_in1, int reg_in2, int reg_in3, int scalar, void (*func)(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2, u32 *src3)) { u32 *qpr = vcpu->arch.qpr; u64 *fpr = vcpu->arch.fpr; u32 ps0_out; u32 ps0_in1, ps0_in2, ps0_in3; u32 ps1_in1, ps1_in2, ps1_in3; /* RC */ WARN_ON(rc); /* PS0 */ kvm_cvt_df(&fpr[reg_in1], &ps0_in1); kvm_cvt_df(&fpr[reg_in2], &ps0_in2); kvm_cvt_df(&fpr[reg_in3], &ps0_in3); if (scalar & SCALAR_LOW) ps0_in2 = qpr[reg_in2]; func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3); dprintk(KERN_INFO "PS3 ps0 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n", ps0_in1, ps0_in2, ps0_in3, ps0_out); if (!(scalar & SCALAR_NO_PS0)) kvm_cvt_fd(&ps0_out, &fpr[reg_out]); /* PS1 */ ps1_in1 = qpr[reg_in1]; ps1_in2 = qpr[reg_in2]; ps1_in3 = qpr[reg_in3]; if (scalar & SCALAR_HIGH) ps1_in2 = ps0_in2; if (!(scalar & SCALAR_NO_PS1)) func(&vcpu->arch.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3); dprintk(KERN_INFO "PS3 ps1 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n", ps1_in1, ps1_in2, ps1_in3, qpr[reg_out]); return EMULATE_DONE; } static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc, int reg_out, int reg_in1, int reg_in2, int scalar, void (*func)(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2)) { u32 *qpr = vcpu->arch.qpr; u64 *fpr = vcpu->arch.fpr; u32 ps0_out; u32 ps0_in1, ps0_in2; u32 ps1_out; u32 ps1_in1, ps1_in2; /* RC */ WARN_ON(rc); /* PS0 */ kvm_cvt_df(&fpr[reg_in1], &ps0_in1); if (scalar & SCALAR_LOW) ps0_in2 = qpr[reg_in2]; else kvm_cvt_df(&fpr[reg_in2], &ps0_in2); func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2); if (!(scalar & SCALAR_NO_PS0)) { dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n", ps0_in1, ps0_in2, ps0_out); kvm_cvt_fd(&ps0_out, &fpr[reg_out]); } /* PS1 */ ps1_in1 = qpr[reg_in1]; ps1_in2 = qpr[reg_in2]; if (scalar & SCALAR_HIGH) ps1_in2 = ps0_in2; func(&vcpu->arch.fpscr, &ps1_out, &ps1_in1, &ps1_in2); if (!(scalar & SCALAR_NO_PS1)) { qpr[reg_out] = ps1_out; dprintk(KERN_INFO "PS2 ps1 -> f(0x%x, 0x%x) = 0x%x\n", ps1_in1, ps1_in2, qpr[reg_out]); } return EMULATE_DONE; } static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc, int reg_out, int reg_in, void (*func)(u64 *t, u32 *dst, u32 *src1)) { u32 *qpr = vcpu->arch.qpr; u64 *fpr = vcpu->arch.fpr; u32 ps0_out, ps0_in; u32 ps1_in; /* RC */ WARN_ON(rc); /* PS0 */ kvm_cvt_df(&fpr[reg_in], &ps0_in); func(&vcpu->arch.fpscr, &ps0_out, &ps0_in); dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n", ps0_in, ps0_out); kvm_cvt_fd(&ps0_out, &fpr[reg_out]); /* PS1 */ ps1_in = qpr[reg_in]; func(&vcpu->arch.fpscr, &qpr[reg_out], &ps1_in); dprintk(KERN_INFO "PS1 ps1 -> f(0x%x) = 0x%x\n", ps1_in, qpr[reg_out]); return EMULATE_DONE; } int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) { u32 inst = kvmppc_get_last_inst(vcpu); enum emulation_result emulated = EMULATE_DONE; int ax_rd = inst_get_field(inst, 6, 10); int ax_ra = inst_get_field(inst, 11, 15); int ax_rb = inst_get_field(inst, 16, 20); int ax_rc = inst_get_field(inst, 21, 25); short full_d = inst_get_field(inst, 16, 31); u64 *fpr_d = &vcpu->arch.fpr[ax_rd]; u64 *fpr_a = &vcpu->arch.fpr[ax_ra]; u64 *fpr_b = &vcpu->arch.fpr[ax_rb]; u64 *fpr_c 
= &vcpu->arch.fpr[ax_rc]; bool rcomp = (inst & 1) ? true : false; u32 cr = kvmppc_get_cr(vcpu); #ifdef DEBUG int i; #endif if (!kvmppc_inst_is_paired_single(vcpu, inst)) return EMULATE_FAIL; if (!(vcpu->arch.shared->msr & MSR_FP)) { kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL); return EMULATE_AGAIN; } kvmppc_giveup_ext(vcpu, MSR_FP); preempt_disable(); enable_kernel_fp(); /* Do we need to clear FE0 / FE1 here? Don't think so. */ #ifdef DEBUG for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) { u32 f; kvm_cvt_df(&vcpu->arch.fpr[i], &f); dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n", i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]); } #endif switch (get_op(inst)) { case OP_PSQ_L: { ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0; bool w = inst_get_field(inst, 16, 16) ? true : false; int i = inst_get_field(inst, 17, 19); addr += get_d_signext(inst); emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i); break; } case OP_PSQ_LU: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra); bool w = inst_get_field(inst, 16, 16) ? true : false; int i = inst_get_field(inst, 17, 19); addr += get_d_signext(inst); emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case OP_PSQ_ST: { ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0; bool w = inst_get_field(inst, 16, 16) ? true : false; int i = inst_get_field(inst, 17, 19); addr += get_d_signext(inst); emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i); break; } case OP_PSQ_STU: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra); bool w = inst_get_field(inst, 16, 16) ? true : false; int i = inst_get_field(inst, 17, 19); addr += get_d_signext(inst); emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case 4: /* X form */ switch (inst_get_field(inst, 21, 30)) { case OP_4X_PS_CMPU0: /* XXX */ emulated = EMULATE_FAIL; break; case OP_4X_PSQ_LX: { ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0; bool w = inst_get_field(inst, 21, 21) ? true : false; int i = inst_get_field(inst, 22, 24); addr += kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i); break; } case OP_4X_PS_CMPO0: /* XXX */ emulated = EMULATE_FAIL; break; case OP_4X_PSQ_LUX: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra); bool w = inst_get_field(inst, 21, 21) ? 
true : false; int i = inst_get_field(inst, 22, 24); addr += kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case OP_4X_PS_NEG: vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb]; vcpu->arch.fpr[ax_rd] ^= 0x8000000000000000ULL; vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; vcpu->arch.qpr[ax_rd] ^= 0x80000000; break; case OP_4X_PS_CMPU1: /* XXX */ emulated = EMULATE_FAIL; break; case OP_4X_PS_MR: WARN_ON(rcomp); vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb]; vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; break; case OP_4X_PS_CMPO1: /* XXX */ emulated = EMULATE_FAIL; break; case OP_4X_PS_NABS: WARN_ON(rcomp); vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb]; vcpu->arch.fpr[ax_rd] |= 0x8000000000000000ULL; vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; vcpu->arch.qpr[ax_rd] |= 0x80000000; break; case OP_4X_PS_ABS: WARN_ON(rcomp); vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb]; vcpu->arch.fpr[ax_rd] &= ~0x8000000000000000ULL; vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; vcpu->arch.qpr[ax_rd] &= ~0x80000000; break; case OP_4X_PS_MERGE00: WARN_ON(rcomp); vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra]; /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */ kvm_cvt_df(&vcpu->arch.fpr[ax_rb], &vcpu->arch.qpr[ax_rd]); break; case OP_4X_PS_MERGE01: WARN_ON(rcomp); vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra]; vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; break; case OP_4X_PS_MERGE10: WARN_ON(rcomp); /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */ kvm_cvt_fd(&vcpu->arch.qpr[ax_ra], &vcpu->arch.fpr[ax_rd]); /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */ kvm_cvt_df(&vcpu->arch.fpr[ax_rb], &vcpu->arch.qpr[ax_rd]); break; case OP_4X_PS_MERGE11: WARN_ON(rcomp); /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */ kvm_cvt_fd(&vcpu->arch.qpr[ax_ra], &vcpu->arch.fpr[ax_rd]); vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; break; } /* XW form */ switch (inst_get_field(inst, 25, 30)) { case OP_4XW_PSQ_STX: { ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0; bool w = inst_get_field(inst, 21, 21) ? true : false; int i = inst_get_field(inst, 22, 24); addr += kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i); break; } case OP_4XW_PSQ_STUX: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra); bool w = inst_get_field(inst, 21, 21) ? 
true : false; int i = inst_get_field(inst, 22, 24); addr += kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } } /* A form */ switch (inst_get_field(inst, 26, 30)) { case OP_4A_PS_SUM1: emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, ax_rb, ax_ra, SCALAR_NO_PS0 | SCALAR_HIGH, fps_fadds); vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rc]; break; case OP_4A_PS_SUM0: emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, ax_ra, ax_rb, SCALAR_NO_PS1 | SCALAR_LOW, fps_fadds); vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rc]; break; case OP_4A_PS_MULS0: emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, SCALAR_HIGH, fps_fmuls); break; case OP_4A_PS_MULS1: emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, SCALAR_LOW, fps_fmuls); break; case OP_4A_PS_MADDS0: emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, ax_rb, SCALAR_HIGH, fps_fmadds); break; case OP_4A_PS_MADDS1: emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, ax_rb, SCALAR_LOW, fps_fmadds); break; case OP_4A_PS_DIV: emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, ax_ra, ax_rb, SCALAR_NONE, fps_fdivs); break; case OP_4A_PS_SUB: emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, ax_ra, ax_rb, SCALAR_NONE, fps_fsubs); break; case OP_4A_PS_ADD: emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, ax_ra, ax_rb, SCALAR_NONE, fps_fadds); break; case OP_4A_PS_SEL: emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fsel); break; case OP_4A_PS_RES: emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd, ax_rb, fps_fres); break; case OP_4A_PS_MUL: emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, SCALAR_NONE, fps_fmuls); break; case OP_4A_PS_RSQRTE: emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd, ax_rb, fps_frsqrte); break; case OP_4A_PS_MSUB: emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmsubs); break; case OP_4A_PS_MADD: emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmadds); break; case OP_4A_PS_NMSUB: emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmsubs); break; case OP_4A_PS_NMADD: emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmadds); break; } break; /* Real FPU operations */ case OP_LFS: { ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d; emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, FPU_LS_SINGLE); break; } case OP_LFSU: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d; emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, FPU_LS_SINGLE); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case OP_LFD: { ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d; emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, FPU_LS_DOUBLE); break; } case OP_LFDU: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d; emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, FPU_LS_DOUBLE); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case OP_STFS: { ulong addr = (ax_ra ? 
kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d; emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, FPU_LS_SINGLE); break; } case OP_STFSU: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d; emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, FPU_LS_SINGLE); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case OP_STFD: { ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d; emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, FPU_LS_DOUBLE); break; } case OP_STFDU: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d; emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, FPU_LS_DOUBLE); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case 31: switch (inst_get_field(inst, 21, 30)) { case OP_31_LFSX: { ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0; addr += kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, FPU_LS_SINGLE); break; } case OP_31_LFSUX: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, FPU_LS_SINGLE); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case OP_31_LFDX: { ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, FPU_LS_DOUBLE); break; } case OP_31_LFDUX: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, FPU_LS_DOUBLE); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case OP_31_STFSX: { ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, FPU_LS_SINGLE); break; } case OP_31_STFSUX: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, FPU_LS_SINGLE); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case OP_31_STFX: { ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, FPU_LS_DOUBLE); break; } case OP_31_STFUX: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, FPU_LS_DOUBLE); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case OP_31_STFIWX: { ulong addr = (ax_ra ? 
kvmppc_get_gpr(vcpu, ax_ra) : 0) + kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, FPU_LS_SINGLE_LOW); break; } break; } break; case 59: switch (inst_get_field(inst, 21, 30)) { case OP_59_FADDS: fpd_fadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FSUBS: fpd_fsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FDIVS: fpd_fdivs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FRES: fpd_fres(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FRSQRTES: fpd_frsqrtes(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; } switch (inst_get_field(inst, 26, 30)) { case OP_59_FMULS: fpd_fmuls(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FMSUBS: fpd_fmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FMADDS: fpd_fmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FNMSUBS: fpd_fnmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FNMADDS: fpd_fnmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; } break; case 63: switch (inst_get_field(inst, 21, 30)) { case OP_63_MTFSB0: case OP_63_MTFSB1: case OP_63_MCRFS: case OP_63_MTFSFI: /* XXX need to implement */ break; case OP_63_MFFS: /* XXX missing CR */ *fpr_d = vcpu->arch.fpscr; break; case OP_63_MTFSF: /* XXX missing fm bits */ /* XXX missing CR */ vcpu->arch.fpscr = *fpr_b; break; case OP_63_FCMPU: { u32 tmp_cr; u32 cr0_mask = 0xf0000000; u32 cr_shift = inst_get_field(inst, 6, 8) * 4; fpd_fcmpu(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b); cr &= ~(cr0_mask >> cr_shift); cr |= (cr & cr0_mask) >> cr_shift; break; } case OP_63_FCMPO: { u32 tmp_cr; u32 cr0_mask = 0xf0000000; u32 cr_shift = inst_get_field(inst, 6, 8) * 4; fpd_fcmpo(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b); cr &= ~(cr0_mask >> cr_shift); cr |= (cr & cr0_mask) >> cr_shift; break; } case OP_63_FNEG: fpd_fneg(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); break; case OP_63_FMR: *fpr_d = *fpr_b; break; case OP_63_FABS: fpd_fabs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); break; case OP_63_FCPSGN: fpd_fcpsgn(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); break; case OP_63_FDIV: fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); break; case OP_63_FADD: fpd_fadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); break; case OP_63_FSUB: fpd_fsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); break; case OP_63_FCTIW: fpd_fctiw(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); break; case OP_63_FCTIWZ: fpd_fctiwz(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); break; case OP_63_FRSP: fpd_frsp(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_63_FRSQRTE: { double one = 1.0f; /* fD = sqrt(fB) */ fpd_fsqrt(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); /* fD = 1.0f / fD */ fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, (u64*)&one, fpr_d); break; } } switch (inst_get_field(inst, 26, 30)) { case OP_63_FMUL: fpd_fmul(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c); break; case OP_63_FSEL: fpd_fsel(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); break; case OP_63_FMSUB: fpd_fmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); break; case OP_63_FMADD: fpd_fmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); break; case 
OP_63_FNMSUB: fpd_fnmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); break; case OP_63_FNMADD: fpd_fnmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); break; } break; } #ifdef DEBUG for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) { u32 f; kvm_cvt_df(&vcpu->arch.fpr[i], &f); dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f); } #endif if (rcomp) kvmppc_set_cr(vcpu, cr); preempt_enable(); return emulated; }
multirom-op2/android_kernel_oneplus_msm8994
arch/powerpc/kvm/book3s_paired_singles.c
C
gpl-2.0
31374
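The paired-single emulator in the row above slices every operand out of the raw 32-bit opcode with inst_get_field(), which uses PowerPC's big-endian bit numbering (bit 0 is the most significant bit). The file builds it on top of kvmppc_get_field() with a +32 offset; the standalone sketch below is an illustrative restatement under that assumption, not code taken from the row:

        /* Return inst[msb..lsb] inclusive, IBM bit numbering (0 = MSB of the word). */
        #include <stdio.h>
        #include <stdint.h>

        static uint32_t inst_get_field(uint32_t inst, int msb, int lsb)
        {
                int width = lsb - msb + 1;
                return (inst >> (31 - lsb)) & ((1u << width) - 1u);
        }

        int main(void)
        {
                uint32_t inst = 0xE2A30014;             /* hypothetical psq_l word */

                /* top six bits are the primary opcode: binary 111000 = 56 = OP_PSQ_L */
                printf("op=%u rd=%u ra=%u w=%u i=%u\n",
                       inst_get_field(inst, 0, 5),      /* primary opcode */
                       inst_get_field(inst, 6, 10),     /* FPR destination */
                       inst_get_field(inst, 11, 15),    /* base GPR */
                       inst_get_field(inst, 16, 16),    /* single-word flag */
                       inst_get_field(inst, 17, 19));   /* GQR index */
                return 0;
        }

With that numbering, get_op(inst) in the row is simply inst_get_field(inst, 0, 5), which is how the big switch dispatches on opcode 4, 31, 59, 63 and the FP load/store opcodes.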
/* * Copyright (C) 2002 Benjamin Herrenschmidt (benh@kernel.crashing.org) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Todo: - add support for the OF persistent properties */ #include <linux/export.h> #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/string.h> #include <linux/nvram.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/adb.h> #include <linux/pmu.h> #include <linux/bootmem.h> #include <linux/completion.h> #include <linux/spinlock.h> #include <asm/sections.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/nvram.h> #include "pmac.h" #define DEBUG #ifdef DEBUG #define DBG(x...) printk(x) #else #define DBG(x...) #endif #define NVRAM_SIZE 0x2000 /* 8kB of non-volatile RAM */ #define CORE99_SIGNATURE 0x5a #define CORE99_ADLER_START 0x14 /* On Core99, nvram is either a sharp, a micron or an AMD flash */ #define SM_FLASH_STATUS_DONE 0x80 #define SM_FLASH_STATUS_ERR 0x38 #define SM_FLASH_CMD_ERASE_CONFIRM 0xd0 #define SM_FLASH_CMD_ERASE_SETUP 0x20 #define SM_FLASH_CMD_RESET 0xff #define SM_FLASH_CMD_WRITE_SETUP 0x40 #define SM_FLASH_CMD_CLEAR_STATUS 0x50 #define SM_FLASH_CMD_READ_STATUS 0x70 /* CHRP NVRAM header */ struct chrp_header { u8 signature; u8 cksum; u16 len; char name[12]; u8 data[0]; }; struct core99_header { struct chrp_header hdr; u32 adler; u32 generation; u32 reserved[2]; }; /* * Read and write the non-volatile RAM on PowerMacs and CHRP machines. */ static int nvram_naddrs; static volatile unsigned char __iomem *nvram_data; static int is_core_99; static int core99_bank = 0; static int nvram_partitions[3]; // XXX Turn that into a sem static DEFINE_RAW_SPINLOCK(nv_lock); static int (*core99_write_bank)(int bank, u8* datas); static int (*core99_erase_bank)(int bank); static char *nvram_image; static unsigned char core99_nvram_read_byte(int addr) { if (nvram_image == NULL) return 0xff; return nvram_image[addr]; } static void core99_nvram_write_byte(int addr, unsigned char val) { if (nvram_image == NULL) return; nvram_image[addr] = val; } static ssize_t core99_nvram_read(char *buf, size_t count, loff_t *index) { int i; if (nvram_image == NULL) return -ENODEV; if (*index > NVRAM_SIZE) return 0; i = *index; if (i + count > NVRAM_SIZE) count = NVRAM_SIZE - i; memcpy(buf, &nvram_image[i], count); *index = i + count; return count; } static ssize_t core99_nvram_write(char *buf, size_t count, loff_t *index) { int i; if (nvram_image == NULL) return -ENODEV; if (*index > NVRAM_SIZE) return 0; i = *index; if (i + count > NVRAM_SIZE) count = NVRAM_SIZE - i; memcpy(&nvram_image[i], buf, count); *index = i + count; return count; } static ssize_t core99_nvram_size(void) { if (nvram_image == NULL) return -ENODEV; return NVRAM_SIZE; } #ifdef CONFIG_PPC32 static volatile unsigned char __iomem *nvram_addr; static int nvram_mult; static unsigned char direct_nvram_read_byte(int addr) { return in_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]); } static void direct_nvram_write_byte(int addr, unsigned char val) { out_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult], val); } static unsigned char indirect_nvram_read_byte(int addr) { unsigned char val; unsigned long flags; raw_spin_lock_irqsave(&nv_lock, flags); out_8(nvram_addr, addr >> 5); val = in_8(&nvram_data[(addr & 0x1f) << 4]); 
raw_spin_unlock_irqrestore(&nv_lock, flags); return val; } static void indirect_nvram_write_byte(int addr, unsigned char val) { unsigned long flags; raw_spin_lock_irqsave(&nv_lock, flags); out_8(nvram_addr, addr >> 5); out_8(&nvram_data[(addr & 0x1f) << 4], val); raw_spin_unlock_irqrestore(&nv_lock, flags); } #ifdef CONFIG_ADB_PMU static void pmu_nvram_complete(struct adb_request *req) { if (req->arg) complete((struct completion *)req->arg); } static unsigned char pmu_nvram_read_byte(int addr) { struct adb_request req; DECLARE_COMPLETION_ONSTACK(req_complete); req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL; if (pmu_request(&req, pmu_nvram_complete, 3, PMU_READ_NVRAM, (addr >> 8) & 0xff, addr & 0xff)) return 0xff; if (system_state == SYSTEM_RUNNING) wait_for_completion(&req_complete); while (!req.complete) pmu_poll(); return req.reply[0]; } static void pmu_nvram_write_byte(int addr, unsigned char val) { struct adb_request req; DECLARE_COMPLETION_ONSTACK(req_complete); req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL; if (pmu_request(&req, pmu_nvram_complete, 4, PMU_WRITE_NVRAM, (addr >> 8) & 0xff, addr & 0xff, val)) return; if (system_state == SYSTEM_RUNNING) wait_for_completion(&req_complete); while (!req.complete) pmu_poll(); } #endif /* CONFIG_ADB_PMU */ #endif /* CONFIG_PPC32 */ static u8 chrp_checksum(struct chrp_header* hdr) { u8 *ptr; u16 sum = hdr->signature; for (ptr = (u8 *)&hdr->len; ptr < hdr->data; ptr++) sum += *ptr; while (sum > 0xFF) sum = (sum & 0xFF) + (sum>>8); return sum; } static u32 core99_calc_adler(u8 *buffer) { int cnt; u32 low, high; buffer += CORE99_ADLER_START; low = 1; high = 0; for (cnt=0; cnt<(NVRAM_SIZE-CORE99_ADLER_START); cnt++) { if ((cnt % 5000) == 0) { high %= 65521UL; high %= 65521UL; } low += buffer[cnt]; high += low; } low %= 65521UL; high %= 65521UL; return (high << 16) | low; } static u32 core99_check(u8* datas) { struct core99_header* hdr99 = (struct core99_header*)datas; if (hdr99->hdr.signature != CORE99_SIGNATURE) { DBG("Invalid signature\n"); return 0; } if (hdr99->hdr.cksum != chrp_checksum(&hdr99->hdr)) { DBG("Invalid checksum\n"); return 0; } if (hdr99->adler != core99_calc_adler(datas)) { DBG("Invalid adler\n"); return 0; } return hdr99->generation; } static int sm_erase_bank(int bank) { int stat; unsigned long timeout; u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE; DBG("nvram: Sharp/Micron Erasing bank %d...\n", bank); out_8(base, SM_FLASH_CMD_ERASE_SETUP); out_8(base, SM_FLASH_CMD_ERASE_CONFIRM); timeout = 0; do { if (++timeout > 1000000) { printk(KERN_ERR "nvram: Sharp/Micron flash erase timeout !\n"); break; } out_8(base, SM_FLASH_CMD_READ_STATUS); stat = in_8(base); } while (!(stat & SM_FLASH_STATUS_DONE)); out_8(base, SM_FLASH_CMD_CLEAR_STATUS); out_8(base, SM_FLASH_CMD_RESET); if (memchr_inv(base, 0xff, NVRAM_SIZE)) { printk(KERN_ERR "nvram: Sharp/Micron flash erase failed !\n"); return -ENXIO; } return 0; } static int sm_write_bank(int bank, u8* datas) { int i, stat = 0; unsigned long timeout; u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE; DBG("nvram: Sharp/Micron Writing bank %d...\n", bank); for (i=0; i<NVRAM_SIZE; i++) { out_8(base+i, SM_FLASH_CMD_WRITE_SETUP); udelay(1); out_8(base+i, datas[i]); timeout = 0; do { if (++timeout > 1000000) { printk(KERN_ERR "nvram: Sharp/Micron flash write timeout !\n"); break; } out_8(base, SM_FLASH_CMD_READ_STATUS); stat = in_8(base); } while (!(stat & SM_FLASH_STATUS_DONE)); if (!(stat & SM_FLASH_STATUS_DONE)) break; 
} out_8(base, SM_FLASH_CMD_CLEAR_STATUS); out_8(base, SM_FLASH_CMD_RESET); if (memcmp(base, datas, NVRAM_SIZE)) { printk(KERN_ERR "nvram: Sharp/Micron flash write failed !\n"); return -ENXIO; } return 0; } static int amd_erase_bank(int bank) { int stat = 0; unsigned long timeout; u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE; DBG("nvram: AMD Erasing bank %d...\n", bank); /* Unlock 1 */ out_8(base+0x555, 0xaa); udelay(1); /* Unlock 2 */ out_8(base+0x2aa, 0x55); udelay(1); /* Sector-Erase */ out_8(base+0x555, 0x80); udelay(1); out_8(base+0x555, 0xaa); udelay(1); out_8(base+0x2aa, 0x55); udelay(1); out_8(base, 0x30); udelay(1); timeout = 0; do { if (++timeout > 1000000) { printk(KERN_ERR "nvram: AMD flash erase timeout !\n"); break; } stat = in_8(base) ^ in_8(base); } while (stat != 0); /* Reset */ out_8(base, 0xf0); udelay(1); if (memchr_inv(base, 0xff, NVRAM_SIZE)) { printk(KERN_ERR "nvram: AMD flash erase failed !\n"); return -ENXIO; } return 0; } static int amd_write_bank(int bank, u8* datas) { int i, stat = 0; unsigned long timeout; u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE; DBG("nvram: AMD Writing bank %d...\n", bank); for (i=0; i<NVRAM_SIZE; i++) { /* Unlock 1 */ out_8(base+0x555, 0xaa); udelay(1); /* Unlock 2 */ out_8(base+0x2aa, 0x55); udelay(1); /* Write single word */ out_8(base+0x555, 0xa0); udelay(1); out_8(base+i, datas[i]); timeout = 0; do { if (++timeout > 1000000) { printk(KERN_ERR "nvram: AMD flash write timeout !\n"); break; } stat = in_8(base) ^ in_8(base); } while (stat != 0); if (stat != 0) break; } /* Reset */ out_8(base, 0xf0); udelay(1); if (memcmp(base, datas, NVRAM_SIZE)) { printk(KERN_ERR "nvram: AMD flash write failed !\n"); return -ENXIO; } return 0; } static void __init lookup_partitions(void) { u8 buffer[17]; int i, offset; struct chrp_header* hdr; if (pmac_newworld) { nvram_partitions[pmac_nvram_OF] = -1; nvram_partitions[pmac_nvram_XPRAM] = -1; nvram_partitions[pmac_nvram_NR] = -1; hdr = (struct chrp_header *)buffer; offset = 0; buffer[16] = 0; do { for (i=0;i<16;i++) buffer[i] = ppc_md.nvram_read_val(offset+i); if (!strcmp(hdr->name, "common")) nvram_partitions[pmac_nvram_OF] = offset + 0x10; if (!strcmp(hdr->name, "APL,MacOS75")) { nvram_partitions[pmac_nvram_XPRAM] = offset + 0x10; nvram_partitions[pmac_nvram_NR] = offset + 0x110; } offset += (hdr->len * 0x10); } while(offset < NVRAM_SIZE); } else { nvram_partitions[pmac_nvram_OF] = 0x1800; nvram_partitions[pmac_nvram_XPRAM] = 0x1300; nvram_partitions[pmac_nvram_NR] = 0x1400; } DBG("nvram: OF partition at 0x%x\n", nvram_partitions[pmac_nvram_OF]); DBG("nvram: XP partition at 0x%x\n", nvram_partitions[pmac_nvram_XPRAM]); DBG("nvram: NR partition at 0x%x\n", nvram_partitions[pmac_nvram_NR]); } static void core99_nvram_sync(void) { struct core99_header* hdr99; unsigned long flags; if (!is_core_99 || !nvram_data || !nvram_image) return; raw_spin_lock_irqsave(&nv_lock, flags); if (!memcmp(nvram_image, (u8*)nvram_data + core99_bank*NVRAM_SIZE, NVRAM_SIZE)) goto bail; DBG("Updating nvram...\n"); hdr99 = (struct core99_header*)nvram_image; hdr99->generation++; hdr99->hdr.signature = CORE99_SIGNATURE; hdr99->hdr.cksum = chrp_checksum(&hdr99->hdr); hdr99->adler = core99_calc_adler(nvram_image); core99_bank = core99_bank ? 
0 : 1; if (core99_erase_bank) if (core99_erase_bank(core99_bank)) { printk("nvram: Error erasing bank %d\n", core99_bank); goto bail; } if (core99_write_bank) if (core99_write_bank(core99_bank, nvram_image)) printk("nvram: Error writing bank %d\n", core99_bank); bail: raw_spin_unlock_irqrestore(&nv_lock, flags); #ifdef DEBUG mdelay(2000); #endif } static int __init core99_nvram_setup(struct device_node *dp, unsigned long addr) { int i; u32 gen_bank0, gen_bank1; if (nvram_naddrs < 1) { printk(KERN_ERR "nvram: no address\n"); return -EINVAL; } nvram_image = alloc_bootmem(NVRAM_SIZE); if (nvram_image == NULL) { printk(KERN_ERR "nvram: can't allocate ram image\n"); return -ENOMEM; } nvram_data = ioremap(addr, NVRAM_SIZE*2); nvram_naddrs = 1; /* Make sure we get the correct case */ DBG("nvram: Checking bank 0...\n"); gen_bank0 = core99_check((u8 *)nvram_data); gen_bank1 = core99_check((u8 *)nvram_data + NVRAM_SIZE); core99_bank = (gen_bank0 < gen_bank1) ? 1 : 0; DBG("nvram: gen0=%d, gen1=%d\n", gen_bank0, gen_bank1); DBG("nvram: Active bank is: %d\n", core99_bank); for (i=0; i<NVRAM_SIZE; i++) nvram_image[i] = nvram_data[i + core99_bank*NVRAM_SIZE]; ppc_md.nvram_read_val = core99_nvram_read_byte; ppc_md.nvram_write_val = core99_nvram_write_byte; ppc_md.nvram_read = core99_nvram_read; ppc_md.nvram_write = core99_nvram_write; ppc_md.nvram_size = core99_nvram_size; ppc_md.nvram_sync = core99_nvram_sync; ppc_md.machine_shutdown = core99_nvram_sync; /* * Maybe we could be smarter here though making an exclusive list * of known flash chips is a bit nasty as older OF didn't provide us * with a useful "compatible" entry. A solution would be to really * identify the chip using flash id commands and base ourselves on * a list of known chips IDs */ if (of_device_is_compatible(dp, "amd-0137")) { core99_erase_bank = amd_erase_bank; core99_write_bank = amd_write_bank; } else { core99_erase_bank = sm_erase_bank; core99_write_bank = sm_write_bank; } return 0; } int __init pmac_nvram_init(void) { struct device_node *dp; struct resource r1, r2; unsigned int s1 = 0, s2 = 0; int err = 0; nvram_naddrs = 0; dp = of_find_node_by_name(NULL, "nvram"); if (dp == NULL) { printk(KERN_ERR "Can't find NVRAM device\n"); return -ENODEV; } /* Try to obtain an address */ if (of_address_to_resource(dp, 0, &r1) == 0) { nvram_naddrs = 1; s1 = resource_size(&r1); if (of_address_to_resource(dp, 1, &r2) == 0) { nvram_naddrs = 2; s2 = resource_size(&r2); } } is_core_99 = of_device_is_compatible(dp, "nvram,flash"); if (is_core_99) { err = core99_nvram_setup(dp, r1.start); goto bail; } #ifdef CONFIG_PPC32 if (machine_is(chrp) && nvram_naddrs == 1) { nvram_data = ioremap(r1.start, s1); nvram_mult = 1; ppc_md.nvram_read_val = direct_nvram_read_byte; ppc_md.nvram_write_val = direct_nvram_write_byte; } else if (nvram_naddrs == 1) { nvram_data = ioremap(r1.start, s1); nvram_mult = (s1 + NVRAM_SIZE - 1) / NVRAM_SIZE; ppc_md.nvram_read_val = direct_nvram_read_byte; ppc_md.nvram_write_val = direct_nvram_write_byte; } else if (nvram_naddrs == 2) { nvram_addr = ioremap(r1.start, s1); nvram_data = ioremap(r2.start, s2); ppc_md.nvram_read_val = indirect_nvram_read_byte; ppc_md.nvram_write_val = indirect_nvram_write_byte; } else if (nvram_naddrs == 0 && sys_ctrler == SYS_CTRLER_PMU) { #ifdef CONFIG_ADB_PMU nvram_naddrs = -1; ppc_md.nvram_read_val = pmu_nvram_read_byte; ppc_md.nvram_write_val = pmu_nvram_write_byte; #endif /* CONFIG_ADB_PMU */ } else { printk(KERN_ERR "Incompatible type of NVRAM\n"); err = -ENXIO; } #endif /* CONFIG_PPC32 */ bail: 
of_node_put(dp); if (err == 0) lookup_partitions(); return err; } int pmac_get_partition(int partition) { return nvram_partitions[partition]; } u8 pmac_xpram_read(int xpaddr) { int offset = pmac_get_partition(pmac_nvram_XPRAM); if (offset < 0 || xpaddr < 0 || xpaddr > 0x100) return 0xff; return ppc_md.nvram_read_val(xpaddr + offset); } void pmac_xpram_write(int xpaddr, u8 data) { int offset = pmac_get_partition(pmac_nvram_XPRAM); if (offset < 0 || xpaddr < 0 || xpaddr > 0x100) return; ppc_md.nvram_write_val(xpaddr + offset, data); } EXPORT_SYMBOL(pmac_get_partition); EXPORT_SYMBOL(pmac_xpram_read); EXPORT_SYMBOL(pmac_xpram_write);
TheTypoMaster/SM-G360T1_kernel
arch/powerpc/platforms/powermac/nvram.c
C
gpl-2.0
15386
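Bank selection in the row above rests on core99_check(): a bank's generation counter is only trusted when both the CHRP header checksum and an Adler-style sum over the rest of the image match. The following is a minimal standalone restatement of that Adler computation, a sketch that mirrors the file's arithmetic rather than a reference Adler-32 implementation:

        #include <stdio.h>
        #include <stdint.h>
        #include <stddef.h>

        #define NVRAM_SIZE              0x2000  /* one 8kB bank, as in the file */
        #define CORE99_ADLER_START      0x14    /* skip the fields the CHRP checksum covers */

        static uint32_t core99_adler(const uint8_t *buffer)
        {
                uint32_t low = 1, high = 0;
                size_t cnt;

                buffer += CORE99_ADLER_START;
                for (cnt = 0; cnt < NVRAM_SIZE - CORE99_ADLER_START; cnt++) {
                        if ((cnt % 5000) == 0)
                                high %= 65521UL;        /* periodic fold, as the kernel loop does */
                        low += buffer[cnt];
                        high += low;
                }
                low %= 65521UL;
                high %= 65521UL;
                return (high << 16) | low;
        }

        int main(void)
        {
                static uint8_t bank[NVRAM_SIZE];        /* stand-in for one mapped flash bank */

                printf("adler=0x%08x\n", core99_adler(bank));
                return 0;
        }

Both banks are summed the same way, so whatever the folding quirks, comparing the two generation values stays consistent; core99_nvram_sync() then bumps the generation, recomputes the header checksum and Adler sum, and writes the image to the other bank.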
/* AFS volume location management * * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/sched.h> #include "internal.h" static unsigned afs_vlocation_timeout = 10; /* volume location timeout in seconds */ static unsigned afs_vlocation_update_timeout = 10 * 60; static void afs_vlocation_reaper(struct work_struct *); static void afs_vlocation_updater(struct work_struct *); static LIST_HEAD(afs_vlocation_updates); static LIST_HEAD(afs_vlocation_graveyard); static DEFINE_SPINLOCK(afs_vlocation_updates_lock); static DEFINE_SPINLOCK(afs_vlocation_graveyard_lock); static DECLARE_DELAYED_WORK(afs_vlocation_reap, afs_vlocation_reaper); static DECLARE_DELAYED_WORK(afs_vlocation_update, afs_vlocation_updater); static struct workqueue_struct *afs_vlocation_update_worker; /* * iterate through the VL servers in a cell until one of them admits knowing * about the volume in question */ static int afs_vlocation_access_vl_by_name(struct afs_vlocation *vl, struct key *key, struct afs_cache_vlocation *vldb) { struct afs_cell *cell = vl->cell; struct in_addr addr; int count, ret; _enter("%s,%s", cell->name, vl->vldb.name); down_write(&vl->cell->vl_sem); ret = -ENOMEDIUM; for (count = cell->vl_naddrs; count > 0; count--) { addr = cell->vl_addrs[cell->vl_curr_svix]; _debug("CellServ[%hu]: %08x", cell->vl_curr_svix, addr.s_addr); /* attempt to access the VL server */ ret = afs_vl_get_entry_by_name(&addr, key, vl->vldb.name, vldb, &afs_sync_call); switch (ret) { case 0: goto out; case -ENOMEM: case -ENONET: case -ENETUNREACH: case -EHOSTUNREACH: case -ECONNREFUSED: if (ret == -ENOMEM || ret == -ENONET) goto out; goto rotate; case -ENOMEDIUM: case -EKEYREJECTED: case -EKEYEXPIRED: goto out; default: ret = -EIO; goto rotate; } /* rotate the server records upon lookup failure */ rotate: cell->vl_curr_svix++; cell->vl_curr_svix %= cell->vl_naddrs; } out: up_write(&vl->cell->vl_sem); _leave(" = %d", ret); return ret; } /* * iterate through the VL servers in a cell until one of them admits knowing * about the volume in question */ static int afs_vlocation_access_vl_by_id(struct afs_vlocation *vl, struct key *key, afs_volid_t volid, afs_voltype_t voltype, struct afs_cache_vlocation *vldb) { struct afs_cell *cell = vl->cell; struct in_addr addr; int count, ret; _enter("%s,%x,%d,", cell->name, volid, voltype); down_write(&vl->cell->vl_sem); ret = -ENOMEDIUM; for (count = cell->vl_naddrs; count > 0; count--) { addr = cell->vl_addrs[cell->vl_curr_svix]; _debug("CellServ[%hu]: %08x", cell->vl_curr_svix, addr.s_addr); /* attempt to access the VL server */ ret = afs_vl_get_entry_by_id(&addr, key, volid, voltype, vldb, &afs_sync_call); switch (ret) { case 0: goto out; case -ENOMEM: case -ENONET: case -ENETUNREACH: case -EHOSTUNREACH: case -ECONNREFUSED: if (ret == -ENOMEM || ret == -ENONET) goto out; goto rotate; case -EBUSY: vl->upd_busy_cnt++; if (vl->upd_busy_cnt <= 3) { if (vl->upd_busy_cnt > 1) { /* second+ BUSY - sleep a little bit */ set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(1); __set_current_state(TASK_RUNNING); } continue; } break; case -ENOMEDIUM: vl->upd_rej_cnt++; goto rotate; 
default: ret = -EIO; goto rotate; } /* rotate the server records upon lookup failure */ rotate: cell->vl_curr_svix++; cell->vl_curr_svix %= cell->vl_naddrs; vl->upd_busy_cnt = 0; } out: if (ret < 0 && vl->upd_rej_cnt > 0) { printk(KERN_NOTICE "kAFS:" " Active volume no longer valid '%s'\n", vl->vldb.name); vl->valid = 0; ret = -ENOMEDIUM; } up_write(&vl->cell->vl_sem); _leave(" = %d", ret); return ret; } /* * allocate a volume location record */ static struct afs_vlocation *afs_vlocation_alloc(struct afs_cell *cell, const char *name, size_t namesz) { struct afs_vlocation *vl; vl = kzalloc(sizeof(struct afs_vlocation), GFP_KERNEL); if (vl) { vl->cell = cell; vl->state = AFS_VL_NEW; atomic_set(&vl->usage, 1); INIT_LIST_HEAD(&vl->link); INIT_LIST_HEAD(&vl->grave); INIT_LIST_HEAD(&vl->update); init_waitqueue_head(&vl->waitq); spin_lock_init(&vl->lock); memcpy(vl->vldb.name, name, namesz); } _leave(" = %p", vl); return vl; } /* * update record if we found it in the cache */ static int afs_vlocation_update_record(struct afs_vlocation *vl, struct key *key, struct afs_cache_vlocation *vldb) { afs_voltype_t voltype; afs_volid_t vid; int ret; /* try to look up a cached volume in the cell VL databases by ID */ _debug("Locally Cached: %s %02x { %08x(%x) %08x(%x) %08x(%x) }", vl->vldb.name, vl->vldb.vidmask, ntohl(vl->vldb.servers[0].s_addr), vl->vldb.srvtmask[0], ntohl(vl->vldb.servers[1].s_addr), vl->vldb.srvtmask[1], ntohl(vl->vldb.servers[2].s_addr), vl->vldb.srvtmask[2]); _debug("Vids: %08x %08x %08x", vl->vldb.vid[0], vl->vldb.vid[1], vl->vldb.vid[2]); if (vl->vldb.vidmask & AFS_VOL_VTM_RW) { vid = vl->vldb.vid[0]; voltype = AFSVL_RWVOL; } else if (vl->vldb.vidmask & AFS_VOL_VTM_RO) { vid = vl->vldb.vid[1]; voltype = AFSVL_ROVOL; } else if (vl->vldb.vidmask & AFS_VOL_VTM_BAK) { vid = vl->vldb.vid[2]; voltype = AFSVL_BACKVOL; } else { BUG(); vid = 0; voltype = 0; } /* contact the server to make sure the volume is still available * - TODO: need to handle disconnected operation here */ ret = afs_vlocation_access_vl_by_id(vl, key, vid, voltype, vldb); switch (ret) { /* net error */ default: printk(KERN_WARNING "kAFS:" " failed to update volume '%s' (%x) up in '%s': %d\n", vl->vldb.name, vid, vl->cell->name, ret); _leave(" = %d", ret); return ret; /* pulled from local cache into memory */ case 0: _leave(" = 0"); return 0; /* uh oh... 
looks like the volume got deleted */ case -ENOMEDIUM: printk(KERN_ERR "kAFS:" " volume '%s' (%x) does not exist '%s'\n", vl->vldb.name, vid, vl->cell->name); /* TODO: make existing record unavailable */ _leave(" = %d", ret); return ret; } } /* * apply the update to a VL record */ static void afs_vlocation_apply_update(struct afs_vlocation *vl, struct afs_cache_vlocation *vldb) { _debug("Done VL Lookup: %s %02x { %08x(%x) %08x(%x) %08x(%x) }", vldb->name, vldb->vidmask, ntohl(vldb->servers[0].s_addr), vldb->srvtmask[0], ntohl(vldb->servers[1].s_addr), vldb->srvtmask[1], ntohl(vldb->servers[2].s_addr), vldb->srvtmask[2]); _debug("Vids: %08x %08x %08x", vldb->vid[0], vldb->vid[1], vldb->vid[2]); if (strcmp(vldb->name, vl->vldb.name) != 0) printk(KERN_NOTICE "kAFS:" " name of volume '%s' changed to '%s' on server\n", vl->vldb.name, vldb->name); vl->vldb = *vldb; #ifdef CONFIG_AFS_FSCACHE fscache_update_cookie(vl->cache); #endif } /* * fill in a volume location record, consulting the cache and the VL server * both */ static int afs_vlocation_fill_in_record(struct afs_vlocation *vl, struct key *key) { struct afs_cache_vlocation vldb; int ret; _enter(""); ASSERTCMP(vl->valid, ==, 0); memset(&vldb, 0, sizeof(vldb)); /* see if we have an in-cache copy (will set vl->valid if there is) */ #ifdef CONFIG_AFS_FSCACHE vl->cache = fscache_acquire_cookie(vl->cell->cache, &afs_vlocation_cache_index_def, vl); #endif if (vl->valid) { /* try to update a known volume in the cell VL databases by * ID as the name may have changed */ _debug("found in cache"); ret = afs_vlocation_update_record(vl, key, &vldb); } else { /* try to look up an unknown volume in the cell VL databases by * name */ ret = afs_vlocation_access_vl_by_name(vl, key, &vldb); if (ret < 0) { printk("kAFS: failed to locate '%s' in cell '%s'\n", vl->vldb.name, vl->cell->name); return ret; } } afs_vlocation_apply_update(vl, &vldb); _leave(" = 0"); return 0; } /* * queue a vlocation record for updates */ static void afs_vlocation_queue_for_updates(struct afs_vlocation *vl) { struct afs_vlocation *xvl; /* wait at least 10 minutes before updating... */ vl->update_at = get_seconds() + afs_vlocation_update_timeout; spin_lock(&afs_vlocation_updates_lock); if (!list_empty(&afs_vlocation_updates)) { /* ... 
but wait at least 1 second more than the newest record * already queued so that we don't spam the VL server suddenly * with lots of requests */ xvl = list_entry(afs_vlocation_updates.prev, struct afs_vlocation, update); if (vl->update_at <= xvl->update_at) vl->update_at = xvl->update_at + 1; } else { queue_delayed_work(afs_vlocation_update_worker, &afs_vlocation_update, afs_vlocation_update_timeout * HZ); } list_add_tail(&vl->update, &afs_vlocation_updates); spin_unlock(&afs_vlocation_updates_lock); } /* * lookup volume location * - iterate through the VL servers in a cell until one of them admits knowing * about the volume in question * - lookup in the local cache if not able to find on the VL server * - insert/update in the local cache if did get a VL response */ struct afs_vlocation *afs_vlocation_lookup(struct afs_cell *cell, struct key *key, const char *name, size_t namesz) { struct afs_vlocation *vl; int ret; _enter("{%s},{%x},%*.*s,%zu", cell->name, key_serial(key), (int) namesz, (int) namesz, name, namesz); if (namesz >= sizeof(vl->vldb.name)) { _leave(" = -ENAMETOOLONG"); return ERR_PTR(-ENAMETOOLONG); } /* see if we have an in-memory copy first */ down_write(&cell->vl_sem); spin_lock(&cell->vl_lock); list_for_each_entry(vl, &cell->vl_list, link) { if (vl->vldb.name[namesz] != '\0') continue; if (memcmp(vl->vldb.name, name, namesz) == 0) goto found_in_memory; } spin_unlock(&cell->vl_lock); /* not in the cell's in-memory lists - create a new record */ vl = afs_vlocation_alloc(cell, name, namesz); if (!vl) { up_write(&cell->vl_sem); return ERR_PTR(-ENOMEM); } afs_get_cell(cell); list_add_tail(&vl->link, &cell->vl_list); vl->state = AFS_VL_CREATING; up_write(&cell->vl_sem); fill_in_record: ret = afs_vlocation_fill_in_record(vl, key); if (ret < 0) goto error_abandon; spin_lock(&vl->lock); vl->state = AFS_VL_VALID; spin_unlock(&vl->lock); wake_up(&vl->waitq); /* update volume entry in local cache */ #ifdef CONFIG_AFS_FSCACHE fscache_update_cookie(vl->cache); #endif /* schedule for regular updates */ afs_vlocation_queue_for_updates(vl); goto success; found_in_memory: /* found in memory */ _debug("found in memory"); atomic_inc(&vl->usage); spin_unlock(&cell->vl_lock); if (!list_empty(&vl->grave)) { spin_lock(&afs_vlocation_graveyard_lock); list_del_init(&vl->grave); spin_unlock(&afs_vlocation_graveyard_lock); } up_write(&cell->vl_sem); /* see if it was an abandoned record that we might try filling in */ spin_lock(&vl->lock); while (vl->state != AFS_VL_VALID) { afs_vlocation_state_t state = vl->state; _debug("invalid [state %d]", state); if (state == AFS_VL_NEW || state == AFS_VL_NO_VOLUME) { vl->state = AFS_VL_CREATING; spin_unlock(&vl->lock); goto fill_in_record; } /* must now wait for creation or update by someone else to * complete */ _debug("wait"); spin_unlock(&vl->lock); ret = wait_event_interruptible(vl->waitq, vl->state == AFS_VL_NEW || vl->state == AFS_VL_VALID || vl->state == AFS_VL_NO_VOLUME); if (ret < 0) goto error; spin_lock(&vl->lock); } spin_unlock(&vl->lock); success: _leave(" = %p", vl); return vl; error_abandon: spin_lock(&vl->lock); vl->state = AFS_VL_NEW; spin_unlock(&vl->lock); wake_up(&vl->waitq); error: ASSERT(vl != NULL); afs_put_vlocation(vl); _leave(" = %d", ret); return ERR_PTR(ret); } /* * finish using a volume location record */ void afs_put_vlocation(struct afs_vlocation *vl) { if (!vl) return; _enter("%s", vl->vldb.name); ASSERTCMP(atomic_read(&vl->usage), >, 0); if (likely(!atomic_dec_and_test(&vl->usage))) { _leave(""); return; } 
spin_lock(&afs_vlocation_graveyard_lock); if (atomic_read(&vl->usage) == 0) { _debug("buried"); list_move_tail(&vl->grave, &afs_vlocation_graveyard); vl->time_of_death = get_seconds(); queue_delayed_work(afs_wq, &afs_vlocation_reap, afs_vlocation_timeout * HZ); /* suspend updates on this record */ if (!list_empty(&vl->update)) { spin_lock(&afs_vlocation_updates_lock); list_del_init(&vl->update); spin_unlock(&afs_vlocation_updates_lock); } } spin_unlock(&afs_vlocation_graveyard_lock); _leave(" [killed?]"); } /* * destroy a dead volume location record */ static void afs_vlocation_destroy(struct afs_vlocation *vl) { _enter("%p", vl); #ifdef CONFIG_AFS_FSCACHE fscache_relinquish_cookie(vl->cache, 0); #endif afs_put_cell(vl->cell); kfree(vl); } /* * reap dead volume location records */ static void afs_vlocation_reaper(struct work_struct *work) { LIST_HEAD(corpses); struct afs_vlocation *vl; unsigned long delay, expiry; time_t now; _enter(""); now = get_seconds(); spin_lock(&afs_vlocation_graveyard_lock); while (!list_empty(&afs_vlocation_graveyard)) { vl = list_entry(afs_vlocation_graveyard.next, struct afs_vlocation, grave); _debug("check %p", vl); /* the queue is ordered most dead first */ expiry = vl->time_of_death + afs_vlocation_timeout; if (expiry > now) { delay = (expiry - now) * HZ; _debug("delay %lu", delay); if (!queue_delayed_work(afs_wq, &afs_vlocation_reap, delay)) { cancel_delayed_work(&afs_vlocation_reap); queue_delayed_work(afs_wq, &afs_vlocation_reap, delay); } break; } spin_lock(&vl->cell->vl_lock); if (atomic_read(&vl->usage) > 0) { _debug("no reap"); list_del_init(&vl->grave); } else { _debug("reap"); list_move_tail(&vl->grave, &corpses); list_del_init(&vl->link); } spin_unlock(&vl->cell->vl_lock); } spin_unlock(&afs_vlocation_graveyard_lock); /* now reap the corpses we've extracted */ while (!list_empty(&corpses)) { vl = list_entry(corpses.next, struct afs_vlocation, grave); list_del(&vl->grave); afs_vlocation_destroy(vl); } _leave(""); } /* * initialise the VL update process */ int __init afs_vlocation_update_init(void) { afs_vlocation_update_worker = create_singlethread_workqueue("kafs_vlupdated"); return afs_vlocation_update_worker ? 
0 : -ENOMEM; } /* * discard all the volume location records for rmmod */ void afs_vlocation_purge(void) { afs_vlocation_timeout = 0; spin_lock(&afs_vlocation_updates_lock); list_del_init(&afs_vlocation_updates); spin_unlock(&afs_vlocation_updates_lock); cancel_delayed_work(&afs_vlocation_update); queue_delayed_work(afs_vlocation_update_worker, &afs_vlocation_update, 0); destroy_workqueue(afs_vlocation_update_worker); cancel_delayed_work(&afs_vlocation_reap); queue_delayed_work(afs_wq, &afs_vlocation_reap, 0); } /* * update a volume location */ static void afs_vlocation_updater(struct work_struct *work) { struct afs_cache_vlocation vldb; struct afs_vlocation *vl, *xvl; time_t now; long timeout; int ret; _enter(""); now = get_seconds(); /* find a record to update */ spin_lock(&afs_vlocation_updates_lock); for (;;) { if (list_empty(&afs_vlocation_updates)) { spin_unlock(&afs_vlocation_updates_lock); _leave(" [nothing]"); return; } vl = list_entry(afs_vlocation_updates.next, struct afs_vlocation, update); if (atomic_read(&vl->usage) > 0) break; list_del_init(&vl->update); } timeout = vl->update_at - now; if (timeout > 0) { queue_delayed_work(afs_vlocation_update_worker, &afs_vlocation_update, timeout * HZ); spin_unlock(&afs_vlocation_updates_lock); _leave(" [nothing]"); return; } list_del_init(&vl->update); atomic_inc(&vl->usage); spin_unlock(&afs_vlocation_updates_lock); /* we can now perform the update */ _debug("update %s", vl->vldb.name); vl->state = AFS_VL_UPDATING; vl->upd_rej_cnt = 0; vl->upd_busy_cnt = 0; ret = afs_vlocation_update_record(vl, NULL, &vldb); spin_lock(&vl->lock); switch (ret) { case 0: afs_vlocation_apply_update(vl, &vldb); vl->state = AFS_VL_VALID; break; case -ENOMEDIUM: vl->state = AFS_VL_VOLUME_DELETED; break; default: vl->state = AFS_VL_UNCERTAIN; break; } spin_unlock(&vl->lock); wake_up(&vl->waitq); /* and then reschedule */ _debug("reschedule"); vl->update_at = get_seconds() + afs_vlocation_update_timeout; spin_lock(&afs_vlocation_updates_lock); if (!list_empty(&afs_vlocation_updates)) { /* next update in 10 minutes, but wait at least 1 second more * than the newest record already queued so that we don't spam * the VL server suddenly with lots of requests */ xvl = list_entry(afs_vlocation_updates.prev, struct afs_vlocation, update); if (vl->update_at <= xvl->update_at) vl->update_at = xvl->update_at + 1; xvl = list_entry(afs_vlocation_updates.next, struct afs_vlocation, update); timeout = xvl->update_at - now; if (timeout < 0) timeout = 0; } else { timeout = afs_vlocation_update_timeout; } ASSERT(list_empty(&vl->update)); list_add_tail(&vl->update, &afs_vlocation_updates); _debug("timeout %ld", timeout); queue_delayed_work(afs_vlocation_update_worker, &afs_vlocation_update, timeout * HZ); spin_unlock(&afs_vlocation_updates_lock); afs_put_vlocation(vl); }
HighwindONE/android_kernel_lge_msm8226
fs/afs/vlocation.c
C
gpl-2.0
18106
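afs_vlocation_queue_for_updates() in the row above spaces background updates so that mounting many volumes at once does not turn into a burst of VL-server traffic: each record is scheduled roughly ten minutes out, but never earlier than one second after the newest record already queued. Below is a small standalone sketch of that spacing rule, with names and structure assumed for illustration rather than lifted from the row:

        #include <stdio.h>
        #include <time.h>

        #define UPDATE_TIMEOUT  (10 * 60)       /* seconds, matching afs_vlocation_update_timeout */

        static time_t schedule_update(time_t now, time_t newest_queued, int queue_empty)
        {
                time_t at = now + UPDATE_TIMEOUT;

                /* keep at least one second between consecutive queued updates */
                if (!queue_empty && at <= newest_queued)
                        at = newest_queued + 1;
                return at;
        }

        int main(void)
        {
                time_t now = time(NULL);
                time_t first = schedule_update(now, 0, 1);
                time_t second = schedule_update(now, first, 0);

                printf("gap between back-to-back queued updates: %ld s\n",
                       (long)(second - first));
                return 0;
        }

The updater applies the same rule again when it reschedules a record after refreshing it, which is why afs_vlocation_updater() compares against the tail of afs_vlocation_updates before queueing the next delayed work.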
/* * Architecture-specific signal handling support. * * Copyright (C) 1999-2004 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> * * Derived from i386 and Alpha versions. */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/ptrace.h> #include <linux/tracehook.h> #include <linux/sched.h> #include <linux/signal.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/tty.h> #include <linux/binfmts.h> #include <linux/unistd.h> #include <linux/wait.h> #include <asm/intrinsics.h> #include <asm/uaccess.h> #include <asm/rse.h> #include <asm/sigcontext.h> #include "sigframe.h" #define DEBUG_SIG 0 #define STACK_ALIGN 16 /* minimal alignment for stack pointer */ #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) #if _NSIG_WORDS > 1 # define PUT_SIGSET(k,u) __copy_to_user((u)->sig, (k)->sig, sizeof(sigset_t)) # define GET_SIGSET(k,u) __copy_from_user((k)->sig, (u)->sig, sizeof(sigset_t)) #else # define PUT_SIGSET(k,u) __put_user((k)->sig[0], &(u)->sig[0]) # define GET_SIGSET(k,u) __get_user((k)->sig[0], &(u)->sig[0]) #endif asmlinkage long sys_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, long arg2, long arg3, long arg4, long arg5, long arg6, long arg7, struct pt_regs regs) { return do_sigaltstack(uss, uoss, regs.r12); } static long restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr) { unsigned long ip, flags, nat, um, cfm, rsc; long err; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; /* restore scratch that always needs gets updated during signal delivery: */ err = __get_user(flags, &sc->sc_flags); err |= __get_user(nat, &sc->sc_nat); err |= __get_user(ip, &sc->sc_ip); /* instruction pointer */ err |= __get_user(cfm, &sc->sc_cfm); err |= __get_user(um, &sc->sc_um); /* user mask */ err |= __get_user(rsc, &sc->sc_ar_rsc); err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat); err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr); err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs); err |= __get_user(scr->pt.pr, &sc->sc_pr); /* predicates */ err |= __get_user(scr->pt.b0, &sc->sc_br[0]); /* b0 (rp) */ err |= __get_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */ err |= __copy_from_user(&scr->pt.r1, &sc->sc_gr[1], 8); /* r1 */ err |= __copy_from_user(&scr->pt.r8, &sc->sc_gr[8], 4*8); /* r8-r11 */ err |= __copy_from_user(&scr->pt.r12, &sc->sc_gr[12], 2*8); /* r12-r13 */ err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8); /* r15 */ scr->pt.cr_ifs = cfm | (1UL << 63); scr->pt.ar_rsc = rsc | (3 << 2); /* force PL3 */ /* establish new instruction pointer: */ scr->pt.cr_iip = ip & ~0x3UL; ia64_psr(&scr->pt)->ri = ip & 0x3; scr->pt.cr_ipsr = (scr->pt.cr_ipsr & ~IA64_PSR_UM) | (um & IA64_PSR_UM); scr->scratch_unat = ia64_put_scratch_nat_bits(&scr->pt, nat); if (!(flags & IA64_SC_FLAG_IN_SYSCALL)) { /* Restore most scratch-state only when not in syscall. 
*/ err |= __get_user(scr->pt.ar_ccv, &sc->sc_ar_ccv); /* ar.ccv */ err |= __get_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */ err |= __get_user(scr->pt.r14, &sc->sc_gr[14]); /* r14 */ err |= __copy_from_user(&scr->pt.ar_csd, &sc->sc_ar25, 2*8); /* ar.csd & ar.ssd */ err |= __copy_from_user(&scr->pt.r2, &sc->sc_gr[2], 2*8); /* r2-r3 */ err |= __copy_from_user(&scr->pt.r16, &sc->sc_gr[16], 16*8); /* r16-r31 */ } if ((flags & IA64_SC_FLAG_FPH_VALID) != 0) { struct ia64_psr *psr = ia64_psr(&scr->pt); err |= __copy_from_user(current->thread.fph, &sc->sc_fr[32], 96*16); psr->mfh = 0; /* drop signal handler's fph contents... */ preempt_disable(); if (psr->dfh) ia64_drop_fpu(current); else { /* We already own the local fph, otherwise psr->dfh wouldn't be 0. */ __ia64_load_fpu(current->thread.fph); ia64_set_local_fpu_owner(current); } preempt_enable(); } return err; } int copy_siginfo_to_user (siginfo_t __user *to, siginfo_t *from) { if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t))) return -EFAULT; if (from->si_code < 0) { if (__copy_to_user(to, from, sizeof(siginfo_t))) return -EFAULT; return 0; } else { int err; /* * If you change siginfo_t structure, please be sure this code is fixed * accordingly. It should never copy any pad contained in the structure * to avoid security leaks, but must copy the generic 3 ints plus the * relevant union member. */ err = __put_user(from->si_signo, &to->si_signo); err |= __put_user(from->si_errno, &to->si_errno); err |= __put_user((short)from->si_code, &to->si_code); switch (from->si_code >> 16) { case __SI_FAULT >> 16: err |= __put_user(from->si_flags, &to->si_flags); err |= __put_user(from->si_isr, &to->si_isr); case __SI_POLL >> 16: err |= __put_user(from->si_addr, &to->si_addr); err |= __put_user(from->si_imm, &to->si_imm); break; case __SI_TIMER >> 16: err |= __put_user(from->si_tid, &to->si_tid); err |= __put_user(from->si_overrun, &to->si_overrun); err |= __put_user(from->si_ptr, &to->si_ptr); break; case __SI_RT >> 16: /* Not generated by the kernel as of now. */ case __SI_MESGQ >> 16: err |= __put_user(from->si_uid, &to->si_uid); err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_ptr, &to->si_ptr); break; case __SI_CHLD >> 16: err |= __put_user(from->si_utime, &to->si_utime); err |= __put_user(from->si_stime, &to->si_stime); err |= __put_user(from->si_status, &to->si_status); default: err |= __put_user(from->si_uid, &to->si_uid); err |= __put_user(from->si_pid, &to->si_pid); break; } return err; } } long ia64_rt_sigreturn (struct sigscratch *scr) { extern char ia64_strace_leave_kernel, ia64_leave_kernel; struct sigcontext __user *sc; struct siginfo si; sigset_t set; long retval; sc = &((struct sigframe __user *) (scr->pt.r12 + 16))->sc; /* * When we return to the previously executing context, r8 and r10 have already * been setup the way we want them. Indeed, if the signal wasn't delivered while * in a system call, we must not touch r8 or r10 as otherwise user-level state * could be corrupted. */ retval = (long) &ia64_leave_kernel; if (test_thread_flag(TIF_SYSCALL_TRACE) || test_thread_flag(TIF_SYSCALL_AUDIT)) /* * strace expects to be notified after sigreturn returns even though the * context to which we return may not be in the middle of a syscall. * Thus, the return-value that strace displays for sigreturn is * meaningless. 
*/ retval = (long) &ia64_strace_leave_kernel; if (!access_ok(VERIFY_READ, sc, sizeof(*sc))) goto give_sigsegv; if (GET_SIGSET(&set, &sc->sc_mask)) goto give_sigsegv; sigdelsetmask(&set, ~_BLOCKABLE); spin_lock_irq(&current->sighand->siglock); { current->blocked = set; recalc_sigpending(); } spin_unlock_irq(&current->sighand->siglock); if (restore_sigcontext(sc, scr)) goto give_sigsegv; #if DEBUG_SIG printk("SIG return (%s:%d): sp=%lx ip=%lx\n", current->comm, current->pid, scr->pt.r12, scr->pt.cr_iip); #endif /* * It is more difficult to avoid calling this function than to * call it and ignore errors. */ do_sigaltstack(&sc->sc_stack, NULL, scr->pt.r12); return retval; give_sigsegv: si.si_signo = SIGSEGV; si.si_errno = 0; si.si_code = SI_KERNEL; si.si_pid = task_pid_vnr(current); si.si_uid = current_uid(); si.si_addr = sc; force_sig_info(SIGSEGV, &si, current); return retval; } /* * This does just the minimum required setup of sigcontext. * Specifically, it only installs data that is either not knowable at * the user-level or that gets modified before execution in the * trampoline starts. Everything else is done at the user-level. */ static long setup_sigcontext (struct sigcontext __user *sc, sigset_t *mask, struct sigscratch *scr) { unsigned long flags = 0, ifs, cfm, nat; long err = 0; ifs = scr->pt.cr_ifs; if (on_sig_stack((unsigned long) sc)) flags |= IA64_SC_FLAG_ONSTACK; if ((ifs & (1UL << 63)) == 0) /* if cr_ifs doesn't have the valid bit set, we got here through a syscall */ flags |= IA64_SC_FLAG_IN_SYSCALL; cfm = ifs & ((1UL << 38) - 1); ia64_flush_fph(current); if ((current->thread.flags & IA64_THREAD_FPH_VALID)) { flags |= IA64_SC_FLAG_FPH_VALID; err = __copy_to_user(&sc->sc_fr[32], current->thread.fph, 96*16); } nat = ia64_get_scratch_nat_bits(&scr->pt, scr->scratch_unat); err |= __put_user(flags, &sc->sc_flags); err |= __put_user(nat, &sc->sc_nat); err |= PUT_SIGSET(mask, &sc->sc_mask); err |= __put_user(cfm, &sc->sc_cfm); err |= __put_user(scr->pt.cr_ipsr & IA64_PSR_UM, &sc->sc_um); err |= __put_user(scr->pt.ar_rsc, &sc->sc_ar_rsc); err |= __put_user(scr->pt.ar_unat, &sc->sc_ar_unat); /* ar.unat */ err |= __put_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr); /* ar.fpsr */ err |= __put_user(scr->pt.ar_pfs, &sc->sc_ar_pfs); err |= __put_user(scr->pt.pr, &sc->sc_pr); /* predicates */ err |= __put_user(scr->pt.b0, &sc->sc_br[0]); /* b0 (rp) */ err |= __put_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */ err |= __copy_to_user(&sc->sc_gr[1], &scr->pt.r1, 8); /* r1 */ err |= __copy_to_user(&sc->sc_gr[8], &scr->pt.r8, 4*8); /* r8-r11 */ err |= __copy_to_user(&sc->sc_gr[12], &scr->pt.r12, 2*8); /* r12-r13 */ err |= __copy_to_user(&sc->sc_gr[15], &scr->pt.r15, 8); /* r15 */ err |= __put_user(scr->pt.cr_iip + ia64_psr(&scr->pt)->ri, &sc->sc_ip); if (!(flags & IA64_SC_FLAG_IN_SYSCALL)) { /* Copy scratch regs to sigcontext if the signal didn't interrupt a syscall. */ err |= __put_user(scr->pt.ar_ccv, &sc->sc_ar_ccv); /* ar.ccv */ err |= __put_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */ err |= __put_user(scr->pt.r14, &sc->sc_gr[14]); /* r14 */ err |= __copy_to_user(&sc->sc_ar25, &scr->pt.ar_csd, 2*8); /* ar.csd & ar.ssd */ err |= __copy_to_user(&sc->sc_gr[2], &scr->pt.r2, 2*8); /* r2-r3 */ err |= __copy_to_user(&sc->sc_gr[16], &scr->pt.r16, 16*8); /* r16-r31 */ } return err; } /* * Check whether the register-backing store is already on the signal stack. 
*/ static inline int rbs_on_sig_stack (unsigned long bsp) { return (bsp - current->sas_ss_sp < current->sas_ss_size); } static long force_sigsegv_info (int sig, void __user *addr) { unsigned long flags; struct siginfo si; if (sig == SIGSEGV) { /* * Acquiring siglock around the sa_handler-update is almost * certainly overkill, but this isn't a * performance-critical path and I'd rather play it safe * here than having to debug a nasty race if and when * something changes in kernel/signal.c that would make it * no longer safe to modify sa_handler without holding the * lock. */ spin_lock_irqsave(&current->sighand->siglock, flags); current->sighand->action[sig - 1].sa.sa_handler = SIG_DFL; spin_unlock_irqrestore(&current->sighand->siglock, flags); } si.si_signo = SIGSEGV; si.si_errno = 0; si.si_code = SI_KERNEL; si.si_pid = task_pid_vnr(current); si.si_uid = current_uid(); si.si_addr = addr; force_sig_info(SIGSEGV, &si, current); return 0; } static long setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct sigscratch *scr) { extern char __kernel_sigtramp[]; unsigned long tramp_addr, new_rbs = 0, new_sp; struct sigframe __user *frame; long err; new_sp = scr->pt.r12; tramp_addr = (unsigned long) __kernel_sigtramp; if (ka->sa.sa_flags & SA_ONSTACK) { int onstack = sas_ss_flags(new_sp); if (onstack == 0) { new_sp = current->sas_ss_sp + current->sas_ss_size; /* * We need to check for the register stack being on the * signal stack separately, because it's switched * separately (memory stack is switched in the kernel, * register stack is switched in the signal trampoline). */ if (!rbs_on_sig_stack(scr->pt.ar_bspstore)) new_rbs = ALIGN(current->sas_ss_sp, sizeof(long)); } else if (onstack == SS_ONSTACK) { unsigned long check_sp; /* * If we are on the alternate signal stack and would * overflow it, don't. Return an always-bogus address * instead so we will die with SIGSEGV. */ check_sp = (new_sp - sizeof(*frame)) & -STACK_ALIGN; if (!likely(on_sig_stack(check_sp))) return force_sigsegv_info(sig, (void __user *) check_sp); } } frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) return force_sigsegv_info(sig, frame); err = __put_user(sig, &frame->arg0); err |= __put_user(&frame->info, &frame->arg1); err |= __put_user(&frame->sc, &frame->arg2); err |= __put_user(new_rbs, &frame->sc.sc_rbs_base); err |= __put_user(0, &frame->sc.sc_loadrs); /* initialize to zero */ err |= __put_user(ka->sa.sa_handler, &frame->handler); err |= copy_siginfo_to_user(&frame->info, info); err |= __put_user(current->sas_ss_sp, &frame->sc.sc_stack.ss_sp); err |= __put_user(current->sas_ss_size, &frame->sc.sc_stack.ss_size); err |= __put_user(sas_ss_flags(scr->pt.r12), &frame->sc.sc_stack.ss_flags); err |= setup_sigcontext(&frame->sc, set, scr); if (unlikely(err)) return force_sigsegv_info(sig, frame); scr->pt.r12 = (unsigned long) frame - 16; /* new stack pointer */ scr->pt.ar_fpsr = FPSR_DEFAULT; /* reset fpsr for signal handler */ scr->pt.cr_iip = tramp_addr; ia64_psr(&scr->pt)->ri = 0; /* start executing in first slot */ ia64_psr(&scr->pt)->be = 0; /* force little-endian byte-order */ /* * Force the interruption function mask to zero. This has no effect when a * system-call got interrupted by a signal (since, in that case, scr->pt_cr_ifs is * ignored), but it has the desirable effect of making it possible to deliver a * signal with an incomplete register frame (which happens when a mandatory RSE * load faults). 
Furthermore, it has no negative effect on the getting the user's * dirty partition preserved, because that's governed by scr->pt.loadrs. */ scr->pt.cr_ifs = (1UL << 63); /* * Note: this affects only the NaT bits of the scratch regs (the ones saved in * pt_regs), which is exactly what we want. */ scr->scratch_unat = 0; /* ensure NaT bits of r12 is clear */ #if DEBUG_SIG printk("SIG deliver (%s:%d): sig=%d sp=%lx ip=%lx handler=%p\n", current->comm, current->pid, sig, scr->pt.r12, frame->sc.sc_ip, frame->handler); #endif return 1; } static long handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct sigscratch *scr) { if (!setup_frame(sig, ka, info, oldset, scr)) return 0; spin_lock_irq(&current->sighand->siglock); sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); if (!(ka->sa.sa_flags & SA_NODEFER)) sigaddset(&current->blocked, sig); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); /* * Let tracing know that we've done the handler setup. */ tracehook_signal_handler(sig, info, ka, &scr->pt, test_thread_flag(TIF_SINGLESTEP)); return 1; } /* * Note that `init' is a special process: it doesn't get signals it doesn't want to * handle. Thus you cannot kill init even with a SIGKILL even by mistake. */ void ia64_do_signal (struct sigscratch *scr, long in_syscall) { struct k_sigaction ka; sigset_t *oldset; siginfo_t info; long restart = in_syscall; long errno = scr->pt.r8; /* * In the ia64_leave_kernel code path, we want the common case to go fast, which * is why we may in certain cases get here from kernel mode. Just return without * doing anything if so. */ if (!user_mode(&scr->pt)) return; if (current_thread_info()->status & TS_RESTORE_SIGMASK) oldset = &current->saved_sigmask; else oldset = &current->blocked; /* * This only loops in the rare cases of handle_signal() failing, in which case we * need to push through a forced SIGSEGV. */ while (1) { int signr = get_signal_to_deliver(&info, &ka, &scr->pt, NULL); /* * get_signal_to_deliver() may have run a debugger (via notify_parent()) * and the debugger may have modified the state (e.g., to arrange for an * inferior call), thus it's important to check for restarting _after_ * get_signal_to_deliver(). */ if ((long) scr->pt.r10 != -1) /* * A system calls has to be restarted only if one of the error codes * ERESTARTNOHAND, ERESTARTSYS, or ERESTARTNOINTR is returned. If r10 * isn't -1 then r8 doesn't hold an error code and we don't need to * restart the syscall, so we can clear the "restart" flag here. */ restart = 0; if (signr <= 0) break; if (unlikely(restart)) { switch (errno) { case ERESTART_RESTARTBLOCK: case ERESTARTNOHAND: scr->pt.r8 = EINTR; /* note: scr->pt.r10 is already -1 */ break; case ERESTARTSYS: if ((ka.sa.sa_flags & SA_RESTART) == 0) { scr->pt.r8 = EINTR; /* note: scr->pt.r10 is already -1 */ break; } case ERESTARTNOINTR: ia64_decrement_ip(&scr->pt); restart = 0; /* don't restart twice if handle_signal() fails... */ } } /* * Whee! Actually deliver the signal. If the delivery failed, we need to * continue to iterate in this loop so we can deliver the SIGSEGV... */ if (handle_signal(signr, &ka, &info, oldset, scr)) { /* * A signal was successfully delivered; the saved * sigmask will have been stored in the signal frame, * and will be restored by sigreturn, so we can simply * clear the TS_RESTORE_SIGMASK flag. */ current_thread_info()->status &= ~TS_RESTORE_SIGMASK; return; } } /* Did we come from a system call? 
*/ if (restart) { /* Restart the system call - no handlers present */ if (errno == ERESTARTNOHAND || errno == ERESTARTSYS || errno == ERESTARTNOINTR || errno == ERESTART_RESTARTBLOCK) { /* * Note: the syscall number is in r15 which is saved in * pt_regs so all we need to do here is adjust ip so that * the "break" instruction gets re-executed. */ ia64_decrement_ip(&scr->pt); if (errno == ERESTART_RESTARTBLOCK) scr->pt.r15 = __NR_restart_syscall; } } /* if there's no signal to deliver, we just put the saved sigmask * back */ if (current_thread_info()->status & TS_RESTORE_SIGMASK) { current_thread_info()->status &= ~TS_RESTORE_SIGMASK; sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); } }
Snuzzo/B14CKB1RD_kernel_m8
arch/ia64/kernel/signal.c
C
gpl-2.0
18,467
/*
 * Copyright (c) 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Thomas Eaton <thomas.g.eaton@intel.com>
 * Scott Rowe <scott.m.rowe@intel.com>
 */

#include "mdfld_output.h"
#include "mdfld_dsi_dpi.h"
#include "mdfld_dsi_output.h"
#include "tc35876x-dsi-lvds.h"

int mdfld_get_panel_type(struct drm_device *dev, int pipe)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	return dev_priv->mdfld_panel_id;
}

static void mdfld_init_panel(struct drm_device *dev, int mipi_pipe, int p_type)
{
	switch (p_type) {
	case TPO_VID:
		mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tpo_vid_funcs);
		break;
	case TC35876X:
		tc35876x_init(dev);
		mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tc35876x_funcs);
		break;
	case TMD_VID:
		mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tmd_vid_funcs);
		break;
	case HDMI:
		/* if (dev_priv->mdfld_hdmi_present)
			mdfld_hdmi_init(dev, &dev_priv->mode_dev); */
		break;
	}
}

int mdfld_output_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	/* FIXME: hardcoded for now */
	dev_priv->mdfld_panel_id = TC35876X;
	/* MIPI panel 1 */
	mdfld_init_panel(dev, 0, dev_priv->mdfld_panel_id);
	/* HDMI panel */
	mdfld_init_panel(dev, 1, HDMI);
	return 0;
}
Hellybean/android_kernel_samsung_jf
drivers/gpu/drm/gma500/mdfld_output.c
C
gpl-2.0
2,343
/*
 * Copyright (c) Contributors, http://opensimulator.org/
 * See CONTRIBUTORS.TXT for a full list of copyright holders.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the OpenSimulator Project nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE DEVELOPERS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

using System;
using System.IO;
using System.Collections;
using System.Collections.Generic;
using System.Text;
using OpenSim.Framework;

namespace OpenSim.Region.UserStatistics
{
    public class Prototype_distributor : IStatsController
    {
        private string prototypejs = string.Empty;

        public string ReportName
        {
            get { return ""; }
        }

        public Hashtable ProcessModel(Hashtable pParams)
        {
            Hashtable pResult = new Hashtable();
            if (prototypejs.Length == 0)
            {
                StreamReader fs = new StreamReader(new FileStream(Util.dataDir() + "/data/prototype.js", FileMode.Open));
                prototypejs = fs.ReadToEnd();
                fs.Close();
                fs.Dispose();
            }
            pResult["js"] = prototypejs;
            return pResult;
        }

        public string RenderView(Hashtable pModelResult)
        {
            return pModelResult["js"].ToString();
        }
    }
}
justasabc/wifi
OpenSim/Region/UserStatistics/Prototype_distributor.cs
C#
gpl-2.0
2,622
tinymce.PluginManager.add("media",function(e,t){function n(e){return-1!=e.indexOf(".mp3")?"audio/mpeg":-1!=e.indexOf(".wav")?"audio/wav":-1!=e.indexOf(".mp4")?"video/mp4":-1!=e.indexOf(".webm")?"video/webm":-1!=e.indexOf(".ogg")?"video/ogg":""}function i(){function t(e){var t,o,a,r;t=n.find("#width")[0],o=n.find("#height")[0],a=t.value(),r=o.value(),n.find("#constrain")[0].checked()&&i&&c&&a&&r&&(e.control==t?(r=Math.round(a/i*r),o.value(r)):(a=Math.round(r/c*a),t.value(a))),i=a,c=r}var n,i,c,s;s=l(e.selection.getNode()),i=s.width,c=s.height,n=e.windowManager.open({title:"Insert media",data:s,bodyType:"tabpanel",body:[{title:"General",type:"form",onShowTab:function(){this.fromJSON(r(this.next().find("#embed").value()))},items:[{name:"source1",type:"filepicker",filetype:"image",size:40,autofocus:!0,label:"Source"},{name:"source2",type:"filepicker",filetype:"image",size:40,label:"Alternative source"},{name:"poster",type:"filepicker",filetype:"image",size:40,label:"Poster"},{type:"container",label:"Dimensions",layout:"flex",direction:"row",align:"center",spacing:5,items:[{name:"width",type:"textbox",maxLength:3,size:3,onchange:t},{type:"label",text:"x"},{name:"height",type:"textbox",maxLength:3,size:3,onchange:t},{name:"constrain",type:"checkbox",checked:!0,text:"Constrain proportions"}]}]},{title:"Embed",type:"panel",layout:"flex",direction:"column",align:"stretch",padding:10,spacing:10,onShowTab:function(){this.find("#embed").value(a(this.parent().toJSON()))},items:[{type:"label",text:"Paste your embed code below:"},{type:"textbox",flex:1,name:"embed",value:o(),multiline:!0,label:"Source"}]}],onSubmit:function(){e.insertContent(a(this.toJSON()))}})}function o(){var t=e.selection.getNode();return t.getAttribute("data-mce-object")?e.selection.getContent():void 0}function a(i){var o="";return i.source1||(tinymce.extend(i,r(i.embed)),i.source1)?(i.source1=e.convertURL(i.source1,"source"),i.source2=e.convertURL(i.source2,"source"),i.source1mime=n(i.source1),i.source2mime=n(i.source2),i.poster=e.convertURL(i.poster,"poster"),i.flashPlayerUrl=e.convertURL(t+"/moxieplayer.swf","movie"),i.embed?o=c(i.embed,i,!0):(tinymce.each(s,function(e){var t,n,o;if(t=e.regex.exec(i.source1)){for(o=e.url,n=0;t[n];n++)o=o.replace("$"+n,function(){return t[n]});i.source1=o,i.type=e.type,i.width=e.w,i.height=e.h}}),i.width=i.width||300,i.height=i.height||150,tinymce.each(i,function(t,n){i[n]=e.dom.encode(t)}),"iframe"==i.type?o+='<iframe src="'+i.source1+'" width="'+i.width+'" height="'+i.height+'"></iframe>':-1!=i.source1mime.indexOf("audio")?e.settings.audio_template_callback?o=e.settings.audio_template_callback(i):o+='<audio controls="controls" src="'+i.source1+'">'+(i.source2?'\n<source src="'+i.source2+'"'+(i.source2mime?' type="'+i.source2mime+'"':"")+" />\n":"")+"</audio>":o=e.settings.video_template_callback?e.settings.video_template_callback(i):'<video width="'+i.width+'" height="'+i.height+'"'+(i.poster?' poster="'+i.poster+'"':"")+' controls="controls">\n'+'<source src="'+i.source1+'"'+(i.source1mime?' type="'+i.source1mime+'"':"")+" />\n"+(i.source2?'<source src="'+i.source2+'"'+(i.source2mime?' 
type="'+i.source2mime+'"':"")+" />\n":"")+"</video>"),o):""}function r(e){var t={};return new tinymce.html.SaxParser({validate:!1,special:"script,noscript",start:function(e,n){t.source1||"param"!=e||(t.source1=n.map.movie),("iframe"==e||"object"==e||"embed"==e||"video"==e||"audio"==e)&&(t=tinymce.extend(n.map,t)),"source"==e&&(t.source1?t.source2||(t.source2=n.map.src):t.source1=n.map.src)}}).parse(e),t.source1=t.source1||t.src||t.data,t.source2=t.source2||"",t.poster=t.poster||"",t}function l(t){return t.getAttribute("data-mce-object")?r(e.serializer.serialize(t,{selection:!0})):{}}function c(e,t,n){function i(e,t){var n,i,o,a;for(n in t)if(o=""+t[n],e.map[n])for(i=e.length;i--;)a=e[i],a.name==n&&(o?(e.map[n]=o,a.value=o):(delete e.map[n],e.splice(i,1)));else o&&(e.push({name:n,value:o}),e.map[n]=o)}var o=new tinymce.html.Writer,a=0;return new tinymce.html.SaxParser({validate:!1,special:"script,noscript",comment:function(e){o.comment(e)},cdata:function(e){o.cdata(e)},text:function(e,t){o.text(e,t)},start:function(e,r,l){switch(e){case"video":case"object":case"img":case"iframe":i(r,{width:t.width,height:t.height})}if(n)switch(e){case"video":i(r,{poster:t.poster,src:""}),t.source2&&i(r,{src:""});break;case"iframe":i(r,{src:t.source1});break;case"source":if(a++,2>=a&&(i(r,{src:t["source"+a],type:t["source"+a+"mime"]}),!t["source"+a]))return}o.start(e,r,l)},end:function(e){if("video"==e&&n)for(var r=1;2>=r;r++)if(t["source"+r]){var l=[];l.map={},r>a&&(i(l,{src:t["source"+r],type:t["source"+r+"mime"]}),o.start("source",l,!0))}o.end(e)}},new tinymce.html.Schema({})).parse(e),o.getContent()}var s=[{regex:/youtu\.be\/([a-z1-9.-_]+)/,type:"iframe",w:425,h:350,url:"http://www.youtube.com/embed/$1"},{regex:/youtube\.com(.+)v=([^&]+)/,type:"iframe",w:425,h:350,url:"http://www.youtube.com/embed/$2"},{regex:/vimeo\.com\/([0-9]+)/,type:"iframe",w:425,h:350,url:"http://player.vimeo.com/video/$1?title=0&byline=0&portrait=0&color=8dc7dc"},{regex:/maps\.google\.([a-z]{2,3})\/maps\/(.+)msid=(.+)/,type:"iframe",w:425,h:350,url:'http://maps.google.com/maps/ms?msid=$2&output=embed"'}];e.on("ResolveName",function(e){var t;(t=e.target.getAttribute("data-mce-object"))&&(e.name=t)}),e.on("preInit",function(){var t=e.schema.getSpecialElements();tinymce.each("video audio iframe object".split(" "),function(e){t[e]=RegExp("</"+e+"[^>]*>","gi")}),e.schema.addValidElements("object[id|style|width|height|classid|codebase|*],embed[id|style|width|height|type|src|*],video[*],audio[*]");var n=e.schema.getBoolAttrs();tinymce.each("webkitallowfullscreen mozallowfullscreen allowfullscreen".split(" "),function(e){n[e]={}}),e.parser.addNodeFilter("iframe,video,audio,object,embed",function(t,n){for(var i,o,a,r,l,c,s,d=t.length;d--;){for(o=t[d],a=new tinymce.html.Node("img",1),a.shortEnded=!0,c=o.attributes,i=c.length;i--;)r=c[i].name,l=c[i].value,"width"!==r&&"height"!==r&&"style"!==r&&(("data"==r||"src"==r)&&(l=e.convertURL(l,r)),a.attr("data-mce-p-"+r,l));s=o.firstChild&&o.firstChild.value,s&&(a.attr("data-mce-html",escape(s)),a.firstChild=null),a.attr({width:o.attr("width")||"300",height:o.attr("height")||("audio"==n?"30":"150"),style:o.attr("style"),src:tinymce.Env.transparentSrc,"data-mce-object":n,"class":"mce-object mce-object-"+n}),o.replace(a)}}),e.serializer.addAttributeFilter("data-mce-object",function(e,t){for(var n,i,o,a,r,l,c=e.length;c--;){for(n=e[c],i=new 
tinymce.html.Node(n.attr(t),1),"audio"!=n.attr(t)&&i.attr({width:n.attr("width"),height:n.attr("height")}),i.attr({style:n.attr("style")}),a=n.attributes,o=a.length;o--;){var s=a[o].name;0===s.indexOf("data-mce-p-")&&i.attr(s.substr(11),a[o].value)}r=n.attr("data-mce-html"),r&&(l=new tinymce.html.Node("#text",3),l.raw=!0,l.value=unescape(r),i.append(l)),n.replace(i)}})}),e.on("ObjectSelected",function(e){"audio"==e.target.getAttribute("data-mce-object")&&e.preventDefault()}),e.on("objectResized",function(e){var t,n=e.target;n.getAttribute("data-mce-object")&&(t=n.getAttribute("data-mce-html"),t&&(t=unescape(t),n.setAttribute("data-mce-html",escape(c(t,{width:e.width,height:e.height})))))}),e.addButton("media",{tooltip:"Insert/edit video",onclick:i,stateSelector:"img[data-mce-object=video]"}),e.addMenuItem("media",{icon:"media",text:"Insert video",onclick:i,context:"insert",prependToContext:!0})});
nachopavon/redprofesional
mod/tinymce/vendor/tinymce/js/tinymce/plugins/media/plugin.min.js
JavaScript
gpl-2.0
7,438
/* Implementation of the SUM intrinsic Copyright 2002 Free Software Foundation, Inc. Contributed by Paul Brook <paul@nowt.org> This file is part of the GNU Fortran 95 runtime library (libgfortran). Libgfortran is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. In addition to the permissions in the GNU General Public License, the Free Software Foundation gives you unlimited permission to link the compiled version of this file into combinations with other programs, and to distribute those combinations without any restriction coming from the use of this file. (The General Public License restrictions do apply in other respects; for example, they cover modification of the file, and distribution when not linked into a combine executable.) Libgfortran is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with libgfortran; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "config.h" #include <stdlib.h> #include <assert.h> #include "libgfortran.h" #if defined (HAVE_GFC_REAL_4) && defined (HAVE_GFC_REAL_4) extern void sum_r4 (gfc_array_r4 * const restrict, gfc_array_r4 * const restrict, const index_type * const restrict); export_proto(sum_r4); void sum_r4 (gfc_array_r4 * const restrict retarray, gfc_array_r4 * const restrict array, const index_type * const restrict pdim) { index_type count[GFC_MAX_DIMENSIONS]; index_type extent[GFC_MAX_DIMENSIONS]; index_type sstride[GFC_MAX_DIMENSIONS]; index_type dstride[GFC_MAX_DIMENSIONS]; const GFC_REAL_4 * restrict base; GFC_REAL_4 * restrict dest; index_type rank; index_type n; index_type len; index_type delta; index_type dim; /* Make dim zero based to avoid confusion. */ dim = (*pdim) - 1; rank = GFC_DESCRIPTOR_RANK (array) - 1; len = array->dim[dim].ubound + 1 - array->dim[dim].lbound; delta = array->dim[dim].stride; for (n = 0; n < dim; n++) { sstride[n] = array->dim[n].stride; extent[n] = array->dim[n].ubound + 1 - array->dim[n].lbound; if (extent[n] < 0) extent[n] = 0; } for (n = dim; n < rank; n++) { sstride[n] = array->dim[n + 1].stride; extent[n] = array->dim[n + 1].ubound + 1 - array->dim[n + 1].lbound; if (extent[n] < 0) extent[n] = 0; } if (retarray->data == NULL) { size_t alloc_size; for (n = 0; n < rank; n++) { retarray->dim[n].lbound = 0; retarray->dim[n].ubound = extent[n]-1; if (n == 0) retarray->dim[n].stride = 1; else retarray->dim[n].stride = retarray->dim[n-1].stride * extent[n-1]; } retarray->offset = 0; retarray->dtype = (array->dtype & ~GFC_DTYPE_RANK_MASK) | rank; alloc_size = sizeof (GFC_REAL_4) * retarray->dim[rank-1].stride * extent[rank-1]; if (alloc_size == 0) { /* Make sure we have a zero-sized array. 
*/ retarray->dim[0].lbound = 0; retarray->dim[0].ubound = -1; return; } else retarray->data = internal_malloc_size (alloc_size); } else { if (rank != GFC_DESCRIPTOR_RANK (retarray)) runtime_error ("rank of return array incorrect"); } for (n = 0; n < rank; n++) { count[n] = 0; dstride[n] = retarray->dim[n].stride; if (extent[n] <= 0) len = 0; } base = array->data; dest = retarray->data; while (base) { const GFC_REAL_4 * restrict src; GFC_REAL_4 result; src = base; { result = 0; if (len <= 0) *dest = 0; else { for (n = 0; n < len; n++, src += delta) { result += *src; } *dest = result; } } /* Advance to the next element. */ count[0]++; base += sstride[0]; dest += dstride[0]; n = 0; while (count[n] == extent[n]) { /* When we get to the end of a dimension, reset it and increment the next dimension. */ count[n] = 0; /* We could precalculate these products, but this is a less frequently used path so probably not worth it. */ base -= sstride[n] * extent[n]; dest -= dstride[n] * extent[n]; n++; if (n == rank) { /* Break out of the look. */ base = NULL; break; } else { count[n]++; base += sstride[n]; dest += dstride[n]; } } } } extern void msum_r4 (gfc_array_r4 * const restrict, gfc_array_r4 * const restrict, const index_type * const restrict, gfc_array_l4 * const restrict); export_proto(msum_r4); void msum_r4 (gfc_array_r4 * const restrict retarray, gfc_array_r4 * const restrict array, const index_type * const restrict pdim, gfc_array_l4 * const restrict mask) { index_type count[GFC_MAX_DIMENSIONS]; index_type extent[GFC_MAX_DIMENSIONS]; index_type sstride[GFC_MAX_DIMENSIONS]; index_type dstride[GFC_MAX_DIMENSIONS]; index_type mstride[GFC_MAX_DIMENSIONS]; GFC_REAL_4 * restrict dest; const GFC_REAL_4 * restrict base; const GFC_LOGICAL_4 * restrict mbase; int rank; int dim; index_type n; index_type len; index_type delta; index_type mdelta; dim = (*pdim) - 1; rank = GFC_DESCRIPTOR_RANK (array) - 1; len = array->dim[dim].ubound + 1 - array->dim[dim].lbound; if (len <= 0) return; delta = array->dim[dim].stride; mdelta = mask->dim[dim].stride; for (n = 0; n < dim; n++) { sstride[n] = array->dim[n].stride; mstride[n] = mask->dim[n].stride; extent[n] = array->dim[n].ubound + 1 - array->dim[n].lbound; if (extent[n] < 0) extent[n] = 0; } for (n = dim; n < rank; n++) { sstride[n] = array->dim[n + 1].stride; mstride[n] = mask->dim[n + 1].stride; extent[n] = array->dim[n + 1].ubound + 1 - array->dim[n + 1].lbound; if (extent[n] < 0) extent[n] = 0; } if (retarray->data == NULL) { size_t alloc_size; for (n = 0; n < rank; n++) { retarray->dim[n].lbound = 0; retarray->dim[n].ubound = extent[n]-1; if (n == 0) retarray->dim[n].stride = 1; else retarray->dim[n].stride = retarray->dim[n-1].stride * extent[n-1]; } alloc_size = sizeof (GFC_REAL_4) * retarray->dim[rank-1].stride * extent[rank-1]; retarray->offset = 0; retarray->dtype = (array->dtype & ~GFC_DTYPE_RANK_MASK) | rank; if (alloc_size == 0) { /* Make sure we have a zero-sized array. */ retarray->dim[0].lbound = 0; retarray->dim[0].ubound = -1; return; } else retarray->data = internal_malloc_size (alloc_size); } else { if (rank != GFC_DESCRIPTOR_RANK (retarray)) runtime_error ("rank of return array incorrect"); } for (n = 0; n < rank; n++) { count[n] = 0; dstride[n] = retarray->dim[n].stride; if (extent[n] <= 0) return; } dest = retarray->data; base = array->data; mbase = mask->data; if (GFC_DESCRIPTOR_SIZE (mask) != 4) { /* This allows the same loop to be used for all logical types. 
*/ assert (GFC_DESCRIPTOR_SIZE (mask) == 8); for (n = 0; n < rank; n++) mstride[n] <<= 1; mdelta <<= 1; mbase = (GFOR_POINTER_L8_TO_L4 (mbase)); } while (base) { const GFC_REAL_4 * restrict src; const GFC_LOGICAL_4 * restrict msrc; GFC_REAL_4 result; src = base; msrc = mbase; { result = 0; if (len <= 0) *dest = 0; else { for (n = 0; n < len; n++, src += delta, msrc += mdelta) { if (*msrc) result += *src; } *dest = result; } } /* Advance to the next element. */ count[0]++; base += sstride[0]; mbase += mstride[0]; dest += dstride[0]; n = 0; while (count[n] == extent[n]) { /* When we get to the end of a dimension, reset it and increment the next dimension. */ count[n] = 0; /* We could precalculate these products, but this is a less frequently used path so probably not worth it. */ base -= sstride[n] * extent[n]; mbase -= mstride[n] * extent[n]; dest -= dstride[n] * extent[n]; n++; if (n == rank) { /* Break out of the look. */ base = NULL; break; } else { count[n]++; base += sstride[n]; mbase += mstride[n]; dest += dstride[n]; } } } } extern void ssum_r4 (gfc_array_r4 * const restrict, gfc_array_r4 * const restrict, const index_type * const restrict, GFC_LOGICAL_4 *); export_proto(ssum_r4); void ssum_r4 (gfc_array_r4 * const restrict retarray, gfc_array_r4 * const restrict array, const index_type * const restrict pdim, GFC_LOGICAL_4 * mask) { index_type rank; index_type n; index_type dstride; GFC_REAL_4 *dest; if (*mask) { sum_r4 (retarray, array, pdim); return; } rank = GFC_DESCRIPTOR_RANK (array); if (rank <= 0) runtime_error ("Rank of array needs to be > 0"); if (retarray->data == NULL) { retarray->dim[0].lbound = 0; retarray->dim[0].ubound = rank-1; retarray->dim[0].stride = 1; retarray->dtype = (retarray->dtype & ~GFC_DTYPE_RANK_MASK) | 1; retarray->offset = 0; retarray->data = internal_malloc_size (sizeof (GFC_REAL_4) * rank); } else { if (GFC_DESCRIPTOR_RANK (retarray) != 1) runtime_error ("rank of return array does not equal 1"); if (retarray->dim[0].ubound + 1 - retarray->dim[0].lbound != rank) runtime_error ("dimension of return array incorrect"); } dstride = retarray->dim[0].stride; dest = retarray->data; for (n = 0; n < rank; n++) dest[n * dstride] = 0 ; } #endif
bathtub/llvm-gcc
libgfortran/generated/sum_r4.c
C
gpl-2.0
10,546
/* Copyright 2004,2007,2010 ENSEIRB, INRIA & CNRS ** ** This file is part of the Scotch software package for static mapping, ** graph partitioning and sparse matrix ordering. ** ** This software is governed by the CeCILL-C license under French law ** and abiding by the rules of distribution of free software. You can ** use, modify and/or redistribute the software under the terms of the ** CeCILL-C license as circulated by CEA, CNRS and INRIA at the following ** URL: "http://www.cecill.info". ** ** As a counterpart to the access to the source code and rights to copy, ** modify and redistribute granted by the license, users are provided ** only with a limited warranty and the software's author, the holder of ** the economic rights, and the successive licensors have only limited ** liability. ** ** In this respect, the user's attention is drawn to the risks associated ** with loading, using, modifying and/or developing or reproducing the ** software by the user in light of its specific status of free software, ** that may mean that it is complicated to manipulate, and that also ** therefore means that it is reserved for developers and experienced ** professionals having in-depth computer knowledge. Users are therefore ** encouraged to load and test the software's suitability as regards ** their requirements in conditions enabling the security of their ** systems and/or data to be ensured and, more generally, to use and ** operate it in the same conditions as regards security. ** ** The fact that you are presently reading this means that you have had ** knowledge of the CeCILL-C license and that you accept its terms. */ /************************************************************/ /** **/ /** NAME : hgraph_induce_edge.c **/ /** **/ /** AUTHOR : Francois PELLEGRINI **/ /** **/ /** FUNCTION : This commodity file contains the edge **/ /** arrays building subroutine which is **/ /** duplicated, with minor modifications, **/ /** into hgraph_induce.c **/ /** **/ /** DATES : # Version 4.0 : from : 10 jan 2002 **/ /** to 17 jan 2003 **/ /** # Version 5.0 : from : 19 dec 2006 **/ /** to 19 dec 2006 **/ /** # Version 5.1 : from : 24 oct 2010 **/ /** to 24 oct 2010 **/ /** **/ /************************************************************/ static void HGRAPHINDUCE2NAME ( const Hgraph * restrict const orggrafptr, /* Pointer to original halo graph */ Gnum * restrict const orgindxtax, /* Array of numbers of selected vertices */ Hgraph * restrict const indgrafptr) /* Pointer to induced halo graph */ { Gnum indvertnum; /* Number of current induced vertex */ Gnum indvertnnd; /* Number of after-last induced (halo) vertex */ Gnum indvelosum; /* Overall induced vertex load */ Gnum indedgenum; /* Number of current edge in induced halo subgraph */ Gnum indenohnbr; /* Number of non-halo edges in halo subgraph */ Gnum inddegrmax; /* Maximum degree */ #ifdef SCOTCH_DEBUG_HGRAPH2 Gnum indedgenbs; /* Revised number of edges in halo subgraph */ #endif /* SCOTCH_DEBUG_HGRAPH2 */ #ifdef HGRAPHINDUCE2L /* If edge loads present */ Gnum indedlosum; Gnum indenohsum; indedlosum = indenohsum = 0; #endif /* HGRAPHINDUCE2L */ inddegrmax = 0; for (indvertnum = indedgenum = indgrafptr->s.baseval, indvelosum = indenohnbr = 0, indvertnnd = indgrafptr->vnohnnd; /* For all non-halo vertices */ indvertnum < indgrafptr->vnohnnd; indvertnum ++) { Gnum orgvertnum; /* Number of current vertex in original halo graph */ Gnum orgedgenum; /* Number of current edge in original halo graph */ Gnum indedgennd; /* Index of after-last edge position in 
edge array */ Gnum indedhdnum; /* Index of after-last edge linking to non-halo vertices */ Gnum inddegrval; orgvertnum = indgrafptr->s.vnumtax[indvertnum]; indgrafptr->s.verttax[indvertnum] = indedgenum; indenohnbr -= indedgenum; /* Subtract base of non-halo edges */ if (indgrafptr->s.velotax != NULL) { /* If graph has vertex weights */ indvelosum += /* Accumulate vertex loads */ indgrafptr->s.velotax[indvertnum] = orggrafptr->s.velotax[orgvertnum]; } inddegrval = orggrafptr->s.vendtax[orgvertnum] - orggrafptr->s.verttax[orgvertnum]; /* Get degree of non-halo node */ if (inddegrmax < inddegrval) /* Keep maximum degree */ inddegrmax = inddegrval; for (orgedgenum = orggrafptr->s.verttax[orgvertnum], indedhdnum = indedgennd = indedgenum + inddegrval; orgedgenum < orggrafptr->s.vendtax[orgvertnum]; orgedgenum ++) { Gnum orgvertend; /* Number of current end vertex in original halo graph */ Gnum indvertend; /* Number of current end vertex in induced halo subgraph */ orgvertend = orggrafptr->s.edgetax[orgedgenum]; indvertend = orgindxtax[orgvertend]; if (indvertend == ~0) { /* If neighbor is yet undeclared halo vertex */ indgrafptr->s.vnumtax[indvertnnd] = orgvertend; /* Add number of halo vertex to array */ indvertend = orgindxtax[orgvertend] = indvertnnd ++; /* Get induced number of vertex */ } if (indvertend >= indgrafptr->vnohnnd) { /* If neighbor is halo vertex */ indedhdnum --; /* Add neighbor at end of edge sub-array */ indgrafptr->s.edgetax[indedhdnum] = indvertend; HGRAPHINDUCE2EDLOINIT (indedhdnum); } else { /* If heighbor is non-halo vertex */ indgrafptr->s.edgetax[indedgenum] = indvertend; /* Add neighbor at beginning of edge sub-array */ HGRAPHINDUCE2EDLOINIT (indedgenum); HGRAPHINDUCE2ENOHINIT; indedgenum ++; } } #ifdef SCOTCH_DEBUG_HGRAPH2 if (indedgenum != indedhdnum) { errorPrint (STRINGIFY (HGRAPHINDUCE2NAME) ": internal error (1)"); return; } #endif /* SCOTCH_DEBUG_HGRAPH2 */ indenohnbr += indedhdnum; /* Add position to number of non-halo edges */ indgrafptr->vnhdtax[indvertnum] = indedhdnum; /* Set end of non-halo sub-array */ indedgenum = indedgennd; /* Point to next free space in edge array */ } indgrafptr->vnlosum = (indgrafptr->s.velotax != NULL) ? 
indvelosum : indgrafptr->vnohnbr; indgrafptr->enohnbr = indenohnbr; #ifdef SCOTCH_DEBUG_HGRAPH2 indedgenbs = 2 * (indedgenum - indgrafptr->s.baseval) - indenohnbr; /* Compute total number of edges */ #endif /* SCOTCH_DEBUG_HGRAPH2 */ #ifdef HGRAPHINDUCE2L /* If edge loads present */ { Gnum * indedgetab; /* Dummy area to recieve un-based edgetab */ Gnum * indedlotab; /* Save of old position of edgetab array */ #ifndef SCOTCH_DEBUG_HGRAPH2 Gnum indedgenbs; /* Revised number of edges in halo subgraph */ indedgenbs = 2 * (indedgenum - indgrafptr->s.baseval) - indenohnbr; /* Compute total number of edges */ #endif /* SCOTCH_DEBUG_HGRAPH2 */ indedlotab = indgrafptr->s.edlotax + indgrafptr->s.baseval; /* Save old offset of move area */ memOffset (indgrafptr->s.edgetax + indgrafptr->s.baseval, /* Compute new offsets */ &indedgetab, (size_t) indedgenbs, &indgrafptr->s.edlotax, (size_t) indedgenbs, NULL); memMov (indgrafptr->s.edlotax, indedlotab, (indedgenum - indgrafptr->s.baseval) * sizeof (Gnum)); /* Move already existing edge load array */ indgrafptr->s.edlotax -= indgrafptr->s.baseval; } #endif /* HGRAPHINDUCE2L */ for ( ; indvertnum < indvertnnd; indvertnum ++) { /* For all halo vertices found during first pass */ Gnum orgvertnum; /* Number of current vertex in original halo graph */ Gnum orgedgenum; /* Number of current edge in original halo graph */ orgvertnum = indgrafptr->s.vnumtax[indvertnum]; indgrafptr->s.verttax[indvertnum] = indedgenum; if (indgrafptr->s.velotax != NULL) { /* If graph has vertex weights */ indvelosum += /* Accumulate vertex loads */ indgrafptr->s.velotax[indvertnum] = orggrafptr->s.velotax[orgvertnum]; } for (orgedgenum = orggrafptr->s.verttax[orgvertnum]; orgedgenum < orggrafptr->s.vendtax[orgvertnum]; orgedgenum ++) { Gnum orgvertend; /* Number of current end vertex in original halo graph */ Gnum indvertend; /* Number of current end vertex in induced halo subgraph */ orgvertend = orggrafptr->s.edgetax[orgedgenum]; indvertend = orgindxtax[orgvertend]; if ((indvertend != ~0) && /* If end vertex in induced halo subgraph */ (indvertend < indgrafptr->vnohnnd)) { /* And in its non-halo part only */ indgrafptr->s.edgetax[indedgenum] = indvertend; HGRAPHINDUCE2EDLOINIT (indedgenum); indedgenum ++; } } if (inddegrmax < (indedgenum - indgrafptr->s.verttax[indvertnum])) inddegrmax = (indedgenum - indgrafptr->s.verttax[indvertnum]); } #ifdef SCOTCH_DEBUG_HGRAPH2 if ((indedgenum - indgrafptr->s.baseval) != indedgenbs) { errorPrint (STRINGIFY (HGRAPHINDUCE2NAME) ": internal error (2)"); return; } #endif /* SCOTCH_DEBUG_HGRAPH2 */ indgrafptr->s.verttax[indvertnnd] = indedgenum; /* Set end of compact vertex array */ indgrafptr->s.vertnbr = indvertnnd - indgrafptr->s.baseval; indgrafptr->s.vertnnd = indvertnnd; indgrafptr->s.velosum = (indgrafptr->s.velotax != NULL) ? indvelosum : indgrafptr->s.vertnbr; indgrafptr->s.edgenbr = indedgenum - indgrafptr->s.baseval; /* Set actual number of edges */ indgrafptr->s.edlosum = HGRAPHINDUCE2EDLOSUM; indgrafptr->s.degrmax = inddegrmax; indgrafptr->enohsum = HGRAPHINDUCE2ENOHSUM; }
Chancylin/specfem2d
src/meshfem2D/scotch_5.1.12b/src/libscotch/hgraph_induce_edge.c
C
gpl-2.0
11,158
/**************************************************************************** * * Filename: cpia2_v4l.c * * Copyright 2001, STMicrolectronics, Inc. * Contact: steve.miller@st.com * Copyright 2001,2005, Scott J. Bertin <scottbertin@yahoo.com> * * Description: * This is a USB driver for CPia2 based video cameras. * The infrastructure of this driver is based on the cpia usb driver by * Jochen Scharrlach and Johannes Erdfeldt. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Stripped of 2.4 stuff ready for main kernel submit by * Alan Cox <alan@redhat.com> ****************************************************************************/ #include <linux/version.h> #include <linux/module.h> #include <linux/time.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/moduleparam.h> #include "cpia2.h" #include "cpia2dev.h" //#define _CPIA2_DEBUG_ #define MAKE_STRING_1(x) #x #define MAKE_STRING(x) MAKE_STRING_1(x) static int video_nr = -1; module_param(video_nr, int, 0); MODULE_PARM_DESC(video_nr,"video device to register (0=/dev/video0, etc)"); static int buffer_size = 68*1024; module_param(buffer_size, int, 0); MODULE_PARM_DESC(buffer_size, "Size for each frame buffer in bytes (default 68k)"); static int num_buffers = 3; module_param(num_buffers, int, 0); MODULE_PARM_DESC(num_buffers, "Number of frame buffers (1-" MAKE_STRING(VIDEO_MAX_FRAME) ", default 3)"); static int alternate = DEFAULT_ALT; module_param(alternate, int, 0); MODULE_PARM_DESC(alternate, "USB Alternate (" MAKE_STRING(USBIF_ISO_1) "-" MAKE_STRING(USBIF_ISO_6) ", default " MAKE_STRING(DEFAULT_ALT) ")"); static int flicker_freq = 60; module_param(flicker_freq, int, 0); MODULE_PARM_DESC(flicker_freq, "Flicker frequency (" MAKE_STRING(50) "or" MAKE_STRING(60) ", default " MAKE_STRING(60) ")"); static int flicker_mode = NEVER_FLICKER; module_param(flicker_mode, int, 0); MODULE_PARM_DESC(flicker_mode, "Flicker supression (" MAKE_STRING(NEVER_FLICKER) "or" MAKE_STRING(ANTI_FLICKER_ON) ", default " MAKE_STRING(NEVER_FLICKER) ")"); MODULE_AUTHOR("Steve Miller (STMicroelectronics) <steve.miller@st.com>"); MODULE_DESCRIPTION("V4L-driver for STMicroelectronics CPiA2 based cameras"); MODULE_SUPPORTED_DEVICE("video"); MODULE_LICENSE("GPL"); #define ABOUT "V4L-Driver for Vision CPiA2 based cameras" #ifndef VID_HARDWARE_CPIA2 #error "VID_HARDWARE_CPIA2 should have been defined in linux/videodev.h" #endif struct control_menu_info { int value; char name[32]; }; static struct control_menu_info framerate_controls[] = { { CPIA2_VP_FRAMERATE_6_25, "6.25 fps" }, { CPIA2_VP_FRAMERATE_7_5, "7.5 fps" }, { CPIA2_VP_FRAMERATE_12_5, "12.5 fps" }, { CPIA2_VP_FRAMERATE_15, "15 fps" }, { CPIA2_VP_FRAMERATE_25, "25 fps" }, { CPIA2_VP_FRAMERATE_30, "30 fps" }, }; #define NUM_FRAMERATE_CONTROLS (sizeof(framerate_controls)/sizeof(framerate_controls[0])) static struct control_menu_info flicker_controls[] = { 
{ NEVER_FLICKER, "Off" }, { FLICKER_50, "50 Hz" }, { FLICKER_60, "60 Hz" }, }; #define NUM_FLICKER_CONTROLS (sizeof(flicker_controls)/sizeof(flicker_controls[0])) static struct control_menu_info lights_controls[] = { { 0, "Off" }, { 64, "Top" }, { 128, "Bottom" }, { 192, "Both" }, }; #define NUM_LIGHTS_CONTROLS (sizeof(lights_controls)/sizeof(lights_controls[0])) #define GPIO_LIGHTS_MASK 192 static struct v4l2_queryctrl controls[] = { { .id = V4L2_CID_BRIGHTNESS, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Brightness", .minimum = 0, .maximum = 255, .step = 1, .default_value = DEFAULT_BRIGHTNESS, }, { .id = V4L2_CID_CONTRAST, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Contrast", .minimum = 0, .maximum = 255, .step = 1, .default_value = DEFAULT_CONTRAST, }, { .id = V4L2_CID_SATURATION, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Saturation", .minimum = 0, .maximum = 255, .step = 1, .default_value = DEFAULT_SATURATION, }, { .id = V4L2_CID_HFLIP, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Mirror Horizontally", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, }, { .id = V4L2_CID_VFLIP, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Flip Vertically", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, }, { .id = CPIA2_CID_TARGET_KB, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Target KB", .minimum = 0, .maximum = 255, .step = 1, .default_value = DEFAULT_TARGET_KB, }, { .id = CPIA2_CID_GPIO, .type = V4L2_CTRL_TYPE_INTEGER, .name = "GPIO", .minimum = 0, .maximum = 255, .step = 1, .default_value = 0, }, { .id = CPIA2_CID_FLICKER_MODE, .type = V4L2_CTRL_TYPE_MENU, .name = "Flicker Reduction", .minimum = 0, .maximum = NUM_FLICKER_CONTROLS-1, .step = 1, .default_value = 0, }, { .id = CPIA2_CID_FRAMERATE, .type = V4L2_CTRL_TYPE_MENU, .name = "Framerate", .minimum = 0, .maximum = NUM_FRAMERATE_CONTROLS-1, .step = 1, .default_value = NUM_FRAMERATE_CONTROLS-1, }, { .id = CPIA2_CID_USB_ALT, .type = V4L2_CTRL_TYPE_INTEGER, .name = "USB Alternate", .minimum = USBIF_ISO_1, .maximum = USBIF_ISO_6, .step = 1, .default_value = DEFAULT_ALT, }, { .id = CPIA2_CID_LIGHTS, .type = V4L2_CTRL_TYPE_MENU, .name = "Lights", .minimum = 0, .maximum = NUM_LIGHTS_CONTROLS-1, .step = 1, .default_value = 0, }, { .id = CPIA2_CID_RESET_CAMERA, .type = V4L2_CTRL_TYPE_BUTTON, .name = "Reset Camera", .minimum = 0, .maximum = 0, .step = 0, .default_value = 0, }, }; #define NUM_CONTROLS (sizeof(controls)/sizeof(controls[0])) /****************************************************************************** * * cpia2_open * *****************************************************************************/ static int cpia2_open(struct inode *inode, struct file *file) { struct video_device *dev = video_devdata(file); struct camera_data *cam = video_get_drvdata(dev); int retval = 0; if (!cam) { ERR("Internal error, camera_data not found!\n"); return -ENODEV; } if(mutex_lock_interruptible(&cam->busy_lock)) return -ERESTARTSYS; if(!cam->present) { retval = -ENODEV; goto err_return; } if (cam->open_count > 0) { goto skip_init; } if (cpia2_allocate_buffers(cam)) { retval = -ENOMEM; goto err_return; } /* reset the camera */ if (cpia2_reset_camera(cam) < 0) { retval = -EIO; goto err_return; } cam->APP_len = 0; cam->COM_len = 0; skip_init: { struct cpia2_fh *fh = kmalloc(sizeof(*fh),GFP_KERNEL); if(!fh) { retval = -ENOMEM; goto err_return; } file->private_data = fh; fh->prio = V4L2_PRIORITY_UNSET; v4l2_prio_open(&cam->prio, &fh->prio); fh->mmapped = 0; } ++cam->open_count; cpia2_dbg_dump_registers(cam); err_return: mutex_unlock(&cam->busy_lock); return 
retval; } /****************************************************************************** * * cpia2_close * *****************************************************************************/ static int cpia2_close(struct inode *inode, struct file *file) { struct video_device *dev = video_devdata(file); struct camera_data *cam = video_get_drvdata(dev); struct cpia2_fh *fh = file->private_data; mutex_lock(&cam->busy_lock); if (cam->present && (cam->open_count == 1 || fh->prio == V4L2_PRIORITY_RECORD )) { cpia2_usb_stream_stop(cam); if(cam->open_count == 1) { /* save camera state for later open */ cpia2_save_camera_state(cam); cpia2_set_low_power(cam); cpia2_free_buffers(cam); } } { if(fh->mmapped) cam->mmapped = 0; v4l2_prio_close(&cam->prio,&fh->prio); file->private_data = NULL; kfree(fh); } if (--cam->open_count == 0) { cpia2_free_buffers(cam); if (!cam->present) { video_unregister_device(dev); mutex_unlock(&cam->busy_lock); kfree(cam); return 0; } } mutex_unlock(&cam->busy_lock); return 0; } /****************************************************************************** * * cpia2_v4l_read * *****************************************************************************/ static ssize_t cpia2_v4l_read(struct file *file, char __user *buf, size_t count, loff_t *off) { struct video_device *dev = video_devdata(file); struct camera_data *cam = video_get_drvdata(dev); int noblock = file->f_flags&O_NONBLOCK; struct cpia2_fh *fh = file->private_data; if(!cam) return -EINVAL; /* Priority check */ if(fh->prio != V4L2_PRIORITY_RECORD) { return -EBUSY; } return cpia2_read(cam, buf, count, noblock); } /****************************************************************************** * * cpia2_v4l_poll * *****************************************************************************/ static unsigned int cpia2_v4l_poll(struct file *filp, struct poll_table_struct *wait) { struct video_device *dev = video_devdata(filp); struct camera_data *cam = video_get_drvdata(dev); struct cpia2_fh *fh = filp->private_data; if(!cam) return POLLERR; /* Priority check */ if(fh->prio != V4L2_PRIORITY_RECORD) { return POLLERR; } return cpia2_poll(cam, filp, wait); } /****************************************************************************** * * ioctl_cap_query * *****************************************************************************/ static int ioctl_cap_query(void *arg, struct camera_data *cam) { struct video_capability *vc; int retval = 0; vc = arg; if (cam->params.pnp_id.product == 0x151) strcpy(vc->name, "QX5 Microscope"); else strcpy(vc->name, "CPiA2 Camera"); vc->type = VID_TYPE_CAPTURE | VID_TYPE_MJPEG_ENCODER; vc->channels = 1; vc->audios = 0; vc->minwidth = 176; /* VIDEOSIZE_QCIF */ vc->minheight = 144; switch (cam->params.version.sensor_flags) { case CPIA2_VP_SENSOR_FLAGS_500: vc->maxwidth = STV_IMAGE_VGA_COLS; vc->maxheight = STV_IMAGE_VGA_ROWS; break; case CPIA2_VP_SENSOR_FLAGS_410: vc->maxwidth = STV_IMAGE_CIF_COLS; vc->maxheight = STV_IMAGE_CIF_ROWS; break; default: return -EINVAL; } return retval; } /****************************************************************************** * * ioctl_get_channel * *****************************************************************************/ static int ioctl_get_channel(void *arg) { int retval = 0; struct video_channel *v; v = arg; if (v->channel != 0) return -EINVAL; v->channel = 0; strcpy(v->name, "Camera"); v->tuners = 0; v->flags = 0; v->type = VIDEO_TYPE_CAMERA; v->norm = 0; return retval; } 
/****************************************************************************** * * ioctl_set_channel * *****************************************************************************/ static int ioctl_set_channel(void *arg) { struct video_channel *v; int retval = 0; v = arg; if (retval == 0 && v->channel != 0) retval = -EINVAL; return retval; } /****************************************************************************** * * ioctl_set_image_prop * *****************************************************************************/ static int ioctl_set_image_prop(void *arg, struct camera_data *cam) { struct video_picture *vp; int retval = 0; vp = arg; /* brightness, color, contrast need no check 0-65535 */ memcpy(&cam->vp, vp, sizeof(*vp)); /* update cam->params.colorParams */ cam->params.color_params.brightness = vp->brightness / 256; cam->params.color_params.saturation = vp->colour / 256; cam->params.color_params.contrast = vp->contrast / 256; DBG("Requested params: bright 0x%X, sat 0x%X, contrast 0x%X\n", cam->params.color_params.brightness, cam->params.color_params.saturation, cam->params.color_params.contrast); cpia2_set_color_params(cam); return retval; } static int sync(struct camera_data *cam, int frame_nr) { struct framebuf *frame = &cam->buffers[frame_nr]; while (1) { if (frame->status == FRAME_READY) return 0; if (!cam->streaming) { frame->status = FRAME_READY; frame->length = 0; return 0; } mutex_unlock(&cam->busy_lock); wait_event_interruptible(cam->wq_stream, !cam->streaming || frame->status == FRAME_READY); mutex_lock(&cam->busy_lock); if (signal_pending(current)) return -ERESTARTSYS; if(!cam->present) return -ENOTTY; } } /****************************************************************************** * * ioctl_set_window_size * *****************************************************************************/ static int ioctl_set_window_size(void *arg, struct camera_data *cam, struct cpia2_fh *fh) { /* copy_from_user, check validity, copy to internal structure */ struct video_window *vw; int frame, err; vw = arg; if (vw->clipcount != 0) /* clipping not supported */ return -EINVAL; if (vw->clips != NULL) /* clipping not supported */ return -EINVAL; /* Ensure that only this process can change the format. */ err = v4l2_prio_change(&cam->prio, &fh->prio, V4L2_PRIORITY_RECORD); if(err != 0) return err; cam->pixelformat = V4L2_PIX_FMT_JPEG; /* Be sure to supply the Huffman tables, this isn't MJPEG */ cam->params.compression.inhibit_htables = 0; /* we set the video window to something smaller or equal to what * is requested by the user??? 
*/ DBG("Requested width = %d, height = %d\n", vw->width, vw->height); if (vw->width != cam->vw.width || vw->height != cam->vw.height) { cam->vw.width = vw->width; cam->vw.height = vw->height; cam->params.roi.width = vw->width; cam->params.roi.height = vw->height; cpia2_set_format(cam); } for (frame = 0; frame < cam->num_frames; ++frame) { if (cam->buffers[frame].status == FRAME_READING) if ((err = sync(cam, frame)) < 0) return err; cam->buffers[frame].status = FRAME_EMPTY; } return 0; } /****************************************************************************** * * ioctl_get_mbuf * *****************************************************************************/ static int ioctl_get_mbuf(void *arg, struct camera_data *cam) { struct video_mbuf *vm; int i; vm = arg; memset(vm, 0, sizeof(*vm)); vm->size = cam->frame_size*cam->num_frames; vm->frames = cam->num_frames; for (i = 0; i < cam->num_frames; i++) vm->offsets[i] = cam->frame_size * i; return 0; } /****************************************************************************** * * ioctl_mcapture * *****************************************************************************/ static int ioctl_mcapture(void *arg, struct camera_data *cam, struct cpia2_fh *fh) { struct video_mmap *vm; int video_size, err; vm = arg; if (vm->frame < 0 || vm->frame >= cam->num_frames) return -EINVAL; /* set video size */ video_size = cpia2_match_video_size(vm->width, vm->height); if (cam->video_size < 0) { return -EINVAL; } /* Ensure that only this process can change the format. */ err = v4l2_prio_change(&cam->prio, &fh->prio, V4L2_PRIORITY_RECORD); if(err != 0) return err; if (video_size != cam->video_size) { cam->video_size = video_size; cam->params.roi.width = vm->width; cam->params.roi.height = vm->height; cpia2_set_format(cam); } if (cam->buffers[vm->frame].status == FRAME_READING) if ((err=sync(cam, vm->frame)) < 0) return err; cam->buffers[vm->frame].status = FRAME_EMPTY; return cpia2_usb_stream_start(cam,cam->params.camera_state.stream_mode); } /****************************************************************************** * * ioctl_sync * *****************************************************************************/ static int ioctl_sync(void *arg, struct camera_data *cam) { int frame; frame = *(int*)arg; if (frame < 0 || frame >= cam->num_frames) return -EINVAL; return sync(cam, frame); } /****************************************************************************** * * ioctl_set_gpio * *****************************************************************************/ static int ioctl_set_gpio(void *arg, struct camera_data *cam) { __u32 gpio_val; gpio_val = *(__u32*) arg; if (gpio_val &~ 0xFFU) return -EINVAL; return cpia2_set_gpio(cam, (unsigned char)gpio_val); } /****************************************************************************** * * ioctl_querycap * * V4L2 device capabilities * *****************************************************************************/ static int ioctl_querycap(void *arg, struct camera_data *cam) { struct v4l2_capability *vc = arg; memset(vc, 0, sizeof(*vc)); strcpy(vc->driver, "cpia2"); if (cam->params.pnp_id.product == 0x151) strcpy(vc->card, "QX5 Microscope"); else strcpy(vc->card, "CPiA2 Camera"); switch (cam->params.pnp_id.device_type) { case DEVICE_STV_672: strcat(vc->card, " (672/"); break; case DEVICE_STV_676: strcat(vc->card, " (676/"); break; default: strcat(vc->card, " (???/"); break; } switch (cam->params.version.sensor_flags) { case CPIA2_VP_SENSOR_FLAGS_404: strcat(vc->card, "404)"); break; case 
CPIA2_VP_SENSOR_FLAGS_407: strcat(vc->card, "407)"); break; case CPIA2_VP_SENSOR_FLAGS_409: strcat(vc->card, "409)"); break; case CPIA2_VP_SENSOR_FLAGS_410: strcat(vc->card, "410)"); break; case CPIA2_VP_SENSOR_FLAGS_500: strcat(vc->card, "500)"); break; default: strcat(vc->card, "???)"); break; } if (usb_make_path(cam->dev, vc->bus_info, sizeof(vc->bus_info)) <0) memset(vc->bus_info,0, sizeof(vc->bus_info)); vc->version = KERNEL_VERSION(CPIA2_MAJ_VER, CPIA2_MIN_VER, CPIA2_PATCH_VER); vc->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING; return 0; } /****************************************************************************** * * ioctl_input * * V4L2 input get/set/enumerate * *****************************************************************************/ static int ioctl_input(unsigned int ioclt_nr,void *arg,struct camera_data *cam) { struct v4l2_input *i = arg; if(ioclt_nr != VIDIOC_G_INPUT) { if (i->index != 0) return -EINVAL; } memset(i, 0, sizeof(*i)); strcpy(i->name, "Camera"); i->type = V4L2_INPUT_TYPE_CAMERA; return 0; } /****************************************************************************** * * ioctl_enum_fmt * * V4L2 format enumerate * *****************************************************************************/ static int ioctl_enum_fmt(void *arg,struct camera_data *cam) { struct v4l2_fmtdesc *f = arg; int index = f->index; if (index < 0 || index > 1) return -EINVAL; memset(f, 0, sizeof(*f)); f->index = index; f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; f->flags = V4L2_FMT_FLAG_COMPRESSED; switch(index) { case 0: strcpy(f->description, "MJPEG"); f->pixelformat = V4L2_PIX_FMT_MJPEG; break; case 1: strcpy(f->description, "JPEG"); f->pixelformat = V4L2_PIX_FMT_JPEG; break; default: return -EINVAL; } return 0; } /****************************************************************************** * * ioctl_try_fmt * * V4L2 format try * *****************************************************************************/ static int ioctl_try_fmt(void *arg,struct camera_data *cam) { struct v4l2_format *f = arg; if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; if (f->fmt.pix.pixelformat != V4L2_PIX_FMT_MJPEG && f->fmt.pix.pixelformat != V4L2_PIX_FMT_JPEG) return -EINVAL; f->fmt.pix.field = V4L2_FIELD_NONE; f->fmt.pix.bytesperline = 0; f->fmt.pix.sizeimage = cam->frame_size; f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG; f->fmt.pix.priv = 0; switch (cpia2_match_video_size(f->fmt.pix.width, f->fmt.pix.height)) { case VIDEOSIZE_VGA: f->fmt.pix.width = 640; f->fmt.pix.height = 480; break; case VIDEOSIZE_CIF: f->fmt.pix.width = 352; f->fmt.pix.height = 288; break; case VIDEOSIZE_QVGA: f->fmt.pix.width = 320; f->fmt.pix.height = 240; break; case VIDEOSIZE_288_216: f->fmt.pix.width = 288; f->fmt.pix.height = 216; break; case VIDEOSIZE_256_192: f->fmt.pix.width = 256; f->fmt.pix.height = 192; break; case VIDEOSIZE_224_168: f->fmt.pix.width = 224; f->fmt.pix.height = 168; break; case VIDEOSIZE_192_144: f->fmt.pix.width = 192; f->fmt.pix.height = 144; break; case VIDEOSIZE_QCIF: default: f->fmt.pix.width = 176; f->fmt.pix.height = 144; break; } return 0; } /****************************************************************************** * * ioctl_set_fmt * * V4L2 format set * *****************************************************************************/ static int ioctl_set_fmt(void *arg,struct camera_data *cam, struct cpia2_fh *fh) { struct v4l2_format *f = arg; int err, frame; err = ioctl_try_fmt(arg, cam); if(err != 0) return err; /* Ensure that only this process 
can change the format. */ err = v4l2_prio_change(&cam->prio, &fh->prio, V4L2_PRIORITY_RECORD); if(err != 0) { return err; } cam->pixelformat = f->fmt.pix.pixelformat; /* NOTE: This should be set to 1 for MJPEG, but some apps don't handle * the missing Huffman table properly. */ cam->params.compression.inhibit_htables = 0; /*f->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG;*/ /* we set the video window to something smaller or equal to what * is requested by the user??? */ DBG("Requested width = %d, height = %d\n", f->fmt.pix.width, f->fmt.pix.height); if (f->fmt.pix.width != cam->vw.width || f->fmt.pix.height != cam->vw.height) { cam->vw.width = f->fmt.pix.width; cam->vw.height = f->fmt.pix.height; cam->params.roi.width = f->fmt.pix.width; cam->params.roi.height = f->fmt.pix.height; cpia2_set_format(cam); } for (frame = 0; frame < cam->num_frames; ++frame) { if (cam->buffers[frame].status == FRAME_READING) if ((err = sync(cam, frame)) < 0) return err; cam->buffers[frame].status = FRAME_EMPTY; } return 0; } /****************************************************************************** * * ioctl_get_fmt * * V4L2 format get * *****************************************************************************/ static int ioctl_get_fmt(void *arg,struct camera_data *cam) { struct v4l2_format *f = arg; if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; f->fmt.pix.width = cam->vw.width; f->fmt.pix.height = cam->vw.height; f->fmt.pix.pixelformat = cam->pixelformat; f->fmt.pix.field = V4L2_FIELD_NONE; f->fmt.pix.bytesperline = 0; f->fmt.pix.sizeimage = cam->frame_size; f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG; f->fmt.pix.priv = 0; return 0; } /****************************************************************************** * * ioctl_cropcap * * V4L2 query cropping capabilities * NOTE: cropping is currently disabled * *****************************************************************************/ static int ioctl_cropcap(void *arg,struct camera_data *cam) { struct v4l2_cropcap *c = arg; if (c->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; c->bounds.left = 0; c->bounds.top = 0; c->bounds.width = cam->vw.width; c->bounds.height = cam->vw.height; c->defrect.left = 0; c->defrect.top = 0; c->defrect.width = cam->vw.width; c->defrect.height = cam->vw.height; c->pixelaspect.numerator = 1; c->pixelaspect.denominator = 1; return 0; } /****************************************************************************** * * ioctl_queryctrl * * V4L2 query possible control variables * *****************************************************************************/ static int ioctl_queryctrl(void *arg,struct camera_data *cam) { struct v4l2_queryctrl *c = arg; int i; for(i=0; i<NUM_CONTROLS; ++i) { if(c->id == controls[i].id) { memcpy(c, controls+i, sizeof(*c)); break; } } if(i == NUM_CONTROLS) return -EINVAL; /* Some devices have additional limitations */ switch(c->id) { case V4L2_CID_BRIGHTNESS: /*** * Don't let the register be set to zero - bug in VP4 * flash of full brightness ***/ if (cam->params.pnp_id.device_type == DEVICE_STV_672) c->minimum = 1; break; case V4L2_CID_VFLIP: // VP5 Only if(cam->params.pnp_id.device_type == DEVICE_STV_672) c->flags |= V4L2_CTRL_FLAG_DISABLED; break; case CPIA2_CID_FRAMERATE: if(cam->params.pnp_id.device_type == DEVICE_STV_672 && cam->params.version.sensor_flags==CPIA2_VP_SENSOR_FLAGS_500){ // Maximum 15fps int i; for(i=0; i<c->maximum; ++i) { if(framerate_controls[i].value == CPIA2_VP_FRAMERATE_15) { c->maximum = i; c->default_value = i; } } } break; case 
CPIA2_CID_FLICKER_MODE: // Flicker control only valid for 672. if(cam->params.pnp_id.device_type != DEVICE_STV_672) c->flags |= V4L2_CTRL_FLAG_DISABLED; break; case CPIA2_CID_LIGHTS: // Light control only valid for the QX5 Microscope. if(cam->params.pnp_id.product != 0x151) c->flags |= V4L2_CTRL_FLAG_DISABLED; break; default: break; } return 0; } /****************************************************************************** * * ioctl_querymenu * * V4L2 query possible control variables * *****************************************************************************/ static int ioctl_querymenu(void *arg,struct camera_data *cam) { struct v4l2_querymenu *m = arg; memset(m->name, 0, sizeof(m->name)); m->reserved = 0; switch(m->id) { case CPIA2_CID_FLICKER_MODE: if(m->index < 0 || m->index >= NUM_FLICKER_CONTROLS) return -EINVAL; strcpy(m->name, flicker_controls[m->index].name); break; case CPIA2_CID_FRAMERATE: { int maximum = NUM_FRAMERATE_CONTROLS - 1; if(cam->params.pnp_id.device_type == DEVICE_STV_672 && cam->params.version.sensor_flags==CPIA2_VP_SENSOR_FLAGS_500){ // Maximum 15fps int i; for(i=0; i<maximum; ++i) { if(framerate_controls[i].value == CPIA2_VP_FRAMERATE_15) maximum = i; } } if(m->index < 0 || m->index > maximum) return -EINVAL; strcpy(m->name, framerate_controls[m->index].name); break; } case CPIA2_CID_LIGHTS: if(m->index < 0 || m->index >= NUM_LIGHTS_CONTROLS) return -EINVAL; strcpy(m->name, lights_controls[m->index].name); break; default: return -EINVAL; } return 0; } /****************************************************************************** * * ioctl_g_ctrl * * V4L2 get the value of a control variable * *****************************************************************************/ static int ioctl_g_ctrl(void *arg,struct camera_data *cam) { struct v4l2_control *c = arg; switch(c->id) { case V4L2_CID_BRIGHTNESS: cpia2_do_command(cam, CPIA2_CMD_GET_VP_BRIGHTNESS, TRANSFER_READ, 0); c->value = cam->params.color_params.brightness; break; case V4L2_CID_CONTRAST: cpia2_do_command(cam, CPIA2_CMD_GET_CONTRAST, TRANSFER_READ, 0); c->value = cam->params.color_params.contrast; break; case V4L2_CID_SATURATION: cpia2_do_command(cam, CPIA2_CMD_GET_VP_SATURATION, TRANSFER_READ, 0); c->value = cam->params.color_params.saturation; break; case V4L2_CID_HFLIP: cpia2_do_command(cam, CPIA2_CMD_GET_USER_EFFECTS, TRANSFER_READ, 0); c->value = (cam->params.vp_params.user_effects & CPIA2_VP_USER_EFFECTS_MIRROR) != 0; break; case V4L2_CID_VFLIP: cpia2_do_command(cam, CPIA2_CMD_GET_USER_EFFECTS, TRANSFER_READ, 0); c->value = (cam->params.vp_params.user_effects & CPIA2_VP_USER_EFFECTS_FLIP) != 0; break; case CPIA2_CID_TARGET_KB: c->value = cam->params.vc_params.target_kb; break; case CPIA2_CID_GPIO: cpia2_do_command(cam, CPIA2_CMD_GET_VP_GPIO_DATA, TRANSFER_READ, 0); c->value = cam->params.vp_params.gpio_data; break; case CPIA2_CID_FLICKER_MODE: { int i, mode; cpia2_do_command(cam, CPIA2_CMD_GET_FLICKER_MODES, TRANSFER_READ, 0); if(cam->params.flicker_control.cam_register & CPIA2_VP_FLICKER_MODES_NEVER_FLICKER) { mode = NEVER_FLICKER; } else { if(cam->params.flicker_control.cam_register & CPIA2_VP_FLICKER_MODES_50HZ) { mode = FLICKER_50; } else { mode = FLICKER_60; } } for(i=0; i<NUM_FLICKER_CONTROLS; i++) { if(flicker_controls[i].value == mode) { c->value = i; break; } } if(i == NUM_FLICKER_CONTROLS) return -EINVAL; break; } case CPIA2_CID_FRAMERATE: { int maximum = NUM_FRAMERATE_CONTROLS - 1; int i; for(i=0; i<= maximum; i++) { if(cam->params.vp_params.frame_rate == framerate_controls[i].value) 
break; } if(i > maximum) return -EINVAL; c->value = i; break; } case CPIA2_CID_USB_ALT: c->value = cam->params.camera_state.stream_mode; break; case CPIA2_CID_LIGHTS: { int i; cpia2_do_command(cam, CPIA2_CMD_GET_VP_GPIO_DATA, TRANSFER_READ, 0); for(i=0; i<NUM_LIGHTS_CONTROLS; i++) { if((cam->params.vp_params.gpio_data&GPIO_LIGHTS_MASK) == lights_controls[i].value) { break; } } if(i == NUM_LIGHTS_CONTROLS) return -EINVAL; c->value = i; break; } case CPIA2_CID_RESET_CAMERA: return -EINVAL; default: return -EINVAL; } DBG("Get control id:%d, value:%d\n", c->id, c->value); return 0; } /****************************************************************************** * * ioctl_s_ctrl * * V4L2 set the value of a control variable * *****************************************************************************/ static int ioctl_s_ctrl(void *arg,struct camera_data *cam) { struct v4l2_control *c = arg; int i; int retval = 0; DBG("Set control id:%d, value:%d\n", c->id, c->value); /* Check that the value is in range */ for(i=0; i<NUM_CONTROLS; i++) { if(c->id == controls[i].id) { if(c->value < controls[i].minimum || c->value > controls[i].maximum) { return -EINVAL; } break; } } if(i == NUM_CONTROLS) return -EINVAL; switch(c->id) { case V4L2_CID_BRIGHTNESS: cpia2_set_brightness(cam, c->value); break; case V4L2_CID_CONTRAST: cpia2_set_contrast(cam, c->value); break; case V4L2_CID_SATURATION: cpia2_set_saturation(cam, c->value); break; case V4L2_CID_HFLIP: cpia2_set_property_mirror(cam, c->value); break; case V4L2_CID_VFLIP: cpia2_set_property_flip(cam, c->value); break; case CPIA2_CID_TARGET_KB: retval = cpia2_set_target_kb(cam, c->value); break; case CPIA2_CID_GPIO: retval = cpia2_set_gpio(cam, c->value); break; case CPIA2_CID_FLICKER_MODE: retval = cpia2_set_flicker_mode(cam, flicker_controls[c->value].value); break; case CPIA2_CID_FRAMERATE: retval = cpia2_set_fps(cam, framerate_controls[c->value].value); break; case CPIA2_CID_USB_ALT: retval = cpia2_usb_change_streaming_alternate(cam, c->value); break; case CPIA2_CID_LIGHTS: retval = cpia2_set_gpio(cam, lights_controls[c->value].value); break; case CPIA2_CID_RESET_CAMERA: cpia2_usb_stream_pause(cam); cpia2_reset_camera(cam); cpia2_usb_stream_resume(cam); break; default: retval = -EINVAL; } return retval; } /****************************************************************************** * * ioctl_g_jpegcomp * * V4L2 get the JPEG compression parameters * *****************************************************************************/ static int ioctl_g_jpegcomp(void *arg,struct camera_data *cam) { struct v4l2_jpegcompression *parms = arg; memset(parms, 0, sizeof(*parms)); parms->quality = 80; // TODO: Can this be made meaningful? parms->jpeg_markers = V4L2_JPEG_MARKER_DQT | V4L2_JPEG_MARKER_DRI; if(!cam->params.compression.inhibit_htables) { parms->jpeg_markers |= V4L2_JPEG_MARKER_DHT; } parms->APPn = cam->APPn; parms->APP_len = cam->APP_len; if(cam->APP_len > 0) { memcpy(parms->APP_data, cam->APP_data, cam->APP_len); parms->jpeg_markers |= V4L2_JPEG_MARKER_APP; } parms->COM_len = cam->COM_len; if(cam->COM_len > 0) { memcpy(parms->COM_data, cam->COM_data, cam->COM_len); parms->jpeg_markers |= JPEG_MARKER_COM; } DBG("G_JPEGCOMP APP_len:%d COM_len:%d\n", parms->APP_len, parms->COM_len); return 0; } /****************************************************************************** * * ioctl_s_jpegcomp * * V4L2 set the JPEG compression parameters * NOTE: quality and some jpeg_markers are ignored. 
* *****************************************************************************/ static int ioctl_s_jpegcomp(void *arg,struct camera_data *cam) { struct v4l2_jpegcompression *parms = arg; DBG("S_JPEGCOMP APP_len:%d COM_len:%d\n", parms->APP_len, parms->COM_len); cam->params.compression.inhibit_htables = !(parms->jpeg_markers & V4L2_JPEG_MARKER_DHT); if(parms->APP_len != 0) { if(parms->APP_len > 0 && parms->APP_len <= sizeof(cam->APP_data) && parms->APPn >= 0 && parms->APPn <= 15) { cam->APPn = parms->APPn; cam->APP_len = parms->APP_len; memcpy(cam->APP_data, parms->APP_data, parms->APP_len); } else { LOG("Bad APPn Params n=%d len=%d\n", parms->APPn, parms->APP_len); return -EINVAL; } } else { cam->APP_len = 0; } if(parms->COM_len != 0) { if(parms->COM_len > 0 && parms->COM_len <= sizeof(cam->COM_data)) { cam->COM_len = parms->COM_len; memcpy(cam->COM_data, parms->COM_data, parms->COM_len); } else { LOG("Bad COM_len=%d\n", parms->COM_len); return -EINVAL; } } return 0; } /****************************************************************************** * * ioctl_reqbufs * * V4L2 Initiate memory mapping. * NOTE: The user's request is ignored. For now the buffers are fixed. * *****************************************************************************/ static int ioctl_reqbufs(void *arg,struct camera_data *cam) { struct v4l2_requestbuffers *req = arg; if(req->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || req->memory != V4L2_MEMORY_MMAP) return -EINVAL; DBG("REQBUFS requested:%d returning:%d\n", req->count, cam->num_frames); req->count = cam->num_frames; memset(&req->reserved, 0, sizeof(req->reserved)); return 0; } /****************************************************************************** * * ioctl_querybuf * * V4L2 Query memory buffer status. * *****************************************************************************/ static int ioctl_querybuf(void *arg,struct camera_data *cam) { struct v4l2_buffer *buf = arg; if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || buf->index > cam->num_frames) return -EINVAL; buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer; buf->length = cam->frame_size; buf->memory = V4L2_MEMORY_MMAP; if(cam->mmapped) buf->flags = V4L2_BUF_FLAG_MAPPED; else buf->flags = 0; switch (cam->buffers[buf->index].status) { case FRAME_EMPTY: case FRAME_ERROR: case FRAME_READING: buf->bytesused = 0; buf->flags = V4L2_BUF_FLAG_QUEUED; break; case FRAME_READY: buf->bytesused = cam->buffers[buf->index].length; buf->timestamp = cam->buffers[buf->index].timestamp; buf->sequence = cam->buffers[buf->index].seq; buf->flags = V4L2_BUF_FLAG_DONE; break; } DBG("QUERYBUF index:%d offset:%d flags:%d seq:%d bytesused:%d\n", buf->index, buf->m.offset, buf->flags, buf->sequence, buf->bytesused); return 0; } /****************************************************************************** * * ioctl_qbuf * * V4L2 User is freeing buffer * *****************************************************************************/ static int ioctl_qbuf(void *arg,struct camera_data *cam) { struct v4l2_buffer *buf = arg; if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || buf->memory != V4L2_MEMORY_MMAP || buf->index > cam->num_frames) return -EINVAL; DBG("QBUF #%d\n", buf->index); if(cam->buffers[buf->index].status == FRAME_READY) cam->buffers[buf->index].status = FRAME_EMPTY; return 0; } /****************************************************************************** * * find_earliest_filled_buffer * * Helper for ioctl_dqbuf. Find the next ready buffer. 
* *****************************************************************************/ static int find_earliest_filled_buffer(struct camera_data *cam) { int i; int found = -1; for (i=0; i<cam->num_frames; i++) { if(cam->buffers[i].status == FRAME_READY) { if(found < 0) { found = i; } else { /* find which buffer is earlier */ struct timeval *tv1, *tv2; tv1 = &cam->buffers[i].timestamp; tv2 = &cam->buffers[found].timestamp; if(tv1->tv_sec < tv2->tv_sec || (tv1->tv_sec == tv2->tv_sec && tv1->tv_usec < tv2->tv_usec)) found = i; } } } return found; } /****************************************************************************** * * ioctl_dqbuf * * V4L2 User is asking for a filled buffer. * *****************************************************************************/ static int ioctl_dqbuf(void *arg,struct camera_data *cam, struct file *file) { struct v4l2_buffer *buf = arg; int frame; if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || buf->memory != V4L2_MEMORY_MMAP) return -EINVAL; frame = find_earliest_filled_buffer(cam); if(frame < 0 && file->f_flags&O_NONBLOCK) return -EAGAIN; if(frame < 0) { /* Wait for a frame to become available */ struct framebuf *cb=cam->curbuff; mutex_unlock(&cam->busy_lock); wait_event_interruptible(cam->wq_stream, !cam->present || (cb=cam->curbuff)->status == FRAME_READY); mutex_lock(&cam->busy_lock); if (signal_pending(current)) return -ERESTARTSYS; if(!cam->present) return -ENOTTY; frame = cb->num; } buf->index = frame; buf->bytesused = cam->buffers[buf->index].length; buf->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_DONE; buf->field = V4L2_FIELD_NONE; buf->timestamp = cam->buffers[buf->index].timestamp; buf->sequence = cam->buffers[buf->index].seq; buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer; buf->length = cam->frame_size; buf->input = 0; buf->reserved = 0; memset(&buf->timecode, 0, sizeof(buf->timecode)); DBG("DQBUF #%d status:%d seq:%d length:%d\n", buf->index, cam->buffers[buf->index].status, buf->sequence, buf->bytesused); return 0; } /****************************************************************************** * * cpia2_ioctl * *****************************************************************************/ static int cpia2_do_ioctl(struct inode *inode, struct file *file, unsigned int ioctl_nr, void *arg) { struct video_device *dev = video_devdata(file); struct camera_data *cam = video_get_drvdata(dev); int retval = 0; if (!cam) return -ENOTTY; /* make this _really_ smp-safe */ if (mutex_lock_interruptible(&cam->busy_lock)) return -ERESTARTSYS; if (!cam->present) { mutex_unlock(&cam->busy_lock); return -ENODEV; } /* Priority check */ switch (ioctl_nr) { case VIDIOCSWIN: case VIDIOCMCAPTURE: case VIDIOC_S_FMT: { struct cpia2_fh *fh = file->private_data; retval = v4l2_prio_check(&cam->prio, &fh->prio); if(retval) { mutex_unlock(&cam->busy_lock); return retval; } break; } case VIDIOCGMBUF: case VIDIOCSYNC: { struct cpia2_fh *fh = file->private_data; if(fh->prio != V4L2_PRIORITY_RECORD) { mutex_unlock(&cam->busy_lock); return -EBUSY; } break; } default: break; } switch (ioctl_nr) { case VIDIOCGCAP: /* query capabilities */ retval = ioctl_cap_query(arg, cam); break; case VIDIOCGCHAN: /* get video source - we are a camera, nothing else */ retval = ioctl_get_channel(arg); break; case VIDIOCSCHAN: /* set video source - we are a camera, nothing else */ retval = ioctl_set_channel(arg); break; case VIDIOCGPICT: /* image properties */ memcpy(arg, &cam->vp, sizeof(struct video_picture)); break; case VIDIOCSPICT: retval = ioctl_set_image_prop(arg, cam); 
break; case VIDIOCGWIN: /* get/set capture window */ memcpy(arg, &cam->vw, sizeof(struct video_window)); break; case VIDIOCSWIN: retval = ioctl_set_window_size(arg, cam, file->private_data); break; case VIDIOCGMBUF: /* mmap interface */ retval = ioctl_get_mbuf(arg, cam); break; case VIDIOCMCAPTURE: retval = ioctl_mcapture(arg, cam, file->private_data); break; case VIDIOCSYNC: retval = ioctl_sync(arg, cam); break; /* pointless to implement overlay with this camera */ case VIDIOCCAPTURE: case VIDIOCGFBUF: case VIDIOCSFBUF: case VIDIOCKEY: retval = -EINVAL; break; /* tuner interface - we have none */ case VIDIOCGTUNER: case VIDIOCSTUNER: case VIDIOCGFREQ: case VIDIOCSFREQ: retval = -EINVAL; break; /* audio interface - we have none */ case VIDIOCGAUDIO: case VIDIOCSAUDIO: retval = -EINVAL; break; /* CPIA2 extension to Video4Linux API */ case CPIA2_IOC_SET_GPIO: retval = ioctl_set_gpio(arg, cam); break; case VIDIOC_QUERYCAP: retval = ioctl_querycap(arg,cam); break; case VIDIOC_ENUMINPUT: case VIDIOC_G_INPUT: case VIDIOC_S_INPUT: retval = ioctl_input(ioctl_nr, arg,cam); break; case VIDIOC_ENUM_FMT: retval = ioctl_enum_fmt(arg,cam); break; case VIDIOC_TRY_FMT: retval = ioctl_try_fmt(arg,cam); break; case VIDIOC_G_FMT: retval = ioctl_get_fmt(arg,cam); break; case VIDIOC_S_FMT: retval = ioctl_set_fmt(arg,cam,file->private_data); break; case VIDIOC_CROPCAP: retval = ioctl_cropcap(arg,cam); break; case VIDIOC_G_CROP: case VIDIOC_S_CROP: // TODO: I think cropping can be implemented - SJB retval = -EINVAL; break; case VIDIOC_QUERYCTRL: retval = ioctl_queryctrl(arg,cam); break; case VIDIOC_QUERYMENU: retval = ioctl_querymenu(arg,cam); break; case VIDIOC_G_CTRL: retval = ioctl_g_ctrl(arg,cam); break; case VIDIOC_S_CTRL: retval = ioctl_s_ctrl(arg,cam); break; case VIDIOC_G_JPEGCOMP: retval = ioctl_g_jpegcomp(arg,cam); break; case VIDIOC_S_JPEGCOMP: retval = ioctl_s_jpegcomp(arg,cam); break; case VIDIOC_G_PRIORITY: { struct cpia2_fh *fh = file->private_data; *(enum v4l2_priority*)arg = fh->prio; break; } case VIDIOC_S_PRIORITY: { struct cpia2_fh *fh = file->private_data; enum v4l2_priority prio; prio = *(enum v4l2_priority*)arg; if(cam->streaming && prio != fh->prio && fh->prio == V4L2_PRIORITY_RECORD) { /* Can't drop record priority while streaming */ retval = -EBUSY; } else if(prio == V4L2_PRIORITY_RECORD && prio != fh->prio && v4l2_prio_max(&cam->prio) == V4L2_PRIORITY_RECORD) { /* Only one program can record at a time */ retval = -EBUSY; } else { retval = v4l2_prio_change(&cam->prio, &fh->prio, prio); } break; } case VIDIOC_REQBUFS: retval = ioctl_reqbufs(arg,cam); break; case VIDIOC_QUERYBUF: retval = ioctl_querybuf(arg,cam); break; case VIDIOC_QBUF: retval = ioctl_qbuf(arg,cam); break; case VIDIOC_DQBUF: retval = ioctl_dqbuf(arg,cam,file); break; case VIDIOC_STREAMON: { int type; DBG("VIDIOC_STREAMON, streaming=%d\n", cam->streaming); type = *(int*)arg; if(!cam->mmapped || type != V4L2_BUF_TYPE_VIDEO_CAPTURE) retval = -EINVAL; if(!cam->streaming) { retval = cpia2_usb_stream_start(cam, cam->params.camera_state.stream_mode); } else { retval = -EINVAL; } break; } case VIDIOC_STREAMOFF: { int type; DBG("VIDIOC_STREAMOFF, streaming=%d\n", cam->streaming); type = *(int*)arg; if(!cam->mmapped || type != V4L2_BUF_TYPE_VIDEO_CAPTURE) retval = -EINVAL; if(cam->streaming) { retval = cpia2_usb_stream_stop(cam); } else { retval = -EINVAL; } break; } case VIDIOC_ENUMOUTPUT: case VIDIOC_G_OUTPUT: case VIDIOC_S_OUTPUT: case VIDIOC_G_MODULATOR: case VIDIOC_S_MODULATOR: case VIDIOC_ENUMAUDIO: case VIDIOC_G_AUDIO: case 
VIDIOC_S_AUDIO: case VIDIOC_ENUMAUDOUT: case VIDIOC_G_AUDOUT: case VIDIOC_S_AUDOUT: case VIDIOC_ENUMSTD: case VIDIOC_QUERYSTD: case VIDIOC_G_STD: case VIDIOC_S_STD: case VIDIOC_G_TUNER: case VIDIOC_S_TUNER: case VIDIOC_G_FREQUENCY: case VIDIOC_S_FREQUENCY: case VIDIOC_OVERLAY: case VIDIOC_G_FBUF: case VIDIOC_S_FBUF: case VIDIOC_G_PARM: case VIDIOC_S_PARM: retval = -EINVAL; break; default: retval = -ENOIOCTLCMD; break; } mutex_unlock(&cam->busy_lock); return retval; } static int cpia2_ioctl(struct inode *inode, struct file *file, unsigned int ioctl_nr, unsigned long iarg) { return video_usercopy(inode, file, ioctl_nr, iarg, cpia2_do_ioctl); } /****************************************************************************** * * cpia2_mmap * *****************************************************************************/ static int cpia2_mmap(struct file *file, struct vm_area_struct *area) { int retval; struct video_device *dev = video_devdata(file); struct camera_data *cam = video_get_drvdata(dev); /* Priority check */ struct cpia2_fh *fh = file->private_data; if(fh->prio != V4L2_PRIORITY_RECORD) { return -EBUSY; } retval = cpia2_remap_buffer(cam, area); if(!retval) fh->mmapped = 1; return retval; } /****************************************************************************** * * reset_camera_struct_v4l * * Sets all values to the defaults *****************************************************************************/ static void reset_camera_struct_v4l(struct camera_data *cam) { /*** * Fill in the v4l structures. video_cap is filled in inside the VIDIOCCAP * Ioctl. Here, just do the window and picture stucts. ***/ cam->vp.palette = (u16) VIDEO_PALETTE_RGB24; /* Is this right? */ cam->vp.brightness = (u16) cam->params.color_params.brightness * 256; cam->vp.colour = (u16) cam->params.color_params.saturation * 256; cam->vp.contrast = (u16) cam->params.color_params.contrast * 256; cam->vw.x = 0; cam->vw.y = 0; cam->vw.width = cam->params.roi.width; cam->vw.height = cam->params.roi.height; cam->vw.flags = 0; cam->vw.clipcount = 0; cam->frame_size = buffer_size; cam->num_frames = num_buffers; /* FlickerModes */ cam->params.flicker_control.flicker_mode_req = flicker_mode; cam->params.flicker_control.mains_frequency = flicker_freq; /* streamMode */ cam->params.camera_state.stream_mode = alternate; cam->pixelformat = V4L2_PIX_FMT_JPEG; v4l2_prio_init(&cam->prio); return; } /*** * The v4l video device structure initialized for this device ***/ static struct file_operations fops_template = { .owner = THIS_MODULE, .open = cpia2_open, .release = cpia2_close, .read = cpia2_v4l_read, .poll = cpia2_v4l_poll, .ioctl = cpia2_ioctl, .llseek = no_llseek, .compat_ioctl = v4l_compat_ioctl32, .mmap = cpia2_mmap, }; static struct video_device cpia2_template = { /* I could not find any place for the old .initialize initializer?? 
*/ .owner= THIS_MODULE, .name= "CPiA2 Camera", .type= VID_TYPE_CAPTURE, .type2 = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING, .hardware= VID_HARDWARE_CPIA2, .minor= -1, .fops= &fops_template, .release= video_device_release, }; /****************************************************************************** * * cpia2_register_camera * *****************************************************************************/ int cpia2_register_camera(struct camera_data *cam) { cam->vdev = video_device_alloc(); if(!cam->vdev) return -ENOMEM; memcpy(cam->vdev, &cpia2_template, sizeof(cpia2_template)); video_set_drvdata(cam->vdev, cam); reset_camera_struct_v4l(cam); /* register v4l device */ if (video_register_device (cam->vdev, VFL_TYPE_GRABBER, video_nr) == -1) { ERR("video_register_device failed\n"); video_device_release(cam->vdev); return -ENODEV; } return 0; } /****************************************************************************** * * cpia2_unregister_camera * *****************************************************************************/ void cpia2_unregister_camera(struct camera_data *cam) { if (!cam->open_count) { video_unregister_device(cam->vdev); } else { LOG("/dev/video%d removed while open, " "deferring video_unregister_device\n", cam->vdev->minor); } } /****************************************************************************** * * check_parameters * * Make sure that all user-supplied parameters are sensible *****************************************************************************/ static void __init check_parameters(void) { if(buffer_size < PAGE_SIZE) { buffer_size = PAGE_SIZE; LOG("buffer_size too small, setting to %d\n", buffer_size); } else if(buffer_size > 1024*1024) { /* arbitrary upper limiit */ buffer_size = 1024*1024; LOG("buffer_size ridiculously large, setting to %d\n", buffer_size); } else { buffer_size += PAGE_SIZE-1; buffer_size &= ~(PAGE_SIZE-1); } if(num_buffers < 1) { num_buffers = 1; LOG("num_buffers too small, setting to %d\n", num_buffers); } else if(num_buffers > VIDEO_MAX_FRAME) { num_buffers = VIDEO_MAX_FRAME; LOG("num_buffers too large, setting to %d\n", num_buffers); } if(alternate < USBIF_ISO_1 || alternate > USBIF_ISO_6) { alternate = DEFAULT_ALT; LOG("alternate specified is invalid, using %d\n", alternate); } if (flicker_mode != NEVER_FLICKER && flicker_mode != ANTI_FLICKER_ON) { flicker_mode = NEVER_FLICKER; LOG("Flicker mode specified is invalid, using %d\n", flicker_mode); } if (flicker_freq != FLICKER_50 && flicker_freq != FLICKER_60) { flicker_freq = FLICKER_60; LOG("Flicker mode specified is invalid, using %d\n", flicker_freq); } if(video_nr < -1 || video_nr > 64) { video_nr = -1; LOG("invalid video_nr specified, must be -1 to 64\n"); } DBG("Using %d buffers, each %d bytes, alternate=%d\n", num_buffers, buffer_size, alternate); } /************ Module Stuff ***************/ /****************************************************************************** * * cpia2_init/module_init * *****************************************************************************/ static int __init cpia2_init(void) { LOG("%s v%d.%d.%d\n", ABOUT, CPIA2_MAJ_VER, CPIA2_MIN_VER, CPIA2_PATCH_VER); check_parameters(); cpia2_usb_init(); return 0; } /****************************************************************************** * * cpia2_exit/module_exit * *****************************************************************************/ static void __exit cpia2_exit(void) { cpia2_usb_cleanup(); schedule_timeout(2 * HZ); } module_init(cpia2_init); module_exit(cpia2_exit);
rocklee104/linux-2.6.18
drivers/media/video/cpia2/cpia2_v4l.c
C
gpl-2.0
51,580
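The cpia2_do_ioctl() dispatcher above answers VIDIOC_QUERYCAP through ioctl_querycap(), which fills in the driver name, card string, bus info and capability flags. A minimal user-space sketch of reading those values is shown below; it assumes the registered node appears as /dev/video0, which depends on the video_nr module parameter and is therefore only an illustrative example, not part of the driver sources.

/* Illustrative sketch: query the V4L2 capabilities of a capture device
 * such as the one registered by cpia2_register_camera().
 * The path /dev/video0 is an assumed example. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_capability cap;
	int fd = open("/dev/video0", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&cap, 0, sizeof(cap));
	if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0) {
		perror("VIDIOC_QUERYCAP");
		close(fd);
		return 1;
	}
	/* driver and card strings are filled by the driver's querycap handler */
	printf("driver: %s, card: %s\n", cap.driver, cap.card);
	printf("video capture: %s\n",
	       (cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) ? "yes" : "no");
	close(fd);
	return 0;
}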
#!/bin/sh [ -n "$INCLUDE_ONLY" ] || { . /lib/functions.sh . ../netifd-proto.sh init_proto "$@" } proto_qmi_init_config() { available=1 no_device=1 proto_config_add_string "device:device" proto_config_add_string apn proto_config_add_string auth proto_config_add_string username proto_config_add_string password proto_config_add_string pincode proto_config_add_int delay proto_config_add_string modes proto_config_add_string pdptype proto_config_add_int profile proto_config_add_boolean dhcpv6 proto_config_add_boolean autoconnect proto_config_add_int plmn proto_config_add_int timeout proto_config_add_int mtu proto_config_add_defaults } proto_qmi_setup() { local interface="$1" local dataformat connstat local device apn auth username password pincode delay modes pdptype local profile dhcpv6 autoconnect plmn timeout mtu $PROTO_DEFAULT_OPTIONS local ip4table ip6table local cid_4 pdh_4 cid_6 pdh_6 local ip_6 ip_prefix_length gateway_6 dns1_6 dns2_6 json_get_vars device apn auth username password pincode delay modes json_get_vars pdptype profile dhcpv6 autoconnect plmn ip4table json_get_vars ip6table timeout mtu $PROTO_DEFAULT_OPTIONS [ "$timeout" = "" ] && timeout="10" [ "$metric" = "" ] && metric="0" [ -n "$ctl_device" ] && device=$ctl_device [ -n "$device" ] || { echo "No control device specified" proto_notify_error "$interface" NO_DEVICE proto_set_available "$interface" 0 return 1 } [ -n "$delay" ] && sleep "$delay" device="$(readlink -f $device)" [ -c "$device" ] || { echo "The specified control device does not exist" proto_notify_error "$interface" NO_DEVICE proto_set_available "$interface" 0 return 1 } devname="$(basename "$device")" devpath="$(readlink -f /sys/class/usbmisc/$devname/device/)" ifname="$( ls "$devpath"/net )" [ -n "$ifname" ] || { echo "The interface could not be found." proto_notify_error "$interface" NO_IFACE proto_set_available "$interface" 0 return 1 } [ -n "$mtu" ] && { echo "Setting MTU to $mtu" /sbin/ip link set dev $ifname mtu $mtu } echo "Waiting for SIM initialization" local uninitialized_timeout=0 while uqmi -s -d "$device" --get-pin-status | grep '"UIM uninitialized"' > /dev/null; do [ -e "$device" ] || return 1 if [ "$uninitialized_timeout" -lt "$timeout" ]; then let uninitialized_timeout++ sleep 1; else echo "SIM not initialized" proto_notify_error "$interface" SIM_NOT_INITIALIZED proto_block_restart "$interface" return 1 fi done if uqmi -s -d "$device" --get-pin-status | grep '"Not supported"\|"Invalid QMI command"' > /dev/null; then [ -n "$pincode" ] && { uqmi -s -d "$device" --verify-pin1 "$pincode" > /dev/null || uqmi -s -d "$device" --uim-verify-pin1 "$pincode" > /dev/null || { echo "Unable to verify PIN" proto_notify_error "$interface" PIN_FAILED proto_block_restart "$interface" return 1 } } else . 
/usr/share/libubox/jshn.sh json_load "$(uqmi -s -d "$device" --get-pin-status)" json_get_var pin1_status pin1_status json_get_var pin1_verify_tries pin1_verify_tries case "$pin1_status" in disabled) echo "PIN verification is disabled" ;; blocked) echo "SIM locked PUK required" proto_notify_error "$interface" PUK_NEEDED proto_block_restart "$interface" return 1 ;; not_verified) [ "$pin1_verify_tries" -lt "3" ] && { echo "PIN verify count value is $pin1_verify_tries this is below the limit of 3" proto_notify_error "$interface" PIN_TRIES_BELOW_LIMIT proto_block_restart "$interface" return 1 } if [ -n "$pincode" ]; then uqmi -s -d "$device" --verify-pin1 "$pincode" > /dev/null 2>&1 || uqmi -s -d "$device" --uim-verify-pin1 "$pincode" > /dev/null 2>&1 || { echo "Unable to verify PIN" proto_notify_error "$interface" PIN_FAILED proto_block_restart "$interface" return 1 } else echo "PIN not specified but required" proto_notify_error "$interface" PIN_NOT_SPECIFIED proto_block_restart "$interface" return 1 fi ;; verified) echo "PIN already verified" ;; *) echo "PIN status failed ($pin1_status)" proto_notify_error "$interface" PIN_STATUS_FAILED proto_block_restart "$interface" return 1 ;; esac fi [ -n "$plmn" ] && { local mcc mnc if [ "$plmn" = 0 ]; then mcc=0 mnc=0 echo "Setting PLMN to auto" else mcc=${plmn:0:3} mnc=${plmn:3} echo "Setting PLMN to $plmn" fi uqmi -s -d "$device" --set-plmn --mcc "$mcc" --mnc "$mnc" > /dev/null 2>&1 || { echo "Unable to set PLMN" proto_notify_error "$interface" PLMN_FAILED proto_block_restart "$interface" return 1 } } # Cleanup current state if any uqmi -s -d "$device" --stop-network 0xffffffff --autoconnect > /dev/null 2>&1 # Set IP format uqmi -s -d "$device" --set-data-format 802.3 > /dev/null 2>&1 uqmi -s -d "$device" --wda-set-data-format 802.3 > /dev/null 2>&1 dataformat="$(uqmi -s -d "$device" --wda-get-data-format)" if [ "$dataformat" = '"raw-ip"' ]; then [ -f /sys/class/net/$ifname/qmi/raw_ip ] || { echo "Device only supports raw-ip mode but is missing this required driver attribute: /sys/class/net/$ifname/qmi/raw_ip" return 1 } echo "Device does not support 802.3 mode. Informing driver of raw-ip only for $ifname .." echo "Y" > /sys/class/net/$ifname/qmi/raw_ip fi uqmi -s -d "$device" --sync > /dev/null 2>&1 echo "Waiting for network registration" local registration_timeout=0 while uqmi -s -d "$device" --get-serving-system | grep '"searching"' > /dev/null; do [ -e "$device" ] || return 1 if [ "$registration_timeout" -lt "$timeout" ]; then let registration_timeout++ sleep 1; else echo "Network registration failed" proto_notify_error "$interface" NETWORK_REGISTRATION_FAILED proto_block_restart "$interface" return 1 fi done [ -n "$modes" ] && uqmi -s -d "$device" --set-network-modes "$modes" > /dev/null 2>&1 echo "Starting network $interface" pdptype=$(echo "$pdptype" | awk '{print tolower($0)}') [ "$pdptype" = "ip" -o "$pdptype" = "ipv6" -o "$pdptype" = "ipv4v6" ] || pdptype="ip" if [ "$pdptype" = "ip" ]; then [ -z "$autoconnect" ] && autoconnect=1 [ "$autoconnect" = 0 ] && autoconnect="" else [ "$autoconnect" = 1 ] || autoconnect="" fi [ "$pdptype" = "ip" -o "$pdptype" = "ipv4v6" ] && { cid_4=$(uqmi -s -d "$device" --get-client-id wds) if ! 
[ "$cid_4" -eq "$cid_4" ] 2> /dev/null; then echo "Unable to obtain client ID" proto_notify_error "$interface" NO_CID return 1 fi uqmi -s -d "$device" --set-client-id wds,"$cid_4" --set-ip-family ipv4 > /dev/null 2>&1 pdh_4=$(uqmi -s -d "$device" --set-client-id wds,"$cid_4" \ --start-network \ ${apn:+--apn $apn} \ ${profile:+--profile $profile} \ ${auth:+--auth-type $auth} \ ${username:+--username $username} \ ${password:+--password $password} \ ${autoconnect:+--autoconnect}) # pdh_4 is a numeric value on success if ! [ "$pdh_4" -eq "$pdh_4" ] 2> /dev/null; then echo "Unable to connect IPv4" uqmi -s -d "$device" --set-client-id wds,"$cid_4" --release-client-id wds > /dev/null 2>&1 proto_notify_error "$interface" CALL_FAILED return 1 fi # Check data connection state connstat=$(uqmi -s -d "$device" --get-data-status) [ "$connstat" == '"connected"' ] || { echo "No data link!" uqmi -s -d "$device" --set-client-id wds,"$cid_4" --release-client-id wds > /dev/null 2>&1 proto_notify_error "$interface" CALL_FAILED return 1 } } [ "$pdptype" = "ipv6" -o "$pdptype" = "ipv4v6" ] && { cid_6=$(uqmi -s -d "$device" --get-client-id wds) if ! [ "$cid_6" -eq "$cid_6" ] 2> /dev/null; then echo "Unable to obtain client ID" proto_notify_error "$interface" NO_CID return 1 fi uqmi -s -d "$device" --set-client-id wds,"$cid_6" --set-ip-family ipv6 > /dev/null 2>&1 pdh_6=$(uqmi -s -d "$device" --set-client-id wds,"$cid_6" \ --start-network \ ${apn:+--apn $apn} \ ${profile:+--profile $profile} \ ${auth:+--auth-type $auth} \ ${username:+--username $username} \ ${password:+--password $password} \ ${autoconnect:+--autoconnect}) # pdh_6 is a numeric value on success if ! [ "$pdh_6" -eq "$pdh_6" ] 2> /dev/null; then echo "Unable to connect IPv6" uqmi -s -d "$device" --set-client-id wds,"$cid_6" --release-client-id wds > /dev/null 2>&1 proto_notify_error "$interface" CALL_FAILED return 1 fi # Check data connection state connstat=$(uqmi -s -d "$device" --get-data-status) [ "$connstat" == '"connected"' ] || { echo "No data link!" 
uqmi -s -d "$device" --set-client-id wds,"$cid_6" --release-client-id wds > /dev/null 2>&1 proto_notify_error "$interface" CALL_FAILED return 1 } } echo "Setting up $ifname" proto_init_update "$ifname" 1 proto_set_keep 1 proto_add_data [ -n "$pdh_4" ] && { json_add_string "cid_4" "$cid_4" json_add_string "pdh_4" "$pdh_4" } [ -n "$pdh_6" ] && { json_add_string "cid_6" "$cid_6" json_add_string "pdh_6" "$pdh_6" } proto_close_data proto_send_update "$interface" local zone="$(fw3 -q network "$interface" 2>/dev/null)" [ -n "$pdh_6" ] && { if [ -z "$dhcpv6" -o "$dhcpv6" = 0 ]; then json_load "$(uqmi -s -d $device --set-client-id wds,$cid_6 --get-current-settings)" json_select ipv6 json_get_var ip_6 ip json_get_var gateway_6 gateway json_get_var dns1_6 dns1 json_get_var dns2_6 dns2 json_get_var ip_prefix_length ip-prefix-length proto_init_update "$ifname" 1 proto_set_keep 1 proto_add_ipv6_address "$ip_6" "128" proto_add_ipv6_prefix "${ip_6}/${ip_prefix_length}" proto_add_ipv6_route "$gateway_6" "128" [ "$defaultroute" = 0 ] || proto_add_ipv6_route "::0" 0 "$gateway_6" "" "" "${ip_6}/${ip_prefix_length}" [ "$peerdns" = 0 ] || { proto_add_dns_server "$dns1_6" proto_add_dns_server "$dns2_6" } [ -n "$zone" ] && { proto_add_data json_add_string zone "$zone" proto_close_data } proto_send_update "$interface" else json_init json_add_string name "${interface}_6" json_add_string ifname "@$interface" json_add_string proto "dhcpv6" [ -n "$ip6table" ] && json_add_string ip6table "$ip6table" proto_add_dynamic_defaults # RFC 7278: Extend an IPv6 /64 Prefix to LAN json_add_string extendprefix 1 [ -n "$zone" ] && json_add_string zone "$zone" json_close_object ubus call network add_dynamic "$(json_dump)" fi } [ -n "$pdh_4" ] && { json_init json_add_string name "${interface}_4" json_add_string ifname "@$interface" json_add_string proto "dhcp" [ -n "$ip4table" ] && json_add_string ip4table "$ip4table" proto_add_dynamic_defaults [ -n "$zone" ] && json_add_string zone "$zone" json_close_object ubus call network add_dynamic "$(json_dump)" } } qmi_wds_stop() { local cid="$1" local pdh="$2" [ -n "$cid" ] || return uqmi -s -d "$device" --set-client-id wds,"$cid" \ --stop-network 0xffffffff \ --autoconnect > /dev/null 2>&1 [ -n "$pdh" ] && { uqmi -s -d "$device" --set-client-id wds,"$cid" \ --stop-network "$pdh" > /dev/null 2>&1 } uqmi -s -d "$device" --set-client-id wds,"$cid" \ --release-client-id wds > /dev/null 2>&1 } proto_qmi_teardown() { local interface="$1" local device cid_4 pdh_4 cid_6 pdh_6 json_get_vars device [ -n "$ctl_device" ] && device=$ctl_device echo "Stopping network $interface" json_load "$(ubus call network.interface.$interface status)" json_select data json_get_vars cid_4 pdh_4 cid_6 pdh_6 qmi_wds_stop "$cid_4" "$pdh_4" qmi_wds_stop "$cid_6" "$pdh_6" proto_init_update "*" 0 proto_send_update "$interface" } [ -n "$INCLUDE_ONLY" ] || { add_protocol qmi }
chaojin/openwrt
package/network/utils/uqmi/files/lib/netifd/proto/qmi.sh
Shell
gpl-2.0
11,756
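proto_qmi_setup() above pulls its settings (device, apn, auth, username, password, pincode, pdptype, plmn, mtu and so on) from the UCI options declared in proto_qmi_init_config(). A minimal configuration sketch that exercises those options is shown below; the interface name wwan, the control node /dev/cdc-wdm0, the APN and the PIN are assumed example values, not values taken from the script.

#!/bin/sh
# Illustrative sketch only: configure an OpenWrt interface that uses the
# "qmi" protocol handler shown above. Option names match the
# proto_config_add_* declarations in proto_qmi_init_config(); the interface
# name, device path, APN and PIN are assumptions.
uci set network.wwan=interface
uci set network.wwan.proto='qmi'
uci set network.wwan.device='/dev/cdc-wdm0'   # assumed cdc-wdm control node
uci set network.wwan.apn='internet'           # operator-specific, assumed
uci set network.wwan.pdptype='ipv4v6'         # lowercased by the script before use
uci set network.wwan.pincode='1234'           # only if the SIM requires a PIN
uci commit network
ifup wwan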
/* * linux/mm/page_alloc.c * * Manages the free list, the system allocates free pages here. * Note that kmalloc() lives in slab.c * * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * Swap reorganised 29.12.95, Stephen Tweedie * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 * (lots of bits borrowed from Ingo Molnar & Andrew Morton) */ #include <linux/stddef.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/interrupt.h> #include <linux/pagemap.h> #include <linux/jiffies.h> #include <linux/bootmem.h> #include <linux/memblock.h> #include <linux/compiler.h> #include <linux/kernel.h> #include <linux/kmemcheck.h> #include <linux/module.h> #include <linux/suspend.h> #include <linux/pagevec.h> #include <linux/blkdev.h> #include <linux/slab.h> #include <linux/oom.h> #include <linux/notifier.h> #include <linux/topology.h> #include <linux/sysctl.h> #include <linux/cpu.h> #include <linux/cpuset.h> #include <linux/memory_hotplug.h> #include <linux/nodemask.h> #include <linux/vmalloc.h> #include <linux/mempolicy.h> #include <linux/stop_machine.h> #include <linux/sort.h> #include <linux/pfn.h> #include <linux/backing-dev.h> #include <linux/fault-inject.h> #include <linux/page-isolation.h> #include <linux/page_cgroup.h> #include <linux/debugobjects.h> #include <linux/kmemleak.h> #include <linux/memory.h> #include <linux/compaction.h> #include <trace/events/kmem.h> #include <linux/ftrace_event.h> #include <linux/memcontrol.h> #include <asm/tlbflush.h> #include <asm/div64.h> #include "internal.h" #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID DEFINE_PER_CPU(int, numa_node); EXPORT_PER_CPU_SYMBOL(numa_node); #endif #ifdef CONFIG_HAVE_MEMORYLESS_NODES /* * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly. * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined. * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem() * defined in <linux/topology.h>. */ DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */ EXPORT_PER_CPU_SYMBOL(_numa_mem_); #endif /* * Array of node states. */ nodemask_t node_states[NR_NODE_STATES] __read_mostly = { [N_POSSIBLE] = NODE_MASK_ALL, [N_ONLINE] = { { [0] = 1UL } }, #ifndef CONFIG_NUMA [N_NORMAL_MEMORY] = { { [0] = 1UL } }, #ifdef CONFIG_HIGHMEM [N_HIGH_MEMORY] = { { [0] = 1UL } }, #endif [N_CPU] = { { [0] = 1UL } }, #endif /* NUMA */ }; EXPORT_SYMBOL(node_states); unsigned long totalram_pages __read_mostly; unsigned long totalreserve_pages __read_mostly; int percpu_pagelist_fraction; gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; #ifdef CONFIG_PM_SLEEP /* * The following functions are used by the suspend/hibernate code to temporarily * change gfp_allowed_mask in order to avoid using I/O during memory allocations * while devices are suspended. To avoid races with the suspend/hibernate code, * they should always be called with pm_mutex held (gfp_allowed_mask also should * only be modified with pm_mutex held, unless the suspend/hibernate code is * guaranteed not to run in parallel with that modification). 
*/ static gfp_t saved_gfp_mask; void pm_restore_gfp_mask(void) { WARN_ON(!mutex_is_locked(&pm_mutex)); if (saved_gfp_mask) { gfp_allowed_mask = saved_gfp_mask; saved_gfp_mask = 0; } } void pm_restrict_gfp_mask(void) { WARN_ON(!mutex_is_locked(&pm_mutex)); WARN_ON(saved_gfp_mask); saved_gfp_mask = gfp_allowed_mask; gfp_allowed_mask &= ~GFP_IOFS; } #endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE int pageblock_order __read_mostly; #endif static void __free_pages_ok(struct page *page, unsigned int order); /* * results with 256, 32 in the lowmem_reserve sysctl: * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) * 1G machine -> (16M dma, 784M normal, 224M high) * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL * HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA * * TBD: should special case ZONE_DMA32 machines here - in those we normally * don't need any ZONE_NORMAL reservation */ int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { #ifdef CONFIG_ZONE_DMA 256, #endif #ifdef CONFIG_ZONE_DMA32 256, #endif #ifdef CONFIG_HIGHMEM 32, #endif 32, }; EXPORT_SYMBOL(totalram_pages); static char * const zone_names[MAX_NR_ZONES] = { #ifdef CONFIG_ZONE_DMA "DMA", #endif #ifdef CONFIG_ZONE_DMA32 "DMA32", #endif "Normal", #ifdef CONFIG_HIGHMEM "HighMem", #endif "Movable", }; int min_free_kbytes = 1024; static unsigned long __meminitdata nr_kernel_pages; static unsigned long __meminitdata nr_all_pages; static unsigned long __meminitdata dma_reserve; #ifdef CONFIG_ARCH_POPULATES_NODE_MAP /* * MAX_ACTIVE_REGIONS determines the maximum number of distinct * ranges of memory (RAM) that may be registered with add_active_range(). * Ranges passed to add_active_range() will be merged if possible * so the number of times add_active_range() can be called is * related to the number of nodes and the number of holes */ #ifdef CONFIG_MAX_ACTIVE_REGIONS /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */ #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS #else #if MAX_NUMNODES >= 32 /* If there can be many nodes, allow up to 50 holes per node */ #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50) #else /* By default, allow up to 256 distinct regions */ #define MAX_ACTIVE_REGIONS 256 #endif #endif static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS]; static int __meminitdata nr_nodemap_entries; static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES]; static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES]; static unsigned long __initdata required_kernelcore; static unsigned long __initdata required_movablecore; static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES]; /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ int movable_zone; EXPORT_SYMBOL(movable_zone); #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ #if MAX_NUMNODES > 1 int nr_node_ids __read_mostly = MAX_NUMNODES; int nr_online_nodes __read_mostly = 1; EXPORT_SYMBOL(nr_node_ids); EXPORT_SYMBOL(nr_online_nodes); #endif int page_group_by_mobility_disabled __read_mostly; static void set_pageblock_migratetype(struct page *page, int migratetype) { if (unlikely(page_group_by_mobility_disabled)) migratetype = MIGRATE_UNMOVABLE; set_pageblock_flags_group(page, (unsigned long)migratetype, PB_migrate, PB_migrate_end); } bool oom_killer_disabled __read_mostly; #ifdef CONFIG_DEBUG_VM static int 
page_outside_zone_boundaries(struct zone *zone, struct page *page) { int ret = 0; unsigned seq; unsigned long pfn = page_to_pfn(page); do { seq = zone_span_seqbegin(zone); if (pfn >= zone->zone_start_pfn + zone->spanned_pages) ret = 1; else if (pfn < zone->zone_start_pfn) ret = 1; } while (zone_span_seqretry(zone, seq)); return ret; } static int page_is_consistent(struct zone *zone, struct page *page) { if (!pfn_valid_within(page_to_pfn(page))) return 0; if (zone != page_zone(page)) return 0; return 1; } /* * Temporary debugging check for pages not lying within a given zone. */ static int bad_range(struct zone *zone, struct page *page) { if (page_outside_zone_boundaries(zone, page)) return 1; if (!page_is_consistent(zone, page)) return 1; return 0; } #else static inline int bad_range(struct zone *zone, struct page *page) { return 0; } #endif static void bad_page(struct page *page) { static unsigned long resume; static unsigned long nr_shown; static unsigned long nr_unshown; /* Don't complain about poisoned pages */ if (PageHWPoison(page)) { reset_page_mapcount(page); /* remove PageBuddy */ return; } /* * Allow a burst of 60 reports, then keep quiet for that minute; * or allow a steady drip of one report per second. */ if (nr_shown == 60) { if (time_before(jiffies, resume)) { nr_unshown++; goto out; } if (nr_unshown) { printk(KERN_ALERT "BUG: Bad page state: %lu messages suppressed\n", nr_unshown); nr_unshown = 0; } nr_shown = 0; } if (nr_shown++ == 0) resume = jiffies + 60 * HZ; printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n", current->comm, page_to_pfn(page)); dump_page(page); dump_stack(); out: /* Leave bad fields for debug, except PageBuddy could make trouble */ reset_page_mapcount(page); /* remove PageBuddy */ add_taint(TAINT_BAD_PAGE); } /* * Higher-order pages are called "compound pages". They are structured thusly: * * The first PAGE_SIZE page is called the "head page". * * The remaining PAGE_SIZE pages are called "tail pages". * * All pages have PG_compound set. All pages have their ->private pointing at * the head page (even the head page has this). * * The first tail page's ->lru.next holds the address of the compound page's * put_page() function. Its ->lru.prev holds the order of allocation. * This usage means that zero-order pages may not be compound. */ static void free_compound_page(struct page *page) { __free_pages_ok(page, compound_order(page)); } void prep_compound_page(struct page *page, unsigned long order) { int i; int nr_pages = 1 << order; set_compound_page_dtor(page, free_compound_page); set_compound_order(page, order); __SetPageHead(page); for (i = 1; i < nr_pages; i++) { struct page *p = page + i; __SetPageTail(p); p->first_page = page; } } /* update __split_huge_page_refcount if you change this function */ static int destroy_compound_page(struct page *page, unsigned long order) { int i; int nr_pages = 1 << order; int bad = 0; if (unlikely(compound_order(page) != order) || unlikely(!PageHead(page))) { bad_page(page); bad++; } __ClearPageHead(page); for (i = 1; i < nr_pages; i++) { struct page *p = page + i; if (unlikely(!PageTail(p) || (p->first_page != page))) { bad_page(page); bad++; } __ClearPageTail(p); } return bad; } static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) { int i; /* * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO * and __GFP_HIGHMEM from hard or soft interrupt context. 
*/ VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt()); for (i = 0; i < (1 << order); i++) clear_highpage(page + i); } static inline void set_page_order(struct page *page, int order) { set_page_private(page, order); __SetPageBuddy(page); } static inline void rmv_page_order(struct page *page) { __ClearPageBuddy(page); set_page_private(page, 0); } /* * Locate the struct page for both the matching buddy in our * pair (buddy1) and the combined O(n+1) page they form (page). * * 1) Any buddy B1 will have an order O twin B2 which satisfies * the following equation: * B2 = B1 ^ (1 << O) * For example, if the starting buddy (buddy2) is #8 its order * 1 buddy is #10: * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10 * * 2) Any buddy B will have an order O+1 parent P which * satisfies the following equation: * P = B & ~(1 << O) * * Assumption: *_mem_map is contiguous at least up to MAX_ORDER */ static inline unsigned long __find_buddy_index(unsigned long page_idx, unsigned int order) { return page_idx ^ (1 << order); } /* * This function checks whether a page is free && is the buddy * we can do coalesce a page and its buddy if * (a) the buddy is not in a hole && * (b) the buddy is in the buddy system && * (c) a page and its buddy have the same order && * (d) a page and its buddy are in the same zone. * * For recording whether a page is in the buddy system, we set ->_mapcount -2. * Setting, clearing, and testing _mapcount -2 is serialized by zone->lock. * * For recording page's order, we use page_private(page). */ static inline int page_is_buddy(struct page *page, struct page *buddy, int order) { if (!pfn_valid_within(page_to_pfn(buddy))) return 0; if (page_zone_id(page) != page_zone_id(buddy)) return 0; if (PageBuddy(buddy) && page_order(buddy) == order) { VM_BUG_ON(page_count(buddy) != 0); return 1; } return 0; } /* * Freeing function for a buddy system allocator. * * The concept of a buddy system is to maintain direct-mapped table * (containing bit values) for memory blocks of various "orders". * The bottom level table contains the map for the smallest allocatable * units of memory (here, pages), and each level above it describes * pairs of units from the levels below, hence, "buddies". * At a high level, all that happens here is marking the table entry * at the bottom level available, and propagating the changes upward * as necessary, plus some accounting needed to play nicely with other * parts of the VM system. * At each level, we keep a list of pages, which are heads of continuous * free pages of length of (1 << order) and marked with _mapcount -2. Page's * order is recorded in page_private(page) field. * So when we are allocating or freeing one, we can derive the state of the * other. That is, if we allocate a small block, and both were * free, the remainder of the region must be split into blocks. * If a block is freed, and its buddy is also free, then this * triggers coalescing into a block of larger size. 
* * -- wli */ static inline void __free_one_page(struct page *page, struct zone *zone, unsigned int order, int migratetype) { unsigned long page_idx; unsigned long combined_idx; unsigned long uninitialized_var(buddy_idx); struct page *buddy; if (unlikely(PageCompound(page))) if (unlikely(destroy_compound_page(page, order))) return; VM_BUG_ON(migratetype == -1); page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); VM_BUG_ON(page_idx & ((1 << order) - 1)); VM_BUG_ON(bad_range(zone, page)); while (order < MAX_ORDER-1) { buddy_idx = __find_buddy_index(page_idx, order); buddy = page + (buddy_idx - page_idx); if (!page_is_buddy(page, buddy, order)) break; /* Our buddy is free, merge with it and move up one order. */ list_del(&buddy->lru); zone->free_area[order].nr_free--; rmv_page_order(buddy); combined_idx = buddy_idx & page_idx; page = page + (combined_idx - page_idx); page_idx = combined_idx; order++; } set_page_order(page, order); /* * If this is not the largest possible page, check if the buddy * of the next-highest order is free. If it is, it's possible * that pages are being freed that will coalesce soon. In case, * that is happening, add the free page to the tail of the list * so it's less likely to be used soon and more likely to be merged * as a higher order page */ if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) { struct page *higher_page, *higher_buddy; combined_idx = buddy_idx & page_idx; higher_page = page + (combined_idx - page_idx); buddy_idx = __find_buddy_index(combined_idx, order + 1); higher_buddy = page + (buddy_idx - combined_idx); if (page_is_buddy(higher_page, higher_buddy, order + 1)) { list_add_tail(&page->lru, &zone->free_area[order].free_list[migratetype]); goto out; } } list_add(&page->lru, &zone->free_area[order].free_list[migratetype]); out: zone->free_area[order].nr_free++; } /* * free_page_mlock() -- clean up attempts to free and mlocked() page. * Page should not be on lru, so no need to fix that up. * free_pages_check() will verify... */ static inline void free_page_mlock(struct page *page) { __dec_zone_page_state(page, NR_MLOCK); __count_vm_event(UNEVICTABLE_MLOCKFREED); } static inline int free_pages_check(struct page *page) { if (unlikely(page_mapcount(page) | (page->mapping != NULL) | (atomic_read(&page->_count) != 0) | (page->flags & PAGE_FLAGS_CHECK_AT_FREE) | (mem_cgroup_bad_page_check(page)))) { bad_page(page); return 1; } if (page->flags & PAGE_FLAGS_CHECK_AT_PREP) page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; return 0; } /* * Frees a number of pages from the PCP lists * Assumes all pages on list are in same zone, and of same order. * count is the number of pages to free. * * If the zone was previously in an "all pages pinned" state then look to * see if this freeing clears that state. * * And clear the zone's pages_scanned counter, to hold off the "all pages are * pinned" detection logic. */ static void free_pcppages_bulk(struct zone *zone, int count, struct per_cpu_pages *pcp) { int migratetype = 0; int batch_free = 0; int to_free = count; spin_lock(&zone->lock); zone->all_unreclaimable = 0; zone->pages_scanned = 0; while (to_free) { struct page *page; struct list_head *list; /* * Remove pages from lists in a round-robin fashion. A * batch_free count is maintained that is incremented when an * empty list is encountered. 
This is so more pages are freed * off fuller lists instead of spinning excessively around empty * lists */ do { batch_free++; if (++migratetype == MIGRATE_PCPTYPES) migratetype = 0; list = &pcp->lists[migratetype]; } while (list_empty(list)); /* This is the only non-empty list. Free them all. */ if (batch_free == MIGRATE_PCPTYPES) batch_free = to_free; do { page = list_entry(list->prev, struct page, lru); /* must delete as __free_one_page list manipulates */ list_del(&page->lru); /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ __free_one_page(page, zone, 0, page_private(page)); trace_mm_page_pcpu_drain(page, 0, page_private(page)); } while (--to_free && --batch_free && !list_empty(list)); } __mod_zone_page_state(zone, NR_FREE_PAGES, count); spin_unlock(&zone->lock); } static void free_one_page(struct zone *zone, struct page *page, int order, int migratetype) { spin_lock(&zone->lock); zone->all_unreclaimable = 0; zone->pages_scanned = 0; __free_one_page(page, zone, order, migratetype); __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); spin_unlock(&zone->lock); } static bool free_pages_prepare(struct page *page, unsigned int order) { int i; int bad = 0; trace_mm_page_free_direct(page, order); kmemcheck_free_shadow(page, order); if (PageAnon(page)) page->mapping = NULL; for (i = 0; i < (1 << order); i++) bad += free_pages_check(page + i); if (bad) return false; if (!PageHighMem(page)) { debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order); debug_check_no_obj_freed(page_address(page), PAGE_SIZE << order); } arch_free_page(page, order); kernel_map_pages(page, 1 << order, 0); return true; } static void __free_pages_ok(struct page *page, unsigned int order) { unsigned long flags; int wasMlocked = __TestClearPageMlocked(page); if (!free_pages_prepare(page, order)) return; local_irq_save(flags); if (unlikely(wasMlocked)) free_page_mlock(page); __count_vm_events(PGFREE, 1 << order); free_one_page(page_zone(page), page, order, get_pageblock_migratetype(page)); local_irq_restore(flags); } /* * permit the bootmem allocator to evade page validation on high-order frees */ void __meminit __free_pages_bootmem(struct page *page, unsigned int order) { if (order == 0) { __ClearPageReserved(page); set_page_count(page, 0); set_page_refcounted(page); __free_page(page); } else { int loop; prefetchw(page); for (loop = 0; loop < BITS_PER_LONG; loop++) { struct page *p = &page[loop]; if (loop + 1 < BITS_PER_LONG) prefetchw(p + 1); __ClearPageReserved(p); set_page_count(p, 0); } set_page_refcounted(page); __free_pages(page, order); } } /* * The order of subdivision here is critical for the IO subsystem. * Please do not alter this order without good reasons and regression * testing. Specifically, as large blocks of memory are subdivided, * the order in which smaller blocks are delivered depends on the order * they're subdivided in this function. This is the primary factor * influencing the order in which pages are delivered to the IO * subsystem according to empirical testing, and this is also justified * by considering the behavior of a buddy system containing a single * large block of memory acted on by a series of small allocations. * This behavior is a critical factor in sglist merging's success. 
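 *
 * As an illustrative example: satisfying an order-0 request from an
 * order-3 (eight page) free block leaves the remainder on the free lists
 * as one order-2 block, one order-1 block and one order-0 block; expand()
 * below queues them in exactly that sequence, which is the delivery order
 * the paragraph above is concerned with.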
* * -- wli */ static inline void expand(struct zone *zone, struct page *page, int low, int high, struct free_area *area, int migratetype) { unsigned long size = 1 << high; while (high > low) { area--; high--; size >>= 1; VM_BUG_ON(bad_range(zone, &page[size])); list_add(&page[size].lru, &area->free_list[migratetype]); area->nr_free++; set_page_order(&page[size], high); } } /* * This page is about to be returned from the page allocator */ static inline int check_new_page(struct page *page) { if (unlikely(page_mapcount(page) | (page->mapping != NULL) | (atomic_read(&page->_count) != 0) | (page->flags & PAGE_FLAGS_CHECK_AT_PREP) | (mem_cgroup_bad_page_check(page)))) { bad_page(page); return 1; } return 0; } static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) { int i; for (i = 0; i < (1 << order); i++) { struct page *p = page + i; if (unlikely(check_new_page(p))) return 1; } set_page_private(page, 0); set_page_refcounted(page); arch_alloc_page(page, order); kernel_map_pages(page, 1 << order, 1); if (gfp_flags & __GFP_ZERO) prep_zero_page(page, order, gfp_flags); if (order && (gfp_flags & __GFP_COMP)) prep_compound_page(page, order); return 0; } /* * Go through the free lists for the given migratetype and remove * the smallest available page from the freelists */ static inline struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, int migratetype) { unsigned int current_order; struct free_area * area; struct page *page; /* Find a page of the appropriate size in the preferred list */ for (current_order = order; current_order < MAX_ORDER; ++current_order) { area = &(zone->free_area[current_order]); if (list_empty(&area->free_list[migratetype])) continue; page = list_entry(area->free_list[migratetype].next, struct page, lru); list_del(&page->lru); rmv_page_order(page); area->nr_free--; expand(zone, page, order, current_order, area, migratetype); return page; } return NULL; } /* * This array describes the order lists are fallen back to when * the free lists for the desirable migrate type are depleted */ static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = { [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE }, [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE }, [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE }, [MIGRATE_RESERVE] = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE }, /* Never used */ }; /* * Move the free pages in a range to the free lists of the requested type. * Note that start_page and end_pages are not aligned on a pageblock * boundary. If alignment is required, use move_freepages_block() */ static int move_freepages(struct zone *zone, struct page *start_page, struct page *end_page, int migratetype) { struct page *page; unsigned long order; int pages_moved = 0; #ifndef CONFIG_HOLES_IN_ZONE /* * page_zone is not safe to call in this context when * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant * anyway as we check zone boundaries in move_freepages_block(). 
* Remove at a later date when no bug reports exist related to * grouping pages by mobility */ BUG_ON(page_zone(start_page) != page_zone(end_page)); #endif for (page = start_page; page <= end_page;) { /* Make sure we are not inadvertently changing nodes */ VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone)); if (!pfn_valid_within(page_to_pfn(page))) { page++; continue; } if (!PageBuddy(page)) { page++; continue; } order = page_order(page); list_move(&page->lru, &zone->free_area[order].free_list[migratetype]); page += 1 << order; pages_moved += 1 << order; } return pages_moved; } static int move_freepages_block(struct zone *zone, struct page *page, int migratetype) { unsigned long start_pfn, end_pfn; struct page *start_page, *end_page; start_pfn = page_to_pfn(page); start_pfn = start_pfn & ~(pageblock_nr_pages-1); start_page = pfn_to_page(start_pfn); end_page = start_page + pageblock_nr_pages - 1; end_pfn = start_pfn + pageblock_nr_pages - 1; /* Do not cross zone boundaries */ if (start_pfn < zone->zone_start_pfn) start_page = page; if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages) return 0; return move_freepages(zone, start_page, end_page, migratetype); } static void change_pageblock_range(struct page *pageblock_page, int start_order, int migratetype) { int nr_pageblocks = 1 << (start_order - pageblock_order); while (nr_pageblocks--) { set_pageblock_migratetype(pageblock_page, migratetype); pageblock_page += pageblock_nr_pages; } } /* Remove an element from the buddy allocator from the fallback list */ static inline struct page * __rmqueue_fallback(struct zone *zone, int order, int start_migratetype) { struct free_area * area; int current_order; struct page *page; int migratetype, i; /* Find the largest possible block of pages in the other list */ for (current_order = MAX_ORDER-1; current_order >= order; --current_order) { for (i = 0; i < MIGRATE_TYPES - 1; i++) { migratetype = fallbacks[start_migratetype][i]; /* MIGRATE_RESERVE handled later if necessary */ if (migratetype == MIGRATE_RESERVE) continue; area = &(zone->free_area[current_order]); if (list_empty(&area->free_list[migratetype])) continue; page = list_entry(area->free_list[migratetype].next, struct page, lru); area->nr_free--; /* * If breaking a large block of pages, move all free * pages to the preferred allocation list. If falling * back for a reclaimable kernel allocation, be more * aggressive about taking ownership of free pages */ if (unlikely(current_order >= (pageblock_order >> 1)) || start_migratetype == MIGRATE_RECLAIMABLE || page_group_by_mobility_disabled) { unsigned long pages; pages = move_freepages_block(zone, page, start_migratetype); /* Claim the whole block if over half of it is free */ if (pages >= (1 << (pageblock_order-1)) || page_group_by_mobility_disabled) set_pageblock_migratetype(page, start_migratetype); migratetype = start_migratetype; } /* Remove the page from the freelists */ list_del(&page->lru); rmv_page_order(page); /* Take ownership for orders >= pageblock_order */ if (current_order >= pageblock_order) change_pageblock_range(page, current_order, start_migratetype); expand(zone, page, order, current_order, area, migratetype); trace_mm_page_alloc_extfrag(page, order, current_order, start_migratetype, migratetype); return page; } } return NULL; } /* * Do the hard work of removing an element from the buddy allocator. * Call me with the zone->lock already held. 
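 *
 * A minimal caller sketch (illustrative; rmqueue_bulk() below is the real
 * in-tree user):
 *
 *	spin_lock(&zone->lock);
 *	page = __rmqueue(zone, order, migratetype);
 *	if (page)
 *		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
 *	spin_unlock(&zone->lock);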
*/ static struct page *__rmqueue(struct zone *zone, unsigned int order, int migratetype) { struct page *page; retry_reserve: page = __rmqueue_smallest(zone, order, migratetype); if (unlikely(!page) && migratetype != MIGRATE_RESERVE) { page = __rmqueue_fallback(zone, order, migratetype); /* * Use MIGRATE_RESERVE rather than fail an allocation. goto * is used because __rmqueue_smallest is an inline function * and we want just one call site */ if (!page) { migratetype = MIGRATE_RESERVE; goto retry_reserve; } } trace_mm_page_alloc_zone_locked(page, order, migratetype); return page; } /* * Obtain a specified number of elements from the buddy allocator, all under * a single hold of the lock, for efficiency. Add them to the supplied list. * Returns the number of new pages which were placed at *list. */ static int rmqueue_bulk(struct zone *zone, unsigned int order, unsigned long count, struct list_head *list, int migratetype, int cold) { int i; spin_lock(&zone->lock); for (i = 0; i < count; ++i) { struct page *page = __rmqueue(zone, order, migratetype); if (unlikely(page == NULL)) break; /* * Split buddy pages returned by expand() are received here * in physical page order. The page is added to the callers and * list and the list head then moves forward. From the callers * perspective, the linked list is ordered by page number in * some conditions. This is useful for IO devices that can * merge IO requests if the physical pages are ordered * properly. */ if (likely(cold == 0)) list_add(&page->lru, list); else list_add_tail(&page->lru, list); set_page_private(page, migratetype); list = &page->lru; } __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); spin_unlock(&zone->lock); return i; } #ifdef CONFIG_NUMA /* * Called from the vmstat counter updater to drain pagesets of this * currently executing processor on remote nodes after they have * expired. * * Note that this function must be called with the thread pinned to * a single processor. */ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) { unsigned long flags; int to_drain; local_irq_save(flags); if (pcp->count >= pcp->batch) to_drain = pcp->batch; else to_drain = pcp->count; free_pcppages_bulk(zone, to_drain, pcp); pcp->count -= to_drain; local_irq_restore(flags); } #endif /* * Drain pages of the indicated processor. * * The processor must either be the current processor and the * thread pinned to the current processor or a processor that * is not online. */ static void drain_pages(unsigned int cpu) { unsigned long flags; struct zone *zone; for_each_populated_zone(zone) { struct per_cpu_pageset *pset; struct per_cpu_pages *pcp; local_irq_save(flags); pset = per_cpu_ptr(zone->pageset, cpu); pcp = &pset->pcp; if (pcp->count) { free_pcppages_bulk(zone, pcp->count, pcp); pcp->count = 0; } local_irq_restore(flags); } } /* * Spill all of this CPU's per-cpu pages back into the buddy allocator. 
*/ void drain_local_pages(void *arg) { drain_pages(smp_processor_id()); } /* * Spill all the per-cpu pages from all CPUs back into the buddy allocator */ void drain_all_pages(void) { on_each_cpu(drain_local_pages, NULL, 1); } #ifdef CONFIG_HIBERNATION void mark_free_pages(struct zone *zone) { unsigned long pfn, max_zone_pfn; unsigned long flags; int order, t; struct list_head *curr; if (!zone->spanned_pages) return; spin_lock_irqsave(&zone->lock, flags); max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) if (pfn_valid(pfn)) { struct page *page = pfn_to_page(pfn); if (!swsusp_page_is_forbidden(page)) swsusp_unset_page_free(page); } for_each_migratetype_order(order, t) { list_for_each(curr, &zone->free_area[order].free_list[t]) { unsigned long i; pfn = page_to_pfn(list_entry(curr, struct page, lru)); for (i = 0; i < (1UL << order); i++) swsusp_set_page_free(pfn_to_page(pfn + i)); } } spin_unlock_irqrestore(&zone->lock, flags); } #endif /* CONFIG_PM */ /* * Free a 0-order page * cold == 1 ? free a cold page : free a hot page */ void free_hot_cold_page(struct page *page, int cold) { struct zone *zone = page_zone(page); struct per_cpu_pages *pcp; unsigned long flags; int migratetype; int wasMlocked = __TestClearPageMlocked(page); if (!free_pages_prepare(page, 0)) return; migratetype = get_pageblock_migratetype(page); set_page_private(page, migratetype); local_irq_save(flags); if (unlikely(wasMlocked)) free_page_mlock(page); __count_vm_event(PGFREE); /* * We only track unmovable, reclaimable and movable on pcp lists. * Free ISOLATE pages back to the allocator because they are being * offlined but treat RESERVE as movable pages so we can get those * areas back if necessary. Otherwise, we may have to free * excessively into the page allocator */ if (migratetype >= MIGRATE_PCPTYPES) { if (unlikely(migratetype == MIGRATE_ISOLATE)) { free_one_page(zone, page, 0, migratetype); goto out; } migratetype = MIGRATE_MOVABLE; } pcp = &this_cpu_ptr(zone->pageset)->pcp; if (cold) list_add_tail(&page->lru, &pcp->lists[migratetype]); else list_add(&page->lru, &pcp->lists[migratetype]); pcp->count++; if (pcp->count >= pcp->high) { free_pcppages_bulk(zone, pcp->batch, pcp); pcp->count -= pcp->batch; } out: local_irq_restore(flags); } /* * split_page takes a non-compound higher-order page, and splits it into * n (1<<order) sub-pages: page[0..n] * Each sub-page must be freed individually. * * Note: this is probably too low level an operation for use in drivers. * Please consult with lkml before using this in your driver. */ void split_page(struct page *page, unsigned int order) { int i; VM_BUG_ON(PageCompound(page)); VM_BUG_ON(!page_count(page)); #ifdef CONFIG_KMEMCHECK /* * Split shadow pages too, because free(page[0]) would * otherwise free the whole shadow. */ if (kmemcheck_page_is_tracked(page)) split_page(virt_to_page(page[0].shadow), order); #endif for (i = 1; i < (1 << order); i++) set_page_refcounted(page + i); } /* * Similar to split_page except the page is already free. As this is only * being used for migration, the migratetype of the block also changes. * As this is called with interrupts disabled, the caller is responsible * for calling arch_alloc_page() and kernel_map_page() after interrupts * are enabled. * * Note: this is probably too low level an operation for use in drivers. * Please consult with lkml before using this in your driver. 
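 *
 * Illustrative behaviour, assuming the watermark check succeeds: handing
 * an order-2 free page to split_free_page() pulls it off the buddy lists,
 * gives each of the four resulting order-0 pages its own reference, and
 * returns 4 (1 << order); the caller then owns every sub-page and must
 * release each one individually.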
*/ int split_free_page(struct page *page) { unsigned int order; unsigned long watermark; struct zone *zone; BUG_ON(!PageBuddy(page)); zone = page_zone(page); order = page_order(page); /* Obey watermarks as if the page was being allocated */ watermark = low_wmark_pages(zone) + (1 << order); if (!zone_watermark_ok(zone, 0, watermark, 0, 0)) return 0; /* Remove page from free list */ list_del(&page->lru); zone->free_area[order].nr_free--; rmv_page_order(page); __mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order)); /* Split into individual pages */ set_page_refcounted(page); split_page(page, order); if (order >= pageblock_order - 1) { struct page *endpage = page + (1 << order) - 1; for (; page < endpage; page += pageblock_nr_pages) set_pageblock_migratetype(page, MIGRATE_MOVABLE); } return 1 << order; } /* * Really, prep_compound_page() should be called from __rmqueue_bulk(). But * we cheat by calling it from here, in the order > 0 path. Saves a branch * or two. */ static inline struct page *buffered_rmqueue(struct zone *preferred_zone, struct zone *zone, int order, gfp_t gfp_flags, int migratetype) { unsigned long flags; struct page *page; int cold = !!(gfp_flags & __GFP_COLD); again: if (likely(order == 0)) { struct per_cpu_pages *pcp; struct list_head *list; local_irq_save(flags); pcp = &this_cpu_ptr(zone->pageset)->pcp; list = &pcp->lists[migratetype]; if (list_empty(list)) { pcp->count += rmqueue_bulk(zone, 0, pcp->batch, list, migratetype, cold); if (unlikely(list_empty(list))) goto failed; } if (cold) page = list_entry(list->prev, struct page, lru); else page = list_entry(list->next, struct page, lru); list_del(&page->lru); pcp->count--; } else { if (unlikely(gfp_flags & __GFP_NOFAIL)) { /* * __GFP_NOFAIL is not to be used in new code. * * All __GFP_NOFAIL callers should be fixed so that they * properly detect and handle allocation failures. * * We most definitely don't want callers attempting to * allocate greater than order-1 page units with * __GFP_NOFAIL. 
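 *
 * For example, a hypothetical alloc_pages(GFP_KERNEL | __GFP_NOFAIL, 2)
 * call is precisely the pattern the WARN_ON_ONCE() below is meant to
 * catch.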
*/ WARN_ON_ONCE(order > 1); } spin_lock_irqsave(&zone->lock, flags); page = __rmqueue(zone, order, migratetype); spin_unlock(&zone->lock); if (!page) goto failed; __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order)); } __count_zone_vm_events(PGALLOC, zone, 1 << order); zone_statistics(preferred_zone, zone, gfp_flags); local_irq_restore(flags); VM_BUG_ON(bad_range(zone, page)); if (prep_new_page(page, order, gfp_flags)) goto again; return page; failed: local_irq_restore(flags); return NULL; } /* The ALLOC_WMARK bits are used as an index to zone->watermark */ #define ALLOC_WMARK_MIN WMARK_MIN #define ALLOC_WMARK_LOW WMARK_LOW #define ALLOC_WMARK_HIGH WMARK_HIGH #define ALLOC_NO_WATERMARKS 0x04 /* don't check watermarks at all */ /* Mask to get the watermark bits */ #define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1) #define ALLOC_HARDER 0x10 /* try to alloc harder */ #define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ #define ALLOC_CPUSET 0x40 /* check for correct cpuset */ #ifdef CONFIG_FAIL_PAGE_ALLOC static struct fail_page_alloc_attr { struct fault_attr attr; u32 ignore_gfp_highmem; u32 ignore_gfp_wait; u32 min_order; #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS struct dentry *ignore_gfp_highmem_file; struct dentry *ignore_gfp_wait_file; struct dentry *min_order_file; #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ } fail_page_alloc = { .attr = FAULT_ATTR_INITIALIZER, .ignore_gfp_wait = 1, .ignore_gfp_highmem = 1, .min_order = 1, }; static int __init setup_fail_page_alloc(char *str) { return setup_fault_attr(&fail_page_alloc.attr, str); } __setup("fail_page_alloc=", setup_fail_page_alloc); static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) { if (order < fail_page_alloc.min_order) return 0; if (gfp_mask & __GFP_NOFAIL) return 0; if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) return 0; if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT)) return 0; return should_fail(&fail_page_alloc.attr, 1 << order); } #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS static int __init fail_page_alloc_debugfs(void) { mode_t mode = S_IFREG | S_IRUSR | S_IWUSR; struct dentry *dir; int err; err = init_fault_attr_dentries(&fail_page_alloc.attr, "fail_page_alloc"); if (err) return err; dir = fail_page_alloc.attr.dentries.dir; fail_page_alloc.ignore_gfp_wait_file = debugfs_create_bool("ignore-gfp-wait", mode, dir, &fail_page_alloc.ignore_gfp_wait); fail_page_alloc.ignore_gfp_highmem_file = debugfs_create_bool("ignore-gfp-highmem", mode, dir, &fail_page_alloc.ignore_gfp_highmem); fail_page_alloc.min_order_file = debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order); if (!fail_page_alloc.ignore_gfp_wait_file || !fail_page_alloc.ignore_gfp_highmem_file || !fail_page_alloc.min_order_file) { err = -ENOMEM; debugfs_remove(fail_page_alloc.ignore_gfp_wait_file); debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file); debugfs_remove(fail_page_alloc.min_order_file); cleanup_fault_attr_dentries(&fail_page_alloc.attr); } return err; } late_initcall(fail_page_alloc_debugfs); #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ #else /* CONFIG_FAIL_PAGE_ALLOC */ static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) { return 0; } #endif /* CONFIG_FAIL_PAGE_ALLOC */ /* * Return true if free pages are above 'mark'. This takes into account the order * of the allocation. 
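 *
 * A worked example with illustrative numbers: for an order-2 request with
 * mark = 128 and neither ALLOC_HIGH nor ALLOC_HARDER, a zone reporting 200
 * free pages is first reduced to 200 - 4 - 1 = 195, which must exceed
 * 128 plus the lowmem reserve; the loop then subtracts the pages sitting
 * in the order-0 and order-1 free areas while halving the requirement to
 * 64 and then 32, so an abundance of single pages alone cannot satisfy a
 * higher-order request.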
*/ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark, int classzone_idx, int alloc_flags, long free_pages) { /* free_pages my go negative - that's OK */ long min = mark; int o; free_pages -= (1 << order) + 1; if (alloc_flags & ALLOC_HIGH) min -= min / 2; if (alloc_flags & ALLOC_HARDER) min -= min / 4; if (free_pages <= min + z->lowmem_reserve[classzone_idx]) return false; for (o = 0; o < order; o++) { /* At the next order, this order's pages become unavailable */ free_pages -= z->free_area[o].nr_free << o; /* Require fewer higher order pages to be free */ min >>= 1; if (free_pages <= min) return false; } return true; } bool zone_watermark_ok(struct zone *z, int order, unsigned long mark, int classzone_idx, int alloc_flags) { return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, zone_page_state(z, NR_FREE_PAGES)); } bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark, int classzone_idx, int alloc_flags) { long free_pages = zone_page_state(z, NR_FREE_PAGES); if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, free_pages); } #ifdef CONFIG_NUMA /* * zlc_setup - Setup for "zonelist cache". Uses cached zone data to * skip over zones that are not allowed by the cpuset, or that have * been recently (in last second) found to be nearly full. See further * comments in mmzone.h. Reduces cache footprint of zonelist scans * that have to skip over a lot of full or unallowed zones. * * If the zonelist cache is present in the passed in zonelist, then * returns a pointer to the allowed node mask (either the current * tasks mems_allowed, or node_states[N_HIGH_MEMORY].) * * If the zonelist cache is not available for this zonelist, does * nothing and returns NULL. * * If the fullzones BITMAP in the zonelist cache is stale (more than * a second since last zap'd) then we zap it out (clear its bits.) * * We hold off even calling zlc_setup, until after we've checked the * first zone in the zonelist, on the theory that most allocations will * be satisfied from that first zone, so best to examine that zone as * quickly as we can. */ static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) { struct zonelist_cache *zlc; /* cached zonelist speedup info */ nodemask_t *allowednodes; /* zonelist_cache approximation */ zlc = zonelist->zlcache_ptr; if (!zlc) return NULL; if (time_after(jiffies, zlc->last_full_zap + HZ)) { bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); zlc->last_full_zap = jiffies; } allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ? &cpuset_current_mems_allowed : &node_states[N_HIGH_MEMORY]; return allowednodes; } /* * Given 'z' scanning a zonelist, run a couple of quick checks to see * if it is worth looking at further for free memory: * 1) Check that the zone isn't thought to be full (doesn't have its * bit set in the zonelist_cache fullzones BITMAP). * 2) Check that the zones node (obtained from the zonelist_cache * z_to_n[] mapping) is allowed in the passed in allowednodes mask. * Return true (non-zero) if zone is worth looking at further, or * else return false (zero) if it is not. * * This check -ignores- the distinction between various watermarks, * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... 
If a zone is * found to be full for any variation of these watermarks, it will * be considered full for up to one second by all requests, unless * we are so low on memory on all allowed nodes that we are forced * into the second scan of the zonelist. * * In the second scan we ignore this zonelist cache and exactly * apply the watermarks to all zones, even it is slower to do so. * We are low on memory in the second scan, and should leave no stone * unturned looking for a free page. */ static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z, nodemask_t *allowednodes) { struct zonelist_cache *zlc; /* cached zonelist speedup info */ int i; /* index of *z in zonelist zones */ int n; /* node that zone *z is on */ zlc = zonelist->zlcache_ptr; if (!zlc) return 1; i = z - zonelist->_zonerefs; n = zlc->z_to_n[i]; /* This zone is worth trying if it is allowed but not full */ return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones); } /* * Given 'z' scanning a zonelist, set the corresponding bit in * zlc->fullzones, so that subsequent attempts to allocate a page * from that zone don't waste time re-examining it. */ static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z) { struct zonelist_cache *zlc; /* cached zonelist speedup info */ int i; /* index of *z in zonelist zones */ zlc = zonelist->zlcache_ptr; if (!zlc) return; i = z - zonelist->_zonerefs; set_bit(i, zlc->fullzones); } #else /* CONFIG_NUMA */ static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) { return NULL; } static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z, nodemask_t *allowednodes) { return 1; } static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z) { } #endif /* CONFIG_NUMA */ /* * get_page_from_freelist goes through the zonelist trying to allocate * a page. */ static struct page * get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order, struct zonelist *zonelist, int high_zoneidx, int alloc_flags, struct zone *preferred_zone, int migratetype) { struct zoneref *z; struct page *page = NULL; int classzone_idx; struct zone *zone; nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */ int zlc_active = 0; /* set if using zonelist_cache */ int did_zlc_setup = 0; /* just call zlc_setup() one time */ classzone_idx = zone_idx(preferred_zone); zonelist_scan: /* * Scan zonelist, looking for a zone with enough free. * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 
*/ for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx, nodemask) { if (NUMA_BUILD && zlc_active && !zlc_zone_worth_trying(zonelist, z, allowednodes)) continue; if ((alloc_flags & ALLOC_CPUSET) && !cpuset_zone_allowed_softwall(zone, gfp_mask)) goto try_next_zone; BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); if (!(alloc_flags & ALLOC_NO_WATERMARKS)) { unsigned long mark; int ret; mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK]; if (zone_watermark_ok(zone, order, mark, classzone_idx, alloc_flags)) goto try_this_zone; if (zone_reclaim_mode == 0) goto this_zone_full; ret = zone_reclaim(zone, gfp_mask, order); switch (ret) { case ZONE_RECLAIM_NOSCAN: /* did not scan */ goto try_next_zone; case ZONE_RECLAIM_FULL: /* scanned but unreclaimable */ goto this_zone_full; default: /* did we reclaim enough */ if (!zone_watermark_ok(zone, order, mark, classzone_idx, alloc_flags)) goto this_zone_full; } } try_this_zone: page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask, migratetype); if (page) break; this_zone_full: if (NUMA_BUILD) zlc_mark_zone_full(zonelist, z); try_next_zone: if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) { /* * we do zlc_setup after the first zone is tried but only * if there are multiple nodes make it worthwhile */ allowednodes = zlc_setup(zonelist, alloc_flags); zlc_active = 1; did_zlc_setup = 1; } } if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) { /* Disable zlc cache for second zonelist scan */ zlc_active = 0; goto zonelist_scan; } return page; } /* * Large machines with many possible nodes should not always dump per-node * meminfo in irq context. */ static inline bool should_suppress_show_mem(void) { bool ret = false; #if NODES_SHIFT > 8 ret = in_interrupt(); #endif return ret; } static inline int should_alloc_retry(gfp_t gfp_mask, unsigned int order, unsigned long pages_reclaimed) { /* Do not loop if specifically requested */ if (gfp_mask & __GFP_NORETRY) return 0; /* * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER * means __GFP_NOFAIL, but that may not be true in other * implementations. */ if (order <= PAGE_ALLOC_COSTLY_ORDER) return 1; /* * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is * specified, then we retry until we no longer reclaim any pages * (above), or we've reclaimed an order of pages at least as * large as the allocation's order. In both cases, if the * allocation still fails, we stop retrying. */ if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order)) return 1; /* * Don't let big-order allocations loop unless the caller * explicitly requests that. */ if (gfp_mask & __GFP_NOFAIL) return 1; return 0; } static inline struct page * __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, struct zone *preferred_zone, int migratetype) { struct page *page; /* Acquire the OOM killer lock for the zones in zonelist */ if (!try_set_zonelist_oom(zonelist, gfp_mask)) { schedule_timeout_uninterruptible(1); return NULL; } /* * Go through the zonelist yet one more time, keep very high watermark * here, this is only to catch a parallel oom killing, we must fail if * we're still under heavy pressure. 
*/ page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order, zonelist, high_zoneidx, ALLOC_WMARK_HIGH|ALLOC_CPUSET, preferred_zone, migratetype); if (page) goto out; if (!(gfp_mask & __GFP_NOFAIL)) { /* The OOM killer will not help higher order allocs */ if (order > PAGE_ALLOC_COSTLY_ORDER) goto out; /* The OOM killer does not needlessly kill tasks for lowmem */ if (high_zoneidx < ZONE_NORMAL) goto out; /* * GFP_THISNODE contains __GFP_NORETRY and we never hit this. * Sanity check for bare calls of __GFP_THISNODE, not real OOM. * The caller should handle page allocation failure by itself if * it specifies __GFP_THISNODE. * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER. */ if (gfp_mask & __GFP_THISNODE) goto out; } /* Exhausted what can be done so it's blamo time */ out_of_memory(zonelist, gfp_mask, order, nodemask); out: clear_zonelist_oom(zonelist, gfp_mask); return page; } #ifdef CONFIG_COMPACTION /* Try memory compaction for high-order allocations before reclaim */ static struct page * __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, int migratetype, unsigned long *did_some_progress, bool sync_migration) { struct page *page; if (!order || compaction_deferred(preferred_zone)) return NULL; current->flags |= PF_MEMALLOC; *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask, nodemask, sync_migration); current->flags &= ~PF_MEMALLOC; if (*did_some_progress != COMPACT_SKIPPED) { /* Page migration frees to the PCP lists but we want merging */ drain_pages(get_cpu()); put_cpu(); page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist, high_zoneidx, alloc_flags, preferred_zone, migratetype); if (page) { preferred_zone->compact_considered = 0; preferred_zone->compact_defer_shift = 0; count_vm_event(COMPACTSUCCESS); return page; } /* * It's bad if compaction run occurs and fails. * The most likely reason is that pages exist, * but not enough to satisfy watermarks. 
*/ count_vm_event(COMPACTFAIL); defer_compaction(preferred_zone); cond_resched(); } return NULL; } #else static inline struct page * __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, int migratetype, unsigned long *did_some_progress, bool sync_migration) { return NULL; } #endif /* CONFIG_COMPACTION */ /* The really slow allocator path where we enter direct reclaim */ static inline struct page * __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, int migratetype, unsigned long *did_some_progress) { struct page *page = NULL; struct reclaim_state reclaim_state; bool drained = false; cond_resched(); /* We now go into synchronous reclaim */ cpuset_memory_pressure_bump(); current->flags |= PF_MEMALLOC; lockdep_set_current_reclaim_state(gfp_mask); reclaim_state.reclaimed_slab = 0; current->reclaim_state = &reclaim_state; *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask); current->reclaim_state = NULL; lockdep_clear_current_reclaim_state(); current->flags &= ~PF_MEMALLOC; cond_resched(); if (unlikely(!(*did_some_progress))) return NULL; retry: page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist, high_zoneidx, alloc_flags, preferred_zone, migratetype); /* * If an allocation failed after direct reclaim, it could be because * pages are pinned on the per-cpu lists. Drain them and try again */ if (!page && !drained) { drain_all_pages(); drained = true; goto retry; } return page; } /* * This is called in the allocator slow-path if the allocation request is of * sufficient urgency to ignore watermarks and take other desperate measures */ static inline struct page * __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, struct zone *preferred_zone, int migratetype) { struct page *page; do { page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist, high_zoneidx, ALLOC_NO_WATERMARKS, preferred_zone, migratetype); if (!page && gfp_mask & __GFP_NOFAIL) wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50); } while (!page && (gfp_mask & __GFP_NOFAIL)); return page; } static inline void wake_all_kswapd(unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, enum zone_type classzone_idx) { struct zoneref *z; struct zone *zone; for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) wakeup_kswapd(zone, order, classzone_idx); } static inline int gfp_to_alloc_flags(gfp_t gfp_mask) { int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; const gfp_t wait = gfp_mask & __GFP_WAIT; /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */ BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH); /* * The caller may dip into page reserves a bit more if the caller * cannot run direct reclaim, or if the caller has realtime scheduling * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH). */ alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH); if (!wait) { /* * Not worth trying to allocate harder for * __GFP_NOMEMALLOC even if it can't schedule. */ if (!(gfp_mask & __GFP_NOMEMALLOC)) alloc_flags |= ALLOC_HARDER; /* * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc. 
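 *
 * (Concrete illustration: a GFP_ATOMIC caller - __GFP_HIGH set, __GFP_WAIT
 * clear - typically leaves gfp_to_alloc_flags() with ALLOC_WMARK_MIN |
 * ALLOC_HIGH | ALLOC_HARDER and, because of the assignment just below,
 * without ALLOC_CPUSET.)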
* See also cpuset_zone_allowed() comment in kernel/cpuset.c. */ alloc_flags &= ~ALLOC_CPUSET; } else if (unlikely(rt_task(current)) && !in_interrupt()) alloc_flags |= ALLOC_HARDER; if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) { if (!in_interrupt() && ((current->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))) alloc_flags |= ALLOC_NO_WATERMARKS; } return alloc_flags; } static inline struct page * __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, struct zone *preferred_zone, int migratetype) { const gfp_t wait = gfp_mask & __GFP_WAIT; struct page *page = NULL; int alloc_flags; unsigned long pages_reclaimed = 0; unsigned long did_some_progress; bool sync_migration = false; /* * In the slowpath, we sanity check order to avoid ever trying to * reclaim >= MAX_ORDER areas which will never succeed. Callers may * be using allocators in order of preference for an area that is * too large. */ if (order >= MAX_ORDER) { WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN)); return NULL; } /* * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and * __GFP_NOWARN set) should not cause reclaim since the subsystem * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim * using a larger set of nodes after it has established that the * allowed per node queues are empty and that nodes are * over allocated. */ if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE) goto nopage; restart: if (!(gfp_mask & __GFP_NO_KSWAPD)) wake_all_kswapd(order, zonelist, high_zoneidx, zone_idx(preferred_zone)); /* * OK, we're below the kswapd watermark and have kicked background * reclaim. Now things get more complex, so set up alloc_flags according * to how we want to proceed. */ alloc_flags = gfp_to_alloc_flags(gfp_mask); /* * Find the true preferred zone if the allocation is unconstrained by * cpusets. */ if (!(alloc_flags & ALLOC_CPUSET) && !nodemask) first_zones_zonelist(zonelist, high_zoneidx, NULL, &preferred_zone); rebalance: /* This is the last chance, in general, before the goto nopage. */ page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist, high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS, preferred_zone, migratetype); if (page) goto got_pg; /* Allocate without watermarks if the context allows */ if (alloc_flags & ALLOC_NO_WATERMARKS) { page = __alloc_pages_high_priority(gfp_mask, order, zonelist, high_zoneidx, nodemask, preferred_zone, migratetype); if (page) goto got_pg; } /* Atomic allocations - we can't balance anything */ if (!wait) goto nopage; /* Avoid recursion of direct reclaim */ if (current->flags & PF_MEMALLOC) goto nopage; /* Avoid allocations with no watermarks from looping endlessly */ if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL)) goto nopage; /* * Try direct compaction. The first pass is asynchronous. 
Subsequent * attempts after direct reclaim are synchronous */ page = __alloc_pages_direct_compact(gfp_mask, order, zonelist, high_zoneidx, nodemask, alloc_flags, preferred_zone, migratetype, &did_some_progress, sync_migration); if (page) goto got_pg; sync_migration = !(gfp_mask & __GFP_NO_KSWAPD); /* Try direct reclaim and then allocating */ page = __alloc_pages_direct_reclaim(gfp_mask, order, zonelist, high_zoneidx, nodemask, alloc_flags, preferred_zone, migratetype, &did_some_progress); if (page) goto got_pg; /* * If we failed to make any progress reclaiming, then we are * running out of options and have to consider going OOM */ if (!did_some_progress) { if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) { if (oom_killer_disabled) goto nopage; page = __alloc_pages_may_oom(gfp_mask, order, zonelist, high_zoneidx, nodemask, preferred_zone, migratetype); if (page) goto got_pg; if (!(gfp_mask & __GFP_NOFAIL)) { /* * The oom killer is not called for high-order * allocations that may fail, so if no progress * is being made, there are no other options and * retrying is unlikely to help. */ if (order > PAGE_ALLOC_COSTLY_ORDER) goto nopage; /* * The oom killer is not called for lowmem * allocations to prevent needlessly killing * innocent tasks. */ if (high_zoneidx < ZONE_NORMAL) goto nopage; } goto restart; } } /* Check if we should retry the allocation */ pages_reclaimed += did_some_progress; if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) { /* Wait for some write requests to complete then retry */ wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50); goto rebalance; } else { /* * High-order allocations do not necessarily loop after * direct reclaim and reclaim/compaction depends on compaction * being called after reclaim so call directly if necessary */ page = __alloc_pages_direct_compact(gfp_mask, order, zonelist, high_zoneidx, nodemask, alloc_flags, preferred_zone, migratetype, &did_some_progress, sync_migration); if (page) goto got_pg; } nopage: if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) { unsigned int filter = SHOW_MEM_FILTER_NODES; /* * This documents exceptions given to allocations in certain * contexts that are allowed to allocate outside current's set * of allowed nodes. */ if (!(gfp_mask & __GFP_NOMEMALLOC)) if (test_thread_flag(TIF_MEMDIE) || (current->flags & (PF_MEMALLOC | PF_EXITING))) filter &= ~SHOW_MEM_FILTER_NODES; if (in_interrupt() || !wait) filter &= ~SHOW_MEM_FILTER_NODES; pr_warning("%s: page allocation failure. order:%d, mode:0x%x\n", current->comm, order, gfp_mask); dump_stack(); if (!should_suppress_show_mem()) show_mem(filter); } return page; got_pg: if (kmemcheck_enabled) kmemcheck_pagealloc_alloc(page, order, gfp_mask); return page; } /* * This is the 'heart' of the zoned buddy allocator. */ struct page * __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, nodemask_t *nodemask) { enum zone_type high_zoneidx = gfp_zone(gfp_mask); struct zone *preferred_zone; struct page *page; int migratetype = allocflags_to_migratetype(gfp_mask); gfp_mask &= gfp_allowed_mask; lockdep_trace_alloc(gfp_mask); might_sleep_if(gfp_mask & __GFP_WAIT); if (should_fail_alloc_page(gfp_mask, order)) return NULL; /* * Check the zones suitable for the gfp_mask contain at least one * valid zone. 
It's possible to have an empty zonelist as a result * of GFP_THISNODE and a memoryless node */ if (unlikely(!zonelist->_zonerefs->zone)) return NULL; get_mems_allowed(); /* The preferred zone is used for statistics later */ first_zones_zonelist(zonelist, high_zoneidx, nodemask ? : &cpuset_current_mems_allowed, &preferred_zone); if (!preferred_zone) { put_mems_allowed(); return NULL; } /* First allocation attempt */ page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order, zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET, preferred_zone, migratetype); if (unlikely(!page)) page = __alloc_pages_slowpath(gfp_mask, order, zonelist, high_zoneidx, nodemask, preferred_zone, migratetype); put_mems_allowed(); trace_mm_page_alloc(page, order, gfp_mask, migratetype); return page; } EXPORT_SYMBOL(__alloc_pages_nodemask); /* * Common helper functions. */ unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) { struct page *page; /* * __get_free_pages() returns a 32-bit address, which cannot represent * a highmem page */ VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); page = alloc_pages(gfp_mask, order); if (!page) return 0; return (unsigned long) page_address(page); } EXPORT_SYMBOL(__get_free_pages); unsigned long get_zeroed_page(gfp_t gfp_mask) { return __get_free_pages(gfp_mask | __GFP_ZERO, 0); } EXPORT_SYMBOL(get_zeroed_page); void __pagevec_free(struct pagevec *pvec) { int i = pagevec_count(pvec); while (--i >= 0) { trace_mm_pagevec_free(pvec->pages[i], pvec->cold); free_hot_cold_page(pvec->pages[i], pvec->cold); } } void __free_pages(struct page *page, unsigned int order) { if (put_page_testzero(page)) { if (order == 0) free_hot_cold_page(page, 0); else __free_pages_ok(page, order); } } EXPORT_SYMBOL(__free_pages); void free_pages(unsigned long addr, unsigned int order) { if (addr != 0) { VM_BUG_ON(!virt_addr_valid((void *)addr)); __free_pages(virt_to_page((void *)addr), order); } } EXPORT_SYMBOL(free_pages); static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size) { if (addr) { unsigned long alloc_end = addr + (PAGE_SIZE << order); unsigned long used = addr + PAGE_ALIGN(size); split_page(virt_to_page((void *)addr), order); while (used < alloc_end) { free_page(used); used += PAGE_SIZE; } } return (void *)addr; } /** * alloc_pages_exact - allocate an exact number physically-contiguous pages. * @size: the number of bytes to allocate * @gfp_mask: GFP flags for the allocation * * This function is similar to alloc_pages(), except that it allocates the * minimum number of pages to satisfy the request. alloc_pages() can only * allocate memory in power-of-two pages. * * This function is also limited by MAX_ORDER. * * Memory allocated by this function must be released by free_pages_exact(). */ void *alloc_pages_exact(size_t size, gfp_t gfp_mask) { unsigned int order = get_order(size); unsigned long addr; addr = __get_free_pages(gfp_mask, order); return make_alloc_exact(addr, order, size); } EXPORT_SYMBOL(alloc_pages_exact); /** * alloc_pages_exact_nid - allocate an exact number of physically-contiguous * pages on a node. * @nid: the preferred node ID where memory should be allocated * @size: the number of bytes to allocate * @gfp_mask: GFP flags for the allocation * * Like alloc_pages_exact(), but try to allocate on node nid first before falling * back. * Note this is not alloc_pages_exact_node() which allocates on a specific node, * but is not exact. 
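 *
 * A minimal usage sketch (illustrative, error handling elided):
 *
 *	buf = alloc_pages_exact_nid(nid, 3 * PAGE_SIZE, GFP_KERNEL);
 *	...
 *	free_pages_exact(buf, 3 * PAGE_SIZE);
 *
 * Internally an order-2 (four page) block is allocated, split, and the
 * unused fourth page is handed straight back to the allocator.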
*/ void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) { unsigned order = get_order(size); struct page *p = alloc_pages_node(nid, gfp_mask, order); if (!p) return NULL; return make_alloc_exact((unsigned long)page_address(p), order, size); } EXPORT_SYMBOL(alloc_pages_exact_nid); /** * free_pages_exact - release memory allocated via alloc_pages_exact() * @virt: the value returned by alloc_pages_exact. * @size: size of allocation, same value as passed to alloc_pages_exact(). * * Release the memory allocated by a previous call to alloc_pages_exact. */ void free_pages_exact(void *virt, size_t size) { unsigned long addr = (unsigned long)virt; unsigned long end = addr + PAGE_ALIGN(size); while (addr < end) { free_page(addr); addr += PAGE_SIZE; } } EXPORT_SYMBOL(free_pages_exact); static unsigned int nr_free_zone_pages(int offset) { struct zoneref *z; struct zone *zone; /* Just pick one node, since fallback list is circular */ unsigned int sum = 0; struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); for_each_zone_zonelist(zone, z, zonelist, offset) { unsigned long size = zone->present_pages; unsigned long high = high_wmark_pages(zone); if (size > high) sum += size - high; } return sum; } /* * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL */ unsigned int nr_free_buffer_pages(void) { return nr_free_zone_pages(gfp_zone(GFP_USER)); } EXPORT_SYMBOL_GPL(nr_free_buffer_pages); /* * Amount of free RAM allocatable within all zones */ unsigned int nr_free_pagecache_pages(void) { return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); } static inline void show_node(struct zone *zone) { if (NUMA_BUILD) printk("Node %d ", zone_to_nid(zone)); } void si_meminfo(struct sysinfo *val) { val->totalram = totalram_pages; val->sharedram = 0; val->freeram = global_page_state(NR_FREE_PAGES); val->bufferram = nr_blockdev_pages(); val->totalhigh = totalhigh_pages; val->freehigh = nr_free_highpages(); val->mem_unit = PAGE_SIZE; } EXPORT_SYMBOL(si_meminfo); #ifdef CONFIG_NUMA void si_meminfo_node(struct sysinfo *val, int nid) { pg_data_t *pgdat = NODE_DATA(nid); val->totalram = pgdat->node_present_pages; val->freeram = node_page_state(nid, NR_FREE_PAGES); #ifdef CONFIG_HIGHMEM val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages; val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM], NR_FREE_PAGES); #else val->totalhigh = 0; val->freehigh = 0; #endif val->mem_unit = PAGE_SIZE; } #endif /* * Determine whether the zone's node should be displayed or not, depending on * whether SHOW_MEM_FILTER_NODES was passed to __show_free_areas(). */ static bool skip_free_areas_zone(unsigned int flags, const struct zone *zone) { bool ret = false; if (!(flags & SHOW_MEM_FILTER_NODES)) goto out; get_mems_allowed(); ret = !node_isset(zone->zone_pgdat->node_id, cpuset_current_mems_allowed); put_mems_allowed(); out: return ret; } #define K(x) ((x) << (PAGE_SHIFT-10)) /* * Show free area list (used inside shift_scroll-lock stuff) * We also calculate the percentage fragmentation. We do this by counting the * memory on each free list with the exception of the first item on the list. * Suppresses nodes that are not allowed by current's cpuset if * SHOW_MEM_FILTER_NODES is passed. 
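 *
 * For example, __show_free_areas(SHOW_MEM_FILTER_NODES) lets
 * skip_free_areas_zone() above drop zones whose node lies outside the
 * calling task's cpuset, while the plain show_free_areas() wrapper below
 * passes 0 and prints every populated zone.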
*/ void __show_free_areas(unsigned int filter) { int cpu; struct zone *zone; for_each_populated_zone(zone) { if (skip_free_areas_zone(filter, zone)) continue; show_node(zone); printk("%s per-cpu:\n", zone->name); for_each_online_cpu(cpu) { struct per_cpu_pageset *pageset; pageset = per_cpu_ptr(zone->pageset, cpu); printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n", cpu, pageset->pcp.high, pageset->pcp.batch, pageset->pcp.count); } } printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n" " active_file:%lu inactive_file:%lu isolated_file:%lu\n" " unevictable:%lu" " dirty:%lu writeback:%lu unstable:%lu\n" " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n" " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n", global_page_state(NR_ACTIVE_ANON), global_page_state(NR_INACTIVE_ANON), global_page_state(NR_ISOLATED_ANON), global_page_state(NR_ACTIVE_FILE), global_page_state(NR_INACTIVE_FILE), global_page_state(NR_ISOLATED_FILE), global_page_state(NR_UNEVICTABLE), global_page_state(NR_FILE_DIRTY), global_page_state(NR_WRITEBACK), global_page_state(NR_UNSTABLE_NFS), global_page_state(NR_FREE_PAGES), global_page_state(NR_SLAB_RECLAIMABLE), global_page_state(NR_SLAB_UNRECLAIMABLE), global_page_state(NR_FILE_MAPPED), global_page_state(NR_SHMEM), global_page_state(NR_PAGETABLE), global_page_state(NR_BOUNCE)); for_each_populated_zone(zone) { int i; if (skip_free_areas_zone(filter, zone)) continue; show_node(zone); printk("%s" " free:%lukB" " min:%lukB" " low:%lukB" " high:%lukB" " active_anon:%lukB" " inactive_anon:%lukB" " active_file:%lukB" " inactive_file:%lukB" " unevictable:%lukB" " isolated(anon):%lukB" " isolated(file):%lukB" " present:%lukB" " mlocked:%lukB" " dirty:%lukB" " writeback:%lukB" " mapped:%lukB" " shmem:%lukB" " slab_reclaimable:%lukB" " slab_unreclaimable:%lukB" " kernel_stack:%lukB" " pagetables:%lukB" " unstable:%lukB" " bounce:%lukB" " writeback_tmp:%lukB" " pages_scanned:%lu" " all_unreclaimable? %s" "\n", zone->name, K(zone_page_state(zone, NR_FREE_PAGES)), K(min_wmark_pages(zone)), K(low_wmark_pages(zone)), K(high_wmark_pages(zone)), K(zone_page_state(zone, NR_ACTIVE_ANON)), K(zone_page_state(zone, NR_INACTIVE_ANON)), K(zone_page_state(zone, NR_ACTIVE_FILE)), K(zone_page_state(zone, NR_INACTIVE_FILE)), K(zone_page_state(zone, NR_UNEVICTABLE)), K(zone_page_state(zone, NR_ISOLATED_ANON)), K(zone_page_state(zone, NR_ISOLATED_FILE)), K(zone->present_pages), K(zone_page_state(zone, NR_MLOCK)), K(zone_page_state(zone, NR_FILE_DIRTY)), K(zone_page_state(zone, NR_WRITEBACK)), K(zone_page_state(zone, NR_FILE_MAPPED)), K(zone_page_state(zone, NR_SHMEM)), K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)), K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)), zone_page_state(zone, NR_KERNEL_STACK) * THREAD_SIZE / 1024, K(zone_page_state(zone, NR_PAGETABLE)), K(zone_page_state(zone, NR_UNSTABLE_NFS)), K(zone_page_state(zone, NR_BOUNCE)), K(zone_page_state(zone, NR_WRITEBACK_TEMP)), zone->pages_scanned, (zone->all_unreclaimable ? 
"yes" : "no") ); printk("lowmem_reserve[]:"); for (i = 0; i < MAX_NR_ZONES; i++) printk(" %lu", zone->lowmem_reserve[i]); printk("\n"); } for_each_populated_zone(zone) { unsigned long nr[MAX_ORDER], flags, order, total = 0; if (skip_free_areas_zone(filter, zone)) continue; show_node(zone); printk("%s: ", zone->name); spin_lock_irqsave(&zone->lock, flags); for (order = 0; order < MAX_ORDER; order++) { nr[order] = zone->free_area[order].nr_free; total += nr[order] << order; } spin_unlock_irqrestore(&zone->lock, flags); for (order = 0; order < MAX_ORDER; order++) printk("%lu*%lukB ", nr[order], K(1UL) << order); printk("= %lukB\n", K(total)); } printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES)); show_swap_cache_info(); } void show_free_areas(void) { __show_free_areas(0); } static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) { zoneref->zone = zone; zoneref->zone_idx = zone_idx(zone); } /* * Builds allocation fallback zone lists. * * Add all populated zones of a node to the zonelist. */ static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, int nr_zones, enum zone_type zone_type) { struct zone *zone; BUG_ON(zone_type >= MAX_NR_ZONES); zone_type++; do { zone_type--; zone = pgdat->node_zones + zone_type; if (populated_zone(zone)) { zoneref_set_zone(zone, &zonelist->_zonerefs[nr_zones++]); check_highest_zone(zone_type); } } while (zone_type); return nr_zones; } /* * zonelist_order: * 0 = automatic detection of better ordering. * 1 = order by ([node] distance, -zonetype) * 2 = order by (-zonetype, [node] distance) * * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create * the same zonelist. So only NUMA can configure this param. */ #define ZONELIST_ORDER_DEFAULT 0 #define ZONELIST_ORDER_NODE 1 #define ZONELIST_ORDER_ZONE 2 /* zonelist order in the kernel. * set_zonelist_order() will set this to NODE or ZONE. */ static int current_zonelist_order = ZONELIST_ORDER_DEFAULT; static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"}; #ifdef CONFIG_NUMA /* The value user specified ....changed by config */ static int user_zonelist_order = ZONELIST_ORDER_DEFAULT; /* string for sysctl */ #define NUMA_ZONELIST_ORDER_LEN 16 char numa_zonelist_order[16] = "default"; /* * interface for configure zonelist ordering. * command line option "numa_zonelist_order" * = "[dD]efault - default, automatic configuration. 
* = "[nN]ode - order by node locality, then by zone within node * = "[zZ]one - order by zone, then by locality within zone */ static int __parse_numa_zonelist_order(char *s) { if (*s == 'd' || *s == 'D') { user_zonelist_order = ZONELIST_ORDER_DEFAULT; } else if (*s == 'n' || *s == 'N') { user_zonelist_order = ZONELIST_ORDER_NODE; } else if (*s == 'z' || *s == 'Z') { user_zonelist_order = ZONELIST_ORDER_ZONE; } else { printk(KERN_WARNING "Ignoring invalid numa_zonelist_order value: " "%s\n", s); return -EINVAL; } return 0; } static __init int setup_numa_zonelist_order(char *s) { int ret; if (!s) return 0; ret = __parse_numa_zonelist_order(s); if (ret == 0) strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN); return ret; } early_param("numa_zonelist_order", setup_numa_zonelist_order); /* * sysctl handler for numa_zonelist_order */ int numa_zonelist_order_handler(ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { char saved_string[NUMA_ZONELIST_ORDER_LEN]; int ret; static DEFINE_MUTEX(zl_order_mutex); mutex_lock(&zl_order_mutex); if (write) strcpy(saved_string, (char*)table->data); ret = proc_dostring(table, write, buffer, length, ppos); if (ret) goto out; if (write) { int oldval = user_zonelist_order; if (__parse_numa_zonelist_order((char*)table->data)) { /* * bogus value. restore saved string */ strncpy((char*)table->data, saved_string, NUMA_ZONELIST_ORDER_LEN); user_zonelist_order = oldval; } else if (oldval != user_zonelist_order) { mutex_lock(&zonelists_mutex); build_all_zonelists(NULL); mutex_unlock(&zonelists_mutex); } } out: mutex_unlock(&zl_order_mutex); return ret; } #define MAX_NODE_LOAD (nr_online_nodes) static int node_load[MAX_NUMNODES]; /** * find_next_best_node - find the next node that should appear in a given node's fallback list * @node: node whose fallback list we're appending * @used_node_mask: nodemask_t of already used nodes * * We use a number of factors to determine which is the next node that should * appear on a given node's fallback list. The node should not have appeared * already in @node's fallback list, and it should be the next closest node * according to the distance array (which contains arbitrary distance values * from each node to each node in the system), and should also prefer nodes * with no CPUs, since presumably they'll have very little allocation pressure * on them otherwise. * It returns -1 if no node is found. */ static int find_next_best_node(int node, nodemask_t *used_node_mask) { int n, val; int min_val = INT_MAX; int best_node = -1; const struct cpumask *tmp = cpumask_of_node(0); /* Use the local node if we haven't already */ if (!node_isset(node, *used_node_mask)) { node_set(node, *used_node_mask); return node; } for_each_node_state(n, N_HIGH_MEMORY) { /* Don't want a node to appear more than once */ if (node_isset(n, *used_node_mask)) continue; /* Use the distance array to find the distance */ val = node_distance(node, n); /* Penalize nodes under us ("prefer the next node") */ val += (n < node); /* Give preference to headless and unused nodes */ tmp = cpumask_of_node(n); if (!cpumask_empty(tmp)) val += PENALTY_FOR_NODE_WITH_CPUS; /* Slight preference for less loaded node */ val *= (MAX_NODE_LOAD*MAX_NUMNODES); val += node_load[n]; if (val < min_val) { min_val = val; best_node = n; } } if (best_node >= 0) node_set(best_node, *used_node_mask); return best_node; } /* * Build zonelists ordered by node and zones within node. 
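 *
 * (Illustrative two-node example: node ordering produces
 *  { node0/Normal, node0/DMA, node1/Normal, node1/DMA }, whereas the zone
 *  ordering built by build_zonelists_in_zone_order() below produces
 *  { node0/Normal, node1/Normal, node0/DMA, node1/DMA }.)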
* This results in maximum locality--normal zone overflows into local
 * DMA zone, if any--but risks exhausting DMA zone.
 */
static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
{
	int j;
	struct zonelist *zonelist;

	zonelist = &pgdat->node_zonelists[0];
	for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
		;
	j = build_zonelists_node(NODE_DATA(node), zonelist, j,
							MAX_NR_ZONES - 1);
	zonelist->_zonerefs[j].zone = NULL;
	zonelist->_zonerefs[j].zone_idx = 0;
}

/*
 * Build gfp_thisnode zonelists
 */
static void build_thisnode_zonelists(pg_data_t *pgdat)
{
	int j;
	struct zonelist *zonelist;

	zonelist = &pgdat->node_zonelists[1];
	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
	zonelist->_zonerefs[j].zone = NULL;
	zonelist->_zonerefs[j].zone_idx = 0;
}

/*
 * Build zonelists ordered by zone and nodes within zones.
 * This results in conserving DMA zone[s] until all Normal memory is
 * exhausted, but results in overflowing to remote node while memory
 * may still exist in local DMA zone.
 */
static int node_order[MAX_NUMNODES];

static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
{
	int pos, j, node;
	int zone_type;		/* needs to be signed */
	struct zone *z;
	struct zonelist *zonelist;

	zonelist = &pgdat->node_zonelists[0];
	pos = 0;
	for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
		for (j = 0; j < nr_nodes; j++) {
			node = node_order[j];
			z = &NODE_DATA(node)->node_zones[zone_type];
			if (populated_zone(z)) {
				zoneref_set_zone(z,
					&zonelist->_zonerefs[pos++]);
				check_highest_zone(zone_type);
			}
		}
	}
	zonelist->_zonerefs[pos].zone = NULL;
	zonelist->_zonerefs[pos].zone_idx = 0;
}

static int default_zonelist_order(void)
{
	int nid, zone_type;
	unsigned long low_kmem_size, total_size;
	struct zone *z;
	int average_size;
	/*
	 * ZONE_DMA and ZONE_DMA32 can be a very small area in the system.
	 * If they are really small and used heavily, the system can fall
	 * into OOM very easily.
	 * This function detects ZONE_DMA/DMA32 size and configures zone order.
	 */
	/* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */
	low_kmem_size = 0;
	total_size = 0;
	for_each_online_node(nid) {
		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
			z = &NODE_DATA(nid)->node_zones[zone_type];
			if (populated_zone(z)) {
				if (zone_type < ZONE_NORMAL)
					low_kmem_size += z->present_pages;
				total_size += z->present_pages;
			} else if (zone_type == ZONE_NORMAL) {
				/*
				 * If any node has only lowmem, then node order
				 * is preferred to allow kernel allocations
				 * locally; otherwise, they can easily infringe
				 * on other nodes when there is an abundance of
				 * lowmem available to allocate from.
				 */
				return ZONELIST_ORDER_NODE;
			}
		}
	}
	if (!low_kmem_size ||  /* there is no DMA area. */
	    low_kmem_size > total_size/2)	/* DMA/DMA32 is big. */
		return ZONELIST_ORDER_NODE;
	/*
	 * look into each node's config.
	 * If there is a node whose DMA/DMA32 memory is a very large share
	 * of its local memory, NODE_ORDER may be suitable.
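	 *
	 * For instance (illustrative numbers, not from this file): a node
	 * with 4GB of memory of which 3GB lies below ZONE_NORMAL (75% > 70%)
	 * would select node ordering, provided the node is not smaller than
	 * the per-node average computed below.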
*/ average_size = total_size / (nodes_weight(node_states[N_HIGH_MEMORY]) + 1); for_each_online_node(nid) { low_kmem_size = 0; total_size = 0; for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { z = &NODE_DATA(nid)->node_zones[zone_type]; if (populated_zone(z)) { if (zone_type < ZONE_NORMAL) low_kmem_size += z->present_pages; total_size += z->present_pages; } } if (low_kmem_size && total_size > average_size && /* ignore small node */ low_kmem_size > total_size * 70/100) return ZONELIST_ORDER_NODE; } return ZONELIST_ORDER_ZONE; } static void set_zonelist_order(void) { if (user_zonelist_order == ZONELIST_ORDER_DEFAULT) current_zonelist_order = default_zonelist_order(); else current_zonelist_order = user_zonelist_order; } static void build_zonelists(pg_data_t *pgdat) { int j, node, load; enum zone_type i; nodemask_t used_mask; int local_node, prev_node; struct zonelist *zonelist; int order = current_zonelist_order; /* initialize zonelists */ for (i = 0; i < MAX_ZONELISTS; i++) { zonelist = pgdat->node_zonelists + i; zonelist->_zonerefs[0].zone = NULL; zonelist->_zonerefs[0].zone_idx = 0; } /* NUMA-aware ordering of nodes */ local_node = pgdat->node_id; load = nr_online_nodes; prev_node = local_node; nodes_clear(used_mask); memset(node_order, 0, sizeof(node_order)); j = 0; while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { int distance = node_distance(local_node, node); /* * If another node is sufficiently far away then it is better * to reclaim pages in a zone before going off node. */ if (distance > RECLAIM_DISTANCE) zone_reclaim_mode = 1; /* * We don't want to pressure a particular node. * So adding penalty to the first node in same * distance group to make it round-robin. */ if (distance != node_distance(local_node, prev_node)) node_load[node] = load; prev_node = node; load--; if (order == ZONELIST_ORDER_NODE) build_zonelists_in_node_order(pgdat, node); else node_order[j++] = node; /* remember order */ } if (order == ZONELIST_ORDER_ZONE) { /* calculate node order -- i.e., DMA last! */ build_zonelists_in_zone_order(pgdat, j); } build_thisnode_zonelists(pgdat); } /* Construct the zonelist performance cache - see further mmzone.h */ static void build_zonelist_cache(pg_data_t *pgdat) { struct zonelist *zonelist; struct zonelist_cache *zlc; struct zoneref *z; zonelist = &pgdat->node_zonelists[0]; zonelist->zlcache_ptr = zlc = &zonelist->zlcache; bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); for (z = zonelist->_zonerefs; z->zone; z++) zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z); } #ifdef CONFIG_HAVE_MEMORYLESS_NODES /* * Return node id of node used for "local" allocations. * I.e., first node id of first zone in arg node's generic zonelist. * Used for initializing percpu 'numa_mem', which is used primarily * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. */ int local_memory_node(int node) { struct zone *zone; (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL), gfp_zone(GFP_KERNEL), NULL, &zone); return zone->node; } #endif #else /* CONFIG_NUMA */ static void set_zonelist_order(void) { current_zonelist_order = ZONELIST_ORDER_ZONE; } static void build_zonelists(pg_data_t *pgdat) { int node, local_node; enum zone_type j; struct zonelist *zonelist; local_node = pgdat->node_id; zonelist = &pgdat->node_zonelists[0]; j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1); /* * Now we build the zonelist so that it contains the zones * of all the other nodes. 
* We don't want to pressure a particular node, so when * building the zones for node N, we make sure that the * zones coming right after the local ones are those from * node N+1 (modulo N) */ for (node = local_node + 1; node < MAX_NUMNODES; node++) { if (!node_online(node)) continue; j = build_zonelists_node(NODE_DATA(node), zonelist, j, MAX_NR_ZONES - 1); } for (node = 0; node < local_node; node++) { if (!node_online(node)) continue; j = build_zonelists_node(NODE_DATA(node), zonelist, j, MAX_NR_ZONES - 1); } zonelist->_zonerefs[j].zone = NULL; zonelist->_zonerefs[j].zone_idx = 0; } /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */ static void build_zonelist_cache(pg_data_t *pgdat) { pgdat->node_zonelists[0].zlcache_ptr = NULL; } #endif /* CONFIG_NUMA */ /* * Boot pageset table. One per cpu which is going to be used for all * zones and all nodes. The parameters will be set in such a way * that an item put on a list will immediately be handed over to * the buddy list. This is safe since pageset manipulation is done * with interrupts disabled. * * The boot_pagesets must be kept even after bootup is complete for * unused processors and/or zones. They do play a role for bootstrapping * hotplugged processors. * * zoneinfo_show() and maybe other functions do * not check if the processor is online before following the pageset pointer. * Other parts of the kernel may not check if the zone is available. */ static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch); static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset); static void setup_zone_pageset(struct zone *zone); /* * Global mutex to protect against size modification of zonelists * as well as to serialize pageset setup for the new populated zone. */ DEFINE_MUTEX(zonelists_mutex); /* return values int ....just for stop_machine() */ static __init_refok int __build_all_zonelists(void *data) { int nid; int cpu; #ifdef CONFIG_NUMA memset(node_load, 0, sizeof(node_load)); #endif for_each_online_node(nid) { pg_data_t *pgdat = NODE_DATA(nid); build_zonelists(pgdat); build_zonelist_cache(pgdat); } /* * Initialize the boot_pagesets that are going to be used * for bootstrapping processors. The real pagesets for * each zone will be allocated later when the per cpu * allocator is available. * * boot_pagesets are used also for bootstrapping offline * cpus if the system is already booted because the pagesets * are needed to initialize allocators on a specific cpu too. * F.e. the percpu allocator needs the page allocator which * needs the percpu allocator in order to allocate its pagesets * (a chicken-egg dilemma). */ for_each_possible_cpu(cpu) { setup_pageset(&per_cpu(boot_pageset, cpu), 0); #ifdef CONFIG_HAVE_MEMORYLESS_NODES /* * We now know the "local memory node" for each node-- * i.e., the node of the first zone in the generic zonelist. * Set up numa_mem percpu variable for on-line cpus. During * boot, only the boot cpu should be on-line; we'll init the * secondary cpus' numa_mem as they come on-line. During * node/memory hotplug, we'll fixup all on-line cpus. */ if (cpu_online(cpu)) set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); #endif } return 0; } /* * Called with zonelists_mutex held always * unless system_state == SYSTEM_BOOTING. 
*/ void __ref build_all_zonelists(void *data) { set_zonelist_order(); if (system_state == SYSTEM_BOOTING) { __build_all_zonelists(NULL); mminit_verify_zonelist(); cpuset_init_current_mems_allowed(); } else { /* we have to stop all cpus to guarantee there is no user of zonelist */ #ifdef CONFIG_MEMORY_HOTPLUG if (data) setup_zone_pageset((struct zone *)data); #endif stop_machine(__build_all_zonelists, NULL, NULL); /* cpuset refresh routine should be here */ } vm_total_pages = nr_free_pagecache_pages(); /* * Disable grouping by mobility if the number of pages in the * system is too low to allow the mechanism to work. It would be * more accurate, but expensive to check per-zone. This check is * made on memory-hotadd so a system can start with mobility * disabled and enable it later */ if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) page_group_by_mobility_disabled = 1; else page_group_by_mobility_disabled = 0; printk("Built %i zonelists in %s order, mobility grouping %s. " "Total pages: %ld\n", nr_online_nodes, zonelist_order_name[current_zonelist_order], page_group_by_mobility_disabled ? "off" : "on", vm_total_pages); #ifdef CONFIG_NUMA printk("Policy zone: %s\n", zone_names[policy_zone]); #endif } /* * Helper functions to size the waitqueue hash table. * Essentially these want to choose hash table sizes sufficiently * large so that collisions trying to wait on pages are rare. * But in fact, the number of active page waitqueues on typical * systems is ridiculously low, less than 200. So this is even * conservative, even though it seems large. * * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to * waitqueues, i.e. the size of the waitq table given the number of pages. */ #define PAGES_PER_WAITQUEUE 256 #ifndef CONFIG_MEMORY_HOTPLUG static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) { unsigned long size = 1; pages /= PAGES_PER_WAITQUEUE; while (size < pages) size <<= 1; /* * Once we have dozens or even hundreds of threads sleeping * on IO we've got bigger problems than wait queue collision. * Limit the size of the wait table to a reasonable size. */ size = min(size, 4096UL); return max(size, 4UL); } #else /* * A zone's size might be changed by hot-add, so it is not possible to determine * a suitable size for its wait_table. So we use the maximum size now. * * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie: * * i386 (preemption config) : 4096 x 16 = 64Kbyte. * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte. * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte. * * The maximum entries are prepared when a zone's memory is (512K + 256) pages * or more by the traditional way. (See above). It equals: * * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte. * ia64(16K page size) : = ( 8G + 4M)byte. * powerpc (64K page size) : = (32G +16M)byte. */ static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) { return 4096UL; } #endif /* * This is an integer logarithm so that shifts can be used later * to extract the more random high bits from the multiplicative * hash function before the remainder is taken. */ static inline unsigned long wait_table_bits(unsigned long size) { return ffz(~size); } #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) /* * Mark a number of pageblocks as MIGRATE_RESERVE. The number * of blocks reserved is based on min_wmark_pages(zone). The memory within * the reserve will tend to store contiguous free pages. 
Setting min_free_kbytes * higher will lead to a bigger reserve which will get freed as contiguous * blocks as reclaim kicks in */ static void setup_zone_migrate_reserve(struct zone *zone) { unsigned long start_pfn, pfn, end_pfn; struct page *page; unsigned long block_migratetype; int reserve; /* Get the start pfn, end pfn and the number of blocks to reserve */ start_pfn = zone->zone_start_pfn; end_pfn = start_pfn + zone->spanned_pages; reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >> pageblock_order; /* * Reserve blocks are generally in place to help high-order atomic * allocations that are short-lived. A min_free_kbytes value that * would result in more than 2 reserve blocks for atomic allocations * is assumed to be in place to help anti-fragmentation for the * future allocation of hugepages at runtime. */ reserve = min(2, reserve); for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { if (!pfn_valid(pfn)) continue; page = pfn_to_page(pfn); /* Watch out for overlapping nodes */ if (page_to_nid(page) != zone_to_nid(zone)) continue; /* Blocks with reserved pages will never free, skip them. */ if (PageReserved(page)) continue; block_migratetype = get_pageblock_migratetype(page); /* If this block is reserved, account for it */ if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) { reserve--; continue; } /* Suitable for reserving if this block is movable */ if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) { set_pageblock_migratetype(page, MIGRATE_RESERVE); move_freepages_block(zone, page, MIGRATE_RESERVE); reserve--; continue; } /* * If the reserve is met and this is a previous reserved block, * take it back */ if (block_migratetype == MIGRATE_RESERVE) { set_pageblock_migratetype(page, MIGRATE_MOVABLE); move_freepages_block(zone, page, MIGRATE_MOVABLE); } } } /* * Initially all pages are reserved - free ones are freed * up by free_all_bootmem() once the early boot process is * done. Non-atomic initialization, single-pass. */ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, unsigned long start_pfn, enum memmap_context context) { struct page *page; unsigned long end_pfn = start_pfn + size; unsigned long pfn; struct zone *z; if (highest_memmap_pfn < end_pfn - 1) highest_memmap_pfn = end_pfn - 1; z = &NODE_DATA(nid)->node_zones[zone]; for (pfn = start_pfn; pfn < end_pfn; pfn++) { /* * There can be holes in boot-time mem_map[]s * handed to this function. They do not * exist on hotplugged memory. */ if (context == MEMMAP_EARLY) { if (!early_pfn_valid(pfn)) continue; if (!early_pfn_in_nid(pfn, nid)) continue; } page = pfn_to_page(pfn); set_page_links(page, zone, nid, pfn); mminit_verify_page_links(page, zone, nid, pfn); init_page_count(page); reset_page_mapcount(page); SetPageReserved(page); /* * Mark the block movable so that blocks are reserved for * movable at startup. This will force kernel allocations * to reserve their blocks rather than leaking throughout * the address space during boot when many long-lived * kernel allocations are made. Later some blocks near * the start are marked MIGRATE_RESERVE by * setup_zone_migrate_reserve() * * bitmap is created for zone's valid pfn range. but memmap * can be created for invalid pages (for alignment) * check here not to call set_pageblock_migratetype() against * pfn out of zone. 
*/ if ((z->zone_start_pfn <= pfn) && (pfn < z->zone_start_pfn + z->spanned_pages) && !(pfn & (pageblock_nr_pages - 1))) set_pageblock_migratetype(page, MIGRATE_MOVABLE); INIT_LIST_HEAD(&page->lru); #ifdef WANT_PAGE_VIRTUAL /* The shift won't overflow because ZONE_NORMAL is below 4G. */ if (!is_highmem_idx(zone)) set_page_address(page, __va(pfn << PAGE_SHIFT)); #endif } } static void __meminit zone_init_free_lists(struct zone *zone) { int order, t; for_each_migratetype_order(order, t) { INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); zone->free_area[order].nr_free = 0; } } #ifndef __HAVE_ARCH_MEMMAP_INIT #define memmap_init(size, nid, zone, start_pfn) \ memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY) #endif static int zone_batchsize(struct zone *zone) { #ifdef CONFIG_MMU int batch; /* * The per-cpu-pages pools are set to around 1000th of the * size of the zone. But no more than 1/2 of a meg. * * OK, so we don't know how big the cache is. So guess. */ batch = zone->present_pages / 1024; if (batch * PAGE_SIZE > 512 * 1024) batch = (512 * 1024) / PAGE_SIZE; batch /= 4; /* We effectively *= 4 below */ if (batch < 1) batch = 1; /* * Clamp the batch to a 2^n - 1 value. Having a power * of 2 value was found to be more likely to have * suboptimal cache aliasing properties in some cases. * * For example if 2 tasks are alternately allocating * batches of pages, one task can end up with a lot * of pages of one half of the possible page colors * and the other with pages of the other colors. */ batch = rounddown_pow_of_two(batch + batch/2) - 1; return batch; #else /* The deferral and batching of frees should be suppressed under NOMMU * conditions. * * The problem is that NOMMU needs to be able to allocate large chunks * of contiguous memory as there's no hardware page translation to * assemble apparent contiguous memory from discontiguous pages. * * Queueing large contiguous runs of pages for batching, however, * causes the pages to actually be freed in smaller chunks. As there * can be a significant delay between the individual batches being * recycled, this leads to the once large chunks of space being * fragmented and becoming unavailable for high-order allocations. */ return 0; #endif } static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) { struct per_cpu_pages *pcp; int migratetype; memset(p, 0, sizeof(*p)); pcp = &p->pcp; pcp->count = 0; pcp->high = 6 * batch; pcp->batch = max(1UL, 1 * batch); for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++) INIT_LIST_HEAD(&pcp->lists[migratetype]); } /* * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist * to the value high for the pageset p. */ static void setup_pagelist_highmark(struct per_cpu_pageset *p, unsigned long high) { struct per_cpu_pages *pcp; pcp = &p->pcp; pcp->high = high; pcp->batch = max(1UL, high/4); if ((high/4) > (PAGE_SHIFT * 8)) pcp->batch = PAGE_SHIFT * 8; } static __meminit void setup_zone_pageset(struct zone *zone) { int cpu; zone->pageset = alloc_percpu(struct per_cpu_pageset); for_each_possible_cpu(cpu) { struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu); setup_pageset(pcp, zone_batchsize(zone)); if (percpu_pagelist_fraction) setup_pagelist_highmark(pcp, (zone->present_pages / percpu_pagelist_fraction)); } } /* * Allocate per cpu pagesets and initialize them. * Before this call only boot pagesets were available. 
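 *
 * As a rough illustration (assumed values, not taken from this file): a zone
 * with 262144 present pages (1GB of 4KB pages) gives zone_batchsize() =
 * 262144/1024 = 256, capped at (512KB)/PAGE_SIZE = 128, divided by 4 to 32,
 * then clamped to a 2^n - 1 value, i.e. a batch of 31; setup_pageset() then
 * sets pcp->high to 6 * 31 = 186.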
*/ void __init setup_per_cpu_pageset(void) { struct zone *zone; for_each_populated_zone(zone) setup_zone_pageset(zone); } static noinline __init_refok int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) { int i; struct pglist_data *pgdat = zone->zone_pgdat; size_t alloc_size; /* * The per-page waitqueue mechanism uses hashed waitqueues * per zone. */ zone->wait_table_hash_nr_entries = wait_table_hash_nr_entries(zone_size_pages); zone->wait_table_bits = wait_table_bits(zone->wait_table_hash_nr_entries); alloc_size = zone->wait_table_hash_nr_entries * sizeof(wait_queue_head_t); if (!slab_is_available()) { zone->wait_table = (wait_queue_head_t *) alloc_bootmem_node_nopanic(pgdat, alloc_size); } else { /* * This case means that a zone whose size was 0 gets new memory * via memory hot-add. * But it may be the case that a new node was hot-added. In * this case vmalloc() will not be able to use this new node's * memory - this wait_table must be initialized to use this new * node itself as well. * To use this new node's memory, further consideration will be * necessary. */ zone->wait_table = vmalloc(alloc_size); } if (!zone->wait_table) return -ENOMEM; for(i = 0; i < zone->wait_table_hash_nr_entries; ++i) init_waitqueue_head(zone->wait_table + i); return 0; } static int __zone_pcp_update(void *data) { struct zone *zone = data; int cpu; unsigned long batch = zone_batchsize(zone), flags; for_each_possible_cpu(cpu) { struct per_cpu_pageset *pset; struct per_cpu_pages *pcp; pset = per_cpu_ptr(zone->pageset, cpu); pcp = &pset->pcp; local_irq_save(flags); free_pcppages_bulk(zone, pcp->count, pcp); setup_pageset(pset, batch); local_irq_restore(flags); } return 0; } void zone_pcp_update(struct zone *zone) { stop_machine(__zone_pcp_update, zone, NULL); } static __meminit void zone_pcp_init(struct zone *zone) { /* * per cpu subsystem is not up at this point. The following code * relies on the ability of the linker to provide the * offset of a (static) per cpu variable into the per cpu area. */ zone->pageset = &boot_pageset; if (zone->present_pages) printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n", zone->name, zone->present_pages, zone_batchsize(zone)); } __meminit int init_currently_empty_zone(struct zone *zone, unsigned long zone_start_pfn, unsigned long size, enum memmap_context context) { struct pglist_data *pgdat = zone->zone_pgdat; int ret; ret = zone_wait_table_init(zone, size); if (ret) return ret; pgdat->nr_zones = zone_idx(zone) + 1; zone->zone_start_pfn = zone_start_pfn; mminit_dprintk(MMINIT_TRACE, "memmap_init", "Initialising map node %d zone %lu pfns %lu -> %lu\n", pgdat->node_id, (unsigned long)zone_idx(zone), zone_start_pfn, (zone_start_pfn + size)); zone_init_free_lists(zone); return 0; } #ifdef CONFIG_ARCH_POPULATES_NODE_MAP /* * Basic iterator support. Return the first range of PFNs for a node * Note: nid == MAX_NUMNODES returns first region regardless of node */ static int __meminit first_active_region_index_in_nid(int nid) { int i; for (i = 0; i < nr_nodemap_entries; i++) if (nid == MAX_NUMNODES || early_node_map[i].nid == nid) return i; return -1; } /* * Basic iterator support. 
Return the next active range of PFNs for a node * Note: nid == MAX_NUMNODES returns next region regardless of node */ static int __meminit next_active_region_index_in_nid(int index, int nid) { for (index = index + 1; index < nr_nodemap_entries; index++) if (nid == MAX_NUMNODES || early_node_map[index].nid == nid) return index; return -1; } #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID /* * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. * Architectures may implement their own version but if add_active_range() * was used and there are no special requirements, this is a convenient * alternative */ int __meminit __early_pfn_to_nid(unsigned long pfn) { int i; for (i = 0; i < nr_nodemap_entries; i++) { unsigned long start_pfn = early_node_map[i].start_pfn; unsigned long end_pfn = early_node_map[i].end_pfn; if (start_pfn <= pfn && pfn < end_pfn) return early_node_map[i].nid; } /* This is a memory hole */ return -1; } #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ int __meminit early_pfn_to_nid(unsigned long pfn) { int nid; nid = __early_pfn_to_nid(pfn); if (nid >= 0) return nid; /* just returns 0 */ return 0; } #ifdef CONFIG_NODES_SPAN_OTHER_NODES bool __meminit early_pfn_in_nid(unsigned long pfn, int node) { int nid; nid = __early_pfn_to_nid(pfn); if (nid >= 0 && nid != node) return false; return true; } #endif /* Basic iterator support to walk early_node_map[] */ #define for_each_active_range_index_in_nid(i, nid) \ for (i = first_active_region_index_in_nid(nid); i != -1; \ i = next_active_region_index_in_nid(i, nid)) /** * free_bootmem_with_active_regions - Call free_bootmem_node for each active range * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed. * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node * * If an architecture guarantees that all ranges registered with * add_active_ranges() contain no holes and may be freed, this * this function may be used instead of calling free_bootmem() manually. */ void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn) { int i; for_each_active_range_index_in_nid(i, nid) { unsigned long size_pages = 0; unsigned long end_pfn = early_node_map[i].end_pfn; if (early_node_map[i].start_pfn >= max_low_pfn) continue; if (end_pfn > max_low_pfn) end_pfn = max_low_pfn; size_pages = end_pfn - early_node_map[i].start_pfn; free_bootmem_node(NODE_DATA(early_node_map[i].nid), PFN_PHYS(early_node_map[i].start_pfn), size_pages << PAGE_SHIFT); } } #ifdef CONFIG_HAVE_MEMBLOCK /* * Basic iterator support. Return the last range of PFNs for a node * Note: nid == MAX_NUMNODES returns last region regardless of node */ static int __meminit last_active_region_index_in_nid(int nid) { int i; for (i = nr_nodemap_entries - 1; i >= 0; i--) if (nid == MAX_NUMNODES || early_node_map[i].nid == nid) return i; return -1; } /* * Basic iterator support. 
Return the previous active range of PFNs for a node * Note: nid == MAX_NUMNODES returns next region regardless of node */ static int __meminit previous_active_region_index_in_nid(int index, int nid) { for (index = index - 1; index >= 0; index--) if (nid == MAX_NUMNODES || early_node_map[index].nid == nid) return index; return -1; } #define for_each_active_range_index_in_nid_reverse(i, nid) \ for (i = last_active_region_index_in_nid(nid); i != -1; \ i = previous_active_region_index_in_nid(i, nid)) u64 __init find_memory_core_early(int nid, u64 size, u64 align, u64 goal, u64 limit) { int i; /* Need to go over early_node_map to find out good range for node */ for_each_active_range_index_in_nid_reverse(i, nid) { u64 addr; u64 ei_start, ei_last; u64 final_start, final_end; ei_last = early_node_map[i].end_pfn; ei_last <<= PAGE_SHIFT; ei_start = early_node_map[i].start_pfn; ei_start <<= PAGE_SHIFT; final_start = max(ei_start, goal); final_end = min(ei_last, limit); if (final_start >= final_end) continue; addr = memblock_find_in_range(final_start, final_end, size, align); if (addr == MEMBLOCK_ERROR) continue; return addr; } return MEMBLOCK_ERROR; } #endif int __init add_from_early_node_map(struct range *range, int az, int nr_range, int nid) { int i; u64 start, end; /* need to go over early_node_map to find out good range for node */ for_each_active_range_index_in_nid(i, nid) { start = early_node_map[i].start_pfn; end = early_node_map[i].end_pfn; nr_range = add_range(range, az, nr_range, start, end); } return nr_range; } void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data) { int i; int ret; for_each_active_range_index_in_nid(i, nid) { ret = work_fn(early_node_map[i].start_pfn, early_node_map[i].end_pfn, data); if (ret) break; } } /** * sparse_memory_present_with_active_regions - Call memory_present for each active range * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used. * * If an architecture guarantees that all ranges registered with * add_active_ranges() contain no holes and may be freed, this * function may be used instead of calling memory_present() manually. */ void __init sparse_memory_present_with_active_regions(int nid) { int i; for_each_active_range_index_in_nid(i, nid) memory_present(early_node_map[i].nid, early_node_map[i].start_pfn, early_node_map[i].end_pfn); } /** * get_pfn_range_for_nid - Return the start and end page frames for a node * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. * @start_pfn: Passed by reference. On return, it will have the node start_pfn. * @end_pfn: Passed by reference. On return, it will have the node end_pfn. * * It returns the start and end page frame of a node based on information * provided by an arch calling add_active_range(). If called for a node * with no available memory, a warning is printed and the start and end * PFNs will be 0. */ void __meminit get_pfn_range_for_nid(unsigned int nid, unsigned long *start_pfn, unsigned long *end_pfn) { int i; *start_pfn = -1UL; *end_pfn = 0; for_each_active_range_index_in_nid(i, nid) { *start_pfn = min(*start_pfn, early_node_map[i].start_pfn); *end_pfn = max(*end_pfn, early_node_map[i].end_pfn); } if (*start_pfn == -1UL) *start_pfn = 0; } /* * This finds a zone that can be used for ZONE_MOVABLE pages. 
The * assumption is made that zones within a node are ordered in monotonic * increasing memory addresses so that the "highest" populated zone is used */ static void __init find_usable_zone_for_movable(void) { int zone_index; for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { if (zone_index == ZONE_MOVABLE) continue; if (arch_zone_highest_possible_pfn[zone_index] > arch_zone_lowest_possible_pfn[zone_index]) break; } VM_BUG_ON(zone_index == -1); movable_zone = zone_index; } /* * The zone ranges provided by the architecture do not include ZONE_MOVABLE * because it is sized independent of architecture. Unlike the other zones, * the starting point for ZONE_MOVABLE is not fixed. It may be different * in each node depending on the size of each node and how evenly kernelcore * is distributed. This helper function adjusts the zone ranges * provided by the architecture for a given node by using the end of the * highest usable zone for ZONE_MOVABLE. This preserves the assumption that * zones within a node are in order of monotonic increases memory addresses */ static void __meminit adjust_zone_range_for_zone_movable(int nid, unsigned long zone_type, unsigned long node_start_pfn, unsigned long node_end_pfn, unsigned long *zone_start_pfn, unsigned long *zone_end_pfn) { /* Only adjust if ZONE_MOVABLE is on this node */ if (zone_movable_pfn[nid]) { /* Size ZONE_MOVABLE */ if (zone_type == ZONE_MOVABLE) { *zone_start_pfn = zone_movable_pfn[nid]; *zone_end_pfn = min(node_end_pfn, arch_zone_highest_possible_pfn[movable_zone]); /* Adjust for ZONE_MOVABLE starting within this range */ } else if (*zone_start_pfn < zone_movable_pfn[nid] && *zone_end_pfn > zone_movable_pfn[nid]) { *zone_end_pfn = zone_movable_pfn[nid]; /* Check if this whole range is within ZONE_MOVABLE */ } else if (*zone_start_pfn >= zone_movable_pfn[nid]) *zone_start_pfn = *zone_end_pfn; } } /* * Return the number of pages a zone spans in a node, including holes * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() */ static unsigned long __meminit zone_spanned_pages_in_node(int nid, unsigned long zone_type, unsigned long *ignored) { unsigned long node_start_pfn, node_end_pfn; unsigned long zone_start_pfn, zone_end_pfn; /* Get the start and end of the node and zone */ get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn); zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type]; zone_end_pfn = arch_zone_highest_possible_pfn[zone_type]; adjust_zone_range_for_zone_movable(nid, zone_type, node_start_pfn, node_end_pfn, &zone_start_pfn, &zone_end_pfn); /* Check that this node has pages within the zone's required range */ if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn) return 0; /* Move the zone boundaries inside the node if necessary */ zone_end_pfn = min(zone_end_pfn, node_end_pfn); zone_start_pfn = max(zone_start_pfn, node_start_pfn); /* Return the spanned pages */ return zone_end_pfn - zone_start_pfn; } /* * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, * then all holes in the requested range will be accounted for. 
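 *
 * For example (illustrative): with active ranges [0, 100) and [200, 300)
 * registered on a node, a request for [0, 300) reports the 100 pfns in
 * [100, 200) as holes.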
*/ unsigned long __meminit __absent_pages_in_range(int nid, unsigned long range_start_pfn, unsigned long range_end_pfn) { int i = 0; unsigned long prev_end_pfn = 0, hole_pages = 0; unsigned long start_pfn; /* Find the end_pfn of the first active range of pfns in the node */ i = first_active_region_index_in_nid(nid); if (i == -1) return 0; prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn); /* Account for ranges before physical memory on this node */ if (early_node_map[i].start_pfn > range_start_pfn) hole_pages = prev_end_pfn - range_start_pfn; /* Find all holes for the zone within the node */ for (; i != -1; i = next_active_region_index_in_nid(i, nid)) { /* No need to continue if prev_end_pfn is outside the zone */ if (prev_end_pfn >= range_end_pfn) break; /* Make sure the end of the zone is not within the hole */ start_pfn = min(early_node_map[i].start_pfn, range_end_pfn); prev_end_pfn = max(prev_end_pfn, range_start_pfn); /* Update the hole size cound and move on */ if (start_pfn > range_start_pfn) { BUG_ON(prev_end_pfn > start_pfn); hole_pages += start_pfn - prev_end_pfn; } prev_end_pfn = early_node_map[i].end_pfn; } /* Account for ranges past physical memory on this node */ if (range_end_pfn > prev_end_pfn) hole_pages += range_end_pfn - max(range_start_pfn, prev_end_pfn); return hole_pages; } /** * absent_pages_in_range - Return number of page frames in holes within a range * @start_pfn: The start PFN to start searching for holes * @end_pfn: The end PFN to stop searching for holes * * It returns the number of pages frames in memory holes within a range. */ unsigned long __init absent_pages_in_range(unsigned long start_pfn, unsigned long end_pfn) { return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); } /* Return the number of page frames in holes in a zone on a node */ static unsigned long __meminit zone_absent_pages_in_node(int nid, unsigned long zone_type, unsigned long *ignored) { unsigned long node_start_pfn, node_end_pfn; unsigned long zone_start_pfn, zone_end_pfn; get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn); zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type], node_start_pfn); zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type], node_end_pfn); adjust_zone_range_for_zone_movable(nid, zone_type, node_start_pfn, node_end_pfn, &zone_start_pfn, &zone_end_pfn); return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); } #else static inline unsigned long __meminit zone_spanned_pages_in_node(int nid, unsigned long zone_type, unsigned long *zones_size) { return zones_size[zone_type]; } static inline unsigned long __meminit zone_absent_pages_in_node(int nid, unsigned long zone_type, unsigned long *zholes_size) { if (!zholes_size) return 0; return zholes_size[zone_type]; } #endif static void __meminit calculate_node_totalpages(struct pglist_data *pgdat, unsigned long *zones_size, unsigned long *zholes_size) { unsigned long realtotalpages, totalpages = 0; enum zone_type i; for (i = 0; i < MAX_NR_ZONES; i++) totalpages += zone_spanned_pages_in_node(pgdat->node_id, i, zones_size); pgdat->node_spanned_pages = totalpages; realtotalpages = totalpages; for (i = 0; i < MAX_NR_ZONES; i++) realtotalpages -= zone_absent_pages_in_node(pgdat->node_id, i, zholes_size); pgdat->node_present_pages = realtotalpages; printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages); } #ifndef CONFIG_SPARSEMEM /* * Calculate the size of the zone->blockflags rounded to an unsigned long * Start by making sure zonesize is a 
multiple of pageblock_order by rounding * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally * round what is now in bits to nearest long in bits, then return it in * bytes. */ static unsigned long __init usemap_size(unsigned long zonesize) { unsigned long usemapsize; usemapsize = roundup(zonesize, pageblock_nr_pages); usemapsize = usemapsize >> pageblock_order; usemapsize *= NR_PAGEBLOCK_BITS; usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long)); return usemapsize / 8; } static void __init setup_usemap(struct pglist_data *pgdat, struct zone *zone, unsigned long zonesize) { unsigned long usemapsize = usemap_size(zonesize); zone->pageblock_flags = NULL; if (usemapsize) zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat, usemapsize); } #else static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone, unsigned long zonesize) {} #endif /* CONFIG_SPARSEMEM */ #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE /* Return a sensible default order for the pageblock size. */ static inline int pageblock_default_order(void) { if (HPAGE_SHIFT > PAGE_SHIFT) return HUGETLB_PAGE_ORDER; return MAX_ORDER-1; } /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ static inline void __init set_pageblock_order(unsigned int order) { /* Check that pageblock_nr_pages has not already been setup */ if (pageblock_order) return; /* * Assume the largest contiguous order of interest is a huge page. * This value may be variable depending on boot parameters on IA64 */ pageblock_order = order; } #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ /* * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() * and pageblock_default_order() are unused as pageblock_order is set * at compile-time. See include/linux/pageblock-flags.h for the values of * pageblock_order based on the kernel config */ static inline int pageblock_default_order(unsigned int order) { return MAX_ORDER-1; } #define set_pageblock_order(x) do {} while (0) #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ /* * Set up the zone data structures: * - mark all pages reserved * - mark all memory queues empty * - clear the memory bitmaps */ static void __paginginit free_area_init_core(struct pglist_data *pgdat, unsigned long *zones_size, unsigned long *zholes_size) { enum zone_type j; int nid = pgdat->node_id; unsigned long zone_start_pfn = pgdat->node_start_pfn; int ret; pgdat_resize_init(pgdat); pgdat->nr_zones = 0; init_waitqueue_head(&pgdat->kswapd_wait); pgdat->kswapd_max_order = 0; pgdat_page_cgroup_init(pgdat); for (j = 0; j < MAX_NR_ZONES; j++) { struct zone *zone = pgdat->node_zones + j; unsigned long size, realsize, memmap_pages; enum lru_list l; size = zone_spanned_pages_in_node(nid, j, zones_size); realsize = size - zone_absent_pages_in_node(nid, j, zholes_size); /* * Adjust realsize so that it accounts for how much memory * is used by this zone for memmap. 
This affects the watermark * and per-cpu initialisations */ memmap_pages = PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT; if (realsize >= memmap_pages) { realsize -= memmap_pages; if (memmap_pages) printk(KERN_DEBUG " %s zone: %lu pages used for memmap\n", zone_names[j], memmap_pages); } else printk(KERN_WARNING " %s zone: %lu pages exceeds realsize %lu\n", zone_names[j], memmap_pages, realsize); /* Account for reserved pages */ if (j == 0 && realsize > dma_reserve) { realsize -= dma_reserve; printk(KERN_DEBUG " %s zone: %lu pages reserved\n", zone_names[0], dma_reserve); } if (!is_highmem_idx(j)) nr_kernel_pages += realsize; nr_all_pages += realsize; zone->spanned_pages = size; zone->present_pages = realsize; #ifdef CONFIG_NUMA zone->node = nid; zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio) / 100; zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100; #endif zone->name = zone_names[j]; spin_lock_init(&zone->lock); spin_lock_init(&zone->lru_lock); zone_seqlock_init(zone); zone->zone_pgdat = pgdat; zone_pcp_init(zone); for_each_lru(l) { INIT_LIST_HEAD(&zone->lru[l].list); zone->reclaim_stat.nr_saved_scan[l] = 0; } zone->reclaim_stat.recent_rotated[0] = 0; zone->reclaim_stat.recent_rotated[1] = 0; zone->reclaim_stat.recent_scanned[0] = 0; zone->reclaim_stat.recent_scanned[1] = 0; zap_zone_vm_stats(zone); zone->flags = 0; if (!size) continue; set_pageblock_order(pageblock_default_order()); setup_usemap(pgdat, zone, size); ret = init_currently_empty_zone(zone, zone_start_pfn, size, MEMMAP_EARLY); BUG_ON(ret); memmap_init(size, nid, j, zone_start_pfn); zone_start_pfn += size; } } static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) { /* Skip empty nodes */ if (!pgdat->node_spanned_pages) return; #ifdef CONFIG_FLAT_NODE_MEM_MAP /* ia64 gets its own node_mem_map, before this, without bootmem */ if (!pgdat->node_mem_map) { unsigned long size, start, end; struct page *map; /* * The zone's endpoints aren't required to be MAX_ORDER * aligned but the node_mem_map endpoints must be in order * for the buddy allocator to function correctly. */ start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); end = pgdat->node_start_pfn + pgdat->node_spanned_pages; end = ALIGN(end, MAX_ORDER_NR_PAGES); size = (end - start) * sizeof(struct page); map = alloc_remap(pgdat->node_id, size); if (!map) map = alloc_bootmem_node_nopanic(pgdat, size); pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); } #ifndef CONFIG_NEED_MULTIPLE_NODES /* * With no DISCONTIG, the global mem_map is just set as node 0's */ if (pgdat == NODE_DATA(0)) { mem_map = NODE_DATA(0)->node_mem_map; #ifdef CONFIG_ARCH_POPULATES_NODE_MAP if (page_to_pfn(mem_map) != pgdat->node_start_pfn) mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET); #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ } #endif #endif /* CONFIG_FLAT_NODE_MEM_MAP */ } void __paginginit free_area_init_node(int nid, unsigned long *zones_size, unsigned long node_start_pfn, unsigned long *zholes_size) { pg_data_t *pgdat = NODE_DATA(nid); pgdat->node_id = nid; pgdat->node_start_pfn = node_start_pfn; calculate_node_totalpages(pgdat, zones_size, zholes_size); alloc_node_mem_map(pgdat); #ifdef CONFIG_FLAT_NODE_MEM_MAP printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n", nid, (unsigned long)pgdat, (unsigned long)pgdat->node_mem_map); #endif free_area_init_core(pgdat, zones_size, zholes_size); } #ifdef CONFIG_ARCH_POPULATES_NODE_MAP #if MAX_NUMNODES > 1 /* * Figure out the number of possible node ids. 
*/
static void __init setup_nr_node_ids(void)
{
	unsigned int node;
	unsigned int highest = 0;

	for_each_node_mask(node, node_possible_map)
		highest = node;
	nr_node_ids = highest + 1;
}
#else
static inline void setup_nr_node_ids(void)
{
}
#endif

/**
 * add_active_range - Register a range of PFNs backed by physical memory
 * @nid: The node ID the range resides on
 * @start_pfn: The start PFN of the available physical memory
 * @end_pfn: The end PFN of the available physical memory
 *
 * These ranges are stored in an early_node_map[] and later used by
 * free_area_init_nodes() to calculate zone sizes and holes. If the
 * range spans a memory hole, it is up to the architecture to ensure
 * the memory is not freed by the bootmem allocator. If possible,
 * the range being registered will be merged with existing ranges.
 */
void __init add_active_range(unsigned int nid, unsigned long start_pfn,
						unsigned long end_pfn)
{
	int i;

	mminit_dprintk(MMINIT_TRACE, "memory_register",
			"Entering add_active_range(%d, %#lx, %#lx) "
			"%d entries of %d used\n",
			nid, start_pfn, end_pfn,
			nr_nodemap_entries, MAX_ACTIVE_REGIONS);

	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);

	/* Merge with existing active regions if possible */
	for (i = 0; i < nr_nodemap_entries; i++) {
		if (early_node_map[i].nid != nid)
			continue;

		/* Skip if an existing region covers this new one */
		if (start_pfn >= early_node_map[i].start_pfn &&
				end_pfn <= early_node_map[i].end_pfn)
			return;

		/* Merge forward if suitable */
		if (start_pfn <= early_node_map[i].end_pfn &&
				end_pfn > early_node_map[i].end_pfn) {
			early_node_map[i].end_pfn = end_pfn;
			return;
		}

		/* Merge backward if suitable */
		if (start_pfn < early_node_map[i].start_pfn &&
				end_pfn >= early_node_map[i].start_pfn) {
			early_node_map[i].start_pfn = start_pfn;
			return;
		}
	}

	/* Check that early_node_map is large enough */
	if (i >= MAX_ACTIVE_REGIONS) {
		printk(KERN_CRIT "More than %d memory regions, truncating\n",
							MAX_ACTIVE_REGIONS);
		return;
	}

	early_node_map[i].nid = nid;
	early_node_map[i].start_pfn = start_pfn;
	early_node_map[i].end_pfn = end_pfn;
	nr_nodemap_entries = i + 1;
}

/**
 * remove_active_range - Shrink an existing registered range of PFNs
 * @nid: The node id the range is on that should be shrunk
 * @start_pfn: The new start PFN of the range
 * @end_pfn: The new end PFN of the range
 *
 * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
 * The map is kept near the end of the physical page range that has already
 * been registered. This function allows an arch to shrink an existing
 * registered range.
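 *
 * For example (illustrative): removing [50, 100) from a registered range
 * [0, 200) leaves [0, 50) and re-registers [100, 200) as a separate range.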
*/ void __init remove_active_range(unsigned int nid, unsigned long start_pfn, unsigned long end_pfn) { int i, j; int removed = 0; printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n", nid, start_pfn, end_pfn); /* Find the old active region end and shrink */ for_each_active_range_index_in_nid(i, nid) { if (early_node_map[i].start_pfn >= start_pfn && early_node_map[i].end_pfn <= end_pfn) { /* clear it */ early_node_map[i].start_pfn = 0; early_node_map[i].end_pfn = 0; removed = 1; continue; } if (early_node_map[i].start_pfn < start_pfn && early_node_map[i].end_pfn > start_pfn) { unsigned long temp_end_pfn = early_node_map[i].end_pfn; early_node_map[i].end_pfn = start_pfn; if (temp_end_pfn > end_pfn) add_active_range(nid, end_pfn, temp_end_pfn); continue; } if (early_node_map[i].start_pfn >= start_pfn && early_node_map[i].end_pfn > end_pfn && early_node_map[i].start_pfn < end_pfn) { early_node_map[i].start_pfn = end_pfn; continue; } } if (!removed) return; /* remove the blank ones */ for (i = nr_nodemap_entries - 1; i > 0; i--) { if (early_node_map[i].nid != nid) continue; if (early_node_map[i].end_pfn) continue; /* we found it, get rid of it */ for (j = i; j < nr_nodemap_entries - 1; j++) memcpy(&early_node_map[j], &early_node_map[j+1], sizeof(early_node_map[j])); j = nr_nodemap_entries - 1; memset(&early_node_map[j], 0, sizeof(early_node_map[j])); nr_nodemap_entries--; } } /** * remove_all_active_ranges - Remove all currently registered regions * * During discovery, it may be found that a table like SRAT is invalid * and an alternative discovery method must be used. This function removes * all currently registered regions. */ void __init remove_all_active_ranges(void) { memset(early_node_map, 0, sizeof(early_node_map)); nr_nodemap_entries = 0; } /* Compare two active node_active_regions */ static int __init cmp_node_active_region(const void *a, const void *b) { struct node_active_region *arange = (struct node_active_region *)a; struct node_active_region *brange = (struct node_active_region *)b; /* Done this way to avoid overflows */ if (arange->start_pfn > brange->start_pfn) return 1; if (arange->start_pfn < brange->start_pfn) return -1; return 0; } /* sort the node_map by start_pfn */ void __init sort_node_map(void) { sort(early_node_map, (size_t)nr_nodemap_entries, sizeof(struct node_active_region), cmp_node_active_region, NULL); } /* Find the lowest pfn for a node */ static unsigned long __init find_min_pfn_for_node(int nid) { int i; unsigned long min_pfn = ULONG_MAX; /* Assuming a sorted map, the first range found has the starting pfn */ for_each_active_range_index_in_nid(i, nid) min_pfn = min(min_pfn, early_node_map[i].start_pfn); if (min_pfn == ULONG_MAX) { printk(KERN_WARNING "Could not find start_pfn for node %d\n", nid); return 0; } return min_pfn; } /** * find_min_pfn_with_active_regions - Find the minimum PFN registered * * It returns the minimum PFN based on information provided via * add_active_range(). */ unsigned long __init find_min_pfn_with_active_regions(void) { return find_min_pfn_for_node(MAX_NUMNODES); } /* * early_calculate_totalpages() * Sum pages in active regions for movable zone. * Populate N_HIGH_MEMORY for calculating usable_nodes. 
*/ static unsigned long __init early_calculate_totalpages(void) { int i; unsigned long totalpages = 0; for (i = 0; i < nr_nodemap_entries; i++) { unsigned long pages = early_node_map[i].end_pfn - early_node_map[i].start_pfn; totalpages += pages; if (pages) node_set_state(early_node_map[i].nid, N_HIGH_MEMORY); } return totalpages; } /* * Find the PFN the Movable zone begins in each node. Kernel memory * is spread evenly between nodes as long as the nodes have enough * memory. When they don't, some nodes will have more kernelcore than * others */ static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn) { int i, nid; unsigned long usable_startpfn; unsigned long kernelcore_node, kernelcore_remaining; /* save the state before borrow the nodemask */ nodemask_t saved_node_state = node_states[N_HIGH_MEMORY]; unsigned long totalpages = early_calculate_totalpages(); int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]); /* * If movablecore was specified, calculate what size of * kernelcore that corresponds so that memory usable for * any allocation type is evenly spread. If both kernelcore * and movablecore are specified, then the value of kernelcore * will be used for required_kernelcore if it's greater than * what movablecore would have allowed. */ if (required_movablecore) { unsigned long corepages; /* * Round-up so that ZONE_MOVABLE is at least as large as what * was requested by the user */ required_movablecore = roundup(required_movablecore, MAX_ORDER_NR_PAGES); corepages = totalpages - required_movablecore; required_kernelcore = max(required_kernelcore, corepages); } /* If kernelcore was not specified, there is no ZONE_MOVABLE */ if (!required_kernelcore) goto out; /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ find_usable_zone_for_movable(); usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; restart: /* Spread kernelcore memory as evenly as possible throughout nodes */ kernelcore_node = required_kernelcore / usable_nodes; for_each_node_state(nid, N_HIGH_MEMORY) { /* * Recalculate kernelcore_node if the division per node * now exceeds what is necessary to satisfy the requested * amount of memory for the kernel */ if (required_kernelcore < kernelcore_node) kernelcore_node = required_kernelcore / usable_nodes; /* * As the map is walked, we track how much memory is usable * by the kernel using kernelcore_remaining. When it is * 0, the rest of the node is usable by ZONE_MOVABLE */ kernelcore_remaining = kernelcore_node; /* Go through each range of PFNs within this node */ for_each_active_range_index_in_nid(i, nid) { unsigned long start_pfn, end_pfn; unsigned long size_pages; start_pfn = max(early_node_map[i].start_pfn, zone_movable_pfn[nid]); end_pfn = early_node_map[i].end_pfn; if (start_pfn >= end_pfn) continue; /* Account for what is only usable for kernelcore */ if (start_pfn < usable_startpfn) { unsigned long kernel_pages; kernel_pages = min(end_pfn, usable_startpfn) - start_pfn; kernelcore_remaining -= min(kernel_pages, kernelcore_remaining); required_kernelcore -= min(kernel_pages, required_kernelcore); /* Continue if range is now fully accounted */ if (end_pfn <= usable_startpfn) { /* * Push zone_movable_pfn to the end so * that if we have to rebalance * kernelcore across nodes, we will * not double account here */ zone_movable_pfn[nid] = end_pfn; continue; } start_pfn = usable_startpfn; } /* * The usable PFN range for ZONE_MOVABLE is from * start_pfn->end_pfn. 
Calculate size_pages as the
			 * number of pages used as kernelcore
			 */
			size_pages = end_pfn - start_pfn;
			if (size_pages > kernelcore_remaining)
				size_pages = kernelcore_remaining;
			zone_movable_pfn[nid] = start_pfn + size_pages;

			/*
			 * Some kernelcore has been met, update counts and
			 * break if the kernelcore for this node has been
			 * satisfied
			 */
			required_kernelcore -= min(required_kernelcore,
								size_pages);
			kernelcore_remaining -= size_pages;
			if (!kernelcore_remaining)
				break;
		}
	}

	/*
	 * If there is still required_kernelcore, we do another pass with one
	 * less node in the count. This will push zone_movable_pfn[nid] further
	 * along on the nodes that still have memory until kernelcore is
	 * satisfied
	 */
	usable_nodes--;
	if (usable_nodes && required_kernelcore > usable_nodes)
		goto restart;

	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		zone_movable_pfn[nid] =
			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);

out:
	/* restore the node_state */
	node_states[N_HIGH_MEMORY] = saved_node_state;
}

/* Any regular memory on that node ? */
static void check_for_regular_memory(pg_data_t *pgdat)
{
#ifdef CONFIG_HIGHMEM
	enum zone_type zone_type;

	for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
		struct zone *zone = &pgdat->node_zones[zone_type];
		if (zone->present_pages)
			node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
	}
#endif
}

/**
 * free_area_init_nodes - Initialise all pg_data_t and zone data
 * @max_zone_pfn: an array of max PFNs for each zone
 *
 * This will call free_area_init_node() for each active node in the system.
 * Using the page ranges provided by add_active_range(), the size of each
 * zone in each node and their holes is calculated. If the maximum PFNs of
 * two adjacent zones match, it is assumed that the zone is empty.
 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
 * starts where the previous one ended. For example, ZONE_DMA32 starts
 * at arch_max_dma_pfn.
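 *
 * A caller might, for instance, fill max_zone_pfn[ZONE_DMA] = 4096,
 * max_zone_pfn[ZONE_DMA32] = 1048576 and max_zone_pfn[ZONE_NORMAL] = max_pfn
 * on a 64-bit machine (illustrative values only, not from this file).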
*/ void __init free_area_init_nodes(unsigned long *max_zone_pfn) { unsigned long nid; int i; /* Sort early_node_map as initialisation assumes it is sorted */ sort_node_map(); /* Record where the zone boundaries are */ memset(arch_zone_lowest_possible_pfn, 0, sizeof(arch_zone_lowest_possible_pfn)); memset(arch_zone_highest_possible_pfn, 0, sizeof(arch_zone_highest_possible_pfn)); arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions(); arch_zone_highest_possible_pfn[0] = max_zone_pfn[0]; for (i = 1; i < MAX_NR_ZONES; i++) { if (i == ZONE_MOVABLE) continue; arch_zone_lowest_possible_pfn[i] = arch_zone_highest_possible_pfn[i-1]; arch_zone_highest_possible_pfn[i] = max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]); } arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0; arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0; /* Find the PFNs that ZONE_MOVABLE begins at in each node */ memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); find_zone_movable_pfns_for_nodes(zone_movable_pfn); /* Print out the zone ranges */ printk("Zone PFN ranges:\n"); for (i = 0; i < MAX_NR_ZONES; i++) { if (i == ZONE_MOVABLE) continue; printk(" %-8s ", zone_names[i]); if (arch_zone_lowest_possible_pfn[i] == arch_zone_highest_possible_pfn[i]) printk("empty\n"); else printk("%0#10lx -> %0#10lx\n", arch_zone_lowest_possible_pfn[i], arch_zone_highest_possible_pfn[i]); } /* Print out the PFNs ZONE_MOVABLE begins at in each node */ printk("Movable zone start PFN for each node\n"); for (i = 0; i < MAX_NUMNODES; i++) { if (zone_movable_pfn[i]) printk(" Node %d: %lu\n", i, zone_movable_pfn[i]); } /* Print out the early_node_map[] */ printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries); for (i = 0; i < nr_nodemap_entries; i++) printk(" %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid, early_node_map[i].start_pfn, early_node_map[i].end_pfn); /* Initialise every node */ mminit_verify_pageflags_layout(); setup_nr_node_ids(); for_each_online_node(nid) { pg_data_t *pgdat = NODE_DATA(nid); free_area_init_node(nid, NULL, find_min_pfn_for_node(nid), NULL); /* Any memory on that node */ if (pgdat->node_present_pages) node_set_state(nid, N_HIGH_MEMORY); check_for_regular_memory(pgdat); } } static int __init cmdline_parse_core(char *p, unsigned long *core) { unsigned long long coremem; if (!p) return -EINVAL; coremem = memparse(p, &p); *core = coremem >> PAGE_SHIFT; /* Paranoid check that UL is enough for the coremem value */ WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX); return 0; } /* * kernelcore=size sets the amount of memory for use for allocations that * cannot be reclaimed or migrated. */ static int __init cmdline_parse_kernelcore(char *p) { return cmdline_parse_core(p, &required_kernelcore); } /* * movablecore=size sets the amount of memory for use for allocations that * can be reclaimed or migrated. */ static int __init cmdline_parse_movablecore(char *p) { return cmdline_parse_core(p, &required_movablecore); } early_param("kernelcore", cmdline_parse_kernelcore); early_param("movablecore", cmdline_parse_movablecore); #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ /** * set_dma_reserve - set the specified number of pages reserved in the first zone * @new_dma_reserve: The number of pages to mark reserved * * The per-cpu batchsize and zone watermarks are determined by present_pages. * In the DMA zone, a significant percentage may be consumed by kernel image * and other unfreeable allocations which can skew the watermarks badly. 
This * function may optionally be used to account for unfreeable pages in the * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and * smaller per-cpu batchsize. */ void __init set_dma_reserve(unsigned long new_dma_reserve) { dma_reserve = new_dma_reserve; } void __init free_area_init(unsigned long *zones_size) { free_area_init_node(0, zones_size, __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); } static int page_alloc_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { int cpu = (unsigned long)hcpu; if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { drain_pages(cpu); /* * Spill the event counters of the dead processor * into the current processors event counters. * This artificially elevates the count of the current * processor. */ vm_events_fold_cpu(cpu); /* * Zero the differential counters of the dead processor * so that the vm statistics are consistent. * * This is only okay since the processor is dead and cannot * race with what we are doing. */ refresh_cpu_vm_stats(cpu); } return NOTIFY_OK; } void __init page_alloc_init(void) { hotcpu_notifier(page_alloc_cpu_notify, 0); } /* * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio * or min_free_kbytes changes. */ static void calculate_totalreserve_pages(void) { struct pglist_data *pgdat; unsigned long reserve_pages = 0; enum zone_type i, j; for_each_online_pgdat(pgdat) { for (i = 0; i < MAX_NR_ZONES; i++) { struct zone *zone = pgdat->node_zones + i; unsigned long max = 0; /* Find valid and maximum lowmem_reserve in the zone */ for (j = i; j < MAX_NR_ZONES; j++) { if (zone->lowmem_reserve[j] > max) max = zone->lowmem_reserve[j]; } /* we treat the high watermark as reserved pages. */ max += high_wmark_pages(zone); if (max > zone->present_pages) max = zone->present_pages; reserve_pages += max; } } totalreserve_pages = reserve_pages; } /* * setup_per_zone_lowmem_reserve - called whenever * sysctl_lower_zone_reserve_ratio changes. Ensures that each zone * has a correct pages reserved value, so an adequate number of * pages are left in the zone after a successful __alloc_pages(). */ static void setup_per_zone_lowmem_reserve(void) { struct pglist_data *pgdat; enum zone_type j, idx; for_each_online_pgdat(pgdat) { for (j = 0; j < MAX_NR_ZONES; j++) { struct zone *zone = pgdat->node_zones + j; unsigned long present_pages = zone->present_pages; zone->lowmem_reserve[j] = 0; idx = j; while (idx) { struct zone *lower_zone; idx--; if (sysctl_lowmem_reserve_ratio[idx] < 1) sysctl_lowmem_reserve_ratio[idx] = 1; lower_zone = pgdat->node_zones + idx; lower_zone->lowmem_reserve[j] = present_pages / sysctl_lowmem_reserve_ratio[idx]; present_pages += lower_zone->present_pages; } } } /* update totalreserve_pages */ calculate_totalreserve_pages(); } /** * setup_per_zone_wmarks - called when min_free_kbytes changes * or when memory is hot-{added|removed} * * Ensures that the watermark[min,low,high] values for each zone are set * correctly with respect to min_free_kbytes. 
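 *
 * Worked example (editor's illustration; the numbers are hypothetical):
 * with min_free_kbytes = 4096 on a 4KB-page system, pages_min is 1024
 * pages. A lowmem zone holding half of all lowmem pages gets tmp = 512,
 * so its watermarks become WMARK_MIN = 512, WMARK_LOW = 512 + 512/4 = 640
 * and WMARK_HIGH = 512 + 512/2 = 768 pages.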
*/ void setup_per_zone_wmarks(void) { unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); unsigned long lowmem_pages = 0; struct zone *zone; unsigned long flags; /* Calculate total number of !ZONE_HIGHMEM pages */ for_each_zone(zone) { if (!is_highmem(zone)) lowmem_pages += zone->present_pages; } for_each_zone(zone) { u64 tmp; spin_lock_irqsave(&zone->lock, flags); tmp = (u64)pages_min * zone->present_pages; do_div(tmp, lowmem_pages); if (is_highmem(zone)) { /* * __GFP_HIGH and PF_MEMALLOC allocations usually don't * need highmem pages, so cap pages_min to a small * value here. * * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN) * deltas controls asynch page reclaim, and so should * not be capped for highmem. */ int min_pages; min_pages = zone->present_pages / 1024; if (min_pages < SWAP_CLUSTER_MAX) min_pages = SWAP_CLUSTER_MAX; if (min_pages > 128) min_pages = 128; zone->watermark[WMARK_MIN] = min_pages; } else { /* * If it's a lowmem zone, reserve a number of pages * proportionate to the zone's size. */ zone->watermark[WMARK_MIN] = tmp; } zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2); zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1); setup_zone_migrate_reserve(zone); spin_unlock_irqrestore(&zone->lock, flags); } /* update totalreserve_pages */ calculate_totalreserve_pages(); } /* * The inactive anon list should be small enough that the VM never has to * do too much work, but large enough that each inactive page has a chance * to be referenced again before it is swapped out. * * The inactive_anon ratio is the target ratio of ACTIVE_ANON to * INACTIVE_ANON pages on this zone's LRU, maintained by the * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of * the anonymous pages are kept on the inactive list. * * total target max * memory ratio inactive anon * ------------------------------------- * 10MB 1 5MB * 100MB 1 50MB * 1GB 3 250MB * 10GB 10 0.9GB * 100GB 31 3GB * 1TB 101 10GB * 10TB 320 32GB */ void calculate_zone_inactive_ratio(struct zone *zone) { unsigned int gb, ratio; /* Zone size in gigabytes */ gb = zone->present_pages >> (30 - PAGE_SHIFT); if (gb) ratio = int_sqrt(10 * gb); else ratio = 1; zone->inactive_ratio = ratio; } static void __init setup_per_zone_inactive_ratio(void) { struct zone *zone; for_each_zone(zone) calculate_zone_inactive_ratio(zone); } /* * Initialise min_free_kbytes. * * For small machines we want it small (128k min). For large machines * we want it large (64MB max). But it is not linear, because network * bandwidth does not increase linearly with machine size. We use * * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: * min_free_kbytes = sqrt(lowmem_kbytes * 16) * * which yields * * 16MB: 512k * 32MB: 724k * 64MB: 1024k * 128MB: 1448k * 256MB: 2048k * 512MB: 2896k * 1024MB: 4096k * 2048MB: 5792k * 4096MB: 8192k * 8192MB: 11584k * 16384MB: 16384k */ static int __init init_per_zone_wmark_min(void) { unsigned long lowmem_kbytes; lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); min_free_kbytes = int_sqrt(lowmem_kbytes * 16); if (min_free_kbytes < 128) min_free_kbytes = 128; if (min_free_kbytes > 65536) min_free_kbytes = 65536; setup_per_zone_wmarks(); setup_per_zone_lowmem_reserve(); setup_per_zone_inactive_ratio(); return 0; } module_init(init_per_zone_wmark_min) /* * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so * that we can call two helper functions whenever min_free_kbytes * changes. 
*/ int min_free_kbytes_sysctl_handler(ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { proc_dointvec(table, write, buffer, length, ppos); if (write) setup_per_zone_wmarks(); return 0; } #ifdef CONFIG_NUMA int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { struct zone *zone; int rc; rc = proc_dointvec_minmax(table, write, buffer, length, ppos); if (rc) return rc; for_each_zone(zone) zone->min_unmapped_pages = (zone->present_pages * sysctl_min_unmapped_ratio) / 100; return 0; } int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { struct zone *zone; int rc; rc = proc_dointvec_minmax(table, write, buffer, length, ppos); if (rc) return rc; for_each_zone(zone) zone->min_slab_pages = (zone->present_pages * sysctl_min_slab_ratio) / 100; return 0; } #endif /* * lowmem_reserve_ratio_sysctl_handler - just a wrapper around * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() * whenever sysctl_lowmem_reserve_ratio changes. * * The reserve ratio obviously has absolutely no relation with the * minimum watermarks. The lowmem reserve ratio can only make sense * if in function of the boot time zone sizes. */ int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { proc_dointvec_minmax(table, write, buffer, length, ppos); setup_per_zone_lowmem_reserve(); return 0; } /* * percpu_pagelist_fraction - changes the pcp->high for each zone on each * cpu. It is the fraction of total pages in each zone that a hot per cpu pagelist * can have before it gets flushed back to buddy allocator. */ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { struct zone *zone; unsigned int cpu; int ret; ret = proc_dointvec_minmax(table, write, buffer, length, ppos); if (!write || (ret == -EINVAL)) return ret; for_each_populated_zone(zone) { for_each_possible_cpu(cpu) { unsigned long high; high = zone->present_pages / percpu_pagelist_fraction; setup_pagelist_highmark( per_cpu_ptr(zone->pageset, cpu), high); } } return 0; } int hashdist = HASHDIST_DEFAULT; #ifdef CONFIG_NUMA static int __init set_hashdist(char *str) { if (!str) return 0; hashdist = simple_strtoul(str, &str, 0); return 1; } __setup("hashdist=", set_hashdist); #endif /* * allocate a large system hash table from bootmem * - it is assumed that the hash table must contain an exact power-of-2 * quantity of entries * - limit is the number of hash buckets, not the total allocation size */ void *__init alloc_large_system_hash(const char *tablename, unsigned long bucketsize, unsigned long numentries, int scale, int flags, unsigned int *_hash_shift, unsigned int *_hash_mask, unsigned long limit) { unsigned long long max = limit; unsigned long log2qty, size; void *table = NULL; /* allow the kernel cmdline to have a say */ if (!numentries) { /* round applicable memory size up to nearest megabyte */ numentries = nr_kernel_pages; numentries += (1UL << (20 - PAGE_SHIFT)) - 1; numentries >>= 20 - PAGE_SHIFT; numentries <<= 20 - PAGE_SHIFT; /* limit to 1 bucket per 2^scale bytes of low memory */ if (scale > PAGE_SHIFT) numentries >>= (scale - PAGE_SHIFT); else numentries <<= (PAGE_SHIFT - scale); /* Make sure we've got at least a 0-order allocation.. 
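		 * (Editor's illustrative note; the numbers are hypothetical:
		 * with a 4KB PAGE_SIZE and an 8-byte bucketsize, fewer than
		 * 512 entries would not fill a single page, so the else-branch
		 * below raises numentries to PAGE_SIZE / bucketsize = 512;
		 * HASH_SMALL callers instead fall back to at least
		 * 1 << *_hash_shift entries.)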
*/ if (unlikely(flags & HASH_SMALL)) { /* Makes no sense without HASH_EARLY */ WARN_ON(!(flags & HASH_EARLY)); if (!(numentries >> *_hash_shift)) { numentries = 1UL << *_hash_shift; BUG_ON(!numentries); } } else if (unlikely((numentries * bucketsize) < PAGE_SIZE)) numentries = PAGE_SIZE / bucketsize; } numentries = roundup_pow_of_two(numentries); /* limit allocation size to 1/16 total memory by default */ if (max == 0) { max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; do_div(max, bucketsize); } if (numentries > max) numentries = max; log2qty = ilog2(numentries); do { size = bucketsize << log2qty; if (flags & HASH_EARLY) table = alloc_bootmem_nopanic(size); else if (hashdist) table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL); else { /* * If bucketsize is not a power-of-two, we may free * some pages at the end of hash table which * alloc_pages_exact() automatically does */ if (get_order(size) < MAX_ORDER) { table = alloc_pages_exact(size, GFP_ATOMIC); kmemleak_alloc(table, size, 1, GFP_ATOMIC); } } } while (!table && size > PAGE_SIZE && --log2qty); if (!table) panic("Failed to allocate %s hash table\n", tablename); printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n", tablename, (1UL << log2qty), ilog2(size) - PAGE_SHIFT, size); if (_hash_shift) *_hash_shift = log2qty; if (_hash_mask) *_hash_mask = (1 << log2qty) - 1; return table; } /* Return a pointer to the bitmap storing bits affecting a block of pages */ static inline unsigned long *get_pageblock_bitmap(struct zone *zone, unsigned long pfn) { #ifdef CONFIG_SPARSEMEM return __pfn_to_section(pfn)->pageblock_flags; #else return zone->pageblock_flags; #endif /* CONFIG_SPARSEMEM */ } static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn) { #ifdef CONFIG_SPARSEMEM pfn &= (PAGES_PER_SECTION-1); return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; #else pfn = pfn - zone->zone_start_pfn; return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; #endif /* CONFIG_SPARSEMEM */ } /** * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages * @page: The page within the block of interest * @start_bitidx: The first bit of interest to retrieve * @end_bitidx: The last bit of interest * returns pageblock_bits flags */ unsigned long get_pageblock_flags_group(struct page *page, int start_bitidx, int end_bitidx) { struct zone *zone; unsigned long *bitmap; unsigned long pfn, bitidx; unsigned long flags = 0; unsigned long value = 1; zone = page_zone(page); pfn = page_to_pfn(page); bitmap = get_pageblock_bitmap(zone, pfn); bitidx = pfn_to_bitidx(zone, pfn); for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) if (test_bit(bitidx + start_bitidx, bitmap)) flags |= value; return flags; } /** * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages * @page: The page within the block of interest * @start_bitidx: The first bit of interest * @end_bitidx: The last bit of interest * @flags: The flags to set */ void set_pageblock_flags_group(struct page *page, unsigned long flags, int start_bitidx, int end_bitidx) { struct zone *zone; unsigned long *bitmap; unsigned long pfn, bitidx; unsigned long value = 1; zone = page_zone(page); pfn = page_to_pfn(page); bitmap = get_pageblock_bitmap(zone, pfn); bitidx = pfn_to_bitidx(zone, pfn); VM_BUG_ON(pfn < zone->zone_start_pfn); VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages); for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) if (flags & value) 
__set_bit(bitidx + start_bitidx, bitmap); else __clear_bit(bitidx + start_bitidx, bitmap); } /* * This is designed as sub function...plz see page_isolation.c also. * set/clear page block's type to be ISOLATE. * page allocater never alloc memory from ISOLATE block. */ static int __count_immobile_pages(struct zone *zone, struct page *page, int count) { unsigned long pfn, iter, found; /* * For avoiding noise data, lru_add_drain_all() should be called * If ZONE_MOVABLE, the zone never contains immobile pages */ if (zone_idx(zone) == ZONE_MOVABLE) return true; if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE) return true; pfn = page_to_pfn(page); for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) { unsigned long check = pfn + iter; if (!pfn_valid_within(check)) continue; page = pfn_to_page(check); if (!page_count(page)) { if (PageBuddy(page)) iter += (1 << page_order(page)) - 1; continue; } if (!PageLRU(page)) found++; /* * If there are RECLAIMABLE pages, we need to check it. * But now, memory offline itself doesn't call shrink_slab() * and it still to be fixed. */ /* * If the page is not RAM, page_count()should be 0. * we don't need more check. This is an _used_ not-movable page. * * The problematic thing here is PG_reserved pages. PG_reserved * is set to both of a memory hole page and a _used_ kernel * page at boot. */ if (found > count) return false; } return true; } bool is_pageblock_removable_nolock(struct page *page) { struct zone *zone = page_zone(page); return __count_immobile_pages(zone, page, 0); } int set_migratetype_isolate(struct page *page) { struct zone *zone; unsigned long flags, pfn; struct memory_isolate_notify arg; int notifier_ret; int ret = -EBUSY; int zone_idx; zone = page_zone(page); zone_idx = zone_idx(zone); spin_lock_irqsave(&zone->lock, flags); pfn = page_to_pfn(page); arg.start_pfn = pfn; arg.nr_pages = pageblock_nr_pages; arg.pages_found = 0; /* * It may be possible to isolate a pageblock even if the * migratetype is not MIGRATE_MOVABLE. The memory isolation * notifier chain is used by balloon drivers to return the * number of pages in a range that are held by the balloon * driver to shrink memory. If all the pages are accounted for * by balloons, are free, or on the LRU, isolation can continue. * Later, for example, when memory hotplug notifier runs, these * pages reported as "can be isolated" should be isolated(freed) * by the balloon driver through the memory notifier chain. */ notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg); notifier_ret = notifier_to_errno(notifier_ret); if (notifier_ret) goto out; /* * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself. * We just check MOVABLE pages. */ if (__count_immobile_pages(zone, page, arg.pages_found)) ret = 0; /* * immobile means "not-on-lru" paes. If immobile is larger than * removable-by-driver pages reported by notifier, we'll fail. 
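	 *
	 * Illustrative example (editor's note; the counts are hypothetical):
	 * if a balloon driver reports arg.pages_found = 100 pages as removable
	 * in this pageblock, isolation still succeeds as long as
	 * __count_immobile_pages() sees at most 100 used, non-LRU pages; a
	 * 101st such page makes the block non-isolatable and ret stays -EBUSY.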
*/ out: if (!ret) { set_pageblock_migratetype(page, MIGRATE_ISOLATE); move_freepages_block(zone, page, MIGRATE_ISOLATE); } spin_unlock_irqrestore(&zone->lock, flags); if (!ret) drain_all_pages(); return ret; } void unset_migratetype_isolate(struct page *page) { struct zone *zone; unsigned long flags; zone = page_zone(page); spin_lock_irqsave(&zone->lock, flags); if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE) goto out; set_pageblock_migratetype(page, MIGRATE_MOVABLE); move_freepages_block(zone, page, MIGRATE_MOVABLE); out: spin_unlock_irqrestore(&zone->lock, flags); } #ifdef CONFIG_MEMORY_HOTREMOVE /* * All pages in the range must be isolated before calling this. */ void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) { struct page *page; struct zone *zone; int order, i; unsigned long pfn; unsigned long flags; /* find the first valid pfn */ for (pfn = start_pfn; pfn < end_pfn; pfn++) if (pfn_valid(pfn)) break; if (pfn == end_pfn) return; zone = page_zone(pfn_to_page(pfn)); spin_lock_irqsave(&zone->lock, flags); pfn = start_pfn; while (pfn < end_pfn) { if (!pfn_valid(pfn)) { pfn++; continue; } page = pfn_to_page(pfn); BUG_ON(page_count(page)); BUG_ON(!PageBuddy(page)); order = page_order(page); #ifdef CONFIG_DEBUG_VM printk(KERN_INFO "remove from free list %lx %d %lx\n", pfn, 1 << order, end_pfn); #endif list_del(&page->lru); rmv_page_order(page); zone->free_area[order].nr_free--; __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order)); for (i = 0; i < (1 << order); i++) SetPageReserved((page+i)); pfn += (1 << order); } spin_unlock_irqrestore(&zone->lock, flags); } #endif #ifdef CONFIG_MEMORY_FAILURE bool is_free_buddy_page(struct page *page) { struct zone *zone = page_zone(page); unsigned long pfn = page_to_pfn(page); unsigned long flags; int order; spin_lock_irqsave(&zone->lock, flags); for (order = 0; order < MAX_ORDER; order++) { struct page *page_head = page - (pfn & ((1 << order) - 1)); if (PageBuddy(page_head) && page_order(page_head) >= order) break; } spin_unlock_irqrestore(&zone->lock, flags); return order < MAX_ORDER; } #endif static struct trace_print_flags pageflag_names[] = { {1UL << PG_locked, "locked" }, {1UL << PG_error, "error" }, {1UL << PG_referenced, "referenced" }, {1UL << PG_uptodate, "uptodate" }, {1UL << PG_dirty, "dirty" }, {1UL << PG_lru, "lru" }, {1UL << PG_active, "active" }, {1UL << PG_slab, "slab" }, {1UL << PG_owner_priv_1, "owner_priv_1" }, {1UL << PG_arch_1, "arch_1" }, {1UL << PG_reserved, "reserved" }, {1UL << PG_private, "private" }, {1UL << PG_private_2, "private_2" }, {1UL << PG_writeback, "writeback" }, #ifdef CONFIG_PAGEFLAGS_EXTENDED {1UL << PG_head, "head" }, {1UL << PG_tail, "tail" }, #else {1UL << PG_compound, "compound" }, #endif {1UL << PG_swapcache, "swapcache" }, {1UL << PG_mappedtodisk, "mappedtodisk" }, {1UL << PG_reclaim, "reclaim" }, {1UL << PG_swapbacked, "swapbacked" }, {1UL << PG_unevictable, "unevictable" }, #ifdef CONFIG_MMU {1UL << PG_mlocked, "mlocked" }, #endif #ifdef CONFIG_ARCH_USES_PG_UNCACHED {1UL << PG_uncached, "uncached" }, #endif #ifdef CONFIG_MEMORY_FAILURE {1UL << PG_hwpoison, "hwpoison" }, #endif {-1UL, NULL }, }; static void dump_page_flags(unsigned long flags) { const char *delim = ""; unsigned long mask; int i; printk(KERN_ALERT "page flags: %#lx(", flags); /* remove zone id */ flags &= (1UL << NR_PAGEFLAGS) - 1; for (i = 0; pageflag_names[i].name && flags; i++) { mask = pageflag_names[i].mask; if ((flags & mask) != mask) continue; flags &= ~mask; printk("%s%s", delim, 
pageflag_names[i].name); delim = "|"; } /* check for left over flags */ if (flags) printk("%s%#lx", delim, flags); printk(")\n"); } void dump_page(struct page *page) { printk(KERN_ALERT "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n", page, atomic_read(&page->_count), page_mapcount(page), page->mapping, page->index); dump_page_flags(page->flags); mem_cgroup_print_bad_page(page); }
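/*
 * Editor's illustrative sketch (not part of the original file): a minimal,
 * hypothetical boot-time user of alloc_large_system_hash() as defined above,
 * letting the helper size the table from nr_kernel_pages and allocating one
 * bucket per 2^14 bytes (16KB) of low memory. The example_hash* names and the
 * subsystem itself are invented for illustration, so the snippet is guarded
 * with #if 0 and never compiled.
 */
#if 0
static struct hlist_head *example_hash __initdata;
static unsigned int example_hash_shift;
static unsigned int example_hash_mask;

static void __init example_hash_init(void)
{
	example_hash = alloc_large_system_hash("example",
					       sizeof(struct hlist_head),
					       0,		/* size from nr_kernel_pages */
					       14,		/* 1 bucket per 16KB lowmem */
					       HASH_EARLY,	/* allocate from bootmem */
					       &example_hash_shift,
					       &example_hash_mask,
					       0);		/* no bucket-count limit */
}
#endif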
Martix/shr-fcse
mm/page_alloc.c
C
gpl-2.0
160,788
<?php /** * @file * Definition of \Drupal\simpletest\TestBase. */ namespace Drupal\simpletest; use Drupal\Component\Utility\Crypt; use Drupal\Component\Utility\Random; use Drupal\Core\Database\Database; use Drupal\Component\Utility\String; use Drupal\Core\Config\ConfigImporter; use Drupal\Core\Config\StorageComparer; use Drupal\Core\DependencyInjection\ContainerBuilder; use Drupal\Core\Database\ConnectionNotDefinedException; use Drupal\Core\Config\StorageInterface; use Drupal\Core\Language\Language; use Drupal\Core\Session\AccountProxy; use Drupal\Core\Session\AnonymousUserSession; use Drupal\Core\Site\Settings; use Drupal\Core\StreamWrapper\PublicStream; use Drupal\Core\Utility\Error; use Symfony\Component\HttpFoundation\Request; use Symfony\Component\HttpFoundation\RequestStack; use Symfony\Component\DependencyInjection\Reference; /** * Base class for Drupal tests. * * Do not extend this class directly; use either * \Drupal\simpletest\WebTestBase or \Drupal\simpletest\KernelTestBase. */ abstract class TestBase { /** * The test run ID. * * @var string */ protected $testId; /** * The site directory of this test run. * * @var string */ protected $siteDirectory = NULL; /** * The database prefix of this test run. * * @var string */ protected $databasePrefix = NULL; /** * The site directory of the original parent site. * * @var string */ protected $originalSite; /** * The original file directory, before it was changed for testing purposes. * * @var string */ protected $originalFileDirectory = NULL; /** * Time limit for the test. */ protected $timeLimit = 500; /** * Current results of this test case. * * @var Array */ public $results = array( '#pass' => 0, '#fail' => 0, '#exception' => 0, '#debug' => 0, ); /** * Assertions thrown in that test case. * * @var Array */ protected $assertions = array(); /** * This class is skipped when looking for the source of an assertion. * * When displaying which function an assert comes from, it's not too useful * to see "WebTestBase->drupalLogin()', we would like to see the test * that called it. So we need to skip the classes defining these helper * methods. */ protected $skipClasses = array(__CLASS__ => TRUE); /** * TRUE if verbose debugging is enabled. * * @var boolean */ public $verbose; /** * Incrementing identifier for verbose output filenames. * * @var integer */ protected $verboseId = 0; /** * Safe class name for use in verbose output filenames. * * Namespaces separator (\) replaced with _. * * @var string */ protected $verboseClassName; /** * Directory where verbose output files are put. * * @var string */ protected $verboseDirectory; /** * The original database prefix when running inside Simpletest. * * @var string */ protected $originalPrefix; /** * URL to the verbose output file directory. * * @var string */ protected $verboseDirectoryUrl; /** * The settings array. */ protected $originalSettings; /** * The public file directory for the test environment. * * This is set in TestBase::prepareEnvironment(). * * @var string */ protected $public_files_directory; /** * The private file directory for the test environment. * * This is set in TestBase::prepareEnvironment(). * * @var string */ protected $private_files_directory; /** * Whether to die in case any test assertion fails. * * @var boolean * * @see run-tests.sh */ public $dieOnFail = FALSE; /** * The DrupalKernel instance used in the test. * * @var \Drupal\Core\DrupalKernel */ protected $kernel; /** * The dependency injection container used in the test. 
* * @var \Symfony\Component\DependencyInjection\ContainerInterface */ protected $container; /** * The config importer that can used in a test. * * @var \Drupal\Core\Config\ConfigImporter */ protected $configImporter; /** * The random generator. * * @var \Drupal\Component\Utility\Random */ protected $randomGenerator; /** * The name of the session cookie. */ protected $originalSessionName; /** * Set to TRUE to strict check all configuration saved. * * @see \Drupal\Core\Config\Testing\ConfigSchemaChecker * * @var bool */ protected $strictConfigSchema = TRUE; /** * Constructor for Test. * * @param $test_id * Tests with the same id are reported together. */ public function __construct($test_id = NULL) { $this->testId = $test_id; } /** * Performs setup tasks before each individual test method is run. */ abstract protected function setUp(); /** * Checks the matching requirements for Test. * * @return * Array of errors containing a list of unmet requirements. */ protected function checkRequirements() { return array(); } /** * Internal helper: stores the assert. * * @param $status * Can be 'pass', 'fail', 'exception', 'debug'. * TRUE is a synonym for 'pass', FALSE for 'fail'. * @param $message * (optional) A message to display with the assertion. Do not translate * messages: use \Drupal\Component\Utility\String::format() to embed * variables in the message text, not t(). If left blank, a default message * will be displayed. * @param $group * (optional) The group this message is in, which is displayed in a column * in test output. Use 'Debug' to indicate this is debugging output. Do not * translate this string. Defaults to 'Other'; most tests do not override * this default. * @param $caller * By default, the assert comes from a function whose name starts with * 'test'. Instead, you can specify where this assert originates from * by passing in an associative array as $caller. Key 'file' is * the name of the source file, 'line' is the line number and 'function' * is the caller function itself. */ protected function assert($status, $message = '', $group = 'Other', array $caller = NULL) { // Convert boolean status to string status. if (is_bool($status)) { $status = $status ? 'pass' : 'fail'; } // Increment summary result counter. $this->results['#' . $status]++; // Get the function information about the call to the assertion method. if (!$caller) { $caller = $this->getAssertionCall(); } // Creation assertion array that can be displayed while tests are running. $this->assertions[] = $assertion = array( 'test_id' => $this->testId, 'test_class' => get_class($this), 'status' => $status, 'message' => $message, 'message_group' => $group, 'function' => $caller['function'], 'line' => $caller['line'], 'file' => $caller['file'], ); // Store assertion for display after the test has completed. self::getDatabaseConnection() ->insert('simpletest') ->fields($assertion) ->execute(); // We do not use a ternary operator here to allow a breakpoint on // test failure. if ($status == 'pass') { return TRUE; } else { if ($this->dieOnFail && ($status == 'fail' || $status == 'exception')) { exit(1); } return FALSE; } } /** * Store an assertion from outside the testing context. * * This is useful for inserting assertions that can only be recorded after * the test case has been destroyed, such as PHP fatal errors. The caller * information is not automatically gathered since the caller is most likely * inserting the assertion on behalf of other code. 
In all other respects * the method behaves just like \Drupal\simpletest\TestBase::assert() in terms * of storing the assertion. * * @return * Message ID of the stored assertion. * * @see \Drupal\simpletest\TestBase::assert() * @see \Drupal\simpletest\TestBase::deleteAssert() */ public static function insertAssert($test_id, $test_class, $status, $message = '', $group = 'Other', array $caller = array()) { // Convert boolean status to string status. if (is_bool($status)) { $status = $status ? 'pass' : 'fail'; } $caller += array( 'function' => 'Unknown', 'line' => 0, 'file' => 'Unknown', ); $assertion = array( 'test_id' => $test_id, 'test_class' => $test_class, 'status' => $status, 'message' => $message, 'message_group' => $group, 'function' => $caller['function'], 'line' => $caller['line'], 'file' => $caller['file'], ); return self::getDatabaseConnection() ->insert('simpletest') ->fields($assertion) ->execute(); } /** * Delete an assertion record by message ID. * * @param $message_id * Message ID of the assertion to delete. * * @return * TRUE if the assertion was deleted, FALSE otherwise. * * @see \Drupal\simpletest\TestBase::insertAssert() */ public static function deleteAssert($message_id) { return (bool) self::getDatabaseConnection() ->delete('simpletest') ->condition('message_id', $message_id) ->execute(); } /** * Returns the database connection to the site running Simpletest. * * @return \Drupal\Core\Database\Connection * The database connection to use for inserting assertions. */ public static function getDatabaseConnection() { // Check whether there is a test runner connection. // @see run-tests.sh // @todo Convert Simpletest UI runner to create + use this connection, too. try { $connection = Database::getConnection('default', 'test-runner'); } catch (ConnectionNotDefinedException $e) { // Check whether there is a backup of the original default connection. // @see TestBase::prepareEnvironment() try { $connection = Database::getConnection('default', 'simpletest_original_default'); } catch (ConnectionNotDefinedException $e) { // If TestBase::prepareEnvironment() or TestBase::restoreEnvironment() // failed, the test-specific database connection does not exist // yet/anymore, so fall back to the default of the (UI) test runner. $connection = Database::getConnection('default', 'default'); } } return $connection; } /** * Cycles through backtrace until the first non-assertion method is found. * * @return * Array representing the true caller. */ protected function getAssertionCall() { $backtrace = debug_backtrace(); // The first element is the call. The second element is the caller. // We skip calls that occurred in one of the methods of our base classes // or in an assertion function. while (($caller = $backtrace[1]) && ((isset($caller['class']) && isset($this->skipClasses[$caller['class']])) || substr($caller['function'], 0, 6) == 'assert')) { // We remove that call. array_shift($backtrace); } return Error::getLastCaller($backtrace); } /** * Check to see if a value is not false. * * False values are: empty string, 0, NULL, and FALSE. * * @param $value * The value on which the assertion is to be done. * @param $message * (optional) A message to display with the assertion. Do not translate * messages: use \Drupal\Component\Utility\String::format() to embed * variables in the message text, not t(). If left blank, a default message * will be displayed. * @param $group * (optional) The group this message is in, which is displayed in a column * in test output. 
Use 'Debug' to indicate this is debugging output. Do not * translate this string. Defaults to 'Other'; most tests do not override * this default. * * @return * TRUE if the assertion succeeded, FALSE otherwise. */ protected function assertTrue($value, $message = '', $group = 'Other') { return $this->assert((bool) $value, $message ? $message : String::format('Value @value is TRUE.', array('@value' => var_export($value, TRUE))), $group); } /** * Check to see if a value is false. * * False values are: empty string, 0, NULL, and FALSE. * * @param $value * The value on which the assertion is to be done. * @param $message * (optional) A message to display with the assertion. Do not translate * messages: use \Drupal\Component\Utility\String::format() to embed * variables in the message text, not t(). If left blank, a default message * will be displayed. * @param $group * (optional) The group this message is in, which is displayed in a column * in test output. Use 'Debug' to indicate this is debugging output. Do not * translate this string. Defaults to 'Other'; most tests do not override * this default. * * @return * TRUE if the assertion succeeded, FALSE otherwise. */ protected function assertFalse($value, $message = '', $group = 'Other') { return $this->assert(!$value, $message ? $message : String::format('Value @value is FALSE.', array('@value' => var_export($value, TRUE))), $group); } /** * Check to see if a value is NULL. * * @param $value * The value on which the assertion is to be done. * @param $message * (optional) A message to display with the assertion. Do not translate * messages: use \Drupal\Component\Utility\String::format() to embed * variables in the message text, not t(). If left blank, a default message * will be displayed. * @param $group * (optional) The group this message is in, which is displayed in a column * in test output. Use 'Debug' to indicate this is debugging output. Do not * translate this string. Defaults to 'Other'; most tests do not override * this default. * * @return * TRUE if the assertion succeeded, FALSE otherwise. */ protected function assertNull($value, $message = '', $group = 'Other') { return $this->assert(!isset($value), $message ? $message : String::format('Value @value is NULL.', array('@value' => var_export($value, TRUE))), $group); } /** * Check to see if a value is not NULL. * * @param $value * The value on which the assertion is to be done. * @param $message * (optional) A message to display with the assertion. Do not translate * messages: use \Drupal\Component\Utility\String::format() to embed * variables in the message text, not t(). If left blank, a default message * will be displayed. * @param $group * (optional) The group this message is in, which is displayed in a column * in test output. Use 'Debug' to indicate this is debugging output. Do not * translate this string. Defaults to 'Other'; most tests do not override * this default. * * @return * TRUE if the assertion succeeded, FALSE otherwise. */ protected function assertNotNull($value, $message = '', $group = 'Other') { return $this->assert(isset($value), $message ? $message : String::format('Value @value is not NULL.', array('@value' => var_export($value, TRUE))), $group); } /** * Check to see if two values are equal. * * @param $first * The first value to check. * @param $second * The second value to check. * @param $message * (optional) A message to display with the assertion. 
Do not translate * messages: use \Drupal\Component\Utility\String::format() to embed * variables in the message text, not t(). If left blank, a default message * will be displayed. * @param $group * (optional) The group this message is in, which is displayed in a column * in test output. Use 'Debug' to indicate this is debugging output. Do not * translate this string. Defaults to 'Other'; most tests do not override * this default. * * @return * TRUE if the assertion succeeded, FALSE otherwise. */ protected function assertEqual($first, $second, $message = '', $group = 'Other') { return $this->assert($first == $second, $message ? $message : String::format('Value @first is equal to value @second.', array('@first' => var_export($first, TRUE), '@second' => var_export($second, TRUE))), $group); } /** * Check to see if two values are not equal. * * @param $first * The first value to check. * @param $second * The second value to check. * @param $message * (optional) A message to display with the assertion. Do not translate * messages: use \Drupal\Component\Utility\String::format() to embed * variables in the message text, not t(). If left blank, a default message * will be displayed. * @param $group * (optional) The group this message is in, which is displayed in a column * in test output. Use 'Debug' to indicate this is debugging output. Do not * translate this string. Defaults to 'Other'; most tests do not override * this default. * * @return * TRUE if the assertion succeeded, FALSE otherwise. */ protected function assertNotEqual($first, $second, $message = '', $group = 'Other') { return $this->assert($first != $second, $message ? $message : String::format('Value @first is not equal to value @second.', array('@first' => var_export($first, TRUE), '@second' => var_export($second, TRUE))), $group); } /** * Check to see if two values are identical. * * @param $first * The first value to check. * @param $second * The second value to check. * @param $message * (optional) A message to display with the assertion. Do not translate * messages: use \Drupal\Component\Utility\String::format() to embed * variables in the message text, not t(). If left blank, a default message * will be displayed. * @param $group * (optional) The group this message is in, which is displayed in a column * in test output. Use 'Debug' to indicate this is debugging output. Do not * translate this string. Defaults to 'Other'; most tests do not override * this default. * * @return * TRUE if the assertion succeeded, FALSE otherwise. */ protected function assertIdentical($first, $second, $message = '', $group = 'Other') { return $this->assert($first === $second, $message ? $message : String::format('Value @first is identical to value @second.', array('@first' => var_export($first, TRUE), '@second' => var_export($second, TRUE))), $group); } /** * Check to see if two values are not identical. * * @param $first * The first value to check. * @param $second * The second value to check. * @param $message * (optional) A message to display with the assertion. Do not translate * messages: use \Drupal\Component\Utility\String::format() to embed * variables in the message text, not t(). If left blank, a default message * will be displayed. * @param $group * (optional) The group this message is in, which is displayed in a column * in test output. Use 'Debug' to indicate this is debugging output. Do not * translate this string. Defaults to 'Other'; most tests do not override * this default. 
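   *
   * For example (editor's illustrative usage, not part of the original file):
   * @code
   *   // Passes: a string and an integer may be equal, but are never identical.
   *   $this->assertNotIdentical('1', 1, 'String 1 is not identical to integer 1.');
   * @endcode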
* * @return * TRUE if the assertion succeeded, FALSE otherwise. */ protected function assertNotIdentical($first, $second, $message = '', $group = 'Other') { return $this->assert($first !== $second, $message ? $message : String::format('Value @first is not identical to value @second.', array('@first' => var_export($first, TRUE), '@second' => var_export($second, TRUE))), $group); } /** * Checks to see if two objects are identical. * * @param object $object1 * The first object to check. * @param object $object2 * The second object to check. * @param $message * (optional) A message to display with the assertion. Do not translate * messages: use \Drupal\Component\Utility\String::format() to embed * variables in the message text, not t(). If left blank, a default message * will be displayed. * @param $group * (optional) The group this message is in, which is displayed in a column * in test output. Use 'Debug' to indicate this is debugging output. Do not * translate this string. Defaults to 'Other'; most tests do not override * this default. * * @return * TRUE if the assertion succeeded, FALSE otherwise. */ protected function assertIdenticalObject($object1, $object2, $message = '', $group = 'Other') { $message = $message ?: String::format('!object1 is identical to !object2', array( '!object1' => var_export($object1, TRUE), '!object2' => var_export($object2, TRUE), )); $identical = TRUE; foreach ($object1 as $key => $value) { $identical = $identical && isset($object2->$key) && $object2->$key === $value; } return $this->assertTrue($identical, $message, $group); } /** * Asserts that no errors have been logged to the PHP error.log thus far. * * @return bool * TRUE if the assertion succeeded, FALSE otherwise. * * @see TestBase::prepareEnvironment() * @see \Drupal\Core\DrupalKernel::bootConfiguration() */ protected function assertNoErrorsLogged() { // Since PHP only creates the error.log file when an actual error is // triggered, it is sufficient to check whether the file exists. return $this->assertFalse(file_exists(DRUPAL_ROOT . '/' . $this->siteDirectory . '/error.log'), 'PHP error.log is empty.'); } /** * Fire an assertion that is always positive. * * @param $message * (optional) A message to display with the assertion. Do not translate * messages: use \Drupal\Component\Utility\String::format() to embed * variables in the message text, not t(). If left blank, a default message * will be displayed. * @param $group * (optional) The group this message is in, which is displayed in a column * in test output. Use 'Debug' to indicate this is debugging output. Do not * translate this string. Defaults to 'Other'; most tests do not override * this default. * * @return * TRUE. */ protected function pass($message = NULL, $group = 'Other') { return $this->assert(TRUE, $message, $group); } /** * Fire an assertion that is always negative. * * @param $message * (optional) A message to display with the assertion. Do not translate * messages: use \Drupal\Component\Utility\String::format() to embed * variables in the message text, not t(). If left blank, a default message * will be displayed. * @param $group * (optional) The group this message is in, which is displayed in a column * in test output. Use 'Debug' to indicate this is debugging output. Do not * translate this string. Defaults to 'Other'; most tests do not override * this default. * * @return * FALSE. */ protected function fail($message = NULL, $group = 'Other') { return $this->assert(FALSE, $message, $group); } /** * Fire an error assertion. 
* * @param $message * (optional) A message to display with the assertion. Do not translate * messages: use \Drupal\Component\Utility\String::format() to embed * variables in the message text, not t(). If left blank, a default message * will be displayed. * @param $group * (optional) The group this message is in, which is displayed in a column * in test output. Use 'Debug' to indicate this is debugging output. Do not * translate this string. Defaults to 'Other'; most tests do not override * this default. * @param $caller * The caller of the error. * * @return * FALSE. */ protected function error($message = '', $group = 'Other', array $caller = NULL) { if ($group == 'User notice') { // Since 'User notice' is set by trigger_error() which is used for debug // set the message to a status of 'debug'. return $this->assert('debug', $message, 'Debug', $caller); } return $this->assert('exception', $message, $group, $caller); } /** * Logs a verbose message in a text file. * * The link to the verbose message will be placed in the test results as a * passing assertion with the text '[verbose message]'. * * @param $message * The verbose message to be stored. * * @see simpletest_verbose() */ protected function verbose($message) { // Do nothing if verbose debugging is disabled. if (!$this->verbose) { return; } $message = '<hr />ID #' . $this->verboseId . ' (<a href="' . $this->verboseClassName . '-' . ($this->verboseId - 1) . '.html">Previous</a> | <a href="' . $this->verboseClassName . '-' . ($this->verboseId + 1) . '.html">Next</a>)<hr />' . $message; $verbose_filename = $this->verboseDirectory . '/' . $this->verboseClassName . '-' . $this->verboseId . '.html'; if (file_put_contents($verbose_filename, $message, FILE_APPEND)) { $url = $this->verboseDirectoryUrl . '/' . $this->verboseClassName . '-' . $this->verboseId . '.html'; // Not using _l() to avoid invoking the theme system, so that unit tests // can use verbose() as well. $url = '<a href="' . $url . '" target="_blank">Verbose message</a>'; $this->error($url, 'User notice'); } $this->verboseId++; } /** * Run all tests in this class. * * Regardless of whether $methods are passed or not, only method names * starting with "test" are executed. * * @param $methods * (optional) A list of method names in the test case class to run; e.g., * array('testFoo', 'testBar'). By default, all methods of the class are * taken into account, but it can be useful to only run a few selected test * methods during debugging. */ public function run(array $methods = array()) { $class = get_class($this); if ($missing_requirements = $this->checkRequirements()) { $object_info = new \ReflectionObject($this); $caller = array( 'file' => $object_info->getFileName(), ); foreach ($missing_requirements as $missing_requirement) { TestBase::insertAssert($this->testId, $class, FALSE, $missing_requirement, 'Requirements check', $caller); } return; } TestServiceProvider::$currentTest = $this; $simpletest_config = \Drupal::config('simpletest.settings'); // Unless preset from run-tests.sh, retrieve the current verbose setting. if (!isset($this->verbose)) { $this->verbose = $simpletest_config->get('verbose'); } if ($this->verbose) { // Initialize verbose debugging. $this->verbose = TRUE; $this->verboseDirectory = PublicStream::basePath() . '/simpletest/verbose'; $this->verboseDirectoryUrl = file_create_url($this->verboseDirectory); if (file_prepare_directory($this->verboseDirectory, FILE_CREATE_DIRECTORY) && !file_exists($this->verboseDirectory . 
'/.htaccess')) { file_put_contents($this->verboseDirectory . '/.htaccess', "<IfModule mod_expires.c>\nExpiresActive Off\n</IfModule>\n"); } $this->verboseClassName = str_replace("\\", "_", $class); } // HTTP auth settings (<username>:<password>) for the simpletest browser // when sending requests to the test site. $this->httpauth_method = (int) $simpletest_config->get('httpauth.method'); $username = $simpletest_config->get('httpauth.username'); $password = $simpletest_config->get('httpauth.password'); if (!empty($username) && !empty($password)) { $this->httpauth_credentials = $username . ':' . $password; } set_error_handler(array($this, 'errorHandler')); // Iterate through all the methods in this class, unless a specific list of // methods to run was passed. $test_methods = array_filter(get_class_methods($class), function ($method) { return strpos($method, 'test') === 0; }); if (empty($test_methods)) { // Call $this->assert() here because we need to pass along custom caller // information, lest the wrong originating code file/line be identified. $this->assert(FALSE, 'No test methods found.', 'Requirements', array('function' => __METHOD__ . '()', 'file' => __FILE__, 'line' => __LINE__)); } if ($methods) { $test_methods = array_intersect($test_methods, $methods); } foreach ($test_methods as $method) { // Insert a fail record. This will be deleted on completion to ensure // that testing completed. $method_info = new \ReflectionMethod($class, $method); $caller = array( 'file' => $method_info->getFileName(), 'line' => $method_info->getStartLine(), 'function' => $class . '->' . $method . '()', ); $test_completion_check_id = TestBase::insertAssert($this->testId, $class, FALSE, 'The test did not complete due to a fatal error.', 'Completion check', $caller); try { $this->prepareEnvironment(); } catch (\Exception $e) { $this->exceptionHandler($e); // The prepareEnvironment() method isolates the test from the parent // Drupal site by creating a random database prefix and test site // directory. If this fails, a test would possibly operate in the // parent site. Therefore, the entire test run for this test class // has to be aborted. // restoreEnvironment() cannot be called, because we do not know // where exactly the environment setup failed. break; } try { $this->setUp(); } catch (\Exception $e) { $this->exceptionHandler($e); // Abort if setUp() fails, since all test methods will fail. // But ensure to clean up and restore the environment, since // prepareEnvironment() succeeded. $this->restoreEnvironment(); break; } try { $this->$method(); } catch (\Exception $e) { $this->exceptionHandler($e); } try { $this->tearDown(); } catch (\Exception $e) { $this->exceptionHandler($e); // If a test fails to tear down, abort the entire test class, since // it is likely that all tests will fail in the same way and a // failure here only results in additional test artifacts that have // to be manually deleted. $this->restoreEnvironment(); break; } $this->restoreEnvironment(); // Remove the test method completion check record. TestBase::deleteAssert($test_completion_check_id); } TestServiceProvider::$currentTest = NULL; // Clear out the error messages and restore error handler. drupal_get_messages(); restore_error_handler(); } /** * Generates a database prefix for running tests. * * The database prefix is used by prepareEnvironment() to setup a public files * directory for the test to be run, which also contains the PHP error log, * which is written to in case of a fatal error. 
Since that directory is based * on the database prefix, all tests (even unit tests) need to have one, in * order to access and read the error log. * * @see TestBase::prepareEnvironment() * * The generated database table prefix is used for the Drupal installation * being performed for the test. It is also used as user agent HTTP header * value by the cURL-based browser of DrupalWebTestCase, which is sent to the * Drupal installation of the test. During early Drupal bootstrap, the user * agent HTTP header is parsed, and if it matches, all database queries use * the database table prefix that has been generated here. * * @see WebTestBase::curlInitialize() * @see drupal_valid_test_ua() */ private function prepareDatabasePrefix() { // Ensure that the generated test site directory does not exist already, // which may happen with a large amount of concurrent threads and // long-running tests. do { $suffix = mt_rand(100000, 999999); $this->siteDirectory = 'sites/simpletest/' . $suffix; $this->databasePrefix = 'simpletest' . $suffix; } while (is_dir(DRUPAL_ROOT . '/' . $this->siteDirectory)); // As soon as the database prefix is set, the test might start to execute. // All assertions as well as the SimpleTest batch operations are associated // with the testId, so the database prefix has to be associated with it. $affected_rows = self::getDatabaseConnection()->update('simpletest_test_id') ->fields(array('last_prefix' => $this->databasePrefix)) ->condition('test_id', $this->testId) ->execute(); if (!$affected_rows) { throw new \RuntimeException('Failed to set up database prefix.'); } } /** * Changes the database connection to the prefixed one. * * @see TestBase::prepareEnvironment() */ private function changeDatabasePrefix() { if (empty($this->databasePrefix)) { $this->prepareDatabasePrefix(); } // If the backup already exists, something went terribly wrong. // This case is possible, because database connection info is a static // global state construct on the Database class, which at least persists // for all test methods executed in one PHP process. if (Database::getConnectionInfo('simpletest_original_default')) { throw new \RuntimeException("Bad Database connection state: 'simpletest_original_default' connection key already exists. Broken test?"); } // Clone the current connection and replace the current prefix. $connection_info = Database::getConnectionInfo('default'); Database::renameConnection('default', 'simpletest_original_default'); foreach ($connection_info as $target => $value) { // Replace the full table prefix definition to ensure that no table // prefixes of the test runner leak into the test. $connection_info[$target]['prefix'] = array( 'default' => $value['prefix']['default'] . $this->databasePrefix, ); } Database::addConnectionInfo('default', 'default', $connection_info['default']); } /** * Act on global state information before the environment is altered for a test. * * Allows e.g. KernelTestBase to prime system/extension info from the * parent site (and inject it into the test environment so as to improve * performance). */ protected function beforePrepareEnvironment() { } /** * Prepares the current environment for running the test. * * Backups various current environment variables and resets them, so they do * not interfere with the Drupal site installation in which tests are executed * and can be restored in TestBase::restoreEnvironment(). * * Also sets up new resources for the testing environment, such as the public * filesystem and configuration directories. 
* * This method is private as it must only be called once by TestBase::run() * (multiple invocations for the same test would have unpredictable * consequences) and it must not be callable or overridable by test classes. * * @see TestBase::beforePrepareEnvironment() */ private function prepareEnvironment() { $user = \Drupal::currentUser(); // Allow (base) test classes to backup global state information. $this->beforePrepareEnvironment(); // Create the database prefix for this test. $this->prepareDatabasePrefix(); $language_interface = \Drupal::languageManager()->getCurrentLanguage(); // When running the test runner within a test, back up the original database // prefix. if (DRUPAL_TEST_IN_CHILD_SITE) { $this->originalPrefix = drupal_valid_test_ua(); } // Backup current in-memory configuration. $this->originalSite = conf_path(); $this->originalSettings = Settings::getAll(); $this->originalConfig = $GLOBALS['config']; // @todo Remove all remnants of $GLOBALS['conf']. // @see https://drupal.org/node/2183323 $this->originalConf = isset($GLOBALS['conf']) ? $GLOBALS['conf'] : NULL; // Backup statics and globals. $this->originalContainer = clone \Drupal::getContainer(); $this->originalLanguage = $language_interface; $this->originalConfigDirectories = $GLOBALS['config_directories']; // Save further contextual information. // Use the original files directory to avoid nesting it within an existing // simpletest directory if a test is executed within a test. $this->originalFileDirectory = Settings::get('file_public_path', conf_path() . '/files'); $this->originalProfile = drupal_get_profile(); $this->originalUser = isset($user) ? clone $user : NULL; // Prevent that session data is leaked into the UI test runner by closing // the session and then setting the session-name (i.e. the name of the // session cookie) to a random value. If a test starts a new session, then // it will be associated with a different session-name. After the test-run // it can be safely destroyed. // @see TestBase::restoreEnvironment() if (PHP_SAPI !== 'cli' && session_status() === PHP_SESSION_ACTIVE) { session_write_close(); } $this->originalSessionName = session_name(); session_name('SIMPLETEST' . Crypt::randomBytesBase64()); // Save and clean the shutdown callbacks array because it is static cached // and will be changed by the test run. Otherwise it will contain callbacks // from both environments and the testing environment will try to call the // handlers defined by the original one. $callbacks = &drupal_register_shutdown_function(); $this->originalShutdownCallbacks = $callbacks; $callbacks = array(); // Create test directory ahead of installation so fatal errors and debug // information can be logged during installation process. file_prepare_directory($this->siteDirectory, FILE_CREATE_DIRECTORY | FILE_MODIFY_PERMISSIONS); // Prepare filesystem directory paths. $this->public_files_directory = $this->siteDirectory . '/files'; $this->private_files_directory = $this->siteDirectory . '/private'; $this->temp_files_directory = $this->siteDirectory . '/temp'; $this->translation_files_directory = $this->siteDirectory . '/translations'; $this->generatedTestFiles = FALSE; // Ensure the configImporter is refreshed for each test. $this->configImporter = NULL; // Unregister all custom stream wrappers of the parent site. // Availability of Drupal stream wrappers varies by test base class: // - KernelTestBase supports and maintains stream wrappers in a custom // way. 
// - WebTestBase re-initializes Drupal stream wrappers after installation. // The original stream wrappers are restored after the test run. // @see TestBase::restoreEnvironment() $this->originalContainer->get('stream_wrapper_manager')->unregister(); // Reset statics. drupal_static_reset(); // Ensure there is no service container. $this->container = NULL; \Drupal::setContainer(NULL); // Unset globals. unset($GLOBALS['config_directories']); unset($GLOBALS['config']); unset($GLOBALS['conf']); // Log fatal errors. ini_set('log_errors', 1); ini_set('error_log', DRUPAL_ROOT . '/' . $this->siteDirectory . '/error.log'); // Change the database prefix. $this->changeDatabasePrefix(); // After preparing the environment and changing the database prefix, we are // in a valid test environment. drupal_valid_test_ua($this->databasePrefix); conf_path(FALSE, TRUE); // Reset settings. new Settings(array( // For performance, simply use the database prefix as hash salt. 'hash_salt' => $this->databasePrefix, )); drupal_set_time_limit($this->timeLimit); } /** * Performs cleanup tasks after each individual test method has been run. */ protected function tearDown() { } /** * Cleans up the test environment and restores the original environment. * * Deletes created files, database tables, and reverts environment changes. * * This method needs to be invoked for both unit and integration tests. * * @see TestBase::prepareDatabasePrefix() * @see TestBase::changeDatabasePrefix() * @see TestBase::prepareEnvironment() */ private function restoreEnvironment() { // Destroy the session if one was started during the test-run. $_SESSION = array(); if (PHP_SAPI !== 'cli' && session_status() === PHP_SESSION_ACTIVE) { session_destroy(); $params = session_get_cookie_params(); setcookie(session_name(), '', REQUEST_TIME - 3600, $params['path'], $params['domain'], $params['secure'], $params['httponly']); } session_name($this->originalSessionName); // Reset all static variables. // Unsetting static variables will potentially invoke destruct methods, // which might call into functions that prime statics and caches again. // In that case, all functions are still operating on the test environment, // which means they may need to access its filesystem and database. drupal_static_reset(); if ($this->container && $this->container->has('state') && $state = $this->container->get('state')) { $captured_emails = $state->get('system.test_mail_collector') ?: array(); $emailCount = count($captured_emails); if ($emailCount) { $message = $emailCount == 1 ? '1 email was sent during this test.' : $emailCount . ' emails were sent during this test.'; $this->pass($message, 'Email'); } } // Sleep for 50ms to allow shutdown functions and terminate events to // complete. Further information: https://drupal.org/node/2194357. usleep(50000); // Remove all prefixed tables. $original_connection_info = Database::getConnectionInfo('simpletest_original_default'); $original_prefix = $original_connection_info['default']['prefix']['default']; $test_connection_info = Database::getConnectionInfo('default'); $test_prefix = $test_connection_info['default']['prefix']['default']; if ($original_prefix != $test_prefix) { $tables = Database::getConnection()->schema()->findTables($test_prefix . 
'%'); $prefix_length = strlen($test_prefix); foreach ($tables as $table) { if (Database::getConnection()->schema()->dropTable(substr($table, $prefix_length))) { unset($tables[$table]); } } } // In case a fatal error occurred that was not in the test process read the // log to pick up any fatal errors. simpletest_log_read($this->testId, $this->databasePrefix, get_class($this)); // Delete test site directory. file_unmanaged_delete_recursive($this->siteDirectory, array($this, 'filePreDeleteCallback')); // Restore original database connection. Database::removeConnection('default'); Database::renameConnection('simpletest_original_default', 'default'); // Reset all static variables. // All destructors of statically cached objects have been invoked above; // this second reset is guaranteed to reset everything to nothing. drupal_static_reset(); // Restore original in-memory configuration. $GLOBALS['config'] = $this->originalConfig; $GLOBALS['conf'] = $this->originalConf; new Settings($this->originalSettings); // Restore original statics and globals. \Drupal::setContainer($this->originalContainer); $GLOBALS['config_directories'] = $this->originalConfigDirectories; // Re-initialize original stream wrappers of the parent site. // This must happen after static variables have been reset and the original // container and $config_directories are restored, as simpletest_log_read() // uses the public stream wrapper to locate the error.log. $this->originalContainer->get('stream_wrapper_manager')->register(); if (isset($this->originalPrefix)) { drupal_valid_test_ua($this->originalPrefix); } else { drupal_valid_test_ua(FALSE); } conf_path(TRUE, TRUE); // Restore original shutdown callbacks. $callbacks = &drupal_register_shutdown_function(); $callbacks = $this->originalShutdownCallbacks; } /** * Handle errors during test runs. * * Because this is registered in set_error_handler(), it has to be public. * * @see set_error_handler */ public function errorHandler($severity, $message, $file = NULL, $line = NULL) { if ($severity & error_reporting()) { require_once DRUPAL_ROOT . '/core/includes/errors.inc'; $error_map = array( E_STRICT => 'Run-time notice', E_WARNING => 'Warning', E_NOTICE => 'Notice', E_CORE_ERROR => 'Core error', E_CORE_WARNING => 'Core warning', E_USER_ERROR => 'User error', E_USER_WARNING => 'User warning', E_USER_NOTICE => 'User notice', E_RECOVERABLE_ERROR => 'Recoverable error', E_DEPRECATED => 'Deprecated', E_USER_DEPRECATED => 'User deprecated', ); $backtrace = debug_backtrace(); // Add verbose backtrace for errors, but not for debug() messages. if ($severity !== E_USER_NOTICE) { $verbose_backtrace = $backtrace; array_shift($verbose_backtrace); $message .= '<pre class="backtrace">' . Error::formatBacktrace($verbose_backtrace) . '</pre>'; } $this->error($message, $error_map[$severity], Error::getLastCaller($backtrace)); } return TRUE; } /** * Handle exceptions. * * @see set_exception_handler */ protected function exceptionHandler($exception) { require_once DRUPAL_ROOT . '/core/includes/errors.inc'; $backtrace = $exception->getTrace(); $verbose_backtrace = $backtrace; // Push on top of the backtrace the call that generated the exception. array_unshift($backtrace, array( 'line' => $exception->getLine(), 'file' => $exception->getFile(), )); // \Drupal\Core\Utility\Error::decodeException() runs the exception // message through \Drupal\Component\Utility\String::checkPlain(). 
$decoded_exception = Error::decodeException($exception); unset($decoded_exception['backtrace']); $message = String::format('%type: !message in %function (line %line of %file). <pre class="backtrace">!backtrace</pre>', $decoded_exception + array( '!backtrace' => Error::formatBacktrace($verbose_backtrace), )); $this->error($message, 'Uncaught exception', Error::getLastCaller($backtrace)); } /** * Changes in-memory settings. * * @param $name * The name of the setting to set. * @param $value * The value of the setting. * * @see \Drupal\Core\Site\Settings::get() */ protected function settingsSet($name, $value) { $settings = Settings::getAll(); $settings[$name] = $value; new Settings($settings); } /** * Generates a pseudo-random string of ASCII characters of codes 32 to 126. * * Do not use this method when special characters are not possible (e.g., in * machine or file names that have already been validated); instead, use * \Drupal\simpletest\TestBase::randomMachineName(). If $length is greater * than 2, the random string will include at least one ampersand ('&') * character to ensure coverage for special characters and avoid the * introduction of random test failures. * * @param int $length * Length of random string to generate. * * @return string * Pseudo-randomly generated unique string including special characters. * * @see \Drupal\Component\Utility\Random::string() */ public function randomString($length = 8) { if ($length < 3) { return $this->getRandomGenerator()->string($length, TRUE, array($this, 'randomStringValidate')); } // To prevent the introduction of random test failures, ensure that the // returned string contains a character that needs to be escaped in HTML by // injecting an ampersand into it. $replacement_pos = floor($length / 2); // Remove 1 from the length to account for the ampersand character. $string = $this->getRandomGenerator()->string($length - 1, TRUE, array($this, 'randomStringValidate')); return substr_replace($string, '&', $replacement_pos, 0); } /** * Callback for random string validation. * * @see \Drupal\Component\Utility\Random::string() * * @param string $string * The random string to validate. * * @return bool * TRUE if the random string is valid, FALSE if not. */ public function randomStringValidate($string) { // Consecutive spaces cause issues for // Drupal\simpletest\WebTestBase::assertLink(). if (preg_match('/\s{2,}/', $string)) { return FALSE; } // Starting with a space means that length might not be what is expected. // Starting with an @ sign causes CURL to fail if used in conjunction with a // file upload, see https://drupal.org/node/2174997. if (preg_match('/^(\s|@)/', $string)) { return FALSE; } // Ending with a space means that length might not be what is expected. if (preg_match('/\s$/', $string)) { return FALSE; } return TRUE; } /** * Generates a unique random string containing letters and numbers. * * Do not use this method when testing unvalidated user input. Instead, use * \Drupal\simpletest\TestBase::randomString(). * * @param int $length * Length of random string to generate. * * @return string * Randomly generated unique string. * * @see \Drupal\Component\Utility\Random::name() */ public function randomMachineName($length = 8) { return $this->getRandomGenerator()->name($length, TRUE); } /** * Generates a random PHP object. * * @param int $size * The number of random keys to add to the object. * * @return \stdClass * The generated object, with the specified number of random keys. Each key * has a random string value. 
* * @see \Drupal\Component\Utility\Random::object() */ public function randomObject($size = 4) { return $this->getRandomGenerator()->object($size); } /** * Gets the random generator for the utility methods. * * @return \Drupal\Component\Utility\Random * The random generator. */ protected function getRandomGenerator() { if (!is_object($this->randomGenerator)) { $this->randomGenerator = new Random(); } return $this->randomGenerator; } /** * Converts a list of possible parameters into a stack of permutations. * * Takes a list of parameters containing possible values, and converts all of * them into a list of items containing every possible permutation. * * Example: * @code * $parameters = array( * 'one' => array(0, 1), * 'two' => array(2, 3), * ); * $permutations = TestBase::generatePermutations($parameters); * // Result: * $permutations == array( * array('one' => 0, 'two' => 2), * array('one' => 1, 'two' => 2), * array('one' => 0, 'two' => 3), * array('one' => 1, 'two' => 3), * ) * @endcode * * @param $parameters * An associative array of parameters, keyed by parameter name, and whose * values are arrays of parameter values. * * @return * A list of permutations, which is an array of arrays. Each inner array * contains the full list of parameters that have been passed, but with a * single value only. */ public static function generatePermutations($parameters) { $all_permutations = array(array()); foreach ($parameters as $parameter => $values) { $new_permutations = array(); // Iterate over all values of the parameter. foreach ($values as $value) { // Iterate over all existing permutations. foreach ($all_permutations as $permutation) { // Add the new parameter value to existing permutations. $new_permutations[] = $permutation + array($parameter => $value); } } // Replace the old permutations with the new permutations. $all_permutations = $new_permutations; } return $all_permutations; } /** * Ensures test files are deletable within file_unmanaged_delete_recursive(). * * Some tests chmod generated files to be read-only. During * TestBase::restoreEnvironment() and other cleanup operations, these files * need to get deleted too. */ public static function filePreDeleteCallback($path) { chmod($path, 0700); } /** * Returns a ConfigImporter object for testing the import of configuration. * * @return \Drupal\Core\Config\ConfigImporter * The ConfigImporter object. */ public function configImporter() { if (!$this->configImporter) { // Set up the ConfigImporter object for testing. $storage_comparer = new StorageComparer( $this->container->get('config.storage.staging'), $this->container->get('config.storage'), $this->container->get('config.manager') ); $this->configImporter = new ConfigImporter( $storage_comparer, $this->container->get('event_dispatcher'), $this->container->get('config.manager'), $this->container->get('lock'), $this->container->get('config.typed'), $this->container->get('module_handler'), $this->container->get('module_installer'), $this->container->get('theme_handler'), $this->container->get('string_translation') ); } // Always recalculate the changelist when called. return $this->configImporter->reset(); } /** * Copies configuration objects from source storage to target storage. * * @param \Drupal\Core\Config\StorageInterface $source_storage * The source config storage service. * @param \Drupal\Core\Config\StorageInterface $target_storage * The target config storage service. 
*/ public function copyConfig(StorageInterface $source_storage, StorageInterface $target_storage) { $target_storage->deleteAll(); foreach ($source_storage->listAll() as $name) { $target_storage->write($name, $source_storage->read($name)); } } }
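/**
 * Illustrative usage sketch (editor's addition, not part of the upstream
 * TestBase.php): a minimal test method pairing copyConfig() with
 * configImporter(), assuming a kernel-style test class with a booted
 * container where the config storage services are available. The class and
 * method names are hypothetical.
 */
class ConfigStagingExampleTest extends KernelTestBase {

  public function testStagedConfigurationCanBeReimported() {
    // Mirror the active configuration into the staging storage.
    $this->copyConfig(
      $this->container->get('config.storage'),
      $this->container->get('config.storage.staging')
    );
    // configImporter() recalculates the changelist on each call, so the
    // importer operates on exactly what was just staged; the import should
    // be a no-op because staging now matches the active configuration.
    $this->configImporter()->import();
  }

}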
Norrlandsit/d8
core/modules/simpletest/src/TestBase.php
PHP
gpl-2.0
54,530
/***************************************************************************** * ts.c: Transport Stream input module for VLC. ***************************************************************************** * Copyright (C) 2004-2005 VLC authors and VideoLAN * $Id$ * * Authors: Laurent Aimar <fenrir@via.ecp.fr> * Jean-Paul Saman <jpsaman #_at_# m2x.nl> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA. *****************************************************************************/ /***************************************************************************** * Preamble *****************************************************************************/ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include <vlc_common.h> #include <vlc_plugin.h> #include <assert.h> #include <time.h> #include <vlc_access.h> /* DVB-specific things */ #include <vlc_demux.h> #include <vlc_meta.h> #include <vlc_epg.h> #include <vlc_charset.h> /* FromCharset, for EIT */ #include <vlc_bits.h> #include "../../mux/mpeg/csa.h" /* Include dvbpsi headers */ # include <dvbpsi/dvbpsi.h> # include <dvbpsi/demux.h> # include <dvbpsi/descriptor.h> # include <dvbpsi/pat.h> # include <dvbpsi/pmt.h> # include <dvbpsi/sdt.h> # include <dvbpsi/dr.h> # include <dvbpsi/psi.h> /* EIT support */ # include <dvbpsi/eit.h> /* TDT support */ # include <dvbpsi/tot.h> #include "../../mux/mpeg/dvbpsi_compat.h" #include "../../mux/mpeg/streams.h" #include "../../mux/mpeg/tsutil.h" #include "../../mux/mpeg/tables.h" #include "../../codec/opus_header.h" #include "../opus.h" #include "pes.h" #include "mpeg4_iod.h" #ifdef HAVE_ARIBB24 #include <aribb24/aribb24.h> #include <aribb24/decoder.h> #endif typedef enum arib_modes_e { ARIBMODE_AUTO = -1, ARIBMODE_DISABLED = 0, ARIBMODE_ENABLED = 1 } arib_modes_e; /***************************************************************************** * Module descriptor *****************************************************************************/ static int Open ( vlc_object_t * ); static void Close ( vlc_object_t * ); /* TODO * - Rename "extra pmt" to "user pmt" * - Update extra pmt description * pmt_pid[:pmt_number][=pid_description[,pid_description]] * where pid_description could take 3 forms: * 1. pid:pcr (to force the pcr pid) * 2. pid:stream_type * 3. pid:type=fourcc where type=(video|audio|spu) */ #define PMT_TEXT N_("Extra PMT") #define PMT_LONGTEXT N_( \ "Allows a user to specify an extra pmt (pmt_pid=pid:stream_type[,...])." ) #define PID_TEXT N_("Set id of ES to PID") #define PID_LONGTEXT N_("Set the internal ID of each elementary stream" \ " handled by VLC to the same value as the PID in" \ " the TS stream, instead of 1, 2, 3, etc. Useful to" \ " do \'#duplicate{..., select=\"es=<pid>\"}\'.") #define CSA_TEXT N_("CSA Key") #define CSA_LONGTEXT N_("CSA encryption key. 
This must be a " \ "16 char string (8 hexadecimal bytes).") #define CSA2_TEXT N_("Second CSA Key") #define CSA2_LONGTEXT N_("The even CSA encryption key. This must be a " \ "16 char string (8 hexadecimal bytes).") #define CPKT_TEXT N_("Packet size in bytes to decrypt") #define CPKT_LONGTEXT N_("Specify the size of the TS packet to decrypt. " \ "The decryption routines subtract the TS-header from the value before " \ "decrypting. " ) #define SPLIT_ES_TEXT N_("Separate sub-streams") #define SPLIT_ES_LONGTEXT N_( \ "Separate teletext/dvbs pages into independent ES. " \ "It can be useful to turn off this option when using stream output." ) #define SEEK_PERCENT_TEXT N_("Seek based on percent not time") #define SEEK_PERCENT_LONGTEXT N_( \ "Seek and position based on a percent byte position, not a PCR generated " \ "time position. If seeking doesn't work properly, turn on this option." ) #define PCR_TEXT N_("Trust in-stream PCR") #define PCR_LONGTEXT N_("Use the stream PCR as a reference.") static const int arib_mode_list[] = { ARIBMODE_AUTO, ARIBMODE_ENABLED, ARIBMODE_DISABLED }; static const char *const arib_mode_list_text[] = { N_("Auto"), N_("Enabled"), N_("Disabled") }; #define SUPPORT_ARIB_TEXT N_("ARIB STD-B24 mode") #define SUPPORT_ARIB_LONGTEXT N_( \ "Forces ARIB STD-B24 mode for decoding characters. " \ "This feature affects EPG information and subtitles." ) vlc_module_begin () set_description( N_("MPEG Transport Stream demuxer") ) set_shortname ( "MPEG-TS" ) set_category( CAT_INPUT ) set_subcategory( SUBCAT_INPUT_DEMUX ) add_string( "ts-extra-pmt", NULL, PMT_TEXT, PMT_LONGTEXT, true ) add_bool( "ts-trust-pcr", true, PCR_TEXT, PCR_LONGTEXT, true ) change_safe() add_bool( "ts-es-id-pid", true, PID_TEXT, PID_LONGTEXT, true ) change_safe() add_obsolete_string( "ts-out" ) /* since 2.2.0 */ add_obsolete_integer( "ts-out-mtu" ) /* since 2.2.0 */ add_string( "ts-csa-ck", NULL, CSA_TEXT, CSA_LONGTEXT, true ) change_safe() add_string( "ts-csa2-ck", NULL, CSA2_TEXT, CSA2_LONGTEXT, true ) change_safe() add_integer( "ts-csa-pkt", 188, CPKT_TEXT, CPKT_LONGTEXT, true ) change_safe() add_bool( "ts-split-es", true, SPLIT_ES_TEXT, SPLIT_ES_LONGTEXT, false ) add_bool( "ts-seek-percent", false, SEEK_PERCENT_TEXT, SEEK_PERCENT_LONGTEXT, true ) add_integer( "ts-arib", ARIBMODE_AUTO, SUPPORT_ARIB_TEXT, SUPPORT_ARIB_LONGTEXT, false ) change_integer_list( arib_mode_list, arib_mode_list_text ) add_obsolete_bool( "ts-silent" ); set_capability( "demux", 10 ) set_callbacks( Open, Close ) add_shortcut( "ts" ) vlc_module_end () /***************************************************************************** * Local prototypes *****************************************************************************/ static const char *const ppsz_teletext_type[] = { "", N_("Teletext"), N_("Teletext subtitles"), N_("Teletext: additional information"), N_("Teletext: program schedule"), N_("Teletext subtitles: hearing impaired") }; typedef struct ts_pid_t ts_pid_t; typedef struct { int i_version; int i_ts_id; dvbpsi_t *handle; DECL_ARRAY(ts_pid_t *) programs; } ts_pat_t; typedef struct { dvbpsi_t *handle; int i_version; int i_number; int i_pid_pcr; /* IOD stuff (mpeg4) */ od_descriptor_t *iod; od_descriptors_t od; DECL_ARRAY(ts_pid_t *) e_streams; struct { mtime_t i_current; mtime_t i_first; // seen <> != -1 /* broken PCR handling */ mtime_t i_first_dts; mtime_t i_pcroffset; bool b_disable; /* ignore PCR field, use dts */ bool b_fix_done; } pcr; mtime_t i_last_dts; } ts_pmt_t; typedef struct { es_format_t fmt; es_out_id_t *id; 
uint16_t i_sl_es_id; } ts_pes_es_t; typedef enum { TS_ES_DATA_PES, TS_ES_DATA_TABLE_SECTION } ts_es_data_type_t; typedef struct { ts_pes_es_t es; /* Some private streams encapsulate several ES (eg. DVB subtitles) */ DECL_ARRAY( ts_pes_es_t * ) extra_es; uint8_t i_stream_type; ts_es_data_type_t data_type; int i_data_size; int i_data_gathered; block_t *p_data; block_t **pp_last; block_t * p_prepcr_outqueue; /* SL AU */ struct { block_t *p_data; block_t **pp_last; } sl; } ts_pes_t; typedef struct { /* for special PAT/SDT case */ dvbpsi_t *handle; /* PAT/SDT/EIT */ int i_version; } ts_psi_t; typedef enum { TS_PMT_REGISTRATION_NONE = 0, TS_PMT_REGISTRATION_HDMV } ts_pmt_registration_type_t; typedef enum { TYPE_FREE = 0, TYPE_PAT, TYPE_PMT, TYPE_PES, TYPE_SDT, TYPE_TDT, TYPE_EIT, } ts_pid_type_t; enum { FLAGS_NONE = 0, FLAG_SEEN = 1, FLAG_SCRAMBLED = 2, FLAG_FILTERED = 4 }; #define SEEN(x) ((x)->i_flags & FLAG_SEEN) #define SCRAMBLED(x) ((x).i_flags & FLAG_SCRAMBLED) struct ts_pid_t { uint16_t i_pid; uint8_t i_flags; uint8_t i_cc; /* continuity counter */ uint8_t type; /* PSI owner (ie PMT -> PAT, ES -> PMT) */ uint8_t i_refcount; ts_pid_t *p_parent; /* */ union { ts_pat_t *p_pat; ts_pmt_t *p_pmt; ts_pes_t *p_pes; ts_psi_t *p_psi; } u; struct { vlc_fourcc_t i_fourcc; int i_type; int i_pcr_count; } probed; }; typedef struct { int i_service; } vdr_info_t; #define MIN_ES_PID 4 /* Should be 32.. broken muxers */ #define MAX_ES_PID 8190 #define MIN_PAT_INTERVAL CLOCK_FREQ // DVB is 500ms #define PID_ALLOC_CHUNK 16 struct demux_sys_t { stream_t *stream; bool b_canseek; bool b_canfastseek; vlc_mutex_t csa_lock; /* TS packet size (188, 192, 204) */ unsigned i_packet_size; /* Additional TS packet header size (BluRay TS packets have 4-byte header before sync byte) */ unsigned i_packet_header_size; /* how many TS packets we read at once */ unsigned i_ts_read; bool b_force_seek_per_percent; struct { arib_modes_e e_mode; #ifdef HAVE_ARIBB24 arib_instance_t *p_instance; #endif stream_t *b25stream; } arib; /* All pid */ struct { ts_pid_t pat; ts_pid_t dummy; /* all non-common ones, dynamically allocated */ ts_pid_t **pp_all; int i_all; int i_all_alloc; /* most recently used */ uint16_t i_last_pid; ts_pid_t *p_last; } pids; bool b_user_pmt; int i_pmt_es; bool b_es_all; /* If we need to return all es/programs */ enum { NO_ES, /* for preparse */ DELAY_ES, CREATE_ES } es_creation; #define PREPARSING p_sys->es_creation == NO_ES /* */ bool b_es_id_pid; uint16_t i_next_extraid; csa_t *csa; int i_csa_pkt_size; bool b_split_es; bool b_trust_pcr; /* */ bool b_access_control; bool b_end_preparse; /* */ bool b_dvb_meta; int64_t i_tdt_delta; int64_t i_dvb_start; int64_t i_dvb_length; bool b_broken_charset; /* True if broken encoding is used in EPG/SDT */ /* Selected programs */ DECL_ARRAY( int ) programs; /* List of selected/access-filtered programs */ bool b_default_selection; /* True if set by default to first pmt seen (to get data from filtered access) */ struct { mtime_t i_first_dts; /* first dts encountered for the stream */ int i_timesourcepid; /* which pid we saved the dts from */ bool b_pat_deadline; /* set if we haven't seen PAT within MIN_PAT_INTERVAL */ } patfix; vdr_info_t vdr; /* */ bool b_start_record; }; static int Demux ( demux_t *p_demux ); static int Control( demux_t *p_demux, int i_query, va_list args ); static void PIDFillFormat( es_format_t *fmt, int i_stream_type, ts_es_data_type_t * ); static bool PIDSetup( demux_t *p_demux, ts_pid_type_t i_type, ts_pid_t *pid, ts_pid_t *p_parent ); static void 
PIDRelease( demux_t *p_demux, ts_pid_t *pid ); static void PATCallBack( void*, dvbpsi_pat_t * ); static void PMTCallBack( void *data, dvbpsi_pmt_t *p_pmt ); static void PSINewTableCallBack( dvbpsi_t *handle, uint8_t i_table_id, uint16_t i_extension, demux_t * ); static int ChangeKeyCallback( vlc_object_t *, char const *, vlc_value_t, vlc_value_t, void * ); /* Structs */ static ts_pat_t *ts_pat_New( demux_t * ); static void ts_pat_Del( demux_t *, ts_pat_t * ); static ts_pmt_t *ts_pmt_New( demux_t * ); static void ts_pmt_Del( demux_t *, ts_pmt_t * ); static ts_pes_t *ts_pes_New( demux_t * ); static void ts_pes_Del( demux_t *, ts_pes_t * ); static ts_psi_t *ts_psi_New( demux_t * ); static void ts_psi_Del( demux_t *, ts_psi_t * ); /* Helpers */ static ts_pid_t *GetPID( demux_sys_t *, uint16_t i_pid ); static ts_pmt_t * GetProgramByID( demux_sys_t *, int i_program ); static bool ProgramIsSelected( demux_sys_t *, uint16_t i_pgrm ); static void UpdatePESFilters( demux_t *p_demux, bool b_all ); static inline void FlushESBuffer( ts_pes_t *p_pes ); static void UpdateScrambledState( demux_t *p_demux, ts_pid_t *p_pid, bool ); static inline int PIDGet( block_t *p ) { return ( (p->p_buffer[1]&0x1f)<<8 )|p->p_buffer[2]; } static bool GatherData( demux_t *p_demux, ts_pid_t *pid, block_t *p_bk ); static void AddAndCreateES( demux_t *p_demux, ts_pid_t *pid, bool ); static void ProgramSetPCR( demux_t *p_demux, ts_pmt_t *p_prg, mtime_t i_pcr ); static block_t* ReadTSPacket( demux_t *p_demux ); static int ProbeStart( demux_t *p_demux, int i_program ); static int ProbeEnd( demux_t *p_demux, int i_program ); static int SeekToTime( demux_t *p_demux, ts_pmt_t *, int64_t time ); static void ReadyQueuesPostSeek( demux_t *p_demux ); static void PCRHandle( demux_t *p_demux, ts_pid_t *, block_t * ); static void PCRFixHandle( demux_t *, ts_pmt_t *, block_t * ); static int64_t TimeStampWrapAround( ts_pmt_t *, int64_t ); /* MPEG4 related */ static const es_mpeg4_descriptor_t * GetMPEG4DescByEsId( const ts_pmt_t *, uint16_t ); static ts_pes_es_t * GetPMTESBySLEsId( ts_pmt_t *, uint16_t ); static bool SetupISO14496LogicalStream( demux_t *, const decoder_config_descriptor_t *, es_format_t * ); #define TS_USER_PMT_NUMBER (0) static int UserPmt( demux_t *p_demux, const char * ); static int SetPIDFilter( demux_sys_t *, ts_pid_t *, bool b_selected ); #define TS_PACKET_SIZE_188 188 #define TS_PACKET_SIZE_192 192 #define TS_PACKET_SIZE_204 204 #define TS_PACKET_SIZE_MAX 204 #define TS_HEADER_SIZE 4 static int DetectPacketSize( demux_t *p_demux, unsigned *pi_header_size, int i_offset ) { const uint8_t *p_peek; if( stream_Peek( p_demux->s, &p_peek, i_offset + TS_PACKET_SIZE_MAX ) < i_offset + TS_PACKET_SIZE_MAX ) return -1; for( int i_sync = 0; i_sync < TS_PACKET_SIZE_MAX; i_sync++ ) { if( p_peek[i_offset + i_sync] != 0x47 ) continue; /* Check next 3 sync bytes */ int i_peek = i_offset + TS_PACKET_SIZE_MAX * 3 + i_sync + 1; if( ( stream_Peek( p_demux->s, &p_peek, i_peek ) ) < i_peek ) { msg_Err( p_demux, "cannot peek" ); return -1; } if( p_peek[i_offset + i_sync + 1 * TS_PACKET_SIZE_188] == 0x47 && p_peek[i_offset + i_sync + 2 * TS_PACKET_SIZE_188] == 0x47 && p_peek[i_offset + i_sync + 3 * TS_PACKET_SIZE_188] == 0x47 ) { return TS_PACKET_SIZE_188; } else if( p_peek[i_offset + i_sync + 1 * TS_PACKET_SIZE_192] == 0x47 && p_peek[i_offset + i_sync + 2 * TS_PACKET_SIZE_192] == 0x47 && p_peek[i_offset + i_sync + 3 * TS_PACKET_SIZE_192] == 0x47 ) { if( i_sync == 4 ) { *pi_header_size = 4; /* BluRay TS packets have 4-byte header */ } return 
TS_PACKET_SIZE_192; } else if( p_peek[i_offset + i_sync + 1 * TS_PACKET_SIZE_204] == 0x47 && p_peek[i_offset + i_sync + 2 * TS_PACKET_SIZE_204] == 0x47 && p_peek[i_offset + i_sync + 3 * TS_PACKET_SIZE_204] == 0x47 ) { return TS_PACKET_SIZE_204; } } if( p_demux->b_force ) { msg_Warn( p_demux, "this does not look like a TS stream, continuing" ); return TS_PACKET_SIZE_188; } msg_Dbg( p_demux, "TS module discarded (lost sync)" ); return -1; } #define TOPFIELD_HEADER_SIZE 3712 static int DetectPVRHeadersAndHeaderSize( demux_t *p_demux, unsigned *pi_header_size, vdr_info_t *p_vdr ) { const uint8_t *p_peek; *pi_header_size = 0; int i_packet_size = -1; if( stream_Peek( p_demux->s, &p_peek, TS_PACKET_SIZE_MAX ) < TS_PACKET_SIZE_MAX ) return -1; if( memcmp( p_peek, "TFrc", 4 ) == 0 && p_peek[6] == 0 && memcmp( &p_peek[53], "\x80\x00\x00", 4 ) == 0 && stream_Peek( p_demux->s, &p_peek, TOPFIELD_HEADER_SIZE + TS_PACKET_SIZE_MAX ) == TOPFIELD_HEADER_SIZE + TS_PACKET_SIZE_MAX ) { i_packet_size = DetectPacketSize( p_demux, pi_header_size, TOPFIELD_HEADER_SIZE ); if( i_packet_size != -1 ) { msg_Dbg( p_demux, "this is a topfield file" ); #if 0 /* I used the TF5000PVR 2004 Firmware .doc header documentation, * http://www.i-topfield.com/data/product/firmware/Structure%20of%20Recorded%20File%20in%20TF5000PVR%20(Feb%2021%202004).doc * but after the filename the offsets seem to be incorrect. - DJ */ int i_duration, i_name; char *psz_name = xmalloc(25); char *psz_event_name; char *psz_event_text = xmalloc(130); char *psz_ext_text = xmalloc(1025); // 2 bytes version Uimsbf (4,5) // 2 bytes reserved (6,7) // 2 bytes duration in minutes Uimsbf (8,9( i_duration = (int) (p_peek[8] << 8) | p_peek[9]; msg_Dbg( p_demux, "Topfield recording length: +/- %d minutes", i_duration); // 2 bytes service number in channel list (10, 11) // 2 bytes service type Bslbf 0=TV 1=Radio Bslb (12, 13) // 4 bytes of reserved + tuner info (14,15,16,17) // 2 bytes of Service ID Bslbf (18,19) // 2 bytes of PMT PID Uimsbf (20,21) // 2 bytes of PCR PID Uimsbf (22,23) // 2 bytes of Video PID Uimsbf (24,25) // 2 bytes of Audio PID Uimsbf (26,27) // 24 bytes filename Bslbf memcpy( psz_name, &p_peek[28], 24 ); psz_name[24] = '\0'; msg_Dbg( p_demux, "recordingname=%s", psz_name ); // 1 byte of sat index Uimsbf (52) // 3 bytes (1 bit of polarity Bslbf +23 bits reserved) // 4 bytes of freq. Uimsbf (56,57,58,59) // 2 bytes of symbol rate Uimsbf (60,61) // 2 bytes of TS stream ID Uimsbf (62,63) // 4 bytes reserved // 2 bytes reserved // 2 bytes duration Uimsbf (70,71) //i_duration = (int) (p_peek[70] << 8) | p_peek[71]; //msg_Dbg( p_demux, "Topfield 2nd duration field: +/- %d minutes", i_duration); // 4 bytes EventID Uimsbf (72-75) // 8 bytes of Start and End time info (76-83) // 1 byte reserved (84) // 1 byte event name length Uimsbf (89) i_name = (int)(p_peek[89]&~0x81); msg_Dbg( p_demux, "event name length = %d", i_name); psz_event_name = xmalloc( i_name+1 ); // 1 byte parental rating (90) // 129 bytes of event text memcpy( psz_event_name, &p_peek[91], i_name ); psz_event_name[i_name] = '\0'; memcpy( psz_event_text, &p_peek[91+i_name], 129-i_name ); psz_event_text[129-i_name] = '\0'; msg_Dbg( p_demux, "event name=%s", psz_event_name ); msg_Dbg( p_demux, "event text=%s", psz_event_text ); // 12 bytes reserved (220) // 6 bytes reserved // 2 bytes Event Text Length Uimsbf // 4 bytes EventID Uimsbf // FIXME We just have 613 bytes. 
not enough for this entire text // 1024 bytes Extended Event Text Bslbf memcpy( psz_ext_text, p_peek+372, 1024 ); psz_ext_text[1024] = '\0'; msg_Dbg( p_demux, "extended event text=%s", psz_ext_text ); // 52 bytes reserved Bslbf #endif p_vdr->i_service = GetWBE(&p_peek[18]); return i_packet_size; //return TS_PACKET_SIZE_188; } } return DetectPacketSize( p_demux, pi_header_size, 0 ); } static void ProbePES( demux_t *p_demux, ts_pid_t *pid, const uint8_t *p_pesstart, size_t i_data, bool b_adaptfield ) { demux_sys_t *p_sys = p_demux->p_sys; const uint8_t *p_pes = p_pesstart; pid->probed.i_type = -1; if( b_adaptfield ) { if ( i_data < 2 ) return; uint8_t len = *p_pes; p_pes++; i_data--; if(len == 0) { p_pes++; i_data--;/* stuffing */ } else { if( i_data < len ) return; if( len >= 7 && (p_pes[1] & 0x10) ) pid->probed.i_pcr_count++; p_pes += len; i_data -= len; } } if( i_data < 9 ) return; if( p_pes[0] != 0 || p_pes[1] != 0 || p_pes[2] != 1 ) return; size_t i_pesextoffset = 8; mtime_t i_dts = -1; if( p_pes[7] & 0x80 ) // PTS { i_pesextoffset += 5; if ( i_data < i_pesextoffset ) return; i_dts = ExtractPESTimestamp( &p_pes[9] ); } if( p_pes[7] & 0x40 ) // DTS { i_pesextoffset += 5; if ( i_data < i_pesextoffset ) return; i_dts = ExtractPESTimestamp( &p_pes[14] ); } if( p_pes[7] & 0x20 ) // ESCR i_pesextoffset += 6; if( p_pes[7] & 0x10 ) // ESrate i_pesextoffset += 3; if( p_pes[7] & 0x08 ) // DSM i_pesextoffset += 1; if( p_pes[7] & 0x04 ) // CopyInfo i_pesextoffset += 1; if( p_pes[7] & 0x02 ) // PESCRC i_pesextoffset += 2; if ( i_data < i_pesextoffset ) return; /* Header data length */ const size_t i_payloadoffset = 8 + 1 + p_pes[8]; i_pesextoffset += 1; if ( i_data < i_pesextoffset || i_data < i_payloadoffset ) return; i_data -= 8 + 1 + p_pes[8]; if( p_pes[7] & 0x01 ) // PESExt { size_t i_extension2_offset = 1; if ( p_pes[i_pesextoffset] & 0x80 ) // private data i_extension2_offset += 16; if ( p_pes[i_pesextoffset] & 0x40 ) // pack i_extension2_offset += 1; if ( p_pes[i_pesextoffset] & 0x20 ) // seq i_extension2_offset += 2; if ( p_pes[i_pesextoffset] & 0x10 ) // P-STD i_extension2_offset += 2; if ( p_pes[i_pesextoffset] & 0x01 ) // Extension 2 { uint8_t i_len = p_pes[i_pesextoffset + i_extension2_offset] & 0x7F; i_extension2_offset += i_len; } if( i_data < i_extension2_offset ) return; i_data -= i_extension2_offset; } /* (i_payloadoffset - i_pesextoffset) 0xFF stuffing */ if ( i_data < 4 ) return; const uint8_t *p_data = &p_pes[i_payloadoffset]; /* NON MPEG audio & subpictures STREAM */ if(p_pes[3] == 0xBD) { if( !memcmp( p_data, "\x7F\xFE\x80\x01", 4 ) ) { pid->probed.i_type = 0x06; pid->probed.i_fourcc = VLC_CODEC_DTS; } else if( !memcmp( p_data, "\x0B\x77", 2 ) ) { pid->probed.i_type = 0x06; pid->probed.i_fourcc = VLC_CODEC_EAC3; } } /* MPEG AUDIO STREAM */ else if(p_pes[3] >= 0xC0 && p_pes[3] <= 0xDF) { if( p_data[0] == 0xFF && (p_data[1] & 0xE0) == 0xE0 ) { switch(p_data[1] & 0x18) { /* 10 - MPEG Version 2 (ISO/IEC 13818-3) 11 - MPEG Version 1 (ISO/IEC 11172-3) */ case 0x10: pid->probed.i_type = 0x04; break; case 0x18: pid->probed.i_type = 0x03; break; default: break; } switch(p_data[1] & 6) { /* 01 - Layer III 10 - Layer II 11 - Layer I */ case 0x06: pid->probed.i_type = 0x04; pid->probed.i_fourcc = VLC_CODEC_MPGA; break; case 0x04: pid->probed.i_type = 0x04; pid->probed.i_fourcc = VLC_CODEC_MP2; break; case 0x02: pid->probed.i_type = 0x04; pid->probed.i_fourcc = VLC_CODEC_MP3; break; default: break; } } } /* VIDEO STREAM */ else if( p_pes[3] >= 0xE0 && p_pes[3] <= 0xEF ) { if( !memcmp( p_data, 
"\x00\x00\x00\x01", 4 ) ) { pid->probed.i_type = 0x1b; pid->probed.i_fourcc = VLC_CODEC_H264; } else if( !memcmp( p_data, "\x00\x00\x01", 4 ) ) { pid->probed.i_type = 0x02; pid->probed.i_fourcc = VLC_CODEC_MPGV; } } /* Track timestamps and flag missing PAT */ if( !p_sys->patfix.i_timesourcepid && i_dts > -1 ) { p_sys->patfix.i_first_dts = i_dts; p_sys->patfix.i_timesourcepid = pid->i_pid; } else if( p_sys->patfix.i_timesourcepid == pid->i_pid && i_dts > -1 && !p_sys->patfix.b_pat_deadline ) { if( i_dts - p_sys->patfix.i_first_dts > TO_SCALE(MIN_PAT_INTERVAL) ) p_sys->patfix.b_pat_deadline = true; } } static void BuildPATCallback( void *p_opaque, block_t *p_block ) { ts_pid_t *pat_pid = (ts_pid_t *) p_opaque; dvbpsi_packet_push( pat_pid->u.p_pat->handle, p_block->p_buffer ); } static void BuildPMTCallback( void *p_opaque, block_t *p_block ) { ts_pid_t *program_pid = (ts_pid_t *) p_opaque; assert(program_pid->type == TYPE_PMT); while( p_block ) { dvbpsi_packet_push( program_pid->u.p_pmt->handle, p_block->p_buffer ); p_block = p_block->p_next; } } static void MissingPATPMTFixup( demux_t *p_demux ) { demux_sys_t *p_sys = p_demux->p_sys; int i_program_number = 1234; int i_program_pid = 1337; int i_pcr_pid = 0x1FFF; int i_num_pes = 0; ts_pid_t *p_program_pid = GetPID( p_sys, i_program_pid ); if( SEEN(p_program_pid) ) { /* Find a free one */ for( i_program_pid = MIN_ES_PID; i_program_pid <= MAX_ES_PID && SEEN(p_program_pid); i_program_pid++ ) { p_program_pid = GetPID( p_sys, i_program_pid ); } } for( int i=0; i<p_sys->pids.i_all; i++ ) { const ts_pid_t *p_pid = p_sys->pids.pp_all[i]; if( !SEEN(p_pid) || p_pid->probed.i_type == -1 ) continue; if( i_pcr_pid == 0x1FFF && ( p_pid->probed.i_type == 0x03 || p_pid->probed.i_pcr_count ) ) i_pcr_pid = p_pid->i_pid; i_num_pes++; } if( i_num_pes == 0 ) return; ts_stream_t patstream = { .i_pid = 0, .i_continuity_counter = 0x10, .b_discontinuity = false }; ts_stream_t pmtprogramstream = { .i_pid = i_program_pid, .i_continuity_counter = 0x0, .b_discontinuity = false }; BuildPAT( GetPID(p_sys, 0)->u.p_pat->handle, &p_sys->pids.pat, BuildPATCallback, 0, 1, &patstream, 1, &pmtprogramstream, &i_program_number ); /* PAT callback should have been triggered */ if( p_program_pid->type != TYPE_PMT ) { msg_Err( p_demux, "PAT creation failed" ); return; } struct esstreams_t { pes_stream_t pes; ts_stream_t ts; }; es_format_t esfmt = {0}; struct esstreams_t *esstreams = calloc( i_num_pes, sizeof(struct esstreams_t) ); pes_mapped_stream_t *mapped = calloc( i_num_pes, sizeof(pes_mapped_stream_t) ); if( esstreams && mapped ) { int j=0; for( int i=0; i<p_sys->pids.i_all; i++ ) { const ts_pid_t *p_pid = p_sys->pids.pp_all[i]; if( !SEEN(p_pid) || p_pid->probed.i_type == -1 ) continue; esstreams[j].pes.i_codec = p_pid->probed.i_fourcc; esstreams[j].pes.i_stream_type = p_pid->probed.i_type; esstreams[j].ts.i_pid = p_pid->i_pid; mapped[j].pes = &esstreams[j].pes; mapped[j].ts = &esstreams[j].ts; mapped[j].fmt = &esfmt; j++; } BuildPMT( GetPID(p_sys, 0)->u.p_pat->handle, VLC_OBJECT(p_demux), p_program_pid, BuildPMTCallback, 0, 1, i_pcr_pid, NULL, 1, &pmtprogramstream, &i_program_number, i_num_pes, mapped ); } free(esstreams); free(mapped); } /***************************************************************************** * Open *****************************************************************************/ static int Open( vlc_object_t *p_this ) { demux_t *p_demux = (demux_t*)p_this; demux_sys_t *p_sys; int i_packet_size; unsigned i_packet_header_size = 0; ts_pid_t *patpid; vdr_info_t 
vdr = {0}; /* Search first sync byte */ i_packet_size = DetectPVRHeadersAndHeaderSize( p_demux, &i_packet_header_size, &vdr ); if( i_packet_size < 0 ) return VLC_EGENERIC; p_demux->p_sys = p_sys = malloc( sizeof( demux_sys_t ) ); if( !p_sys ) return VLC_ENOMEM; memset( p_sys, 0, sizeof( demux_sys_t ) ); vlc_mutex_init( &p_sys->csa_lock ); p_demux->pf_demux = Demux; p_demux->pf_control = Control; /* Init p_sys field */ p_sys->b_dvb_meta = true; p_sys->b_access_control = true; p_sys->b_end_preparse = false; ARRAY_INIT( p_sys->programs ); p_sys->b_default_selection = false; p_sys->i_tdt_delta = 0; p_sys->i_dvb_start = 0; p_sys->i_dvb_length = 0; p_sys->vdr = vdr; p_sys->arib.b25stream = NULL; p_sys->stream = p_demux->s; p_sys->b_broken_charset = false; p_sys->pids.dummy.i_pid = 8191; p_sys->pids.dummy.i_flags = FLAG_SEEN; p_sys->i_packet_size = i_packet_size; p_sys->i_packet_header_size = i_packet_header_size; p_sys->i_ts_read = 50; p_sys->csa = NULL; p_sys->b_start_record = false; # define VLC_DVBPSI_DEMUX_TABLE_INIT(table,obj) \ do { \ if( !dvbpsi_AttachDemux( (table)->u.p_psi->handle, (dvbpsi_demux_new_cb_t)PSINewTableCallBack, (obj) ) ) \ { \ msg_Warn( obj, "Can't dvbpsi_AttachDemux on pid %d", (table)->i_pid );\ } \ } while (0) /* Init PAT handler */ patpid = GetPID(p_sys, 0); if ( !PIDSetup( p_demux, TYPE_PAT, patpid, NULL ) ) { vlc_mutex_destroy( &p_sys->csa_lock ); free( p_sys ); return VLC_ENOMEM; } if( !dvbpsi_pat_attach( patpid->u.p_pat->handle, PATCallBack, p_demux ) ) { PIDRelease( p_demux, patpid ); vlc_mutex_destroy( &p_sys->csa_lock ); free( p_sys ); return VLC_EGENERIC; } if( p_sys->b_dvb_meta ) { if( !PIDSetup( p_demux, TYPE_SDT, GetPID(p_sys, 0x11), NULL ) || !PIDSetup( p_demux, TYPE_EIT, GetPID(p_sys, 0x12), NULL ) || !PIDSetup( p_demux, TYPE_TDT, GetPID(p_sys, 0x14), NULL ) ) { PIDRelease( p_demux, GetPID(p_sys, 0x11) ); PIDRelease( p_demux, GetPID(p_sys, 0x12) ); PIDRelease( p_demux, GetPID(p_sys, 0x14) ); p_sys->b_dvb_meta = false; } else { VLC_DVBPSI_DEMUX_TABLE_INIT(GetPID(p_sys, 0x11), p_demux); VLC_DVBPSI_DEMUX_TABLE_INIT(GetPID(p_sys, 0x12), p_demux); VLC_DVBPSI_DEMUX_TABLE_INIT(GetPID(p_sys, 0x14), p_demux); if( p_sys->b_access_control && ( SetPIDFilter( p_sys, GetPID(p_sys, 0x11), true ) || SetPIDFilter( p_sys, GetPID(p_sys, 0x14), true ) || SetPIDFilter( p_sys, GetPID(p_sys, 0x12), true ) ) ) p_sys->b_access_control = false; } } # undef VLC_DVBPSI_DEMUX_TABLE_INIT p_sys->i_pmt_es = 0; p_sys->b_es_all = false; /* Read config */ p_sys->b_es_id_pid = var_CreateGetBool( p_demux, "ts-es-id-pid" ); p_sys->i_next_extraid = 1; p_sys->b_trust_pcr = var_CreateGetBool( p_demux, "ts-trust-pcr" ); /* We handle description of an extra PMT */ char* psz_string = var_CreateGetString( p_demux, "ts-extra-pmt" ); p_sys->b_user_pmt = false; if( psz_string && *psz_string ) UserPmt( p_demux, psz_string ); free( psz_string ); psz_string = var_CreateGetStringCommand( p_demux, "ts-csa-ck" ); if( psz_string && *psz_string ) { int i_res; char* psz_csa2; p_sys->csa = csa_New(); psz_csa2 = var_CreateGetStringCommand( p_demux, "ts-csa2-ck" ); i_res = csa_SetCW( (vlc_object_t*)p_demux, p_sys->csa, psz_string, true ); if( i_res == VLC_SUCCESS && psz_csa2 && *psz_csa2 ) { if( csa_SetCW( (vlc_object_t*)p_demux, p_sys->csa, psz_csa2, false ) != VLC_SUCCESS ) { csa_SetCW( (vlc_object_t*)p_demux, p_sys->csa, psz_string, false ); } } else if ( i_res == VLC_SUCCESS ) { csa_SetCW( (vlc_object_t*)p_demux, p_sys->csa, psz_string, false ); } else { csa_Delete( p_sys->csa ); p_sys->csa = NULL; } if( 
p_sys->csa ) { var_AddCallback( p_demux, "ts-csa-ck", ChangeKeyCallback, (void *)1 ); var_AddCallback( p_demux, "ts-csa2-ck", ChangeKeyCallback, NULL ); int i_pkt = var_CreateGetInteger( p_demux, "ts-csa-pkt" ); if( i_pkt < 4 || i_pkt > 188 ) { msg_Err( p_demux, "wrong packet size %d specified.", i_pkt ); msg_Warn( p_demux, "using default packet size of 188 bytes" ); p_sys->i_csa_pkt_size = 188; } else p_sys->i_csa_pkt_size = i_pkt; msg_Dbg( p_demux, "decrypting %d bytes of packet", p_sys->i_csa_pkt_size ); } free( psz_csa2 ); } free( psz_string ); p_sys->b_split_es = var_InheritBool( p_demux, "ts-split-es" ); p_sys->b_canseek = false; p_sys->b_canfastseek = false; p_sys->b_force_seek_per_percent = var_InheritBool( p_demux, "ts-seek-percent" ); p_sys->arib.e_mode = var_InheritInteger( p_demux, "ts-arib" ); stream_Control( p_sys->stream, STREAM_CAN_SEEK, &p_sys->b_canseek ); stream_Control( p_sys->stream, STREAM_CAN_FASTSEEK, &p_sys->b_canfastseek ); /* Preparse time */ if( p_sys->b_canseek ) { p_sys->es_creation = NO_ES; while( !p_sys->i_pmt_es && !p_sys->b_end_preparse ) if( Demux( p_demux ) != VLC_DEMUXER_SUCCESS ) break; p_sys->es_creation = DELAY_ES; } else p_sys->es_creation = ( p_sys->b_access_control ? CREATE_ES : DELAY_ES ); return VLC_SUCCESS; } /***************************************************************************** * Close *****************************************************************************/ static void Close( vlc_object_t *p_this ) { demux_t *p_demux = (demux_t*)p_this; demux_sys_t *p_sys = p_demux->p_sys; PIDRelease( p_demux, GetPID(p_sys, 0) ); if( p_sys->b_dvb_meta ) { PIDRelease( p_demux, GetPID(p_sys, 0x11) ); PIDRelease( p_demux, GetPID(p_sys, 0x12) ); PIDRelease( p_demux, GetPID(p_sys, 0x14) ); } vlc_mutex_lock( &p_sys->csa_lock ); if( p_sys->csa ) { var_DelCallback( p_demux, "ts-csa-ck", ChangeKeyCallback, NULL ); var_DelCallback( p_demux, "ts-csa2-ck", ChangeKeyCallback, NULL ); csa_Delete( p_sys->csa ); } vlc_mutex_unlock( &p_sys->csa_lock ); ARRAY_RESET( p_sys->programs ); #ifdef HAVE_ARIBB24 if ( p_sys->arib.p_instance ) arib_instance_destroy( p_sys->arib.p_instance ); #endif if ( p_sys->arib.b25stream ) { p_sys->arib.b25stream->p_source = NULL; /* don't chain kill demuxer's source */ stream_Delete( p_sys->arib.b25stream ); } vlc_mutex_destroy( &p_sys->csa_lock ); /* Release all non default pids */ for( int i = 0; i < p_sys->pids.i_all; i++ ) { ts_pid_t *pid = p_sys->pids.pp_all[i]; #ifndef NDEBUG if( pid->type != TYPE_FREE ) msg_Err( p_demux, "PID %d type %d not freed", pid->i_pid, pid->type ); #endif free( pid ); } free( p_sys->pids.pp_all ); free( p_sys ); } /***************************************************************************** * ChangeKeyCallback: called when changing the odd encryption key on the fly. 
*****************************************************************************/ static int ChangeKeyCallback( vlc_object_t *p_this, char const *psz_cmd, vlc_value_t oldval, vlc_value_t newval, void *p_data ) { VLC_UNUSED(psz_cmd); VLC_UNUSED(oldval); demux_t *p_demux = (demux_t*)p_this; demux_sys_t *p_sys = p_demux->p_sys; int i_tmp = (intptr_t)p_data; vlc_mutex_lock( &p_sys->csa_lock ); if ( i_tmp ) i_tmp = csa_SetCW( p_this, p_sys->csa, newval.psz_string, true ); else i_tmp = csa_SetCW( p_this, p_sys->csa, newval.psz_string, false ); vlc_mutex_unlock( &p_sys->csa_lock ); return i_tmp; } /***************************************************************************** * Demux: *****************************************************************************/ static int Demux( demux_t *p_demux ) { demux_sys_t *p_sys = p_demux->p_sys; bool b_wait_es = p_sys->i_pmt_es <= 0; /* If we had no PAT within MIN_PAT_INTERVAL, create PAT/PMT from probed streams */ if( p_sys->i_pmt_es == 0 && !SEEN(GetPID(p_sys, 0)) && p_sys->patfix.b_pat_deadline ) MissingPATPMTFixup( p_demux ); /* We read at most 100 TS packet or until a frame is completed */ for( unsigned i_pkt = 0; i_pkt < p_sys->i_ts_read; i_pkt++ ) { bool b_frame = false; block_t *p_pkt; if( !(p_pkt = ReadTSPacket( p_demux )) ) { return VLC_DEMUXER_EOF; } if( p_sys->b_start_record ) { /* Enable recording once synchronized */ stream_Control( p_sys->stream, STREAM_SET_RECORD_STATE, true, "ts" ); p_sys->b_start_record = false; } /* Parse the TS packet */ ts_pid_t *p_pid = GetPID( p_sys, PIDGet( p_pkt ) ); if( (p_pkt->p_buffer[1] & 0x40) && (p_pkt->p_buffer[3] & 0x10) && !SCRAMBLED(*p_pid) != !(p_pkt->p_buffer[3] & 0x80) ) { UpdateScrambledState( p_demux, p_pid, p_pkt->p_buffer[3] & 0x80 ); } if( !SEEN(p_pid) ) { if( p_pid->type == TYPE_FREE ) msg_Dbg( p_demux, "pid[%d] unknown", p_pid->i_pid ); p_pid->i_flags |= FLAG_SEEN; } if ( SCRAMBLED(*p_pid) && !p_demux->p_sys->csa ) { PCRHandle( p_demux, p_pid, p_pkt ); block_Release( p_pkt ); continue; } /* Probe streams to build PAT/PMT after MIN_PAT_INTERVAL in case we don't see any PAT */ if( !SEEN( GetPID( p_sys, 0 ) ) && (p_pid->probed.i_type == 0 || p_pid->i_pid == p_sys->patfix.i_timesourcepid) && (p_pkt->p_buffer[1] & 0xC0) == 0x40 && /* Payload start but not corrupt */ (p_pkt->p_buffer[3] & 0xD0) == 0x10 ) /* Has payload but is not encrypted */ { ProbePES( p_demux, p_pid, p_pkt->p_buffer + TS_HEADER_SIZE, p_pkt->i_buffer - TS_HEADER_SIZE, p_pkt->p_buffer[3] & 0x20 /* Adaptation field */); } switch( p_pid->type ) { case TYPE_PAT: dvbpsi_packet_push( p_pid->u.p_pat->handle, p_pkt->p_buffer ); block_Release( p_pkt ); break; case TYPE_PMT: dvbpsi_packet_push( p_pid->u.p_pmt->handle, p_pkt->p_buffer ); block_Release( p_pkt ); break; case TYPE_PES: p_sys->b_end_preparse = true; if( p_sys->es_creation == DELAY_ES ) /* No longer delay ES since that pid's program sends data */ { msg_Dbg( p_demux, "Creating delayed ES" ); AddAndCreateES( p_demux, p_pid, true ); } if( !p_sys->b_access_control && !(p_pid->i_flags & FLAG_FILTERED) ) { /* That packet is for an unselected ES, don't waste time/memory gathering its data */ block_Release( p_pkt ); continue; } b_frame = GatherData( p_demux, p_pid, p_pkt ); break; case TYPE_SDT: case TYPE_TDT: case TYPE_EIT: if( p_sys->b_dvb_meta ) dvbpsi_packet_push( p_pid->u.p_psi->handle, p_pkt->p_buffer ); block_Release( p_pkt ); break; default: /* We have to handle PCR if present */ PCRHandle( p_demux, p_pid, p_pkt ); block_Release( p_pkt ); break; } if( b_frame || ( b_wait_es && 
p_sys->i_pmt_es > 0 ) ) break; } demux_UpdateTitleFromStream( p_demux ); return VLC_DEMUXER_SUCCESS; } /***************************************************************************** * Control: *****************************************************************************/ static int DVBEventInformation( demux_t *p_demux, int64_t *pi_time, int64_t *pi_length ) { demux_sys_t *p_sys = p_demux->p_sys; if( pi_length ) *pi_length = 0; if( pi_time ) *pi_time = 0; if( p_sys->i_dvb_length > 0 ) { const int64_t t = mdate() + p_sys->i_tdt_delta; if( p_sys->i_dvb_start <= t && t < p_sys->i_dvb_start + p_sys->i_dvb_length ) { if( pi_length ) *pi_length = p_sys->i_dvb_length; if( pi_time ) *pi_time = t - p_sys->i_dvb_start; return VLC_SUCCESS; } } return VLC_EGENERIC; } static void UpdatePESFilters( demux_t *p_demux, bool b_all ) { demux_sys_t *p_sys = p_demux->p_sys; ts_pat_t *p_pat = GetPID(p_sys, 0)->u.p_pat; for( int i=0; i< p_pat->programs.i_size; i++ ) { ts_pmt_t *p_pmt = p_pat->programs.p_elems[i]->u.p_pmt; bool b_program_selected; if( (p_sys->b_default_selection && !p_sys->b_access_control) || b_all ) b_program_selected = true; else b_program_selected = ProgramIsSelected( p_sys, p_pmt->i_number ); SetPIDFilter( p_sys, p_pat->programs.p_elems[i], b_program_selected ); for( int j=0; j<p_pmt->e_streams.i_size; j++ ) { ts_pid_t *espid = p_pmt->e_streams.p_elems[j]; bool b_stream_selected = b_program_selected; if( b_program_selected && !b_all && espid->u.p_pes->es.id ) { es_out_Control( p_demux->out, ES_OUT_GET_ES_STATE, espid->u.p_pes->es.id, &b_stream_selected ); for( int k=0; !b_stream_selected && k< espid->u.p_pes->extra_es.i_size; k++ ) { if( espid->u.p_pes->extra_es.p_elems[k]->id ) es_out_Control( p_demux->out, ES_OUT_GET_ES_STATE, espid->u.p_pes->extra_es.p_elems[k]->id, &b_stream_selected ); } } if( espid->u.p_pes->es.fmt.i_cat == UNKNOWN_ES ) { if( espid->u.p_pes->i_stream_type == 0x13 ) /* Object channel */ b_stream_selected = true; else if( !p_sys->b_es_all ) b_stream_selected = false; } if( b_stream_selected ) msg_Dbg( p_demux, "enabling pid %d from program %d", espid->i_pid, p_pmt->i_number ); SetPIDFilter( p_sys, espid, b_stream_selected ); if( !b_stream_selected ) FlushESBuffer( espid->u.p_pes ); } /* Select pcr last in case it is handled by unselected ES */ if( p_pmt->i_pid_pcr > 0 ) { SetPIDFilter( p_sys, GetPID(p_sys, p_pmt->i_pid_pcr), b_program_selected ); if( b_program_selected ) msg_Dbg( p_demux, "enabling pcr pid %d from program %d", p_pmt->i_pid_pcr, p_pmt->i_number ); } } } static int Control( demux_t *p_demux, int i_query, va_list args ) { demux_sys_t *p_sys = p_demux->p_sys; double f, *pf; bool b_bool, *pb_bool; int64_t i64; int64_t *pi64; int i_int; ts_pmt_t *p_pmt; int i_first_program = ( p_sys->programs.i_size ) ? 
p_sys->programs.p_elems[0] : 0; if( PREPARSING || !i_first_program || p_sys->b_default_selection ) { if( likely(GetPID(p_sys, 0)->type == TYPE_PAT) ) { ts_pat_t *p_pat = GetPID(p_sys, 0)->u.p_pat; /* Set default program for preparse time (no program has been selected) */ for( int i = 0; i < p_pat->programs.i_size; i++ ) { assert(p_pat->programs.p_elems[i]->type == TYPE_PMT); p_pmt = p_pat->programs.p_elems[i]->u.p_pmt; if( ( p_pmt->pcr.i_first > -1 || p_pmt->pcr.i_first_dts > VLC_TS_INVALID ) && p_pmt->i_last_dts > 0 ) { i_first_program = p_pmt->i_number; break; } } } } switch( i_query ) { case DEMUX_GET_POSITION: pf = (double*) va_arg( args, double* ); /* Access control test is because EPG for recordings is not relevant */ if( p_sys->b_dvb_meta && p_sys->b_access_control ) { int64_t i_time, i_length; if( !DVBEventInformation( p_demux, &i_time, &i_length ) && i_length > 0 ) { *pf = (double)i_time/(double)i_length; return VLC_SUCCESS; } } if( (p_pmt = GetProgramByID( p_sys, i_first_program )) && p_pmt->pcr.i_first > -1 && p_pmt->i_last_dts > VLC_TS_INVALID && p_pmt->pcr.i_current > -1 ) { double i_length = TimeStampWrapAround( p_pmt, p_pmt->i_last_dts ) - p_pmt->pcr.i_first; i_length += p_pmt->pcr.i_pcroffset; double i_pos = TimeStampWrapAround( p_pmt, p_pmt->pcr.i_current ) - p_pmt->pcr.i_first; *pf = i_pos / i_length; return VLC_SUCCESS; } if( (i64 = stream_Size( p_sys->stream) ) > 0 ) { int64_t offset = stream_Tell( p_sys->stream ); *pf = (double)offset / (double)i64; return VLC_SUCCESS; } break; case DEMUX_SET_POSITION: f = (double) va_arg( args, double ); if(!p_sys->b_canseek) break; if( p_sys->b_dvb_meta && p_sys->b_access_control && !p_sys->b_force_seek_per_percent && (p_pmt = GetProgramByID( p_sys, i_first_program )) ) { int64_t i_time, i_length; if( !DVBEventInformation( p_demux, &i_time, &i_length ) && i_length > 0 && !SeekToTime( p_demux, p_pmt, TO_SCALE(i_length) * f ) ) { ReadyQueuesPostSeek( p_demux ); es_out_Control( p_demux->out, ES_OUT_SET_NEXT_DISPLAY_TIME, TO_SCALE(i_length) * f ); return VLC_SUCCESS; } } if( !p_sys->b_force_seek_per_percent && (p_pmt = GetProgramByID( p_sys, i_first_program )) && p_pmt->pcr.i_first > -1 && p_pmt->i_last_dts > VLC_TS_INVALID && p_pmt->pcr.i_current > -1 ) { double i_length = TimeStampWrapAround( p_pmt, p_pmt->i_last_dts ) - p_pmt->pcr.i_first; if( !SeekToTime( p_demux, p_pmt, p_pmt->pcr.i_first + i_length * f ) ) { ReadyQueuesPostSeek( p_demux ); es_out_Control( p_demux->out, ES_OUT_SET_NEXT_DISPLAY_TIME, FROM_SCALE(p_pmt->pcr.i_first + i_length * f) ); return VLC_SUCCESS; } } i64 = stream_Size( p_sys->stream ); if( i64 > 0 && stream_Seek( p_sys->stream, (int64_t)(i64 * f) ) == VLC_SUCCESS ) { ReadyQueuesPostSeek( p_demux ); return VLC_SUCCESS; } break; case DEMUX_SET_TIME: i64 = (int64_t)va_arg( args, int64_t ); if( p_sys->b_canseek && (p_pmt = GetProgramByID( p_sys, i_first_program )) && p_pmt->pcr.i_first > -1 && !SeekToTime( p_demux, p_pmt, p_pmt->pcr.i_first + TO_SCALE(i64) ) ) { ReadyQueuesPostSeek( p_demux ); es_out_Control( p_demux->out, ES_OUT_SET_NEXT_DISPLAY_TIME, FROM_SCALE(p_pmt->pcr.i_first) + i64 - VLC_TS_0 ); return VLC_SUCCESS; } break; case DEMUX_GET_TIME: pi64 = (int64_t*)va_arg( args, int64_t * ); if( p_sys->b_dvb_meta && p_sys->b_access_control ) { if( !DVBEventInformation( p_demux, pi64, NULL ) ) return VLC_SUCCESS; } if( (p_pmt = GetProgramByID( p_sys, i_first_program )) && p_pmt->pcr.i_current > -1 && p_pmt->pcr.i_first > -1 ) { int64_t i_pcr = TimeStampWrapAround( p_pmt, p_pmt->pcr.i_current ); *pi64 = 
FROM_SCALE(i_pcr - p_pmt->pcr.i_first); return VLC_SUCCESS; } break; case DEMUX_GET_LENGTH: pi64 = (int64_t*)va_arg( args, int64_t * ); if( p_sys->b_dvb_meta && p_sys->b_access_control ) { if( !DVBEventInformation( p_demux, NULL, pi64 ) ) return VLC_SUCCESS; } if( (p_pmt = GetProgramByID( p_sys, i_first_program )) && ( p_pmt->pcr.i_first > -1 || p_pmt->pcr.i_first_dts > VLC_TS_INVALID ) && p_pmt->i_last_dts > 0 ) { int64_t i_start = (p_pmt->pcr.i_first > -1) ? p_pmt->pcr.i_first : TO_SCALE(p_pmt->pcr.i_first_dts); int64_t i_last = TimeStampWrapAround( p_pmt, p_pmt->i_last_dts ); i_last += p_pmt->pcr.i_pcroffset; *pi64 = FROM_SCALE(i_last - i_start); return VLC_SUCCESS; } break; case DEMUX_SET_GROUP: { vlc_list_t *p_list; i_int = (int)va_arg( args, int ); p_list = (vlc_list_t *)va_arg( args, vlc_list_t * ); msg_Dbg( p_demux, "DEMUX_SET_GROUP %d %p", i_int, p_list ); if( i_int != 0 ) /* If not default program */ { /* Deselect/filter current ones */ if( i_int != -1 ) { p_sys->b_es_all = false; ARRAY_APPEND( p_sys->programs, i_int ); UpdatePESFilters( p_demux, false ); } else if( likely( p_list != NULL ) ) { p_sys->b_es_all = false; for( int i = 0; i < p_list->i_count; i++ ) ARRAY_APPEND( p_sys->programs, p_list->p_values[i].i_int ); UpdatePESFilters( p_demux, false ); } else // All ES Mode { p_sys->b_es_all = true; ts_pat_t *p_pat = GetPID(p_sys, 0)->u.p_pat; for( int i = 0; i < p_pat->programs.i_size; i++ ) ARRAY_APPEND( p_sys->programs, p_pat->programs.p_elems[i]->i_pid ); UpdatePESFilters( p_demux, true ); } p_sys->b_default_selection = false; } return VLC_SUCCESS; } case DEMUX_SET_ES: { i_int = (int)va_arg( args, int ); msg_Dbg( p_demux, "DEMUX_SET_ES %d", i_int ); if( !p_sys->b_es_all ) /* Won't change anything */ UpdatePESFilters( p_demux, false ); return VLC_SUCCESS; } case DEMUX_GET_TITLE_INFO: { struct input_title_t ***v = va_arg( args, struct input_title_t*** ); int *c = va_arg( args, int * ); *va_arg( args, int* ) = 0; /* Title offset */ *va_arg( args, int* ) = 0; /* Chapter offset */ return stream_Control( p_sys->stream, STREAM_GET_TITLE_INFO, v, c ); } case DEMUX_SET_TITLE: return stream_vaControl( p_sys->stream, STREAM_SET_TITLE, args ); case DEMUX_SET_SEEKPOINT: return stream_vaControl( p_sys->stream, STREAM_SET_SEEKPOINT, args ); case DEMUX_GET_META: return stream_vaControl( p_sys->stream, STREAM_GET_META, args ); case DEMUX_CAN_RECORD: pb_bool = (bool*)va_arg( args, bool * ); *pb_bool = true; return VLC_SUCCESS; case DEMUX_SET_RECORD_STATE: b_bool = (bool)va_arg( args, int ); if( !b_bool ) stream_Control( p_sys->stream, STREAM_SET_RECORD_STATE, false ); p_sys->b_start_record = b_bool; return VLC_SUCCESS; case DEMUX_GET_SIGNAL: return stream_vaControl( p_sys->stream, STREAM_GET_SIGNAL, args ); default: break; } return VLC_EGENERIC; } /***************************************************************************** * *****************************************************************************/ static int UserPmt( demux_t *p_demux, const char *psz_fmt ) { demux_sys_t *p_sys = p_demux->p_sys; char *psz_dup = strdup( psz_fmt ); char *psz = psz_dup; int i_pid; int i_number; if( !psz_dup ) return VLC_ENOMEM; /* Parse PID */ i_pid = strtol( psz, &psz, 0 ); if( i_pid < 2 || i_pid >= 8192 ) goto error; /* Parse optional program number */ i_number = 0; if( *psz == ':' ) i_number = strtol( &psz[1], &psz, 0 ); /* */ ts_pid_t *pmtpid = GetPID(p_sys, i_pid); msg_Dbg( p_demux, "user pmt specified (pid=%d,number=%d)", i_pid, i_number ); if ( !PIDSetup( p_demux, TYPE_PMT, pmtpid, GetPID(p_sys, 
0) ) ) goto error; /* Dummy PMT */ ts_pmt_t *p_pmt = pmtpid->u.p_pmt; p_pmt->i_number = i_number != 0 ? i_number : TS_USER_PMT_NUMBER; if( !dvbpsi_pmt_attach( p_pmt->handle, ((i_number != TS_USER_PMT_NUMBER ? i_number : 1)), PMTCallBack, p_demux ) ) { PIDRelease( p_demux, pmtpid ); goto error; } ARRAY_APPEND( GetPID(p_sys, 0)->u.p_pat->programs, pmtpid ); psz = strchr( psz, '=' ); if( psz ) psz++; while( psz && *psz ) { char *psz_next = strchr( psz, ',' ); int i_pid; if( psz_next ) *psz_next++ = '\0'; i_pid = strtol( psz, &psz, 0 ); if( *psz != ':' || i_pid < 2 || i_pid >= 8192 ) goto next; char *psz_opt = &psz[1]; if( !strcmp( psz_opt, "pcr" ) ) { p_pmt->i_pid_pcr = i_pid; } else if( GetPID(p_sys, i_pid)->type == TYPE_FREE ) { ts_pid_t *pid = GetPID(p_sys, i_pid); char *psz_arg = strchr( psz_opt, '=' ); if( psz_arg ) *psz_arg++ = '\0'; if ( !PIDSetup( p_demux, TYPE_PES, pid, pmtpid ) ) continue; ARRAY_APPEND( p_pmt->e_streams, pid ); if( p_pmt->i_pid_pcr <= 0 ) p_pmt->i_pid_pcr = i_pid; es_format_t *fmt = &pid->u.p_pes->es.fmt; if( psz_arg && strlen( psz_arg ) == 4 ) { const vlc_fourcc_t i_codec = VLC_FOURCC( psz_arg[0], psz_arg[1], psz_arg[2], psz_arg[3] ); int i_cat = UNKNOWN_ES; if( !strcmp( psz_opt, "video" ) ) i_cat = VIDEO_ES; else if( !strcmp( psz_opt, "audio" ) ) i_cat = AUDIO_ES; else if( !strcmp( psz_opt, "spu" ) ) i_cat = SPU_ES; es_format_Init( fmt, i_cat, i_codec ); fmt->b_packetized = false; } else { const int i_stream_type = strtol( psz_opt, NULL, 0 ); PIDFillFormat( fmt, i_stream_type, &pid->u.p_pes->data_type ); } fmt->i_group = i_number; if( p_sys->b_es_id_pid ) fmt->i_id = i_pid; if( fmt->i_cat != UNKNOWN_ES ) { msg_Dbg( p_demux, " * es pid=%d fcc=%4.4s", i_pid, (char*)&fmt->i_codec ); pid->u.p_pes->es.id = es_out_Add( p_demux->out, fmt ); p_sys->i_pmt_es++; } } next: psz = psz_next; } p_sys->b_user_pmt = true; free( psz_dup ); return VLC_SUCCESS; error: free( psz_dup ); return VLC_EGENERIC; } static int SetPIDFilter( demux_sys_t *p_sys, ts_pid_t *p_pid, bool b_selected ) { if( b_selected ) p_pid->i_flags |= FLAG_FILTERED; else p_pid->i_flags &= ~FLAG_FILTERED; if( !p_sys->b_access_control ) return VLC_EGENERIC; return stream_Control( p_sys->stream, STREAM_SET_PRIVATE_ID_STATE, p_pid->i_pid, b_selected ); } static void PIDReset( ts_pid_t *pid ) { assert(pid->i_refcount == 0); pid->i_cc = 0xff; pid->i_flags &= ~FLAG_SCRAMBLED; pid->p_parent = NULL; pid->type = TYPE_FREE; } static bool PIDSetup( demux_t *p_demux, ts_pid_type_t i_type, ts_pid_t *pid, ts_pid_t *p_parent ) { if( pid == p_parent || pid->i_pid == 0x1FFF ) return false; if( pid->i_refcount == 0 ) { assert( pid->type == TYPE_FREE ); switch( i_type ) { case TYPE_FREE: /* nonsense ?*/ PIDReset( pid ); return true; case TYPE_PAT: PIDReset( pid ); pid->u.p_pat = ts_pat_New( p_demux ); if( !pid->u.p_pat ) return false; break; case TYPE_PMT: PIDReset( pid ); pid->u.p_pmt = ts_pmt_New( p_demux ); if( !pid->u.p_pmt ) return false; break; case TYPE_PES: PIDReset( pid ); pid->u.p_pes = ts_pes_New( p_demux ); if( !pid->u.p_pes ) return false; break; case TYPE_SDT: case TYPE_TDT: case TYPE_EIT: PIDReset( pid ); pid->u.p_psi = ts_psi_New( p_demux ); if( !pid->u.p_psi ) return false; break; default: assert(false); break; } pid->i_refcount++; pid->type = i_type; pid->p_parent = p_parent; } else if( pid->type == i_type && pid->i_refcount < UINT8_MAX ) { pid->i_refcount++; } else { if( pid->type != TYPE_FREE ) msg_Warn( p_demux, "Tried to redeclare pid %d with another type", pid->i_pid ); return false; } return true; } static 
void PIDRelease( demux_t *p_demux, ts_pid_t *pid ) { if( pid->i_refcount == 0 ) { assert( pid->type == TYPE_FREE ); return; } else if( pid->i_refcount == 1 ) { pid->i_refcount--; } else if( pid->i_refcount > 1 ) { assert( pid->type != TYPE_FREE && pid->type != TYPE_PAT ); pid->i_refcount--; } if( pid->i_refcount == 0 ) { switch( pid->type ) { default: case TYPE_FREE: /* nonsense ?*/ assert( pid->type != TYPE_FREE ); break; case TYPE_PAT: ts_pat_Del( p_demux, pid->u.p_pat ); pid->u.p_pat = NULL; break; case TYPE_PMT: ts_pmt_Del( p_demux, pid->u.p_pmt ); pid->u.p_pmt = NULL; break; case TYPE_PES: ts_pes_Del( p_demux, pid->u.p_pes ); pid->u.p_pes = NULL; break; case TYPE_SDT: case TYPE_TDT: case TYPE_EIT: ts_psi_Del( p_demux, pid->u.p_psi ); pid->u.p_psi = NULL; break; } SetPIDFilter( p_demux->p_sys, pid, false ); PIDReset( pid ); } } static int16_t read_opus_flag(uint8_t **buf, size_t *len) { if (*len < 2) return -1; int16_t ret = ((*buf)[0] << 8) | (*buf)[1]; *len -= 2; *buf += 2; if (ret & (3<<13)) ret = -1; return ret; } static block_t *Opus_Parse(demux_t *demux, block_t *block) { block_t *out = NULL; block_t **last = NULL; uint8_t *buf = block->p_buffer; size_t len = block->i_buffer; while (len > 3 && ((buf[0] << 3) | (buf[1] >> 5)) == 0x3ff) { int16_t start_trim = 0, end_trim = 0; int start_trim_flag = (buf[1] >> 4) & 1; int end_trim_flag = (buf[1] >> 3) & 1; int control_extension_flag = (buf[1] >> 2) & 1; len -= 2; buf += 2; unsigned au_size = 0; while (len--) { int c = *buf++; au_size += c; if (c != 0xff) break; } if (start_trim_flag) { start_trim = read_opus_flag(&buf, &len); if (start_trim < 0) { msg_Err(demux, "Invalid start trimming flag"); } } if (end_trim_flag) { end_trim = read_opus_flag(&buf, &len); if (end_trim < 0) { msg_Err(demux, "Invalid end trimming flag"); } } if (control_extension_flag && len) { unsigned l = *buf++; len--; if (l > len) { msg_Err(demux, "Invalid control extension length %d > %zu", l, len); break; } buf += l; len -= l; } if (!au_size || au_size > len) { msg_Err(demux, "Invalid Opus AU size %d (PES %zu)", au_size, len); break; } block_t *au = block_Alloc(au_size); if (!au) break; memcpy(au->p_buffer, buf, au_size); block_CopyProperties(au, block); au->p_next = NULL; if (!out) out = au; else *last = au; last = &au->p_next; au->i_nb_samples = opus_frame_duration(buf, au_size); if (end_trim && (uint16_t) end_trim <= au->i_nb_samples) au->i_length = end_trim; /* Blatant abuse of the i_length field. 
*/ else au->i_length = 0; if (start_trim && start_trim < (au->i_nb_samples - au->i_length)) { au->i_nb_samples -= start_trim; if (au->i_nb_samples == 0) au->i_flags |= BLOCK_FLAG_PREROLL; } buf += au_size; len -= au_size; } block_Release(block); return out; } /**************************************************************************** * gathering stuff ****************************************************************************/ static void ParsePES( demux_t *p_demux, ts_pid_t *pid, block_t *p_pes ) { uint8_t header[34]; unsigned i_pes_size = 0; unsigned i_skip = 0; mtime_t i_dts = -1; mtime_t i_pts = -1; mtime_t i_length = 0; uint8_t i_stream_id; const es_mpeg4_descriptor_t *p_mpeg4desc = NULL; assert(pid->type == TYPE_PES); assert(pid->p_parent && pid->p_parent->type == TYPE_PMT); const int i_max = block_ChainExtract( p_pes, header, 34 ); if ( i_max < 4 ) { block_ChainRelease( p_pes ); return; } if( SCRAMBLED(*pid) || header[0] != 0 || header[1] != 0 || header[2] != 1 ) { if ( !SCRAMBLED(*pid) ) msg_Warn( p_demux, "invalid header [0x%02x:%02x:%02x:%02x] (pid: %d)", header[0], header[1],header[2],header[3], pid->i_pid ); block_ChainRelease( p_pes ); return; } if( ParsePESHeader( VLC_OBJECT(p_demux), (uint8_t*)&header, i_max, &i_skip, &i_dts, &i_pts, &i_stream_id ) == VLC_EGENERIC ) { block_ChainRelease( p_pes ); return; } else { if( i_pts != -1 ) i_pts = TimeStampWrapAround( pid->p_parent->u.p_pmt, i_pts ); if( i_dts != -1 ) i_dts = TimeStampWrapAround( pid->p_parent->u.p_pmt, i_dts ); } if( pid->u.p_pes->es.i_sl_es_id ) p_mpeg4desc = GetMPEG4DescByEsId( pid->p_parent->u.p_pmt, pid->u.p_pes->es.i_sl_es_id ); if( pid->u.p_pes->es.fmt.i_codec == VLC_FOURCC( 'a', '5', '2', 'b' ) || pid->u.p_pes->es.fmt.i_codec == VLC_FOURCC( 'd', 't', 's', 'b' ) ) { i_skip += 4; } else if( pid->u.p_pes->es.fmt.i_codec == VLC_FOURCC( 'l', 'p', 'c', 'b' ) || pid->u.p_pes->es.fmt.i_codec == VLC_FOURCC( 's', 'p', 'u', 'b' ) || pid->u.p_pes->es.fmt.i_codec == VLC_FOURCC( 's', 'd', 'd', 'b' ) ) { i_skip += 1; } else if( pid->u.p_pes->es.fmt.i_codec == VLC_CODEC_SUBT && p_mpeg4desc ) { const decoder_config_descriptor_t *dcd = &p_mpeg4desc->dec_descr; if( dcd->i_extra > 2 && dcd->p_extra[0] == 0x10 && ( dcd->p_extra[1]&0x10 ) ) { /* display length */ if( p_pes->i_buffer + 2 <= i_skip ) i_length = GetWBE( &p_pes->p_buffer[i_skip] ); i_skip += 2; } if( p_pes->i_buffer + 2 <= i_skip ) i_pes_size = GetWBE( &p_pes->p_buffer[i_skip] ); /* */ i_skip += 2; } /* skip header */ while( p_pes && i_skip > 0 ) { if( p_pes->i_buffer <= i_skip ) { block_t *p_next = p_pes->p_next; i_skip -= p_pes->i_buffer; block_Release( p_pes ); p_pes = p_next; } else { p_pes->i_buffer -= i_skip; p_pes->p_buffer += i_skip; break; } } /* ISO/IEC 13818-1 2.7.5: if no pts and no dts, then dts == pts */ if( i_pts >= 0 && i_dts < 0 ) i_dts = i_pts; if( p_pes ) { block_t *p_block; if( i_dts >= 0 ) p_pes->i_dts = VLC_TS_0 + i_dts * 100 / 9; if( i_pts >= 0 ) p_pes->i_pts = VLC_TS_0 + i_pts * 100 / 9; p_pes->i_length = i_length * 100 / 9; p_block = block_ChainGather( p_pes ); if( pid->u.p_pes->es.fmt.i_codec == VLC_CODEC_SUBT ) { if( i_pes_size > 0 && p_block->i_buffer > i_pes_size ) { p_block->i_buffer = i_pes_size; } /* Append a \0 */ p_block = block_Realloc( p_block, 0, p_block->i_buffer + 1 ); if( !p_block ) return; p_block->p_buffer[p_block->i_buffer -1] = '\0'; } else if( pid->u.p_pes->es.fmt.i_codec == VLC_CODEC_TELETEXT ) { if( p_block->i_pts <= VLC_TS_INVALID && pid->p_parent ) { /* Teletext may have missing PTS (ETSI EN 300 472 Annexe A) * In 
this case use the last PCR + 40ms */ assert( pid->p_parent->type == TYPE_PMT ); if( likely(pid->p_parent->type == TYPE_PMT) ) { mtime_t i_pcr = pid->p_parent->u.p_pmt->pcr.i_current; if( i_pcr > VLC_TS_INVALID ) p_block->i_pts = VLC_TS_0 + i_pcr * 100 / 9 + 40000; } } } else if( pid->u.p_pes->es.fmt.i_codec == VLC_CODEC_ARIB_A || pid->u.p_pes->es.fmt.i_codec == VLC_CODEC_ARIB_C ) { if( p_block->i_pts <= VLC_TS_INVALID ) { if( i_pes_size > 0 && p_block->i_buffer > i_pes_size ) { p_block->i_buffer = i_pes_size; } /* Append a \0 */ p_block = block_Realloc( p_block, 0, p_block->i_buffer + 1 ); if( !p_block ) return; p_block->p_buffer[p_block->i_buffer -1] = '\0'; } } else if( pid->u.p_pes->es.fmt.i_codec == VLC_CODEC_OPUS) { p_block = Opus_Parse(p_demux, p_block); } if( !pid->p_parent || pid->p_parent->type != TYPE_PMT ) { block_Release( p_block ); return; } ts_pmt_t *p_pmt = pid->p_parent->u.p_pmt; while (p_block) { block_t *p_next = p_block->p_next; p_block->p_next = NULL; if( !p_pmt->pcr.b_fix_done ) /* Not seen yet */ PCRFixHandle( p_demux, p_pmt, p_block ); if( pid->u.p_pes->es.id && (p_pmt->pcr.i_current > -1 || p_pmt->pcr.b_disable) ) { if( pid->u.p_pes->p_prepcr_outqueue ) { block_ChainAppend( &pid->u.p_pes->p_prepcr_outqueue, p_block ); p_block = pid->u.p_pes->p_prepcr_outqueue; p_next = p_block->p_next; p_block->p_next = NULL; pid->u.p_pes->p_prepcr_outqueue = NULL; } if ( p_pmt->pcr.b_disable && p_block->i_dts > VLC_TS_INVALID && ( p_pmt->i_pid_pcr == pid->i_pid || p_pmt->i_pid_pcr == 0x1FFF ) ) { ProgramSetPCR( p_demux, p_pmt, (p_block->i_dts - VLC_TS_0) * 9 / 100 - 120000 ); } /* Compute PCR/DTS offset if any */ if( p_pmt->pcr.i_pcroffset == -1 && p_block->i_dts > VLC_TS_INVALID && p_pmt->pcr.i_current > VLC_TS_INVALID ) { int64_t i_dts27 = (p_block->i_dts - VLC_TS_0) * 9 / 100; int64_t i_pcr = TimeStampWrapAround( p_pmt, p_pmt->pcr.i_current ); if( i_dts27 < i_pcr ) { p_pmt->pcr.i_pcroffset = i_pcr - i_dts27 + 80000; msg_Warn( p_demux, "Broken stream: pid %d sends packets with dts %"PRId64 "us later than pcr, applying delay", pid->i_pid, p_pmt->pcr.i_pcroffset * 100 / 9 ); } else p_pmt->pcr.i_pcroffset = 0; } if( p_pmt->pcr.i_pcroffset != -1 ) { if( p_block->i_dts > VLC_TS_INVALID ) p_block->i_dts += (p_pmt->pcr.i_pcroffset * 100 / 9); if( p_block->i_pts > VLC_TS_INVALID ) p_block->i_pts += (p_pmt->pcr.i_pcroffset * 100 / 9); } /* SL in PES */ if( pid->u.p_pes->i_stream_type == 0x12 && ((i_stream_id & 0xFE) == 0xFA) /* 0xFA || 0xFB */ ) { const es_mpeg4_descriptor_t *p_desc = GetMPEG4DescByEsId( p_pmt, pid->u.p_pes->es.i_sl_es_id ); if(!p_desc) { block_Release( p_block ); p_block = NULL; } else { sl_header_data header = DecodeSLHeader( p_block->i_buffer, p_block->p_buffer, &p_mpeg4desc->sl_descr ); p_block->i_buffer -= header.i_size; p_block->p_buffer += header.i_size; p_block->i_dts = header.i_dts ? header.i_dts : p_block->i_dts; p_block->i_pts = header.i_pts ? 
header.i_pts : p_block->i_pts; /* Assemble access units */ if( header.b_au_start && pid->u.p_pes->sl.p_data ) { block_ChainRelease( pid->u.p_pes->sl.p_data ); pid->u.p_pes->sl.p_data = NULL; pid->u.p_pes->sl.pp_last = &pid->u.p_pes->sl.p_data; } block_ChainLastAppend( &pid->u.p_pes->sl.pp_last, p_block ); p_block = NULL; if( header.b_au_end ) { p_block = block_ChainGather( pid->u.p_pes->sl.p_data ); pid->u.p_pes->sl.p_data = NULL; pid->u.p_pes->sl.pp_last = &pid->u.p_pes->sl.p_data; } } } if ( p_block ) { for( int i = 0; i < pid->u.p_pes->extra_es.i_size; i++ ) { es_out_Send( p_demux->out, pid->u.p_pes->extra_es.p_elems[i]->id, block_Duplicate( p_block ) ); } es_out_Send( p_demux->out, pid->u.p_pes->es.id, p_block ); } } else { if( !p_pmt->pcr.b_fix_done ) /* Not seen yet */ PCRFixHandle( p_demux, p_pmt, p_block ); block_ChainAppend( &pid->u.p_pes->p_prepcr_outqueue, p_block ); } p_block = p_next; } } else { msg_Warn( p_demux, "empty pes" ); } } static void ParseTableSection( demux_t *p_demux, ts_pid_t *pid, block_t *p_data ) { block_t *p_content = block_ChainGather( p_data ); if( p_content->i_buffer <= 9 || pid->type != TYPE_PES ) { block_Release( p_content ); return; } const uint8_t i_table_id = p_content->p_buffer[0]; const uint8_t i_version = ( p_content->p_buffer[5] & 0x3F ) >> 1; ts_pmt_t *p_pmt = pid->p_parent->u.p_pmt; if ( pid->u.p_pes->i_stream_type == 0x82 && i_table_id == 0xC6 ) /* SCTE_27 */ { assert( pid->u.p_pes->es.fmt.i_codec == VLC_CODEC_SCTE_27 ); mtime_t i_date = p_pmt->pcr.i_current; /* We need to extract the truncated pts stored inside the payload */ int i_index = 0; size_t i_offset = 4; if( p_content->p_buffer[3] & 0x40 ) { i_index = ((p_content->p_buffer[7] & 0x0f) << 8) | p_content->p_buffer[8]; i_offset = 9; } if( i_index == 0 && p_content->i_buffer > i_offset + 8 ) { bool is_immediate = p_content->p_buffer[i_offset + 3] & 0x40; if( !is_immediate ) { mtime_t i_display_in = GetDWBE( &p_content->p_buffer[i_offset + 4] ); if( i_display_in < i_date ) i_date = i_display_in + (1ll << 32); else i_date = i_display_in; } } p_content->i_dts = p_content->i_pts = VLC_TS_0 + i_date * 100 / 9; PCRFixHandle( p_demux, p_pmt, p_content ); } /* Object stream SL in table sections */ else if( pid->u.p_pes->i_stream_type == 0x13 && i_table_id == 0x05 && pid->u.p_pes->es.i_sl_es_id && p_content->i_buffer > 12 ) { const es_mpeg4_descriptor_t *p_mpeg4desc = GetMPEG4DescByEsId( p_pmt, pid->u.p_pes->es.i_sl_es_id ); if( p_mpeg4desc && p_mpeg4desc->dec_descr.i_objectTypeIndication == 0x01 && p_mpeg4desc->dec_descr.i_streamType == 0x01 /* Object */ && p_pmt->od.i_version != i_version ) { const uint8_t *p_data = p_content->p_buffer; int i_data = p_content->i_buffer; /* Forward into section */ uint16_t len = ((p_content->p_buffer[1] & 0x0f) << 8) | p_content->p_buffer[2]; p_data += 8; i_data -= 8; // SL in table i_data = __MIN(i_data, len - 5); i_data -= 4; // CRC od_descriptors_t *p_ods = &p_pmt->od; sl_header_data header = DecodeSLHeader( i_data, p_data, &p_mpeg4desc->sl_descr ); DecodeODCommand( VLC_OBJECT(p_demux), p_ods, i_data - header.i_size, &p_data[header.i_size] ); bool b_changed = false; for( int i=0; i<p_ods->objects.i_size; i++ ) { od_descriptor_t *p_od = p_ods->objects.p_elems[i]; for( int j = 0; j < ES_DESCRIPTOR_COUNT && p_od->es_descr[j].b_ok; j++ ) { p_mpeg4desc = &p_od->es_descr[j]; ts_pes_es_t *p_es = GetPMTESBySLEsId( p_pmt, p_mpeg4desc->i_es_id ); es_format_t fmt; es_format_Init( &fmt, UNKNOWN_ES, 0 ); fmt.i_id = p_es->fmt.i_id; fmt.i_group = p_es->fmt.i_group; if ( 
p_mpeg4desc && p_mpeg4desc->b_ok && p_es && SetupISO14496LogicalStream( p_demux, &p_mpeg4desc->dec_descr, &fmt ) && !es_format_IsSimilar( &fmt, &p_es->fmt ) ) { es_format_Clean( &p_es->fmt ); p_es->fmt = fmt; es_out_Del( p_demux->out, p_es->id ); p_es->fmt.b_packetized = true; /* Split by access unit, no sync code */ FREENULL( p_es->fmt.psz_description ); p_es->id = es_out_Add( p_demux->out, &p_es->fmt ); b_changed = true; } } } if( b_changed ) UpdatePESFilters( p_demux, p_demux->p_sys->b_es_all ); p_ods->i_version = i_version; } block_Release( p_content ); return; } if( pid->u.p_pes->es.id ) es_out_Send( p_demux->out, pid->u.p_pes->es.id, p_content ); else block_Release( p_content ); } static void ParseData( demux_t *p_demux, ts_pid_t *pid ) { block_t *p_data = pid->u.p_pes->p_data; assert(p_data); if(!p_data) return; /* remove the pes from pid */ pid->u.p_pes->p_data = NULL; pid->u.p_pes->i_data_size = 0; pid->u.p_pes->i_data_gathered = 0; pid->u.p_pes->pp_last = &pid->u.p_pes->p_data; if( pid->u.p_pes->data_type == TS_ES_DATA_PES ) { ParsePES( p_demux, pid, p_data ); } else if( pid->u.p_pes->data_type == TS_ES_DATA_TABLE_SECTION ) { ParseTableSection( p_demux, pid, p_data ); } else { block_ChainRelease( p_data ); } } static block_t* ReadTSPacket( demux_t *p_demux ) { demux_sys_t *p_sys = p_demux->p_sys; block_t *p_pkt; /* Get a new TS packet */ if( !( p_pkt = stream_Block( p_sys->stream, p_sys->i_packet_size ) ) ) { if( stream_Tell( p_sys->stream ) == stream_Size( p_sys->stream ) ) msg_Dbg( p_demux, "EOF at %"PRId64, stream_Tell( p_sys->stream ) ); else msg_Dbg( p_demux, "Can't read TS packet at %"PRId64, stream_Tell(p_sys->stream) ); return NULL; } if( p_pkt->i_buffer < TS_HEADER_SIZE + p_sys->i_packet_header_size ) { block_Release( p_pkt ); return NULL; } /* Skip header (BluRay streams). * re-sync logic would do this (by adjusting packet start), but this would result in losing first and last ts packets. * First packet is usually PAT, and losing it means losing whole first GOP. This is fatal with still-image based menus. */ p_pkt->p_buffer += p_sys->i_packet_header_size; p_pkt->i_buffer -= p_sys->i_packet_header_size; /* Check sync byte and re-sync if needed */ if( p_pkt->p_buffer[0] != 0x47 ) { msg_Warn( p_demux, "lost synchro" ); block_Release( p_pkt ); for( ;; ) { const uint8_t *p_peek; int i_peek = 0; unsigned i_skip = 0; i_peek = stream_Peek( p_sys->stream, &p_peek, p_sys->i_packet_size * 10 ); if( i_peek < 0 || (unsigned)i_peek < p_sys->i_packet_size + 1 ) { msg_Dbg( p_demux, "eof ?" ); return NULL; } while( i_skip < i_peek - p_sys->i_packet_size ) { if( p_peek[i_skip + p_sys->i_packet_header_size] == 0x47 && p_peek[i_skip + p_sys->i_packet_header_size + p_sys->i_packet_size] == 0x47 ) { break; } i_skip++; } msg_Dbg( p_demux, "skipping %d bytes of garbage", i_skip ); stream_Read( p_sys->stream, NULL, i_skip ); if( i_skip < i_peek - p_sys->i_packet_size ) { break; } } if( !( p_pkt = stream_Block( p_sys->stream, p_sys->i_packet_size ) ) ) { msg_Dbg( p_demux, "eof ?" 
); return NULL; } } return p_pkt; } static int64_t TimeStampWrapAround( ts_pmt_t *p_pmt, int64_t i_time ) { int64_t i_adjust = 0; if( p_pmt && p_pmt->pcr.i_first > 0x0FFFFFFFF && i_time < 0x0FFFFFFFF ) i_adjust = 0x1FFFFFFFF; return i_time + i_adjust; } static mtime_t GetPCR( block_t *p_pkt ) { const uint8_t *p = p_pkt->p_buffer; mtime_t i_pcr = -1; if( ( p[3]&0x20 ) && /* adaptation */ ( p[5]&0x10 ) && ( p[4] >= 7 ) ) { /* PCR is 33 bits */ i_pcr = ( (mtime_t)p[6] << 25 ) | ( (mtime_t)p[7] << 17 ) | ( (mtime_t)p[8] << 9 ) | ( (mtime_t)p[9] << 1 ) | ( (mtime_t)p[10] >> 7 ); } return i_pcr; } static void UpdateScrambledState( demux_t *p_demux, ts_pid_t *p_pid, bool b_scrambled ) { if( !SCRAMBLED(*p_pid) == !b_scrambled ) return; msg_Warn( p_demux, "scrambled state changed on pid %d (%d->%d)", p_pid->i_pid, !!SCRAMBLED(*p_pid), b_scrambled ); if( b_scrambled ) p_pid->i_flags |= FLAG_SCRAMBLED; else p_pid->i_flags &= ~FLAG_SCRAMBLED; if( p_pid->type == TYPE_PES && p_pid->u.p_pes->es.id ) { for( int i = 0; i < p_pid->u.p_pes->extra_es.i_size; i++ ) { if( p_pid->u.p_pes->extra_es.p_elems[i]->id ) es_out_Control( p_demux->out, ES_OUT_SET_ES_SCRAMBLED_STATE, p_pid->u.p_pes->extra_es.p_elems[i]->id, b_scrambled ); } es_out_Control( p_demux->out, ES_OUT_SET_ES_SCRAMBLED_STATE, p_pid->u.p_pes->es.id, b_scrambled ); } } static inline void FlushESBuffer( ts_pes_t *p_pes ) { if( p_pes->p_data ) { p_pes->i_data_gathered = p_pes->i_data_size = 0; block_ChainRelease( p_pes->p_data ); p_pes->p_data = NULL; p_pes->pp_last = &p_pes->p_data; } if( p_pes->sl.p_data ) { block_ChainRelease( p_pes->sl.p_data ); p_pes->sl.p_data = NULL; p_pes->sl.pp_last = &p_pes->sl.p_data; } } static void ReadyQueuesPostSeek( demux_t *p_demux ) { demux_sys_t *p_sys = p_demux->p_sys; ts_pat_t *p_pat = GetPID(p_sys, 0)->u.p_pat; for( int i=0; i< p_pat->programs.i_size; i++ ) { ts_pmt_t *p_pmt = p_pat->programs.p_elems[i]->u.p_pmt; for( int j=0; j<p_pmt->e_streams.i_size; j++ ) { ts_pid_t *pid = p_pmt->e_streams.p_elems[j]; if( pid->type != TYPE_PES ) continue; if( pid->u.p_pes->es.id ) { block_t *p_block = block_Alloc(1); if( p_block ) { p_block->i_buffer = 0; p_block->i_flags = BLOCK_FLAG_DISCONTINUITY | BLOCK_FLAG_CORRUPTED; es_out_Send( p_demux->out, pid->u.p_pes->es.id, p_block ); } } pid->i_cc = 0xff; if( pid->u.p_pes->p_prepcr_outqueue ) { block_ChainRelease( pid->u.p_pes->p_prepcr_outqueue ); pid->u.p_pes->p_prepcr_outqueue = NULL; } FlushESBuffer( pid->u.p_pes ); } p_pmt->pcr.i_current = -1; } } static int SeekToTime( demux_t *p_demux, ts_pmt_t *p_pmt, int64_t i_scaledtime ) { demux_sys_t *p_sys = p_demux->p_sys; /* Deal with common but worst binary search case */ if( p_pmt->pcr.i_first == i_scaledtime && p_sys->b_canseek ) return stream_Seek( p_sys->stream, 0 ); if( !p_sys->b_canfastseek ) return VLC_EGENERIC; int64_t i_initial_pos = stream_Tell( p_sys->stream ); /* Find the time position by using binary search algorithm. 
*/ int64_t i_head_pos = 0; int64_t i_tail_pos = stream_Size( p_sys->stream ) - p_sys->i_packet_size; if( i_head_pos >= i_tail_pos ) return VLC_EGENERIC; bool b_found = false; while( (i_head_pos + p_sys->i_packet_size) <= i_tail_pos && !b_found ) { /* Round i_pos to a multiple of p_sys->i_packet_size */ int64_t i_splitpos = i_head_pos + (i_tail_pos - i_head_pos) / 2; int64_t i_div = i_splitpos % p_sys->i_packet_size; i_splitpos -= i_div; if ( stream_Seek( p_sys->stream, i_splitpos ) != VLC_SUCCESS ) break; int64_t i_pos = i_splitpos; while( i_pos > -1 && i_pos < i_tail_pos ) { int64_t i_pcr = -1; block_t *p_pkt = ReadTSPacket( p_demux ); if( !p_pkt ) { i_head_pos = i_tail_pos; break; } else i_pos = stream_Tell( p_sys->stream ); int i_pid = PIDGet( p_pkt ); if( i_pid != 0x1FFF && GetPID(p_sys, i_pid)->type == TYPE_PES && GetPID(p_sys, i_pid)->p_parent->u.p_pmt == p_pmt && (p_pkt->p_buffer[1] & 0xC0) == 0x40 && /* Payload start but not corrupt */ (p_pkt->p_buffer[3] & 0xD0) == 0x10 /* Has payload but is not encrypted */ ) { unsigned i_skip = 4; if ( p_pkt->p_buffer[3] & 0x20 ) // adaptation field { if( p_pkt->i_buffer >= 4 + 2 + 5 ) { i_pcr = GetPCR( p_pkt ); i_skip += 1 + p_pkt->p_buffer[4]; } } else { mtime_t i_dts = -1; mtime_t i_pts = -1; uint8_t i_stream_id; if ( VLC_SUCCESS == ParsePESHeader( VLC_OBJECT(p_demux), &p_pkt->p_buffer[i_skip], p_pkt->i_buffer - i_skip, &i_skip, &i_dts, &i_pts, &i_stream_id ) ) { if( i_dts > -1 ) i_pcr = i_dts; } } } block_Release( p_pkt ); if( i_pcr != -1 ) { int64_t i_diff = i_scaledtime - TimeStampWrapAround( p_pmt, i_pcr ); if ( i_diff < 0 ) i_tail_pos = i_splitpos - p_sys->i_packet_size; else if( i_diff < TO_SCALE(VLC_TS_0 + CLOCK_FREQ / 2) ) // 500ms b_found = true; else i_head_pos = i_pos; break; } } if ( !b_found && i_pos > i_tail_pos - p_sys->i_packet_size ) i_tail_pos = i_splitpos - p_sys->i_packet_size; } if( !b_found ) { msg_Dbg( p_demux, "Seek():cannot find a time position." 
); stream_Seek( p_sys->stream, i_initial_pos ); return VLC_EGENERIC; } return VLC_SUCCESS; } static ts_pid_t *GetPID( demux_sys_t *p_sys, uint16_t i_pid ) { switch( i_pid ) { case 0: return &p_sys->pids.pat; case 0x1FFF: return &p_sys->pids.dummy; default: if( p_sys->pids.i_last_pid == i_pid ) return p_sys->pids.p_last; break; } for( int i=0; i < p_sys->pids.i_all; i++ ) { if( p_sys->pids.pp_all[i]->i_pid == i_pid ) { p_sys->pids.p_last = p_sys->pids.pp_all[i]; p_sys->pids.i_last_pid = i_pid; return p_sys->pids.p_last; } } if( p_sys->pids.i_all >= p_sys->pids.i_all_alloc ) { ts_pid_t **p_realloc = realloc( p_sys->pids.pp_all, (p_sys->pids.i_all_alloc + PID_ALLOC_CHUNK) * sizeof(ts_pid_t *) ); if( !p_realloc ) return NULL; p_sys->pids.pp_all = p_realloc; p_sys->pids.i_all_alloc += PID_ALLOC_CHUNK; } ts_pid_t *p_pid = calloc( 1, sizeof(*p_pid) ); if( !p_pid ) { abort(); //return NULL; } p_pid->i_pid = i_pid; p_sys->pids.pp_all[p_sys->pids.i_all++] = p_pid; p_sys->pids.p_last = p_pid; p_sys->pids.i_last_pid = i_pid; return p_pid; } static ts_pmt_t * GetProgramByID( demux_sys_t *p_sys, int i_program ) { if(unlikely(GetPID(p_sys, 0)->type != TYPE_PAT)) return NULL; ts_pat_t *p_pat = GetPID(p_sys, 0)->u.p_pat; for( int i = 0; i < p_pat->programs.i_size; i++ ) { assert(p_pat->programs.p_elems[i]->type == TYPE_PMT); ts_pmt_t *p_pmt = p_pat->programs.p_elems[i]->u.p_pmt; if( p_pmt->i_number == i_program ) return p_pmt; } return NULL; } #define PROBE_CHUNK_COUNT 250 static int ProbeChunk( demux_t *p_demux, int i_program, bool b_end, int64_t *pi_pcr, bool *pb_found ) { demux_sys_t *p_sys = p_demux->p_sys; int i_count = 0; block_t *p_pkt = NULL; for( ;; ) { *pi_pcr = -1; if( i_count++ > PROBE_CHUNK_COUNT || !( p_pkt = ReadTSPacket( p_demux ) ) ) { break; } const int i_pid = PIDGet( p_pkt ); ts_pid_t *p_pid = GetPID(p_sys, i_pid); p_pid->i_flags |= FLAG_SEEN; if( i_pid != 0x1FFF && (p_pkt->p_buffer[1] & 0x80) == 0 ) /* not corrupt */ { bool b_pcrresult = true; bool b_adaptfield = p_pkt->p_buffer[3] & 0x20; if( b_adaptfield && p_pkt->i_buffer >= 4 + 2 + 5 ) *pi_pcr = GetPCR( p_pkt ); if( *pi_pcr == -1 && (p_pkt->p_buffer[1] & 0xC0) == 0x40 && /* payload start */ (p_pkt->p_buffer[3] & 0xD0) == 0x10 && /* Has payload but is not encrypted */ p_pid->type == TYPE_PES && p_pid->u.p_pes->es.fmt.i_cat != UNKNOWN_ES ) { b_pcrresult = false; mtime_t i_dts = -1; mtime_t i_pts = -1; uint8_t i_stream_id; unsigned i_skip = 4; if ( b_adaptfield ) // adaptation field i_skip += 1 + p_pkt->p_buffer[4]; if ( VLC_SUCCESS == ParsePESHeader( VLC_OBJECT(p_demux), &p_pkt->p_buffer[i_skip], p_pkt->i_buffer - i_skip, &i_skip, &i_dts, &i_pts, &i_stream_id ) ) { if( i_dts != -1 ) *pi_pcr = i_dts; else if( i_pts != -1 ) *pi_pcr = i_pts; } } if( *pi_pcr != -1 ) { ts_pat_t *p_pat = GetPID(p_sys, 0)->u.p_pat; for( int i=0; i<p_pat->programs.i_size; i++ ) { ts_pmt_t *p_pmt = p_pat->programs.p_elems[i]->u.p_pmt; if( ( p_pmt->i_pid_pcr == p_pid->i_pid || ( p_pmt->i_pid_pcr == 0x1FFF && p_pid->p_parent == p_pat->programs.p_elems[i] ) ) ) { if( b_end ) { p_pmt->i_last_dts = *pi_pcr; } /* Start, only keep first */ else if( b_pcrresult && p_pmt->pcr.i_first == -1 ) { p_pmt->pcr.i_first = *pi_pcr; } else if( p_pmt->pcr.i_first_dts < VLC_TS_0 ) { p_pmt->pcr.i_first_dts = FROM_SCALE(*pi_pcr); } if( i_program == 0 || i_program == p_pmt->i_number ) *pb_found = true; } } } } block_Release( p_pkt ); } return i_count; } static int ProbeStart( demux_t *p_demux, int i_program ) { demux_sys_t *p_sys = p_demux->p_sys; const int64_t i_initial_pos = 
stream_Tell( p_sys->stream ); int64_t i_stream_size = stream_Size( p_sys->stream ); int i_probe_count = 0; int64_t i_pos; mtime_t i_pcr = -1; bool b_found = false; do { i_pos = p_sys->i_packet_size * i_probe_count; i_pos = __MIN( i_pos, i_stream_size ); if( stream_Seek( p_sys->stream, i_pos ) ) return VLC_EGENERIC; ProbeChunk( p_demux, i_program, false, &i_pcr, &b_found ); /* Go ahead one more chunk if end of file contained only stuffing packets */ i_probe_count += PROBE_CHUNK_COUNT; } while( i_pos > 0 && (i_pcr == -1 || !b_found) && i_probe_count < (2 * PROBE_CHUNK_COUNT) ); stream_Seek( p_sys->stream, i_initial_pos ); return (b_found) ? VLC_SUCCESS : VLC_EGENERIC; } static int ProbeEnd( demux_t *p_demux, int i_program ) { demux_sys_t *p_sys = p_demux->p_sys; const int64_t i_initial_pos = stream_Tell( p_sys->stream ); int64_t i_stream_size = stream_Size( p_sys->stream ); int i_probe_count = PROBE_CHUNK_COUNT; int64_t i_pos; mtime_t i_pcr = -1; bool b_found = false; do { i_pos = i_stream_size - (p_sys->i_packet_size * i_probe_count); i_pos = __MAX( i_pos, 0 ); if( stream_Seek( p_sys->stream, i_pos ) ) return VLC_EGENERIC; ProbeChunk( p_demux, i_program, true, &i_pcr, &b_found ); /* Go ahead one more chunk if end of file contained only stuffing packets */ i_probe_count += PROBE_CHUNK_COUNT; } while( i_pos > 0 && (i_pcr == -1 || !b_found) && i_probe_count < (6 * PROBE_CHUNK_COUNT) ); stream_Seek( p_sys->stream, i_initial_pos ); return (b_found) ? VLC_SUCCESS : VLC_EGENERIC; } static void ProgramSetPCR( demux_t *p_demux, ts_pmt_t *p_pmt, mtime_t i_pcr ) { demux_sys_t *p_sys = p_demux->p_sys; /* Check if we have enqueued blocks waiting the/before the PCR barrier, and then adapt pcr so they have valid PCR when dequeuing */ if( p_pmt->pcr.i_current == -1 && p_pmt->pcr.b_fix_done ) { mtime_t i_mindts = -1; ts_pat_t *p_pat = GetPID(p_sys, 0)->u.p_pat; for( int i=0; i< p_pat->programs.i_size; i++ ) { ts_pmt_t *p_pmt = p_pat->programs.p_elems[i]->u.p_pmt; for( int j=0; j<p_pmt->e_streams.i_size; j++ ) { ts_pid_t *p_pid = p_pmt->e_streams.p_elems[j]; block_t *p_block = p_pid->u.p_pes->p_prepcr_outqueue; while( p_block && p_block->i_dts == VLC_TS_INVALID ) p_block = p_block->p_next; if( p_block && ( i_mindts == -1 || p_block->i_dts < i_mindts ) ) i_mindts = p_block->i_dts; } } if( i_mindts > VLC_TS_INVALID ) { msg_Dbg( p_demux, "Program %d PCR prequeue fixup %"PRId64"->%"PRId64, p_pmt->i_number, TO_SCALE(i_mindts), i_pcr ); i_pcr = TO_SCALE(i_mindts); } } p_pmt->pcr.i_current = i_pcr; if( p_pmt->pcr.i_first == -1 ) { p_pmt->pcr.i_first = i_pcr; // now seen } if ( p_sys->i_pmt_es ) { es_out_Control( p_demux->out, ES_OUT_SET_GROUP_PCR, p_pmt->i_number, VLC_TS_0 + i_pcr * 100 / 9 ); } } static void PCRHandle( demux_t *p_demux, ts_pid_t *pid, block_t *p_bk ) { demux_sys_t *p_sys = p_demux->p_sys; mtime_t i_pcr = GetPCR( p_bk ); if( i_pcr < 0 ) return; pid->probed.i_pcr_count++; if( p_sys->i_pmt_es <= 0 ) return; if(unlikely(GetPID(p_sys, 0)->type != TYPE_PAT)) return; /* Search program and set the PCR */ ts_pat_t *p_pat = GetPID(p_sys, 0)->u.p_pat; for( int i = 0; i < p_pat->programs.i_size; i++ ) { ts_pmt_t *p_pmt = p_pat->programs.p_elems[i]->u.p_pmt; mtime_t i_program_pcr = TimeStampWrapAround( p_pmt, i_pcr ); if( p_pmt->i_pid_pcr == 0x1FFF ) /* That program has no dedicated PCR pid ISO/IEC 13818-1 2.4.4.9 */ { if( pid->p_parent == p_pat->programs.p_elems[i] ) /* PCR shall be on pid itself */ { /* ? update PCR for the whole group program ? 
*/ ProgramSetPCR( p_demux, p_pmt, i_program_pcr ); } } else /* set PCR provided by current pid to program(s) referencing it */ { /* Can be dedicated PCR pid (no owned then) or another pid (owner == pmt) */ if( p_pmt->i_pid_pcr == pid->i_pid ) /* If that program references current pid as PCR */ { /* We've found a target group for update */ ProgramSetPCR( p_demux, p_pmt, i_program_pcr ); } } } } static int FindPCRCandidate( ts_pmt_t *p_pmt ) { ts_pid_t *p_cand = NULL; int i_previous = p_pmt->i_pid_pcr; for( int i=0; i<p_pmt->e_streams.i_size; i++ ) { ts_pid_t *p_pid = p_pmt->e_streams.p_elems[i]; if( SEEN(p_pid) && (!p_cand || p_cand->i_pid != i_previous) ) { if( p_pid->probed.i_pcr_count ) /* check PCR frequency first */ { if( !p_cand || p_pid->probed.i_pcr_count > p_cand->probed.i_pcr_count ) { p_cand = p_pid; continue; } } if( p_pid->u.p_pes->es.fmt.i_cat == AUDIO_ES ) { if( !p_cand ) p_cand = p_pid; } else if ( p_pid->u.p_pes->es.fmt.i_cat == VIDEO_ES ) /* Otherwise prioritize video dts */ { if( !p_cand || p_cand->u.p_pes->es.fmt.i_cat == AUDIO_ES ) p_cand = p_pid; } } } if( p_cand ) return p_cand->i_pid; else return 0x1FFF; } /* Tries to reselect a new PCR when none has been received */ static void PCRFixHandle( demux_t *p_demux, ts_pmt_t *p_pmt, block_t *p_block ) { if ( p_pmt->pcr.b_disable || p_pmt->pcr.b_fix_done ) { return; } /* Record the first data packet timestamp in case there wont be any PCR */ else if( !p_pmt->pcr.i_first_dts ) { p_pmt->pcr.i_first_dts = p_block->i_dts; } else if( p_block->i_dts - p_pmt->pcr.i_first_dts > CLOCK_FREQ / 2 ) /* "PCR repeat rate shall not exceed 100ms" */ { if( p_pmt->pcr.i_current < 0 && GetPID( p_demux->p_sys, p_pmt->i_pid_pcr )->probed.i_pcr_count == 0 ) { int i_cand = FindPCRCandidate( p_pmt ); p_pmt->i_pid_pcr = i_cand; if ( GetPID( p_demux->p_sys, p_pmt->i_pid_pcr )->probed.i_pcr_count == 0 ) p_pmt->pcr.b_disable = true; msg_Warn( p_demux, "No PCR received for program %d, set up workaround using pid %d", p_pmt->i_number, i_cand ); UpdatePESFilters( p_demux, p_demux->p_sys->b_es_all ); } p_pmt->pcr.b_fix_done = true; } } static bool GatherData( demux_t *p_demux, ts_pid_t *pid, block_t *p_bk ) { const uint8_t *p = p_bk->p_buffer; const bool b_unit_start = p[1]&0x40; const bool b_adaptation = p[3]&0x20; const bool b_payload = p[3]&0x10; const int i_cc = p[3]&0x0f; /* continuity counter */ bool b_discontinuity = false; /* discontinuity */ /* transport_scrambling_control is ignored */ int i_skip = 0; bool i_ret = false; #if 0 msg_Dbg( p_demux, "pid=%d unit_start=%d adaptation=%d payload=%d " "cc=0x%x", pid->i_pid, b_unit_start, b_adaptation, b_payload, i_cc ); #endif /* For now, ignore additional error correction * TODO: handle Reed-Solomon 204,188 error correction */ p_bk->i_buffer = TS_PACKET_SIZE_188; if( p[1]&0x80 ) { msg_Dbg( p_demux, "transport_error_indicator set (pid=%d)", pid->i_pid ); if( pid->u.p_pes->p_data ) //&& pid->es->fmt.i_cat == VIDEO_ES ) pid->u.p_pes->p_data->i_flags |= BLOCK_FLAG_CORRUPTED; } if( p_demux->p_sys->csa ) { vlc_mutex_lock( &p_demux->p_sys->csa_lock ); csa_Decrypt( p_demux->p_sys->csa, p_bk->p_buffer, p_demux->p_sys->i_csa_pkt_size ); vlc_mutex_unlock( &p_demux->p_sys->csa_lock ); } if( !b_adaptation ) { /* We don't have any adaptation_field, so payload starts * immediately after the 4 byte TS header */ i_skip = 4; } else { /* p[4] is adaptation_field_length minus one */ i_skip = 5 + p[4]; if( p[4] > 0 ) { /* discontinuity indicator found in stream */ b_discontinuity = (p[5]&0x80) ? 
true : false; if( b_discontinuity && pid->u.p_pes->p_data ) { msg_Warn( p_demux, "discontinuity indicator (pid=%d) ", pid->i_pid ); /* pid->es->p_data->i_flags |= BLOCK_FLAG_DISCONTINUITY; */ } #if 0 if( p[5]&0x40 ) msg_Dbg( p_demux, "random access indicator (pid=%d) ", pid->i_pid ); #endif } } /* Test continuity counter */ /* continuous when (one of this): * diff == 1 * diff == 0 and payload == 0 * diff == 0 and duplicate packet (playload != 0) <- should we * test the content ? */ const int i_diff = ( i_cc - pid->i_cc )&0x0f; if( b_payload && i_diff == 1 ) { pid->i_cc = ( pid->i_cc + 1 ) & 0xf; } else { if( pid->i_cc == 0xff ) { msg_Warn( p_demux, "first packet for pid=%d cc=0x%x", pid->i_pid, i_cc ); pid->i_cc = i_cc; } else if( i_diff != 0 && !b_discontinuity ) { msg_Warn( p_demux, "discontinuity received 0x%x instead of 0x%x (pid=%d)", i_cc, ( pid->i_cc + 1 )&0x0f, pid->i_pid ); pid->i_cc = i_cc; if( pid->u.p_pes->p_data && pid->u.p_pes->es.fmt.i_cat != VIDEO_ES && pid->u.p_pes->es.fmt.i_cat != AUDIO_ES ) { /* Small audio/video artifacts are usually better than * dropping full frames */ pid->u.p_pes->p_data->i_flags |= BLOCK_FLAG_CORRUPTED; } } } PCRHandle( p_demux, pid, p_bk ); if( i_skip >= 188 ) { block_Release( p_bk ); return i_ret; } /* We have to gather it */ p_bk->p_buffer += i_skip; p_bk->i_buffer -= i_skip; if( b_unit_start ) { if( pid->u.p_pes->data_type == TS_ES_DATA_TABLE_SECTION && p_bk->i_buffer > 0 ) { int i_pointer_field = __MIN( p_bk->p_buffer[0], p_bk->i_buffer - 1 ); block_t *p = block_Duplicate( p_bk ); if( p ) { p->i_buffer = i_pointer_field; p->p_buffer++; block_ChainLastAppend( &pid->u.p_pes->pp_last, p ); } p_bk->i_buffer -= 1 + i_pointer_field; p_bk->p_buffer += 1 + i_pointer_field; } if( pid->u.p_pes->p_data ) { ParseData( p_demux, pid ); i_ret = true; } block_ChainLastAppend( &pid->u.p_pes->pp_last, p_bk ); if( pid->u.p_pes->data_type == TS_ES_DATA_PES ) { if( p_bk->i_buffer > 6 ) { pid->u.p_pes->i_data_size = GetWBE( &p_bk->p_buffer[4] ); if( pid->u.p_pes->i_data_size > 0 ) { pid->u.p_pes->i_data_size += 6; } } } else if( pid->u.p_pes->data_type == TS_ES_DATA_TABLE_SECTION ) { if( p_bk->i_buffer > 3 && p_bk->p_buffer[0] != 0xff ) { pid->u.p_pes->i_data_size = 3 + (((p_bk->p_buffer[1] & 0xf) << 8) | p_bk->p_buffer[2]); } } pid->u.p_pes->i_data_gathered += p_bk->i_buffer; if( pid->u.p_pes->i_data_size > 0 && pid->u.p_pes->i_data_gathered >= pid->u.p_pes->i_data_size ) { ParseData( p_demux, pid ); i_ret = true; } } else { if( pid->u.p_pes->p_data == NULL ) { /* msg_Dbg( p_demux, "broken packet" ); */ block_Release( p_bk ); } else { block_ChainLastAppend( &pid->u.p_pes->pp_last, p_bk ); pid->u.p_pes->i_data_gathered += p_bk->i_buffer; if( pid->u.p_pes->i_data_size > 0 && pid->u.p_pes->i_data_gathered >= pid->u.p_pes->i_data_size ) { ParseData( p_demux, pid ); i_ret = true; } } } return i_ret; } static void PIDFillFormat( es_format_t *fmt, int i_stream_type, ts_es_data_type_t *p_datatype ) { switch( i_stream_type ) { case 0x01: /* MPEG-1 video */ case 0x02: /* MPEG-2 video */ case 0x80: /* MPEG-2 MOTO video */ es_format_Init( fmt, VIDEO_ES, VLC_CODEC_MPGV ); break; case 0x03: /* MPEG-1 audio */ case 0x04: /* MPEG-2 audio */ es_format_Init( fmt, AUDIO_ES, VLC_CODEC_MPGA ); break; case 0x11: /* MPEG4 (audio) LATM */ case 0x0f: /* ISO/IEC 13818-7 Audio with ADTS transport syntax */ case 0x1c: /* ISO/IEC 14496-3 Audio, without using any additional transport syntax, such as DST, ALS and SLS */ es_format_Init( fmt, AUDIO_ES, VLC_CODEC_MP4A ); break; case 0x10: /* MPEG4 
(video) */ es_format_Init( fmt, VIDEO_ES, VLC_CODEC_MP4V ); break; case 0x1B: /* H264 <- check transport syntax/needed descriptor */ es_format_Init( fmt, VIDEO_ES, VLC_CODEC_H264 ); break; case 0x24: /* HEVC */ es_format_Init( fmt, VIDEO_ES, VLC_CODEC_HEVC ); break; case 0x42: /* CAVS (Chinese AVS) */ es_format_Init( fmt, VIDEO_ES, VLC_CODEC_CAVS ); break; case 0x81: /* A52 (audio) */ es_format_Init( fmt, AUDIO_ES, VLC_CODEC_A52 ); break; case 0x82: /* SCTE-27 (sub) */ es_format_Init( fmt, SPU_ES, VLC_CODEC_SCTE_27 ); *p_datatype = TS_ES_DATA_TABLE_SECTION; break; case 0x84: /* SDDS (audio) */ es_format_Init( fmt, AUDIO_ES, VLC_CODEC_SDDS ); break; case 0x85: /* DTS (audio) */ es_format_Init( fmt, AUDIO_ES, VLC_CODEC_DTS ); break; case 0x87: /* E-AC3 */ es_format_Init( fmt, AUDIO_ES, VLC_CODEC_EAC3 ); break; case 0x91: /* A52 vls (audio) */ es_format_Init( fmt, AUDIO_ES, VLC_FOURCC( 'a', '5', '2', 'b' ) ); break; case 0x92: /* DVD_SPU vls (sub) */ es_format_Init( fmt, SPU_ES, VLC_FOURCC( 's', 'p', 'u', 'b' ) ); break; case 0x94: /* SDDS (audio) */ es_format_Init( fmt, AUDIO_ES, VLC_FOURCC( 's', 'd', 'd', 'b' ) ); break; case 0xa0: /* MSCODEC vlc (video) (fixed later) */ es_format_Init( fmt, UNKNOWN_ES, 0 ); break; case 0x06: /* PES_PRIVATE (fixed later) */ case 0x12: /* MPEG-4 generic (sub/scene/...) (fixed later) */ case 0xEA: /* Privately managed ES (VC-1) (fixed later */ default: es_format_Init( fmt, UNKNOWN_ES, 0 ); break; } /* PES packets usually contain truncated frames */ fmt->b_packetized = false; } /**************************************************************************** **************************************************************************** ** libdvbpsi callbacks **************************************************************************** ****************************************************************************/ static bool ProgramIsSelected( demux_sys_t *p_sys, uint16_t i_pgrm ) { for(int i=0; i<p_sys->programs.i_size; i++) if( p_sys->programs.p_elems[i] == i_pgrm ) return true; return false; } static void ValidateDVBMeta( demux_t *p_demux, int i_pid ) { demux_sys_t *p_sys = p_demux->p_sys; if( !p_sys->b_dvb_meta || ( i_pid != 0x11 && i_pid != 0x12 && i_pid != 0x14 ) ) return; msg_Warn( p_demux, "Switching to non DVB mode" ); /* This doesn't look like a DVB stream so don't try * parsing the SDT/EDT/TDT */ PIDRelease( p_demux, GetPID(p_sys, 0x11) ); PIDRelease( p_demux, GetPID(p_sys, 0x12) ); PIDRelease( p_demux, GetPID(p_sys, 0x14) ); p_sys->b_dvb_meta = false; } #include "../dvb-text.h" static char *EITConvertToUTF8( demux_t *p_demux, const unsigned char *psz_instring, size_t i_length, bool b_broken ) { demux_sys_t *p_sys = p_demux->p_sys; #ifdef HAVE_ARIBB24 if( p_sys->arib.e_mode == ARIBMODE_ENABLED ) { if ( !p_sys->arib.p_instance ) p_sys->arib.p_instance = arib_instance_new( p_demux ); if ( !p_sys->arib.p_instance ) return NULL; arib_decoder_t *p_decoder = arib_get_decoder( p_sys->arib.p_instance ); if ( !p_decoder ) return NULL; char *psz_outstring = NULL; size_t i_out; i_out = i_length * 4; psz_outstring = (char*) calloc( i_out + 1, sizeof(char) ); if( !psz_outstring ) return NULL; arib_initialize_decoder( p_decoder ); i_out = arib_decode_buffer( p_decoder, psz_instring, i_length, psz_outstring, i_out ); arib_finalize_decoder( p_decoder ); return psz_outstring; } #else VLC_UNUSED(p_sys); #endif /* Deal with no longer broken providers (no switch byte but sending ISO_8859-1 instead of ISO_6937) without removing them from the broken providers table (keep the 
entry for correctly handling recorded TS). */ b_broken = b_broken && i_length && *psz_instring > 0x20; if( b_broken ) return FromCharset( "ISO_8859-1", psz_instring, i_length ); return vlc_from_EIT( psz_instring, i_length ); } static void SDTCallBack( demux_t *p_demux, dvbpsi_sdt_t *p_sdt ) { demux_sys_t *p_sys = p_demux->p_sys; ts_pid_t *sdt = GetPID(p_sys, 0x11); dvbpsi_sdt_service_t *p_srv; msg_Dbg( p_demux, "SDTCallBack called" ); if( p_sys->es_creation != CREATE_ES || !p_sdt->b_current_next || p_sdt->i_version == sdt->u.p_psi->i_version ) { dvbpsi_sdt_delete( p_sdt ); return; } msg_Dbg( p_demux, "new SDT ts_id=%d version=%d current_next=%d " "network_id=%d", p_sdt->i_extension, p_sdt->i_version, p_sdt->b_current_next, p_sdt->i_network_id ); p_sys->b_broken_charset = false; for( p_srv = p_sdt->p_first_service; p_srv; p_srv = p_srv->p_next ) { vlc_meta_t *p_meta; dvbpsi_descriptor_t *p_dr; const char *psz_type = NULL; const char *psz_status = NULL; msg_Dbg( p_demux, " * service id=%d eit schedule=%d present=%d " "running=%d free_ca=%d", p_srv->i_service_id, p_srv->b_eit_schedule, p_srv->b_eit_present, p_srv->i_running_status, p_srv->b_free_ca ); if( p_sys->vdr.i_service && p_srv->i_service_id != p_sys->vdr.i_service ) { msg_Dbg( p_demux, " * service id=%d skipped (not declared in vdr header)", p_sys->vdr.i_service ); continue; } p_meta = vlc_meta_New(); for( p_dr = p_srv->p_first_descriptor; p_dr; p_dr = p_dr->p_next ) { if( p_dr->i_tag == 0x48 ) { static const char *ppsz_type[17] = { "Reserved", "Digital television service", "Digital radio sound service", "Teletext service", "NVOD reference service", "NVOD time-shifted service", "Mosaic service", "PAL coded signal", "SECAM coded signal", "D/D2-MAC", "FM Radio", "NTSC coded signal", "Data broadcast service", "Reserved for Common Interface Usage", "RCS Map (see EN 301 790 [35])", "RCS FLS (see EN 301 790 [35])", "DVB MHP service" }; dvbpsi_service_dr_t *pD = dvbpsi_DecodeServiceDr( p_dr ); char *str1 = NULL; char *str2 = NULL; /* Workarounds for broadcasters with broken EPG */ if( p_sdt->i_network_id == 133 ) p_sys->b_broken_charset = true; /* SKY DE & BetaDigital use ISO8859-1 */ /* List of providers using ISO8859-1 */ static const char ppsz_broken_providers[][8] = { "CSAT", /* CanalSat FR */ "GR1", /* France televisions */ "MULTI4", /* NT1 */ "MR5", /* France 2/M6 HD */ "" }; for( int i = 0; *ppsz_broken_providers[i]; i++ ) { const size_t i_length = strlen(ppsz_broken_providers[i]); if( pD->i_service_provider_name_length == i_length && !strncmp( (char *)pD->i_service_provider_name, ppsz_broken_providers[i], i_length ) ) p_sys->b_broken_charset = true; } /* FIXME: Digital+ ES also uses ISO8859-1 */ str1 = EITConvertToUTF8(p_demux, pD->i_service_provider_name, pD->i_service_provider_name_length, p_sys->b_broken_charset ); str2 = EITConvertToUTF8(p_demux, pD->i_service_name, pD->i_service_name_length, p_sys->b_broken_charset ); msg_Dbg( p_demux, " - type=%d provider=%s name=%s", pD->i_service_type, str1, str2 ); vlc_meta_SetTitle( p_meta, str2 ); vlc_meta_SetPublisher( p_meta, str1 ); if( pD->i_service_type >= 0x01 && pD->i_service_type <= 0x10 ) psz_type = ppsz_type[pD->i_service_type]; free( str1 ); free( str2 ); } } if( p_srv->i_running_status >= 0x01 && p_srv->i_running_status <= 0x04 ) { static const char *ppsz_status[5] = { "Unknown", "Not running", "Starts in a few seconds", "Pausing", "Running" }; psz_status = ppsz_status[p_srv->i_running_status]; } if( psz_type ) vlc_meta_AddExtra( p_meta, "Type", psz_type ); if( psz_status ) 
vlc_meta_AddExtra( p_meta, "Status", psz_status ); es_out_Control( p_demux->out, ES_OUT_SET_GROUP_META, p_srv->i_service_id, p_meta ); vlc_meta_Delete( p_meta ); } sdt->u.p_psi->i_version = p_sdt->i_version; dvbpsi_sdt_delete( p_sdt ); } /* i_year: year - 1900 i_month: 0-11 i_mday: 1-31 i_hour: 0-23 i_minute: 0-59 i_second: 0-59 */ static int64_t vlc_timegm( int i_year, int i_month, int i_mday, int i_hour, int i_minute, int i_second ) { static const int pn_day[12+1] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 }; int64_t i_day; if( i_year < 70 || i_month < 0 || i_month > 11 || i_mday < 1 || i_mday > 31 || i_hour < 0 || i_hour > 23 || i_minute < 0 || i_minute > 59 || i_second < 0 || i_second > 59 ) return -1; /* Count the number of days */ i_day = 365 * (i_year-70) + pn_day[i_month] + i_mday - 1; #define LEAP(y) ( ((y)%4) == 0 && (((y)%100) != 0 || ((y)%400) == 0) ? 1 : 0) for( int i = 70; i < i_year; i++ ) i_day += LEAP(1900+i); if( i_month > 1 ) i_day += LEAP(1900+i_year); #undef LEAP /**/ return ((24*i_day + i_hour)*60 + i_minute)*60 + i_second; } static void EITDecodeMjd( int i_mjd, int *p_y, int *p_m, int *p_d ) { const int yp = (int)( ( (double)i_mjd - 15078.2)/365.25 ); const int mp = (int)( ((double)i_mjd - 14956.1 - (int)(yp * 365.25)) / 30.6001 ); const int c = ( mp == 14 || mp == 15 ) ? 1 : 0; *p_y = 1900 + yp + c*1; *p_m = mp - 1 - c*12; *p_d = i_mjd - 14956 - (int)(yp*365.25) - (int)(mp*30.6001); } #define CVT_FROM_BCD(v) ((((v) >> 4)&0xf)*10 + ((v)&0xf)) static int64_t EITConvertStartTime( uint64_t i_date ) { const int i_mjd = i_date >> 24; const int i_hour = CVT_FROM_BCD(i_date >> 16); const int i_minute = CVT_FROM_BCD(i_date >> 8); const int i_second = CVT_FROM_BCD(i_date ); int i_year; int i_month; int i_day; /* if all 40 bits are 1, the start is unknown */ if( i_date == UINT64_C(0xffffffffff) ) return -1; EITDecodeMjd( i_mjd, &i_year, &i_month, &i_day ); return vlc_timegm( i_year - 1900, i_month - 1, i_day, i_hour, i_minute, i_second ); } static int EITConvertDuration( uint32_t i_duration ) { return CVT_FROM_BCD(i_duration >> 16) * 3600 + CVT_FROM_BCD(i_duration >> 8 ) * 60 + CVT_FROM_BCD(i_duration ); } #undef CVT_FROM_BCD static void TDTCallBack( demux_t *p_demux, dvbpsi_tot_t *p_tdt ) { demux_sys_t *p_sys = p_demux->p_sys; p_sys->i_tdt_delta = CLOCK_FREQ * EITConvertStartTime( p_tdt->i_utc_time ) - mdate(); dvbpsi_tot_delete(p_tdt); } static void EITCallBack( demux_t *p_demux, dvbpsi_eit_t *p_eit, bool b_current_following ) { demux_sys_t *p_sys = p_demux->p_sys; dvbpsi_eit_event_t *p_evt; vlc_epg_t *p_epg; msg_Dbg( p_demux, "EITCallBack called" ); if( !p_eit->b_current_next ) { dvbpsi_eit_delete( p_eit ); return; } msg_Dbg( p_demux, "new EIT service_id=%d version=%d current_next=%d " "ts_id=%d network_id=%d segment_last_section_number=%d " "last_table_id=%d", p_eit->i_extension, p_eit->i_version, p_eit->b_current_next, p_eit->i_ts_id, p_eit->i_network_id, p_eit->i_segment_last_section_number, p_eit->i_last_table_id ); p_epg = vlc_epg_New( NULL ); for( p_evt = p_eit->p_first_event; p_evt; p_evt = p_evt->p_next ) { dvbpsi_descriptor_t *p_dr; char *psz_name = NULL; char *psz_text = NULL; char *psz_extra = strdup(""); int64_t i_start; int i_duration; int i_min_age = 0; int64_t i_tot_time = 0; i_start = EITConvertStartTime( p_evt->i_start_time ); i_duration = EITConvertDuration( p_evt->i_duration ); if( p_sys->arib.e_mode == ARIBMODE_ENABLED ) { if( p_sys->i_tdt_delta == 0 ) p_sys->i_tdt_delta = CLOCK_FREQ * (i_start + i_duration - 5) - mdate(); i_tot_time = 
(mdate() + p_sys->i_tdt_delta) / CLOCK_FREQ; tzset(); // JST -> UTC i_start += timezone; // FIXME: what about DST? i_tot_time += timezone; if( p_evt->i_running_status == 0x00 && (i_start - 5 < i_tot_time && i_tot_time < i_start + i_duration + 5) ) { p_evt->i_running_status = 0x04; msg_Dbg( p_demux, " EIT running status 0x00 -> 0x04" ); } } msg_Dbg( p_demux, " * event id=%d start_time:%d duration=%d " "running=%d free_ca=%d", p_evt->i_event_id, (int)i_start, (int)i_duration, p_evt->i_running_status, p_evt->b_free_ca ); for( p_dr = p_evt->p_first_descriptor; p_dr; p_dr = p_dr->p_next ) { switch(p_dr->i_tag) { case 0x4d: { dvbpsi_short_event_dr_t *pE = dvbpsi_DecodeShortEventDr( p_dr ); /* Only take first description, as we don't handle language-info for epg atm*/ if( pE && psz_name == NULL ) { psz_name = EITConvertToUTF8( p_demux, pE->i_event_name, pE->i_event_name_length, p_sys->b_broken_charset ); free( psz_text ); psz_text = EITConvertToUTF8( p_demux, pE->i_text, pE->i_text_length, p_sys->b_broken_charset ); msg_Dbg( p_demux, " - short event lang=%3.3s '%s' : '%s'", pE->i_iso_639_code, psz_name, psz_text ); } } break; case 0x4e: { dvbpsi_extended_event_dr_t *pE = dvbpsi_DecodeExtendedEventDr( p_dr ); if( pE ) { msg_Dbg( p_demux, " - extended event lang=%3.3s [%d/%d]", pE->i_iso_639_code, pE->i_descriptor_number, pE->i_last_descriptor_number ); if( pE->i_text_length > 0 ) { char *psz_text = EITConvertToUTF8( p_demux, pE->i_text, pE->i_text_length, p_sys->b_broken_charset ); if( psz_text ) { msg_Dbg( p_demux, " - text='%s'", psz_text ); psz_extra = xrealloc( psz_extra, strlen(psz_extra) + strlen(psz_text) + 1 ); strcat( psz_extra, psz_text ); free( psz_text ); } } for( int i = 0; i < pE->i_entry_count; i++ ) { char *psz_dsc = EITConvertToUTF8( p_demux, pE->i_item_description[i], pE->i_item_description_length[i], p_sys->b_broken_charset ); char *psz_itm = EITConvertToUTF8( p_demux, pE->i_item[i], pE->i_item_length[i], p_sys->b_broken_charset ); if( psz_dsc && psz_itm ) { msg_Dbg( p_demux, " - desc='%s' item='%s'", psz_dsc, psz_itm ); #if 0 psz_extra = xrealloc( psz_extra, strlen(psz_extra) + strlen(psz_dsc) + strlen(psz_itm) + 3 + 1 ); strcat( psz_extra, "(" ); strcat( psz_extra, psz_dsc ); strcat( psz_extra, " " ); strcat( psz_extra, psz_itm ); strcat( psz_extra, ")" ); #endif } free( psz_dsc ); free( psz_itm ); } } } break; case 0x55: { dvbpsi_parental_rating_dr_t *pR = dvbpsi_DecodeParentalRatingDr( p_dr ); if ( pR ) { for ( int i = 0; i < pR->i_ratings_number; i++ ) { const dvbpsi_parental_rating_t *p_rating = & pR->p_parental_rating[ i ]; if ( p_rating->i_rating > 0x00 && p_rating->i_rating <= 0x0F ) { if ( p_rating->i_rating + 3 > i_min_age ) i_min_age = p_rating->i_rating + 3; msg_Dbg( p_demux, " - parental control set to %d years", i_min_age ); } } } } break; default: msg_Dbg( p_demux, " - event unknown dr 0x%x(%d)", p_dr->i_tag, p_dr->i_tag ); break; } } /* */ if( i_start > 0 && psz_name && psz_text) vlc_epg_AddEvent( p_epg, i_start, i_duration, psz_name, psz_text, *psz_extra ? 
psz_extra : NULL, i_min_age ); /* Update "now playing" field */ if( p_evt->i_running_status == 0x04 && i_start > 0 && psz_name && psz_text ) vlc_epg_SetCurrent( p_epg, i_start ); free( psz_name ); free( psz_text ); free( psz_extra ); } if( p_epg->i_event > 0 ) { if( b_current_following && ( p_sys->programs.i_size == 0 || p_sys->programs.p_elems[0] == p_eit->i_extension ) ) { p_sys->i_dvb_length = 0; p_sys->i_dvb_start = 0; if( p_epg->p_current ) { p_sys->i_dvb_start = CLOCK_FREQ * p_epg->p_current->i_start; p_sys->i_dvb_length = CLOCK_FREQ * p_epg->p_current->i_duration; } } es_out_Control( p_demux->out, ES_OUT_SET_GROUP_EPG, p_eit->i_extension, p_epg ); } vlc_epg_Delete( p_epg ); dvbpsi_eit_delete( p_eit ); } static void EITCallBackCurrentFollowing( demux_t *p_demux, dvbpsi_eit_t *p_eit ) { EITCallBack( p_demux, p_eit, true ); } static void EITCallBackSchedule( demux_t *p_demux, dvbpsi_eit_t *p_eit ) { EITCallBack( p_demux, p_eit, false ); } static void PSINewTableCallBack( dvbpsi_t *h, uint8_t i_table_id, uint16_t i_extension, demux_t *p_demux ) { demux_sys_t *p_sys = p_demux->p_sys; assert( h ); #if 0 msg_Dbg( p_demux, "PSINewTableCallBack: table 0x%x(%d) ext=0x%x(%d)", i_table_id, i_table_id, i_extension, i_extension ); #endif if( GetPID(p_sys, 0)->u.p_pat->i_version != -1 && i_table_id == 0x42 ) { msg_Dbg( p_demux, "PSINewTableCallBack: table 0x%x(%d) ext=0x%x(%d)", i_table_id, i_table_id, i_extension, i_extension ); if( !dvbpsi_sdt_attach( h, i_table_id, i_extension, (dvbpsi_sdt_callback)SDTCallBack, p_demux ) ) msg_Err( p_demux, "PSINewTableCallback: failed attaching SDTCallback" ); } else if( GetPID(p_sys, 0x11)->u.p_psi->i_version != -1 && ( i_table_id == 0x4e || /* Current/Following */ (i_table_id >= 0x50 && i_table_id <= 0x5f) ) ) /* Schedule */ { msg_Dbg( p_demux, "PSINewTableCallBack: table 0x%x(%d) ext=0x%x(%d)", i_table_id, i_table_id, i_extension, i_extension ); dvbpsi_eit_callback cb = i_table_id == 0x4e ? 
(dvbpsi_eit_callback)EITCallBackCurrentFollowing : (dvbpsi_eit_callback)EITCallBackSchedule;
        if( !dvbpsi_eit_attach( h, i_table_id, i_extension, cb, p_demux ) )
            msg_Err( p_demux, "PSINewTableCallback: failed attaching EITCallback" );
    }
    else if( GetPID(p_sys, 0x11)->u.p_psi->i_version != -1 &&
             (i_table_id == 0x70 /* TDT */ || i_table_id == 0x73 /* TOT */) )
    {
        msg_Dbg( p_demux, "PSINewTableCallBack: table 0x%x(%d) ext=0x%x(%d)",
                 i_table_id, i_table_id, i_extension, i_extension );
        if( !dvbpsi_tot_attach( h, i_table_id, i_extension,
                                (dvbpsi_tot_callback)TDTCallBack, p_demux ) )
            msg_Err( p_demux, "PSINewTableCallback: failed attaching TDTCallback" );
    }
}

/*****************************************************************************
 * PMT callback and helpers
 *****************************************************************************/
static dvbpsi_descriptor_t *PMTEsFindDescriptor( const dvbpsi_pmt_es_t *p_es,
                                                 int i_tag )
{
    dvbpsi_descriptor_t *p_dr = p_es->p_first_descriptor;
    while( p_dr && ( p_dr->i_tag != i_tag ) )
        p_dr = p_dr->p_next;
    return p_dr;
}

static bool PMTEsHasRegistration( demux_t *p_demux,
                                  const dvbpsi_pmt_es_t *p_es,
                                  const char *psz_tag )
{
    dvbpsi_descriptor_t *p_dr = PMTEsFindDescriptor( p_es, 0x05 );
    if( !p_dr )
        return false;

    if( p_dr->i_length < 4 )
    {
        msg_Warn( p_demux, "invalid Registration Descriptor" );
        return false;
    }

    assert( strlen(psz_tag) == 4 );
    return !memcmp( p_dr->p_data, psz_tag, 4 );
}

static bool PMTEsHasComponentTag( const dvbpsi_pmt_es_t *p_es,
                                  int i_component_tag )
{
    dvbpsi_descriptor_t *p_dr = PMTEsFindDescriptor( p_es, 0x52 );
    if( !p_dr )
        return false;
    dvbpsi_stream_identifier_dr_t *p_si = dvbpsi_DecodeStreamIdentifierDr( p_dr );
    if( !p_si )
        return false;

    return p_si->i_component_tag == i_component_tag;
}

static const es_mpeg4_descriptor_t * GetMPEG4DescByEsId( const ts_pmt_t *pmt,
                                                         uint16_t i_es_id )
{
    for( int i = 0; i < ES_DESCRIPTOR_COUNT; i++ )
    {
        const es_mpeg4_descriptor_t *es_descr = &pmt->iod->es_descr[i];
        if( es_descr->i_es_id == i_es_id && es_descr->b_ok )
            return es_descr;
    }
    for( int i=0; i<pmt->od.objects.i_size; i++ )
    {
        const od_descriptor_t *od = pmt->od.objects.p_elems[i];
        for( int j = 0; j < ES_DESCRIPTOR_COUNT; j++ )
        {
            const es_mpeg4_descriptor_t *es_descr = &od->es_descr[j];
            if( es_descr->i_es_id == i_es_id && es_descr->b_ok )
                return es_descr;
        }
    }
    return NULL;
}

static ts_pes_es_t * GetPMTESBySLEsId( ts_pmt_t *pmt, uint16_t i_sl_es_id )
{
    for( int i=0; i< pmt->e_streams.i_size; i++ )
    {
        ts_pes_es_t *p_es = &pmt->e_streams.p_elems[i]->u.p_pes->es;
        if( p_es->i_sl_es_id == i_sl_es_id )
            return p_es;
    }
    return NULL;
}

static bool SetupISO14496LogicalStream( demux_t *p_demux,
                                        const decoder_config_descriptor_t *dcd,
                                        es_format_t *p_fmt )
{
    msg_Dbg( p_demux, " - IOD objecttype: %"PRIx8" streamtype:%"PRIx8,
             dcd->i_objectTypeIndication, dcd->i_streamType );

    if( dcd->i_streamType == 0x04 ) /* VisualStream */
    {
        p_fmt->i_cat = VIDEO_ES;
        switch( dcd->i_objectTypeIndication )
        {
        case 0x0B: /* mpeg4 sub */
            p_fmt->i_cat = SPU_ES;
            p_fmt->i_codec = VLC_CODEC_SUBT;
            break;
        case 0x20: /* mpeg4 */
            p_fmt->i_codec = VLC_CODEC_MP4V;
            break;
        case 0x21: /* h264 */
            p_fmt->i_codec = VLC_CODEC_H264;
            break;
        case 0x60:
        case 0x61:
        case 0x62:
        case 0x63:
        case 0x64:
        case 0x65: /* mpeg2 */
            p_fmt->i_codec = VLC_CODEC_MPGV;
            break;
        case 0x6a: /* mpeg1 */
            p_fmt->i_codec = VLC_CODEC_MPGV;
            break;
        case 0x6c: /* jpeg */
            p_fmt->i_codec = VLC_CODEC_JPEG;
            break;
        default:
            p_fmt->i_cat = UNKNOWN_ES;
            break;
        }
    }
    else if( dcd->i_streamType == 0x05 ) /* AudioStream */
    {
        p_fmt->i_cat = AUDIO_ES;
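        /* The objectTypeIndication cases handled below follow the usual
         * ISO/IEC 14496-1 (MPEG-4 Systems) assignments for audio: 0x40 is
         * MPEG-4 Audio (AAC), 0x66-0x68 are the MPEG-2 AAC profiles, 0x69 is
         * MPEG-2 Audio (ISO/IEC 13818-3) and 0x6b is MPEG-1 Audio
         * (ISO/IEC 11172-3); anything else is left as UNKNOWN_ES. */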
switch( dcd->i_objectTypeIndication ) { case 0x40: /* mpeg4 */ p_fmt->i_codec = VLC_CODEC_MP4A; break; case 0x66: case 0x67: case 0x68: /* mpeg2 aac */ p_fmt->i_codec = VLC_CODEC_MP4A; break; case 0x69: /* mpeg2 */ p_fmt->i_codec = VLC_CODEC_MPGA; break; case 0x6b: /* mpeg1 */ p_fmt->i_codec = VLC_CODEC_MPGA; break; default: p_fmt->i_cat = UNKNOWN_ES; break; } } else { p_fmt->i_cat = UNKNOWN_ES; } if( p_fmt->i_cat != UNKNOWN_ES ) { p_fmt->i_extra = __MIN(dcd->i_extra, INT32_MAX); if( p_fmt->i_extra > 0 ) { p_fmt->p_extra = malloc( p_fmt->i_extra ); if( p_fmt->p_extra ) memcpy( p_fmt->p_extra, dcd->p_extra, p_fmt->i_extra ); else p_fmt->i_extra = 0; } } return true; } static void SetupISO14496Descriptors( demux_t *p_demux, ts_pes_es_t *p_es, const ts_pmt_t *p_pmt, const dvbpsi_pmt_es_t *p_dvbpsies ) { const dvbpsi_descriptor_t *p_dr = p_dvbpsies->p_first_descriptor; while( p_dr ) { uint8_t i_length = p_dr->i_length; switch( p_dr->i_tag ) { case 0x1f: /* FMC Descriptor */ while( i_length >= 3 && !p_es->i_sl_es_id ) { p_es->i_sl_es_id = ( p_dr->p_data[0] << 8 ) | p_dr->p_data[1]; /* FIXME: map all ids and flexmux channels */ i_length -= 3; msg_Dbg( p_demux, " - found FMC_descriptor mapping es_id=%"PRIu16, p_es->i_sl_es_id ); } break; case 0x1e: /* SL Descriptor */ if( i_length == 2 ) { p_es->i_sl_es_id = ( p_dr->p_data[0] << 8 ) | p_dr->p_data[1]; msg_Dbg( p_demux, " - found SL_descriptor mapping es_id=%"PRIu16, p_es->i_sl_es_id ); } break; default: break; } p_dr = p_dr->p_next; } if( p_es->i_sl_es_id ) { const es_mpeg4_descriptor_t *p_mpeg4desc = GetMPEG4DescByEsId( p_pmt, p_es->i_sl_es_id ); if( p_mpeg4desc && p_mpeg4desc->b_ok ) { if( !SetupISO14496LogicalStream( p_demux, &p_mpeg4desc->dec_descr, &p_es->fmt ) ) msg_Dbg( p_demux, " - IOD not yet available for es_id=%"PRIu16, p_es->i_sl_es_id ); } } else { switch( p_dvbpsies->i_type ) { /* non fatal, set by packetizer */ case 0x0f: /* ADTS */ case 0x11: /* LOAS */ msg_Info( p_demux, " - SL/FMC descriptor not found/matched" ); break; default: msg_Err( p_demux, " - SL/FMC descriptor not found/matched" ); break; } } } typedef struct { int i_type; int i_magazine; int i_page; char p_iso639[3]; } ts_teletext_page_t; static void PMTSetupEsTeletext( demux_t *p_demux, ts_pes_t *p_pes, const dvbpsi_pmt_es_t *p_dvbpsies ) { es_format_t *p_fmt = &p_pes->es.fmt; ts_teletext_page_t p_page[2 * 64 + 20]; unsigned i_page = 0; /* Gather pages information */ for( unsigned i_tag_idx = 0; i_tag_idx < 2; i_tag_idx++ ) { dvbpsi_descriptor_t *p_dr = PMTEsFindDescriptor( p_dvbpsies, i_tag_idx == 0 ? 0x46 : 0x56 ); if( !p_dr ) continue; dvbpsi_teletext_dr_t *p_sub = dvbpsi_DecodeTeletextDr( p_dr ); if( !p_sub ) continue; for( int i = 0; i < p_sub->i_pages_number; i++ ) { const dvbpsi_teletextpage_t *p_src = &p_sub->p_pages[i]; if( p_src->i_teletext_type >= 0x06 ) continue; assert( i_page < sizeof(p_page)/sizeof(*p_page) ); ts_teletext_page_t *p_dst = &p_page[i_page++]; p_dst->i_type = p_src->i_teletext_type; p_dst->i_magazine = p_src->i_teletext_magazine_number ? 
p_src->i_teletext_magazine_number : 8; p_dst->i_page = p_src->i_teletext_page_number; memcpy( p_dst->p_iso639, p_src->i_iso6392_language_code, 3 ); } } dvbpsi_descriptor_t *p_dr = PMTEsFindDescriptor( p_dvbpsies, 0x59 ); if( p_dr ) { dvbpsi_subtitling_dr_t *p_sub = dvbpsi_DecodeSubtitlingDr( p_dr ); for( int i = 0; p_sub && i < p_sub->i_subtitles_number; i++ ) { dvbpsi_subtitle_t *p_src = &p_sub->p_subtitle[i]; if( p_src->i_subtitling_type < 0x01 || p_src->i_subtitling_type > 0x03 ) continue; assert( i_page < sizeof(p_page)/sizeof(*p_page) ); ts_teletext_page_t *p_dst = &p_page[i_page++]; switch( p_src->i_subtitling_type ) { case 0x01: p_dst->i_type = 0x02; break; default: p_dst->i_type = 0x03; break; } /* FIXME check if it is the right split */ p_dst->i_magazine = (p_src->i_composition_page_id >> 8) ? (p_src->i_composition_page_id >> 8) : 8; p_dst->i_page = p_src->i_composition_page_id & 0xff; memcpy( p_dst->p_iso639, p_src->i_iso6392_language_code, 3 ); } } /* */ es_format_Init( p_fmt, SPU_ES, VLC_CODEC_TELETEXT ); if( !p_demux->p_sys->b_split_es || i_page <= 0 ) { p_fmt->subs.teletext.i_magazine = -1; p_fmt->subs.teletext.i_page = 0; p_fmt->psz_description = strdup( vlc_gettext(ppsz_teletext_type[1]) ); dvbpsi_descriptor_t *p_dr; p_dr = PMTEsFindDescriptor( p_dvbpsies, 0x46 ); if( !p_dr ) p_dr = PMTEsFindDescriptor( p_dvbpsies, 0x56 ); if( !p_demux->p_sys->b_split_es && p_dr && p_dr->i_length > 0 ) { /* Descriptor pass-through */ p_fmt->p_extra = malloc( p_dr->i_length ); if( p_fmt->p_extra ) { p_fmt->i_extra = p_dr->i_length; memcpy( p_fmt->p_extra, p_dr->p_data, p_dr->i_length ); } } } else { for( unsigned i = 0; i < i_page; i++ ) { ts_pes_es_t *p_page_es; /* */ if( i == 0 ) { p_page_es = &p_pes->es; } else { p_page_es = calloc( 1, sizeof(*p_page_es) ); if( !p_page_es ) break; es_format_Copy( &p_page_es->fmt, p_fmt ); free( p_page_es->fmt.psz_language ); free( p_page_es->fmt.psz_description ); p_page_es->fmt.psz_language = NULL; p_page_es->fmt.psz_description = NULL; ARRAY_APPEND( p_pes->extra_es, p_page_es ); } /* */ const ts_teletext_page_t *p = &p_page[i]; p_page_es->fmt.i_priority = (p->i_type == 0x02 || p->i_type == 0x05) ? 
ES_PRIORITY_SELECTABLE_MIN : ES_PRIORITY_NOT_DEFAULTABLE; p_page_es->fmt.psz_language = strndup( p->p_iso639, 3 ); p_page_es->fmt.psz_description = strdup(vlc_gettext(ppsz_teletext_type[p->i_type])); p_page_es->fmt.subs.teletext.i_magazine = p->i_magazine; p_page_es->fmt.subs.teletext.i_page = p->i_page; msg_Dbg( p_demux, " * ttxt type=%s lan=%s page=%d%02x", p_page_es->fmt.psz_description, p_page_es->fmt.psz_language, p->i_magazine, p->i_page ); } } } static void PMTSetupEsDvbSubtitle( demux_t *p_demux, ts_pes_t *p_pes, const dvbpsi_pmt_es_t *p_dvbpsies ) { es_format_t *p_fmt = &p_pes->es.fmt; es_format_Init( p_fmt, SPU_ES, VLC_CODEC_DVBS ); dvbpsi_descriptor_t *p_dr = PMTEsFindDescriptor( p_dvbpsies, 0x59 ); int i_page = 0; dvbpsi_subtitling_dr_t *p_sub = dvbpsi_DecodeSubtitlingDr( p_dr ); for( int i = 0; p_sub && i < p_sub->i_subtitles_number; i++ ) { const int i_type = p_sub->p_subtitle[i].i_subtitling_type; if( ( i_type >= 0x10 && i_type <= 0x14 ) || ( i_type >= 0x20 && i_type <= 0x24 ) ) i_page++; } if( !p_demux->p_sys->b_split_es || i_page <= 0 ) { p_fmt->subs.dvb.i_id = -1; p_fmt->psz_description = strdup( _("DVB subtitles") ); if( !p_demux->p_sys->b_split_es && p_dr && p_dr->i_length > 0 ) { /* Descriptor pass-through */ p_fmt->p_extra = malloc( p_dr->i_length ); if( p_fmt->p_extra ) { p_fmt->i_extra = p_dr->i_length; memcpy( p_fmt->p_extra, p_dr->p_data, p_dr->i_length ); } } } else { for( int i = 0; i < p_sub->i_subtitles_number; i++ ) { ts_pes_es_t *p_subs_es; /* */ if( i == 0 ) { p_subs_es = &p_pes->es; } else { p_subs_es = malloc( sizeof(*p_subs_es) ); if( !p_subs_es ) break; es_format_Copy( &p_subs_es->fmt, p_fmt ); free( p_subs_es->fmt.psz_language ); free( p_subs_es->fmt.psz_description ); p_subs_es->fmt.psz_language = NULL; p_subs_es->fmt.psz_description = NULL; ARRAY_APPEND( p_pes->extra_es, p_subs_es ); } /* */ const dvbpsi_subtitle_t *p = &p_sub->p_subtitle[i]; p_subs_es->fmt.psz_language = strndup( (char *)p->i_iso6392_language_code, 3 ); switch( p->i_subtitling_type ) { case 0x10: /* unspec. */ case 0x11: /* 4:3 */ case 0x12: /* 16:9 */ case 0x13: /* 2.21:1 */ case 0x14: /* HD monitor */ p_subs_es->fmt.psz_description = strdup( _("DVB subtitles") ); break; case 0x20: /* Hearing impaired unspec. */ case 0x21: /* h.i. 4:3 */ case 0x22: /* h.i. 16:9 */ case 0x23: /* h.i. 2.21:1 */ case 0x24: /* h.i. 
HD monitor */ p_subs_es->fmt.psz_description = strdup( _("DVB subtitles: hearing impaired") ); break; default: break; } /* Hack, FIXME */ p_subs_es->fmt.subs.dvb.i_id = ( p->i_composition_page_id << 0 ) | ( p->i_ancillary_page_id << 16 ); } } } static int vlc_ceil_log2( const unsigned int val ) { int n = 31 - clz(val); if ((1U << n) != val) n++; return n; } static void OpusSetup(demux_t *demux, uint8_t *p, size_t len, es_format_t *p_fmt) { OpusHeader h; /* default mapping */ static const unsigned char map[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }; memcpy(h.stream_map, map, sizeof(map)); int csc, mapping; int channels = 0; int stream_count = 0; int ccc = p[1]; // channel_config_code if (ccc <= 8) { channels = ccc; if (channels) mapping = channels > 2; else { mapping = 255; channels = 2; // dual mono } static const uint8_t p_csc[8] = { 0, 1, 1, 2, 2, 2, 3, 3 }; csc = p_csc[channels - 1]; stream_count = channels - csc; static const uint8_t map[6][7] = { { 2,1 }, { 1,2,3 }, { 4,1,2,3 }, { 4,1,2,3,5 }, { 4,1,2,3,5,6 }, { 6,1,2,3,4,5,7 }, }; if (channels > 2) memcpy(&h.stream_map[1], map[channels-3], channels - 1); } else if (ccc == 0x81) { if (len < 4) goto explicit_config_too_short; channels = p[2]; mapping = p[3]; csc = 0; if (mapping) { bs_t s; bs_init(&s, &p[4], len - 4); stream_count = 1; if (channels) { int bits = vlc_ceil_log2(channels); if (s.i_left < bits) goto explicit_config_too_short; stream_count = bs_read(&s, bits) + 1; bits = vlc_ceil_log2(stream_count + 1); if (s.i_left < bits) goto explicit_config_too_short; csc = bs_read(&s, bits); } int channel_bits = vlc_ceil_log2(stream_count + csc + 1); if (s.i_left < channels * channel_bits) goto explicit_config_too_short; unsigned char silence = (1U << (stream_count + csc + 1)) - 1; for (int i = 0; i < channels; i++) { unsigned char m = bs_read(&s, channel_bits); if (m == silence) m = 0xff; h.stream_map[i] = m; } } } else if (ccc >= 0x80 && ccc <= 0x88) { channels = ccc - 0x80; if (channels) mapping = 1; else { mapping = 255; channels = 2; // dual mono } csc = 0; stream_count = channels; } else { msg_Err(demux, "Opus channel configuration 0x%.2x is reserved", ccc); } if (!channels) { msg_Err(demux, "Opus channel configuration 0x%.2x not supported yet", p[1]); return; } opus_prepare_header(channels, 0, &h); h.preskip = 0; h.input_sample_rate = 48000; h.nb_coupled = csc; h.nb_streams = channels - csc; h.channel_mapping = mapping; if (h.channels) { opus_write_header((uint8_t**)&p_fmt->p_extra, &p_fmt->i_extra, &h, NULL /* FIXME */); if (p_fmt->p_extra) { p_fmt->i_cat = AUDIO_ES; p_fmt->i_codec = VLC_CODEC_OPUS; p_fmt->audio.i_channels = h.channels; p_fmt->audio.i_rate = 48000; } } return; explicit_config_too_short: msg_Err(demux, "Opus descriptor too short"); } static void PMTSetupEs0x06( demux_t *p_demux, ts_pes_t *p_pes, const dvbpsi_pmt_es_t *p_dvbpsies ) { es_format_t *p_fmt = &p_pes->es.fmt; dvbpsi_descriptor_t *p_subs_dr = PMTEsFindDescriptor( p_dvbpsies, 0x59 ); dvbpsi_descriptor_t *desc; if( PMTEsHasRegistration( p_demux, p_dvbpsies, "AC-3" ) || PMTEsFindDescriptor( p_dvbpsies, 0x6a ) || PMTEsFindDescriptor( p_dvbpsies, 0x81 ) ) { p_fmt->i_cat = AUDIO_ES; p_fmt->i_codec = VLC_CODEC_A52; } else if( (desc = PMTEsFindDescriptor( p_dvbpsies, 0x7f ) ) && desc->i_length >= 2 && PMTEsHasRegistration(p_demux, p_dvbpsies, "Opus")) { OpusSetup(p_demux, desc->p_data, desc->i_length, p_fmt); } else if( PMTEsFindDescriptor( p_dvbpsies, 0x7a ) ) { /* DVB with stream_type 0x06 (ETS EN 300 468) */ p_fmt->i_cat = AUDIO_ES; p_fmt->i_codec = VLC_CODEC_EAC3; } 
else if( PMTEsHasRegistration( p_demux, p_dvbpsies, "DTS1" ) || PMTEsHasRegistration( p_demux, p_dvbpsies, "DTS2" ) || PMTEsHasRegistration( p_demux, p_dvbpsies, "DTS3" ) || PMTEsFindDescriptor( p_dvbpsies, 0x73 ) ) { /*registration descriptor(ETSI TS 101 154 Annex F)*/ p_fmt->i_cat = AUDIO_ES; p_fmt->i_codec = VLC_CODEC_DTS; } else if( PMTEsHasRegistration( p_demux, p_dvbpsies, "BSSD" ) && !p_subs_dr ) { /* BSSD is AES3 DATA, but could also be subtitles * we need to check for secondary descriptor then s*/ p_fmt->i_cat = AUDIO_ES; p_fmt->b_packetized = true; p_fmt->i_codec = VLC_CODEC_302M; } else if( PMTEsHasRegistration( p_demux, p_dvbpsies, "HEVC" ) ) { p_fmt->i_cat = VIDEO_ES; p_fmt->i_codec = VLC_CODEC_HEVC; } else if ( p_demux->p_sys->arib.e_mode == ARIBMODE_ENABLED ) { /* Lookup our data component descriptor first ARIB STD B10 6.4 */ dvbpsi_descriptor_t *p_dr = PMTEsFindDescriptor( p_dvbpsies, 0xFD ); /* and check that it maps to something ARIB STD B14 Table 5.1/5.2 */ if ( p_dr && p_dr->i_length >= 2 ) { if( !memcmp( p_dr->p_data, "\x00\x08", 2 ) && ( PMTEsHasComponentTag( p_dvbpsies, 0x30 ) || PMTEsHasComponentTag( p_dvbpsies, 0x31 ) || PMTEsHasComponentTag( p_dvbpsies, 0x32 ) || PMTEsHasComponentTag( p_dvbpsies, 0x33 ) || PMTEsHasComponentTag( p_dvbpsies, 0x34 ) || PMTEsHasComponentTag( p_dvbpsies, 0x35 ) || PMTEsHasComponentTag( p_dvbpsies, 0x36 ) || PMTEsHasComponentTag( p_dvbpsies, 0x37 ) ) ) { es_format_Init( p_fmt, SPU_ES, VLC_CODEC_ARIB_A ); p_fmt->psz_language = strndup ( "jpn", 3 ); p_fmt->psz_description = strdup( _("ARIB subtitles") ); } else if( !memcmp( p_dr->p_data, "\x00\x12", 2 ) && ( PMTEsHasComponentTag( p_dvbpsies, 0x87 ) || PMTEsHasComponentTag( p_dvbpsies, 0x88 ) ) ) { es_format_Init( p_fmt, SPU_ES, VLC_CODEC_ARIB_C ); p_fmt->psz_language = strndup ( "jpn", 3 ); p_fmt->psz_description = strdup( _("ARIB subtitles") ); } } } else { /* Subtitle/Teletext/VBI fallbacks */ dvbpsi_subtitling_dr_t *p_sub; if( p_subs_dr && ( p_sub = dvbpsi_DecodeSubtitlingDr( p_subs_dr ) ) ) { for( int i = 0; i < p_sub->i_subtitles_number; i++ ) { if( p_fmt->i_cat != UNKNOWN_ES ) break; switch( p_sub->p_subtitle[i].i_subtitling_type ) { case 0x01: /* EBU Teletext subtitles */ case 0x02: /* Associated EBU Teletext */ case 0x03: /* VBI data */ PMTSetupEsTeletext( p_demux, p_pes, p_dvbpsies ); break; case 0x10: /* DVB Subtitle (normal) with no monitor AR critical */ case 0x11: /* ... on 4:3 AR monitor */ case 0x12: /* ... on 16:9 AR monitor */ case 0x13: /* ... on 2.21:1 AR monitor */ case 0x14: /* ... for display on a high definition monitor */ case 0x20: /* DVB Subtitle (impaired) with no monitor AR critical */ case 0x21: /* ... on 4:3 AR monitor */ case 0x22: /* ... on 16:9 AR monitor */ case 0x23: /* ... on 2.21:1 AR monitor */ case 0x24: /* ... for display on a high definition monitor */ PMTSetupEsDvbSubtitle( p_demux, p_pes, p_dvbpsies ); break; default: msg_Err( p_demux, "Unrecognized DVB subtitle type (0x%x)", p_sub->p_subtitle[i].i_subtitling_type ); break; } } } if( p_fmt->i_cat == UNKNOWN_ES && ( PMTEsFindDescriptor( p_dvbpsies, 0x45 ) || /* VBI Data descriptor */ PMTEsFindDescriptor( p_dvbpsies, 0x46 ) || /* VBI Teletext descriptor */ PMTEsFindDescriptor( p_dvbpsies, 0x56 ) ) ) /* EBU Teletext descriptor */ { /* Teletext/VBI */ PMTSetupEsTeletext( p_demux, p_pes, p_dvbpsies ); } } /* FIXME is it useful ? 
*/ if( PMTEsFindDescriptor( p_dvbpsies, 0x52 ) ) { dvbpsi_descriptor_t *p_dr = PMTEsFindDescriptor( p_dvbpsies, 0x52 ); dvbpsi_stream_identifier_dr_t *p_si = dvbpsi_DecodeStreamIdentifierDr( p_dr ); msg_Dbg( p_demux, " * Stream Component Identifier: %d", p_si->i_component_tag ); } } static void PMTSetupEs0xEA( demux_t *p_demux, ts_pes_es_t *p_es, const dvbpsi_pmt_es_t *p_dvbpsies ) { /* Registration Descriptor */ if( !PMTEsHasRegistration( p_demux, p_dvbpsies, "VC-1" ) ) { msg_Err( p_demux, "Registration descriptor not found or invalid" ); return; } es_format_t *p_fmt = &p_es->fmt; /* registration descriptor for VC-1 (SMPTE rp227) */ p_fmt->i_cat = VIDEO_ES; p_fmt->i_codec = VLC_CODEC_VC1; /* XXX With Simple and Main profile the SEQUENCE * header is modified: video width and height are * inserted just after the start code as 2 int16_t * The packetizer will take care of that. */ } static void PMTSetupEs0xD1( demux_t *p_demux, ts_pes_es_t *p_es, const dvbpsi_pmt_es_t *p_dvbpsies ) { /* Registration Descriptor */ if( !PMTEsHasRegistration( p_demux, p_dvbpsies, "drac" ) ) { msg_Err( p_demux, "Registration descriptor not found or invalid" ); return; } es_format_t *p_fmt = &p_es->fmt; /* registration descriptor for Dirac * (backwards compatable with VC-2 (SMPTE Sxxxx:2008)) */ p_fmt->i_cat = VIDEO_ES; p_fmt->i_codec = VLC_CODEC_DIRAC; } static void PMTSetupEs0xA0( demux_t *p_demux, ts_pes_es_t *p_es, const dvbpsi_pmt_es_t *p_dvbpsies ) { /* MSCODEC sent by vlc */ dvbpsi_descriptor_t *p_dr = PMTEsFindDescriptor( p_dvbpsies, 0xa0 ); if( !p_dr || p_dr->i_length < 10 ) { msg_Warn( p_demux, "private MSCODEC (vlc) without bih private descriptor" ); return; } es_format_t *p_fmt = &p_es->fmt; p_fmt->i_cat = VIDEO_ES; p_fmt->i_codec = VLC_FOURCC( p_dr->p_data[0], p_dr->p_data[1], p_dr->p_data[2], p_dr->p_data[3] ); p_fmt->video.i_width = GetWBE( &p_dr->p_data[4] ); p_fmt->video.i_height = GetWBE( &p_dr->p_data[6] ); p_fmt->i_extra = GetWBE( &p_dr->p_data[8] ); if( p_fmt->i_extra > 0 ) { p_fmt->p_extra = malloc( p_fmt->i_extra ); if( p_fmt->p_extra ) memcpy( p_fmt->p_extra, &p_dr->p_data[10], __MIN( p_fmt->i_extra, p_dr->i_length - 10 ) ); else p_fmt->i_extra = 0; } /* For such stream we will gather them ourself and don't launch a * packetizer. * Yes it's ugly but it's the only way to have DIV3 working */ p_fmt->b_packetized = true; } static void PMTSetupEs0x83( const dvbpsi_pmt_t *p_pmt, ts_pes_es_t *p_es, int i_pid ) { /* WiDi broadcasts without registration on PMT 0x1, PCR 0x1000 and * with audio track pid being 0x1100..0x11FF */ if ( p_pmt->i_program_number == 0x1 && p_pmt->i_pcr_pid == 0x1000 && ( i_pid >> 8 ) == 0x11 ) { /* Not enough ? 
might contain 0x83 private descriptor, 2 bytes 0x473F */ es_format_Init( &p_es->fmt, AUDIO_ES, VLC_CODEC_WIDI_LPCM ); } else es_format_Init( &p_es->fmt, AUDIO_ES, VLC_CODEC_DVD_LPCM ); } static bool PMTSetupEsHDMV( demux_t *p_demux, ts_pes_es_t *p_es, const dvbpsi_pmt_es_t *p_dvbpsies ) { es_format_t *p_fmt = &p_es->fmt; /* Blu-Ray mapping */ switch( p_dvbpsies->i_type ) { case 0x80: p_fmt->i_cat = AUDIO_ES; p_fmt->i_codec = VLC_CODEC_BD_LPCM; break; case 0x81: p_fmt->i_cat = AUDIO_ES; p_fmt->i_codec = VLC_CODEC_A52; break; case 0x82: case 0x85: /* DTS-HD High resolution audio */ case 0x86: /* DTS-HD Master audio */ case 0xA2: /* Secondary DTS audio */ p_fmt->i_cat = AUDIO_ES; p_fmt->i_codec = VLC_CODEC_DTS; break; case 0x83: /* TrueHD AC3 */ p_fmt->i_cat = AUDIO_ES; p_fmt->i_codec = VLC_CODEC_TRUEHD; break; case 0x84: /* E-AC3 */ case 0xA1: /* Secondary E-AC3 */ p_fmt->i_cat = AUDIO_ES; p_fmt->i_codec = VLC_CODEC_EAC3; break; case 0x90: /* Presentation graphics */ p_fmt->i_cat = SPU_ES; p_fmt->i_codec = VLC_CODEC_BD_PG; break; case 0x91: /* Interactive graphics */ case 0x92: /* Subtitle */ return false; case 0xEA: p_fmt->i_cat = VIDEO_ES; p_fmt->i_codec = VLC_CODEC_VC1; break; default: msg_Info( p_demux, "HDMV registration not implemented for pid 0x%x type 0x%x", p_dvbpsies->i_pid, p_dvbpsies->i_type ); return false; break; } return true; } static bool PMTSetupEsRegistration( demux_t *p_demux, ts_pes_es_t *p_es, const dvbpsi_pmt_es_t *p_dvbpsies ) { static const struct { char psz_tag[5]; int i_cat; vlc_fourcc_t i_codec; } p_regs[] = { { "AC-3", AUDIO_ES, VLC_CODEC_A52 }, { "DTS1", AUDIO_ES, VLC_CODEC_DTS }, { "DTS2", AUDIO_ES, VLC_CODEC_DTS }, { "DTS3", AUDIO_ES, VLC_CODEC_DTS }, { "BSSD", AUDIO_ES, VLC_CODEC_302M }, { "VC-1", VIDEO_ES, VLC_CODEC_VC1 }, { "drac", VIDEO_ES, VLC_CODEC_DIRAC }, { "", UNKNOWN_ES, 0 } }; es_format_t *p_fmt = &p_es->fmt; for( int i = 0; p_regs[i].i_cat != UNKNOWN_ES; i++ ) { if( PMTEsHasRegistration( p_demux, p_dvbpsies, p_regs[i].psz_tag ) ) { p_fmt->i_cat = p_regs[i].i_cat; p_fmt->i_codec = p_regs[i].i_codec; if (p_dvbpsies->i_type == 0x87) p_fmt->i_codec = VLC_CODEC_EAC3; return true; } } return false; } static char *GetAudioTypeDesc(demux_t *p_demux, int type) { static const char *audio_type[] = { NULL, N_("clean effects"), N_("hearing impaired"), N_("visual impaired commentary"), }; if (type < 0 || type > 3) msg_Dbg( p_demux, "unknown audio type: %d", type); else if (type > 0) return strdup(audio_type[type]); return NULL; } static void PMTParseEsIso639( demux_t *p_demux, ts_pes_es_t *p_es, const dvbpsi_pmt_es_t *p_dvbpsies ) { /* get language descriptor */ dvbpsi_descriptor_t *p_dr = PMTEsFindDescriptor( p_dvbpsies, 0x0a ); if( !p_dr ) return; dvbpsi_iso639_dr_t *p_decoded = dvbpsi_DecodeISO639Dr( p_dr ); if( !p_decoded ) { msg_Err( p_demux, "Failed to decode a ISO 639 descriptor" ); return; } #if defined(DR_0A_API_VER) && (DR_0A_API_VER >= 2) p_es->fmt.psz_language = malloc( 4 ); if( p_es->fmt.psz_language ) { memcpy( p_es->fmt.psz_language, p_decoded->code[0].iso_639_code, 3 ); p_es->fmt.psz_language[3] = 0; msg_Dbg( p_demux, "found language: %s", p_es->fmt.psz_language); } int type = p_decoded->code[0].i_audio_type; p_es->fmt.psz_description = GetAudioTypeDesc(p_demux, type); if (type == 0) p_es->fmt.i_priority = ES_PRIORITY_SELECTABLE_MIN + 1; // prioritize normal audio tracks p_es->fmt.i_extra_languages = p_decoded->i_code_count-1; if( p_es->fmt.i_extra_languages > 0 ) p_es->fmt.p_extra_languages = malloc( sizeof(*p_es->fmt.p_extra_languages) * 
p_es->fmt.i_extra_languages ); if( p_es->fmt.p_extra_languages ) { for( unsigned i = 0; i < p_es->fmt.i_extra_languages; i++ ) { p_es->fmt.p_extra_languages[i].psz_language = malloc(4); if( p_es->fmt.p_extra_languages[i].psz_language ) { memcpy( p_es->fmt.p_extra_languages[i].psz_language, p_decoded->code[i+1].iso_639_code, 3 ); p_es->fmt.p_extra_languages[i].psz_language[3] = '\0'; } int type = p_decoded->code[i].i_audio_type; p_es->fmt.p_extra_languages[i].psz_description = GetAudioTypeDesc(p_demux, type); } } #else p_es->fmt.psz_language = malloc( 4 ); if( p_es->fmt.psz_language ) { memcpy( p_es->fmt.psz_language, p_decoded->i_iso_639_code, 3 ); p_es->fmt.psz_language[3] = 0; } #endif } static inline void SetExtraESGroupAndID( demux_sys_t *p_sys, es_format_t *p_fmt, const es_format_t *p_parent_fmt ) { if ( p_sys->b_es_id_pid ) /* pid is 13 bits */ p_fmt->i_id = (p_sys->i_next_extraid++ << 13) | p_parent_fmt->i_id; p_fmt->i_group = p_parent_fmt->i_group; } static void AddAndCreateES( demux_t *p_demux, ts_pid_t *pid, bool b_create_delayed ) { demux_sys_t *p_sys = p_demux->p_sys; if( b_create_delayed ) p_sys->es_creation = CREATE_ES; if( pid && p_sys->es_creation == CREATE_ES ) { /* FIXME: other owners / shared pid */ pid->u.p_pes->es.id = es_out_Add( p_demux->out, &pid->u.p_pes->es.fmt ); for( int i = 0; i < pid->u.p_pes->extra_es.i_size; i++ ) { es_format_t *p_fmt = &pid->u.p_pes->extra_es.p_elems[i]->fmt; SetExtraESGroupAndID( p_sys, p_fmt, &pid->u.p_pes->es.fmt ); pid->u.p_pes->extra_es.p_elems[i]->id = es_out_Add( p_demux->out, p_fmt ); } p_sys->i_pmt_es += 1 + pid->u.p_pes->extra_es.i_size; /* Update the default program == first created ES group */ if( p_sys->b_default_selection ) { p_sys->b_default_selection = false; assert(p_sys->programs.i_size == 1); if( p_sys->programs.p_elems[0] != pid->p_parent->u.p_pmt->i_number ) p_sys->programs.p_elems[0] = pid->p_parent->u.p_pmt->i_number; msg_Dbg( p_demux, "Default program is %d", pid->p_parent->u.p_pmt->i_number ); } } if( b_create_delayed ) { ts_pat_t *p_pat = GetPID(p_sys, 0)->u.p_pat; for( int i=0; i< p_pat->programs.i_size; i++ ) { ts_pmt_t *p_pmt = p_pat->programs.p_elems[i]->u.p_pmt; for( int j=0; j<p_pmt->e_streams.i_size; j++ ) { ts_pid_t *pid = p_pmt->e_streams.p_elems[j]; if( pid->u.p_pes->es.id ) continue; pid->u.p_pes->es.id = es_out_Add( p_demux->out, &pid->u.p_pes->es.fmt ); for( int k = 0; k < pid->u.p_pes->extra_es.i_size; k++ ) { es_format_t *p_fmt = &pid->u.p_pes->extra_es.p_elems[k]->fmt; SetExtraESGroupAndID( p_sys, p_fmt, &pid->u.p_pes->es.fmt ); pid->u.p_pes->extra_es.p_elems[k]->id = es_out_Add( p_demux->out, p_fmt ); } p_sys->i_pmt_es += 1 + pid->u.p_pes->extra_es.i_size; } } } UpdatePESFilters( p_demux, p_sys->b_es_all ); } static void PMTCallBack( void *data, dvbpsi_pmt_t *p_dvbpsipmt ) { demux_t *p_demux = data; demux_sys_t *p_sys = p_demux->p_sys; ts_pid_t *pmtpid = NULL; ts_pmt_t *p_pmt = NULL; msg_Dbg( p_demux, "PMTCallBack called" ); if (unlikely(GetPID(p_sys, 0)->type != TYPE_PAT)) { assert(GetPID(p_sys, 0)->type == TYPE_PAT); dvbpsi_pmt_delete(p_dvbpsipmt); } const ts_pat_t *p_pat = GetPID(p_sys, 0)->u.p_pat; /* First find this PMT declared in PAT */ for( int i = 0; !pmtpid && i < p_pat->programs.i_size; i++ ) { const int i_pmt_prgnumber = p_pat->programs.p_elems[i]->u.p_pmt->i_number; if( i_pmt_prgnumber != TS_USER_PMT_NUMBER && i_pmt_prgnumber == p_dvbpsipmt->i_program_number ) { pmtpid = p_pat->programs.p_elems[i]; assert(pmtpid->type == TYPE_PMT); p_pmt = pmtpid->u.p_pmt; } } if( pmtpid == NULL ) { 
msg_Warn( p_demux, "unreferenced program (broken stream)" ); dvbpsi_pmt_delete(p_dvbpsipmt); return; } pmtpid->i_flags |= FLAG_SEEN; if( p_pmt->i_version != -1 && ( !p_dvbpsipmt->b_current_next || p_pmt->i_version == p_dvbpsipmt->i_version ) ) { dvbpsi_pmt_delete( p_dvbpsipmt ); return; } /* Save old es array */ DECL_ARRAY(ts_pid_t *) old_es_rm; old_es_rm.i_alloc = p_pmt->e_streams.i_alloc; old_es_rm.i_size = p_pmt->e_streams.i_size; old_es_rm.p_elems = p_pmt->e_streams.p_elems; ARRAY_INIT(p_pmt->e_streams); if( p_pmt->iod ) { ODFree( p_pmt->iod ); p_pmt->iod = NULL; } msg_Dbg( p_demux, "new PMT program number=%d version=%d pid_pcr=%d", p_dvbpsipmt->i_program_number, p_dvbpsipmt->i_version, p_dvbpsipmt->i_pcr_pid ); p_pmt->i_pid_pcr = p_dvbpsipmt->i_pcr_pid; p_pmt->i_version = p_dvbpsipmt->i_version; ValidateDVBMeta( p_demux, p_pmt->i_pid_pcr ); if( ProgramIsSelected( p_sys, p_pmt->i_number ) ) SetPIDFilter( p_sys, GetPID(p_sys, p_pmt->i_pid_pcr), true ); /* Set demux filter */ /* Parse PMT descriptors */ ts_pmt_registration_type_t registration_type = TS_PMT_REGISTRATION_NONE; dvbpsi_descriptor_t *p_dr; /* First pass for standard detection */ if ( p_sys->arib.e_mode == ARIBMODE_AUTO ) { int i_arib_flags = 0; /* Descriptors can be repeated */ for( p_dr = p_dvbpsipmt->p_first_descriptor; p_dr != NULL; p_dr = p_dr->p_next ) { switch(p_dr->i_tag) { case 0x09: { dvbpsi_ca_dr_t *p_cadr = dvbpsi_DecodeCADr( p_dr ); i_arib_flags |= (p_cadr->i_ca_system_id == 0x05); } break; case 0xF6: i_arib_flags |= 1 << 1; break; case 0xC1: i_arib_flags |= 1 << 2; break; default: break; } } if ( i_arib_flags == 0x07 ) //0b111 p_sys->arib.e_mode = ARIBMODE_ENABLED; } for( p_dr = p_dvbpsipmt->p_first_descriptor; p_dr != NULL; p_dr = p_dr->p_next ) { /* special descriptors handling */ switch(p_dr->i_tag) { case 0x1d: /* We have found an IOD descriptor */ msg_Dbg( p_demux, " * PMT descriptor : IOD (0x1d)" ); p_pmt->iod = IODNew( VLC_OBJECT(p_demux), p_dr->i_length, p_dr->p_data ); break; case 0x9: msg_Dbg( p_demux, " * PMT descriptor : CA (0x9) SysID 0x%x", (p_dr->p_data[0] << 8) | p_dr->p_data[1] ); break; case 0x5: /* Registration Descriptor */ if( p_dr->i_length != 4 ) { msg_Warn( p_demux, " * PMT invalid Registration Descriptor" ); } else { msg_Dbg( p_demux, " * PMT descriptor : registration %4.4s", p_dr->p_data ); if( !memcmp( p_dr->p_data, "HDMV", 4 ) || !memcmp( p_dr->p_data, "HDPR", 4 ) ) registration_type = TS_PMT_REGISTRATION_HDMV; /* Blu-Ray */ } break; case 0x0f: msg_Dbg( p_demux, " * PMT descriptor : Private Data (0x0f)" ); break; case 0xC1: msg_Dbg( p_demux, " * PMT descriptor : Digital copy control (0xC1)" ); break; case 0x88: /* EACEM Simulcast HD Logical channels ordering */ msg_Dbg( p_demux, " * descriptor : EACEM Simulcast HD" ); /* TODO: apply visibility flags */ break; default: msg_Dbg( p_demux, " * PMT descriptor : unknown (0x%x)", p_dr->i_tag ); } } dvbpsi_pmt_es_t *p_dvbpsies; for( p_dvbpsies = p_dvbpsipmt->p_first_es; p_dvbpsies != NULL; p_dvbpsies = p_dvbpsies->p_next ) { bool b_reusing_pid = false; ts_pes_t *p_pes; ts_pid_t *pespid = GetPID(p_sys, p_dvbpsies->i_pid); if ( pespid->type == TYPE_PES && pespid->p_parent->u.p_pmt->i_number != p_pmt->i_number ) { msg_Warn( p_demux, " * PMT wants to get a share or pid %d (unsupported)", pespid->i_pid ); continue; } /* Find out if the PID was already declared */ for( int i = 0; i < old_es_rm.i_size; i++ ) { if( old_es_rm.p_elems[i]->i_pid == p_dvbpsies->i_pid ) { b_reusing_pid = true; break; } } ValidateDVBMeta( p_demux, p_dvbpsies->i_pid ); char 
const * psz_typedesc = ""; switch(p_dvbpsies->i_type) { case 0x00: psz_typedesc = "ISO/IEC Reserved"; break; case 0x01: psz_typedesc = "ISO/IEC 11172 Video"; break; case 0x02: psz_typedesc = "ISO/IEC 13818-2 Video or ISO/IEC 11172-2 constrained parameter video stream"; break; case 0x03: psz_typedesc = "ISO/IEC 11172 Audio"; break; case 0x04: psz_typedesc = "ISO/IEC 13818-3 Audio"; break; case 0x05: psz_typedesc = "ISO/IEC 13818-1 private_sections"; break; case 0x06: psz_typedesc = "ISO/IEC 13818-1 PES packets containing private data"; break; case 0x07: psz_typedesc = "ISO/IEC 13522 MHEG"; break; case 0x08: psz_typedesc = "ISO/IEC 13818-1 Annex A DSM CC"; break; case 0x09: psz_typedesc = "ITU-T Rec. H.222.1"; break; case 0x0A: psz_typedesc = "ISO/IEC 13818-6 type A"; break; case 0x0B: psz_typedesc = "ISO/IEC 13818-6 type B"; break; case 0x0C: psz_typedesc = "ISO/IEC 13818-6 type C"; break; case 0x0D: psz_typedesc = "ISO/IEC 13818-6 type D"; break; case 0x0E: psz_typedesc = "ISO/IEC 13818-1 auxiliary"; break; case 0x12: psz_typedesc = "ISO/IEC 14496-1 SL-packetized or FlexMux stream carried in PES packets"; break; case 0x13: psz_typedesc = "ISO/IEC 14496-1 SL-packetized or FlexMux stream carried in sections"; break; default: if (p_dvbpsies->i_type >= 0x0F && p_dvbpsies->i_type <=0x7F) psz_typedesc = "ISO/IEC 13818-1 Reserved"; else psz_typedesc = "User Private"; } msg_Dbg( p_demux, " * pid=%d type=0x%x %s", p_dvbpsies->i_pid, p_dvbpsies->i_type, psz_typedesc ); for( p_dr = p_dvbpsies->p_first_descriptor; p_dr != NULL; p_dr = p_dr->p_next ) { msg_Dbg( p_demux, " - descriptor tag 0x%x", p_dr->i_tag ); } if ( !PIDSetup( p_demux, TYPE_PES, pespid, pmtpid ) ) { msg_Warn( p_demux, " * pid=%d type=0x%x %s (skipped)", p_dvbpsies->i_pid, p_dvbpsies->i_type, psz_typedesc ); continue; } else { if( b_reusing_pid ) { p_pes = ts_pes_New( p_demux ); if( !p_pes ) continue; } else { p_pes = pespid->u.p_pes; } } ARRAY_APPEND( p_pmt->e_streams, pespid ); PIDFillFormat( &p_pes->es.fmt, p_dvbpsies->i_type, &p_pes->data_type ); p_pes->i_stream_type = p_dvbpsies->i_type; pespid->i_flags |= SEEN(GetPID(p_sys, p_dvbpsies->i_pid)); bool b_registration_applied = false; if ( p_dvbpsies->i_type >= 0x80 ) /* non standard, extensions */ { if ( registration_type == TS_PMT_REGISTRATION_HDMV ) { if (( b_registration_applied = PMTSetupEsHDMV( p_demux, &p_pes->es, p_dvbpsies ) )) msg_Dbg( p_demux, " + HDMV registration applied to pid %d type 0x%x", p_dvbpsies->i_pid, p_dvbpsies->i_type ); } else { if (( b_registration_applied = PMTSetupEsRegistration( p_demux, &p_pes->es, p_dvbpsies ) )) msg_Dbg( p_demux, " + registration applied to pid %d type 0x%x", p_dvbpsies->i_pid, p_dvbpsies->i_type ); } } if ( !b_registration_applied ) { switch( p_dvbpsies->i_type ) { case 0x06: /* Handle PES private data */ PMTSetupEs0x06( p_demux, p_pes, p_dvbpsies ); break; /* All other private or reserved types */ case 0x13: /* SL in sections */ p_pes->data_type = TS_ES_DATA_TABLE_SECTION; //ft case 0x0f: case 0x10: case 0x11: case 0x12: SetupISO14496Descriptors( p_demux, &p_pes->es, p_pmt, p_dvbpsies ); break; case 0x83: /* LPCM (audio) */ PMTSetupEs0x83( p_dvbpsipmt, &p_pes->es, p_dvbpsies->i_pid ); break; case 0xa0: PMTSetupEs0xA0( p_demux, &p_pes->es, p_dvbpsies ); break; case 0xd1: PMTSetupEs0xD1( p_demux, &p_pes->es, p_dvbpsies ); break; case 0xEA: PMTSetupEs0xEA( p_demux, &p_pes->es, p_dvbpsies ); default: break; } } if( p_pes->es.fmt.i_cat == AUDIO_ES || ( p_pes->es.fmt.i_cat == SPU_ES && p_pes->es.fmt.i_codec != VLC_CODEC_DVBS && 
p_pes->es.fmt.i_codec != VLC_CODEC_TELETEXT ) ) { PMTParseEsIso639( p_demux, &p_pes->es, p_dvbpsies ); } /* Set Groups / ID */ p_pes->es.fmt.i_group = p_dvbpsipmt->i_program_number; if( p_sys->b_es_id_pid ) p_pes->es.fmt.i_id = p_dvbpsies->i_pid; if( p_pes->es.fmt.i_cat == UNKNOWN_ES ) { msg_Dbg( p_demux, " => pid %d content is *unknown*", p_dvbpsies->i_pid ); p_pes->es.fmt.psz_description = strdup( psz_typedesc ); } else { msg_Dbg( p_demux, " => pid %d has now es fcc=%4.4s", p_dvbpsies->i_pid, (char*)&p_pes->es.fmt.i_codec ); /* Check if we can avoid restarting the ES */ if( b_reusing_pid ) { /* p_pes points to a tmp pes */ if( !es_format_IsSimilar( &pespid->u.p_pes->es.fmt, &p_pes->es.fmt ) || pespid->u.p_pes->es.fmt.i_extra != p_pes->es.fmt.i_extra || ( pespid->u.p_pes->es.fmt.i_extra > 0 && memcmp( pespid->u.p_pes->es.fmt.p_extra, p_pes->es.fmt.p_extra, p_pes->es.fmt.i_extra ) ) || pespid->u.p_pes->extra_es.i_size != p_pes->extra_es.i_size || !!pespid->u.p_pes->es.fmt.psz_language != !!p_pes->es.fmt.psz_language || ( pespid->u.p_pes->es.fmt.psz_language != NULL && strcmp( pespid->u.p_pes->es.fmt.psz_language, p_pes->es.fmt.psz_language ) ) ) { /* Differs, swap then */ ts_pes_t *old = pespid->u.p_pes; pespid->u.p_pes = p_pes; AddAndCreateES( p_demux, pespid, false ); ts_pes_Del( p_demux, old ); } else ts_pes_Del( p_demux, p_pes ); // delete temp, stay with current es/es_id } else { AddAndCreateES( p_demux, pespid, false ); } } p_dr = PMTEsFindDescriptor( p_dvbpsies, 0x09 ); if( p_dr && p_dr->i_length >= 2 ) { msg_Dbg( p_demux, " * PMT descriptor : CA (0x9) SysID 0x%x", (p_dr->p_data[0] << 8) | p_dr->p_data[1] ); } } /* Set CAM descrambling */ if( !ProgramIsSelected( p_sys, p_pmt->i_number ) ) { dvbpsi_pmt_delete( p_dvbpsipmt ); } else if( stream_Control( p_sys->stream, STREAM_SET_PRIVATE_ID_CA, p_dvbpsipmt ) != VLC_SUCCESS ) { if ( p_sys->arib.e_mode == ARIBMODE_ENABLED && !p_sys->arib.b25stream ) { p_sys->arib.b25stream = stream_FilterNew( p_demux->s, "aribcam" ); p_sys->stream = ( p_sys->arib.b25stream ) ? 
p_sys->arib.b25stream : p_demux->s; if (!p_sys->arib.b25stream) dvbpsi_pmt_delete( p_dvbpsipmt ); } else dvbpsi_pmt_delete( p_dvbpsipmt ); } /* Decref or clean now unused es */ for( int i = 0; i < old_es_rm.i_size; i++ ) PIDRelease( p_demux, old_es_rm.p_elems[i] ); ARRAY_RESET( old_es_rm ); UpdatePESFilters( p_demux, p_sys->b_es_all ); if( !p_sys->b_trust_pcr ) { int i_cand = FindPCRCandidate( p_pmt ); p_pmt->i_pid_pcr = i_cand; p_pmt->pcr.b_disable = true; msg_Warn( p_demux, "PCR not trusted for program %d, set up workaround using pid %d", p_pmt->i_number, i_cand ); } /* Probe Boundaries */ if( p_sys->b_canfastseek && p_pmt->i_last_dts == -1 ) { p_pmt->i_last_dts = 0; ProbeStart( p_demux, p_pmt->i_number ); ProbeEnd( p_demux, p_pmt->i_number ); } } static int PATCheck( demux_t *p_demux, dvbpsi_pat_t *p_pat ) { /* Some Dreambox streams have all PMT set to same pid */ int i_prev_pid = -1; for( dvbpsi_pat_program_t * p_program = p_pat->p_first_program; p_program != NULL; p_program = p_program->p_next ) { if( p_program->i_pid == i_prev_pid ) { msg_Warn( p_demux, "PAT check failed: duplicate program pid %d", i_prev_pid ); return VLC_EGENERIC; } i_prev_pid = p_program->i_pid; } return VLC_SUCCESS; } static void PATCallBack( void *data, dvbpsi_pat_t *p_dvbpsipat ) { demux_t *p_demux = data; demux_sys_t *p_sys = p_demux->p_sys; dvbpsi_pat_program_t *p_program; ts_pid_t *patpid = GetPID(p_sys, 0); ts_pat_t *p_pat = GetPID(p_sys, 0)->u.p_pat; patpid->i_flags |= FLAG_SEEN; msg_Dbg( p_demux, "PATCallBack called" ); if(unlikely( GetPID(p_sys, 0)->type != TYPE_PAT )) { msg_Warn( p_demux, "PATCallBack called on invalid pid" ); return; } if( ( p_pat->i_version != -1 && ( !p_dvbpsipat->b_current_next || p_dvbpsipat->i_version == p_pat->i_version ) ) || ( p_pat->i_ts_id != -1 && p_dvbpsipat->i_ts_id != p_pat->i_ts_id ) || p_sys->b_user_pmt || PATCheck( p_demux, p_dvbpsipat ) ) { dvbpsi_pat_delete( p_dvbpsipat ); return; } msg_Dbg( p_demux, "new PAT ts_id=%d version=%d current_next=%d", p_dvbpsipat->i_ts_id, p_dvbpsipat->i_version, p_dvbpsipat->b_current_next ); /* Save old programs array */ DECL_ARRAY(ts_pid_t *) old_pmt_rm; old_pmt_rm.i_alloc = p_pat->programs.i_alloc; old_pmt_rm.i_size = p_pat->programs.i_size; old_pmt_rm.p_elems = p_pat->programs.p_elems; ARRAY_INIT(p_pat->programs); /* now create programs */ for( p_program = p_dvbpsipat->p_first_program; p_program != NULL; p_program = p_program->p_next ) { msg_Dbg( p_demux, " * number=%d pid=%d", p_program->i_number, p_program->i_pid ); if( p_program->i_number == 0 ) continue; ts_pid_t *pmtpid = GetPID(p_sys, p_program->i_pid); ValidateDVBMeta( p_demux, p_program->i_pid ); bool b_existing = (pmtpid->type == TYPE_PMT); /* create or temporary incref pid */ if( !PIDSetup( p_demux, TYPE_PMT, pmtpid, patpid ) ) { msg_Warn( p_demux, " * number=%d pid=%d (ignored)", p_program->i_number, p_program->i_pid ); continue; } if( !b_existing || pmtpid->u.p_pmt->i_number != p_program->i_number ) { if( b_existing && pmtpid->u.p_pmt->i_number != p_program->i_number ) dvbpsi_pmt_detach(pmtpid->u.p_pmt->handle); if( !dvbpsi_pmt_attach( pmtpid->u.p_pmt->handle, p_program->i_number, PMTCallBack, p_demux ) ) msg_Err( p_demux, "PATCallback failed attaching PMTCallback to program %d", p_program->i_number ); } pmtpid->u.p_pmt->i_number = p_program->i_number; ARRAY_APPEND( p_pat->programs, pmtpid ); /* Now select PID at access level */ if( p_sys->programs.i_size == 0 || ProgramIsSelected( p_sys, p_program->i_number ) ) { if( p_sys->programs.i_size == 0 ) { msg_Dbg( p_demux, 
"temporary receiving program %d", p_program->i_number ); p_sys->b_default_selection = true; ARRAY_APPEND( p_sys->programs, p_program->i_number ); } if( SetPIDFilter( p_sys, pmtpid, true ) ) p_sys->b_access_control = false; else if ( p_sys->es_creation == DELAY_ES ) p_sys->es_creation = CREATE_ES; } } p_pat->i_version = p_dvbpsipat->i_version; p_pat->i_ts_id = p_dvbpsipat->i_ts_id; for(int i=0; i<old_pmt_rm.i_size; i++) { /* decref current or release now unreferenced */ PIDRelease( p_demux, old_pmt_rm.p_elems[i] ); } ARRAY_RESET(old_pmt_rm); dvbpsi_pat_delete( p_dvbpsipat ); } static inline bool handle_Init( demux_t *p_demux, dvbpsi_t **handle ) { *handle = dvbpsi_new( &dvbpsi_messages, DVBPSI_MSG_DEBUG ); if( !*handle ) return false; (*handle)->p_sys = (void *) p_demux; return true; } static ts_pat_t *ts_pat_New( demux_t *p_demux ) { ts_pat_t *pat = malloc( sizeof( ts_pat_t ) ); if( !pat ) return NULL; if( !handle_Init( p_demux, &pat->handle ) ) { free( pat ); return NULL; } pat->i_version = -1; pat->i_ts_id = -1; ARRAY_INIT( pat->programs ); return pat; } static void ts_pat_Del( demux_t *p_demux, ts_pat_t *pat ) { if( dvbpsi_decoder_present( pat->handle ) ) dvbpsi_pat_detach( pat->handle ); dvbpsi_delete( pat->handle ); for( int i=0; i<pat->programs.i_size; i++ ) PIDRelease( p_demux, pat->programs.p_elems[i] ); ARRAY_RESET( pat->programs ); free( pat ); } static ts_pmt_t *ts_pmt_New( demux_t *p_demux ) { ts_pmt_t *pmt = malloc( sizeof( ts_pmt_t ) ); if( !pmt ) return NULL; if( !handle_Init( p_demux, &pmt->handle ) ) { free( pmt ); return NULL; } ARRAY_INIT( pmt->e_streams ); pmt->i_version = -1; pmt->i_number = -1; pmt->i_pid_pcr = 0x1FFF; pmt->iod = NULL; pmt->od.i_version = -1; ARRAY_INIT( pmt->od.objects ); pmt->i_last_dts = -1; pmt->pcr.i_current = -1; pmt->pcr.i_first = -1; pmt->pcr.b_disable = false; pmt->pcr.i_first_dts = VLC_TS_INVALID; pmt->pcr.i_pcroffset = -1; pmt->pcr.b_fix_done = false; return pmt; } static void ts_pmt_Del( demux_t *p_demux, ts_pmt_t *pmt ) { if( dvbpsi_decoder_present( pmt->handle ) ) dvbpsi_pmt_detach( pmt->handle ); dvbpsi_delete( pmt->handle ); for( int i=0; i<pmt->e_streams.i_size; i++ ) PIDRelease( p_demux, pmt->e_streams.p_elems[i] ); ARRAY_RESET( pmt->e_streams ); if( pmt->iod ) ODFree( pmt->iod ); for( int i=0; i<pmt->od.objects.i_size; i++ ) ODFree( pmt->od.objects.p_elems[i] ); ARRAY_RESET( pmt->od.objects ); if( pmt->i_number > -1 ) es_out_Control( p_demux->out, ES_OUT_DEL_GROUP, pmt->i_number ); free( pmt ); } static ts_pes_t *ts_pes_New( demux_t *p_demux ) { VLC_UNUSED(p_demux); ts_pes_t *pes = malloc( sizeof( ts_pes_t ) ); if( !pes ) return NULL; pes->es.id = NULL; pes->es.i_sl_es_id = 0; es_format_Init( &pes->es.fmt, UNKNOWN_ES, 0 ); ARRAY_INIT( pes->extra_es ); pes->i_stream_type = 0; pes->data_type = TS_ES_DATA_PES; pes->i_data_size = 0; pes->i_data_gathered = 0; pes->p_data = NULL; pes->pp_last = &pes->p_data; pes->p_prepcr_outqueue = NULL; pes->sl.p_data = NULL; pes->sl.pp_last = &pes->sl.p_data; return pes; } static void ts_pes_Del( demux_t *p_demux, ts_pes_t *pes ) { if( pes->es.id ) { /* Ensure we don't wait for overlap hacks #14257 */ es_out_Control( p_demux->out, ES_OUT_SET_ES_STATE, pes->es.id, false ); es_out_Del( p_demux->out, pes->es.id ); p_demux->p_sys->i_pmt_es--; } if( pes->p_data ) block_ChainRelease( pes->p_data ); if( pes->p_prepcr_outqueue ) block_ChainRelease( pes->p_prepcr_outqueue ); es_format_Clean( &pes->es.fmt ); for( int i = 0; i < pes->extra_es.i_size; i++ ) { if( pes->extra_es.p_elems[i]->id ) { es_out_Del( 
p_demux->out, pes->extra_es.p_elems[i]->id ); p_demux->p_sys->i_pmt_es--; } es_format_Clean( &pes->extra_es.p_elems[i]->fmt ); free( pes->extra_es.p_elems[i] ); } ARRAY_RESET( pes->extra_es ); free( pes ); } static ts_psi_t *ts_psi_New( demux_t *p_demux ) { ts_psi_t *psi = malloc( sizeof( ts_psi_t ) ); if( !psi ) return NULL; if( !handle_Init( p_demux, &psi->handle ) ) { free( psi ); return NULL; } psi->i_version = -1; return psi; } static void ts_psi_Del( demux_t *p_demux, ts_psi_t *psi ) { VLC_UNUSED(p_demux); if( dvbpsi_decoder_present( psi->handle ) ) dvbpsi_DetachDemux( psi->handle ); dvbpsi_delete( psi->handle ); free( psi ); }
9034725985/vlc
modules/demux/mpeg/ts.c
C
gpl-2.0
189,480
/*** This file is part of systemd. Copyright 2010 Lennart Poettering systemd is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. systemd is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with systemd; If not, see <http://www.gnu.org/licenses/>. ***/ #include <string.h> #include <unistd.h> #ifdef HAVE_KMOD #include <libkmod.h> #endif #include "bus-util.h" #include "capability-util.h" #include "kmod-setup.h" #include "macro.h" #ifdef HAVE_KMOD static void systemd_kmod_log( void *data, int priority, const char *file, int line, const char *fn, const char *format, va_list args) { /* library logging is enabled at debug only */ DISABLE_WARNING_FORMAT_NONLITERAL; log_internalv(LOG_DEBUG, 0, file, line, fn, format, args); REENABLE_WARNING; } #endif int kmod_setup(void) { #ifdef HAVE_KMOD static const struct { const char *module; const char *path; bool warn_if_unavailable:1; bool warn_if_module:1; bool (*condition_fn)(void); } kmod_table[] = { /* auto-loading on use doesn't work before udev is up */ { "autofs4", "/sys/class/misc/autofs", true, false, NULL }, /* early configure of ::1 on the loopback device */ { "ipv6", "/sys/module/ipv6", false, true, NULL }, /* this should never be a module */ { "unix", "/proc/net/unix", true, true, NULL }, /* IPC is needed before we bring up any other services */ { "kdbus", "/sys/fs/kdbus", false, false, is_kdbus_wanted }, #ifdef HAVE_LIBIPTC /* netfilter is needed by networkd, nspawn among others, and cannot be autoloaded */ { "ip_tables", "/proc/net/ip_tables_names", false, false, NULL }, #endif }; struct kmod_ctx *ctx = NULL; unsigned int i; int r; if (have_effective_cap(CAP_SYS_MODULE) == 0) return 0; for (i = 0; i < ELEMENTSOF(kmod_table); i++) { struct kmod_module *mod; if (kmod_table[i].path && access(kmod_table[i].path, F_OK) >= 0) continue; if (kmod_table[i].condition_fn && !kmod_table[i].condition_fn()) continue; if (kmod_table[i].warn_if_module) log_debug("Your kernel apparently lacks built-in %s support. Might be " "a good idea to compile it in. We'll now try to work around " "this by loading the module...", kmod_table[i].module); if (!ctx) { ctx = kmod_new(NULL, NULL); if (!ctx) return log_oom(); kmod_set_log_fn(ctx, systemd_kmod_log, NULL); kmod_load_resources(ctx); } r = kmod_module_new_from_name(ctx, kmod_table[i].module, &mod); if (r < 0) { log_error("Failed to lookup module '%s'", kmod_table[i].module); continue; } r = kmod_module_probe_insert_module(mod, KMOD_PROBE_APPLY_BLACKLIST, NULL, NULL, NULL, NULL); if (r == 0) log_debug("Inserted module '%s'", kmod_module_get_name(mod)); else if (r == KMOD_PROBE_APPLY_BLACKLIST) log_info("Module '%s' is blacklisted", kmod_module_get_name(mod)); else { bool print_warning = kmod_table[i].warn_if_unavailable || (r < 0 && r != -ENOENT); log_full_errno(print_warning ? LOG_WARNING : LOG_DEBUG, r, "Failed to insert module '%s': %m", kmod_module_get_name(mod)); } kmod_module_unref(mod); } if (ctx) kmod_unref(ctx); #endif return 0; }
thom311/systemd
src/core/kmod-setup.c
C
gpl-2.0
4,725
/*************************************************************************** * Copyright (C) 2006 by Hans Edgington <hans@edgington.nl> * * Copyright (C) 2007-2009 Hans de Goede <hdegoede@redhat.com> * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the * * Free Software Foundation, Inc., * * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * ***************************************************************************/ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/platform_device.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/io.h> #include <linux/acpi.h> #define DRVNAME "f71882fg" #define SIO_F71858FG_LD_HWM 0x02 /* Hardware monitor logical device */ #define SIO_F71882FG_LD_HWM 0x04 /* Hardware monitor logical device */ #define SIO_UNLOCK_KEY 0x87 /* Key to enable Super-I/O */ #define SIO_LOCK_KEY 0xAA /* Key to diasble Super-I/O */ #define SIO_REG_LDSEL 0x07 /* Logical device select */ #define SIO_REG_DEVID 0x20 /* Device ID (2 bytes) */ #define SIO_REG_DEVREV 0x22 /* Device revision */ #define SIO_REG_MANID 0x23 /* Fintek ID (2 bytes) */ #define SIO_REG_ENABLE 0x30 /* Logical device enable */ #define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */ #define SIO_FINTEK_ID 0x1934 /* Manufacturers ID */ #define SIO_F71858_ID 0x0507 /* Chipset ID */ #define SIO_F71862_ID 0x0601 /* Chipset ID */ #define SIO_F71882_ID 0x0541 /* Chipset ID */ #define SIO_F71889_ID 0x0723 /* Chipset ID */ #define SIO_F8000_ID 0x0581 /* Chipset ID */ #define REGION_LENGTH 8 #define ADDR_REG_OFFSET 5 #define DATA_REG_OFFSET 6 #define F71882FG_REG_PECI 0x0A #define F71882FG_REG_IN_STATUS 0x12 /* f71882fg only */ #define F71882FG_REG_IN_BEEP 0x13 /* f71882fg only */ #define F71882FG_REG_IN(nr) (0x20 + (nr)) #define F71882FG_REG_IN1_HIGH 0x32 /* f71882fg only */ #define F71882FG_REG_FAN(nr) (0xA0 + (16 * (nr))) #define F71882FG_REG_FAN_TARGET(nr) (0xA2 + (16 * (nr))) #define F71882FG_REG_FAN_FULL_SPEED(nr) (0xA4 + (16 * (nr))) #define F71882FG_REG_FAN_STATUS 0x92 #define F71882FG_REG_FAN_BEEP 0x93 #define F71882FG_REG_TEMP(nr) (0x70 + 2 * (nr)) #define F71882FG_REG_TEMP_OVT(nr) (0x80 + 2 * (nr)) #define F71882FG_REG_TEMP_HIGH(nr) (0x81 + 2 * (nr)) #define F71882FG_REG_TEMP_STATUS 0x62 #define F71882FG_REG_TEMP_BEEP 0x63 #define F71882FG_REG_TEMP_CONFIG 0x69 #define F71882FG_REG_TEMP_HYST(nr) (0x6C + (nr)) #define F71882FG_REG_TEMP_TYPE 0x6B #define F71882FG_REG_TEMP_DIODE_OPEN 0x6F #define F71882FG_REG_PWM(nr) (0xA3 + (16 * (nr))) #define F71882FG_REG_PWM_TYPE 0x94 #define F71882FG_REG_PWM_ENABLE 0x96 #define F71882FG_REG_FAN_HYST(nr) (0x98 + (nr)) #define F71882FG_REG_POINT_PWM(pwm, point) (0xAA + (point) + (16 * (pwm))) #define F71882FG_REG_POINT_TEMP(pwm, point) (0xA6 + (point) + (16 * (pwm))) #define F71882FG_REG_POINT_MAPPING(nr) (0xAF + 16 * (nr)) #define F71882FG_REG_START 0x01 
#define FAN_MIN_DETECT 366 /* Lowest detectable fanspeed */ static unsigned short force_id; module_param(force_id, ushort, 0); MODULE_PARM_DESC(force_id, "Override the detected device ID"); enum chips { f71858fg, f71862fg, f71882fg, f71889fg, f8000 }; static const char *f71882fg_names[] = { "f71858fg", "f71862fg", "f71882fg", "f71889fg", "f8000", }; static struct platform_device *f71882fg_pdev; /* Super-I/O Function prototypes */ static inline int superio_inb(int base, int reg); static inline int superio_inw(int base, int reg); static inline int superio_enter(int base); static inline void superio_select(int base, int ld); static inline void superio_exit(int base); struct f71882fg_sio_data { enum chips type; }; struct f71882fg_data { unsigned short addr; enum chips type; struct device *hwmon_dev; struct mutex update_lock; int temp_start; /* temp numbering start (0 or 1) */ char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ unsigned long last_limits; /* In jiffies */ /* Register Values */ u8 in[9]; u8 in1_max; u8 in_status; u8 in_beep; u16 fan[4]; u16 fan_target[4]; u16 fan_full_speed[4]; u8 fan_status; u8 fan_beep; /* Note: all models have only 3 temperature channels, but on some they are addressed as 0-2 and on others as 1-3, so for coding convenience we reserve space for 4 channels */ u16 temp[4]; u8 temp_ovt[4]; u8 temp_high[4]; u8 temp_hyst[2]; /* 2 hysts stored per reg */ u8 temp_type[4]; u8 temp_status; u8 temp_beep; u8 temp_diode_open; u8 temp_config; u8 pwm[4]; u8 pwm_enable; u8 pwm_auto_point_hyst[2]; u8 pwm_auto_point_mapping[4]; u8 pwm_auto_point_pwm[4][5]; s8 pwm_auto_point_temp[4][4]; }; /* Sysfs in */ static ssize_t show_in(struct device *dev, struct device_attribute *devattr, char *buf); static ssize_t show_in_max(struct device *dev, struct device_attribute *devattr, char *buf); static ssize_t store_in_max(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count); static ssize_t show_in_beep(struct device *dev, struct device_attribute *devattr, char *buf); static ssize_t store_in_beep(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count); static ssize_t show_in_alarm(struct device *dev, struct device_attribute *devattr, char *buf); /* Sysfs Fan */ static ssize_t show_fan(struct device *dev, struct device_attribute *devattr, char *buf); static ssize_t show_fan_full_speed(struct device *dev, struct device_attribute *devattr, char *buf); static ssize_t store_fan_full_speed(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count); static ssize_t show_fan_beep(struct device *dev, struct device_attribute *devattr, char *buf); static ssize_t store_fan_beep(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count); static ssize_t show_fan_alarm(struct device *dev, struct device_attribute *devattr, char *buf); /* Sysfs Temp */ static ssize_t show_temp(struct device *dev, struct device_attribute *devattr, char *buf); static ssize_t show_temp_max(struct device *dev, struct device_attribute *devattr, char *buf); static ssize_t store_temp_max(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count); static ssize_t show_temp_max_hyst(struct device *dev, struct device_attribute *devattr, char *buf); static ssize_t store_temp_max_hyst(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count); static ssize_t show_temp_crit(struct device *dev, struct device_attribute *devattr, char 
*buf); static ssize_t store_temp_crit(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count); static ssize_t show_temp_crit_hyst(struct device *dev, struct device_attribute *devattr, char *buf); static ssize_t show_temp_type(struct device *dev, struct device_attribute *devattr, char *buf); static ssize_t show_temp_beep(struct device *dev, struct device_attribute *devattr, char *buf); static ssize_t store_temp_beep(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count); static ssize_t show_temp_alarm(struct device *dev, struct device_attribute *devattr, char *buf); static ssize_t show_temp_fault(struct device *dev, struct device_attribute *devattr, char *buf); /* PWM and Auto point control */ static ssize_t show_pwm(struct device *dev, struct device_attribute *devattr, char *buf); static ssize_t store_pwm(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count); static ssize_t show_pwm_enable(struct device *dev, struct device_attribute *devattr, char *buf); static ssize_t store_pwm_enable(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count); static ssize_t show_pwm_interpolate(struct device *dev, struct device_attribute *devattr, char *buf); static ssize_t store_pwm_interpolate(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count); static ssize_t show_pwm_auto_point_channel(struct device *dev, struct device_attribute *devattr, char *buf); static ssize_t store_pwm_auto_point_channel(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count); static ssize_t show_pwm_auto_point_temp_hyst(struct device *dev, struct device_attribute *devattr, char *buf); static ssize_t store_pwm_auto_point_temp_hyst(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count); static ssize_t show_pwm_auto_point_pwm(struct device *dev, struct device_attribute *devattr, char *buf); static ssize_t store_pwm_auto_point_pwm(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count); static ssize_t show_pwm_auto_point_temp(struct device *dev, struct device_attribute *devattr, char *buf); static ssize_t store_pwm_auto_point_temp(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count); /* Sysfs misc */ static ssize_t show_name(struct device *dev, struct device_attribute *devattr, char *buf); static int __devinit f71882fg_probe(struct platform_device * pdev); static int f71882fg_remove(struct platform_device *pdev); static struct platform_driver f71882fg_driver = { .driver = { .owner = THIS_MODULE, .name = DRVNAME, }, .probe = f71882fg_probe, .remove = f71882fg_remove, }; static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); /* Temp and in attr for the f71858fg, the f71858fg is special as it has its temperature indexes start at 0 (the others start at 1) and it only has 3 voltage inputs */ static struct sensor_device_attribute_2 f71858fg_in_temp_attr[] = { SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0), SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1), SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2), SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 0), SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_max, store_temp_max, 0, 0), SENSOR_ATTR_2(temp1_max_hyst, S_IRUGO|S_IWUSR, show_temp_max_hyst, store_temp_max_hyst, 0, 0), SENSOR_ATTR_2(temp1_max_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 0), SENSOR_ATTR_2(temp1_crit, S_IRUGO|S_IWUSR, 
show_temp_crit, store_temp_crit, 0, 0), SENSOR_ATTR_2(temp1_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL, 0, 0), SENSOR_ATTR_2(temp1_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 4), SENSOR_ATTR_2(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0, 0), SENSOR_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 0, 1), SENSOR_ATTR_2(temp2_max, S_IRUGO|S_IWUSR, show_temp_max, store_temp_max, 0, 1), SENSOR_ATTR_2(temp2_max_hyst, S_IRUGO|S_IWUSR, show_temp_max_hyst, store_temp_max_hyst, 0, 1), SENSOR_ATTR_2(temp2_max_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 1), SENSOR_ATTR_2(temp2_crit, S_IRUGO|S_IWUSR, show_temp_crit, store_temp_crit, 0, 1), SENSOR_ATTR_2(temp2_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL, 0, 1), SENSOR_ATTR_2(temp2_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 5), SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 1), SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 1), SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 2), SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_max, store_temp_max, 0, 2), SENSOR_ATTR_2(temp3_max_hyst, S_IRUGO|S_IWUSR, show_temp_max_hyst, store_temp_max_hyst, 0, 2), SENSOR_ATTR_2(temp3_max_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 2), SENSOR_ATTR_2(temp3_crit, S_IRUGO|S_IWUSR, show_temp_crit, store_temp_crit, 0, 2), SENSOR_ATTR_2(temp3_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL, 0, 2), SENSOR_ATTR_2(temp3_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 6), SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2), }; /* Temp and in attr common to the f71862fg, f71882fg and f71889fg */ static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = { SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0), SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1), SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2), SENSOR_ATTR_2(in3_input, S_IRUGO, show_in, NULL, 0, 3), SENSOR_ATTR_2(in4_input, S_IRUGO, show_in, NULL, 0, 4), SENSOR_ATTR_2(in5_input, S_IRUGO, show_in, NULL, 0, 5), SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 6), SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 7), SENSOR_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 0, 8), SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 1), SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_max, store_temp_max, 0, 1), SENSOR_ATTR_2(temp1_max_hyst, S_IRUGO|S_IWUSR, show_temp_max_hyst, store_temp_max_hyst, 0, 1), /* Should really be temp1_max_alarm, but older versions did not handle the max and crit alarms separately and lm_sensors v2 depends on the presence of temp#_alarm files. The same goes for temp2/3 _alarm. 
*/ SENSOR_ATTR_2(temp1_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 1), SENSOR_ATTR_2(temp1_max_beep, S_IRUGO|S_IWUSR, show_temp_beep, store_temp_beep, 0, 1), SENSOR_ATTR_2(temp1_crit, S_IRUGO|S_IWUSR, show_temp_crit, store_temp_crit, 0, 1), SENSOR_ATTR_2(temp1_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL, 0, 1), SENSOR_ATTR_2(temp1_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 5), SENSOR_ATTR_2(temp1_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep, store_temp_beep, 0, 5), SENSOR_ATTR_2(temp1_type, S_IRUGO, show_temp_type, NULL, 0, 1), SENSOR_ATTR_2(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0, 1), SENSOR_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 0, 2), SENSOR_ATTR_2(temp2_max, S_IRUGO|S_IWUSR, show_temp_max, store_temp_max, 0, 2), SENSOR_ATTR_2(temp2_max_hyst, S_IRUGO|S_IWUSR, show_temp_max_hyst, store_temp_max_hyst, 0, 2), /* Should be temp2_max_alarm, see temp1_alarm note */ SENSOR_ATTR_2(temp2_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 2), SENSOR_ATTR_2(temp2_max_beep, S_IRUGO|S_IWUSR, show_temp_beep, store_temp_beep, 0, 2), SENSOR_ATTR_2(temp2_crit, S_IRUGO|S_IWUSR, show_temp_crit, store_temp_crit, 0, 2), SENSOR_ATTR_2(temp2_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL, 0, 2), SENSOR_ATTR_2(temp2_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 6), SENSOR_ATTR_2(temp2_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep, store_temp_beep, 0, 6), SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 2), SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 2), SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 3), SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_max, store_temp_max, 0, 3), SENSOR_ATTR_2(temp3_max_hyst, S_IRUGO|S_IWUSR, show_temp_max_hyst, store_temp_max_hyst, 0, 3), /* Should be temp3_max_alarm, see temp1_alarm note */ SENSOR_ATTR_2(temp3_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 3), SENSOR_ATTR_2(temp3_max_beep, S_IRUGO|S_IWUSR, show_temp_beep, store_temp_beep, 0, 3), SENSOR_ATTR_2(temp3_crit, S_IRUGO|S_IWUSR, show_temp_crit, store_temp_crit, 0, 3), SENSOR_ATTR_2(temp3_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL, 0, 3), SENSOR_ATTR_2(temp3_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 7), SENSOR_ATTR_2(temp3_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep, store_temp_beep, 0, 7), SENSOR_ATTR_2(temp3_type, S_IRUGO, show_temp_type, NULL, 0, 3), SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 3), }; /* For models with in1 alarm capability */ static struct sensor_device_attribute_2 fxxxx_in1_alarm_attr[] = { SENSOR_ATTR_2(in1_max, S_IRUGO|S_IWUSR, show_in_max, store_in_max, 0, 1), SENSOR_ATTR_2(in1_beep, S_IRUGO|S_IWUSR, show_in_beep, store_in_beep, 0, 1), SENSOR_ATTR_2(in1_alarm, S_IRUGO, show_in_alarm, NULL, 0, 1), }; /* Temp and in attr for the f8000 Note on the f8000 temp_ovt (crit) is used as max, and temp_high (max) is used as hysteresis value to clear alarms Also like the f71858fg its temperature indexes start at 0 */ static struct sensor_device_attribute_2 f8000_in_temp_attr[] = { SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0), SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1), SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2), SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 0), SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_crit, store_temp_crit, 0, 0), SENSOR_ATTR_2(temp1_max_hyst, S_IRUGO|S_IWUSR, show_temp_max, store_temp_max, 0, 0), SENSOR_ATTR_2(temp1_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 4), SENSOR_ATTR_2(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0, 0), SENSOR_ATTR_2(temp2_input, S_IRUGO, show_temp, 
NULL, 0, 1), SENSOR_ATTR_2(temp2_max, S_IRUGO|S_IWUSR, show_temp_crit, store_temp_crit, 0, 1), SENSOR_ATTR_2(temp2_max_hyst, S_IRUGO|S_IWUSR, show_temp_max, store_temp_max, 0, 1), SENSOR_ATTR_2(temp2_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 5), SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 1), SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 1), SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 2), SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_crit, store_temp_crit, 0, 2), SENSOR_ATTR_2(temp3_max_hyst, S_IRUGO|S_IWUSR, show_temp_max, store_temp_max, 0, 2), SENSOR_ATTR_2(temp3_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 6), SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2), }; /* Fan / PWM attr common to all models */ static struct sensor_device_attribute_2 fxxxx_fan_attr[4][6] = { { SENSOR_ATTR_2(fan1_input, S_IRUGO, show_fan, NULL, 0, 0), SENSOR_ATTR_2(fan1_full_speed, S_IRUGO|S_IWUSR, show_fan_full_speed, store_fan_full_speed, 0, 0), SENSOR_ATTR_2(fan1_alarm, S_IRUGO, show_fan_alarm, NULL, 0, 0), SENSOR_ATTR_2(pwm1, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 0, 0), SENSOR_ATTR_2(pwm1_enable, S_IRUGO|S_IWUSR, show_pwm_enable, store_pwm_enable, 0, 0), SENSOR_ATTR_2(pwm1_interpolate, S_IRUGO|S_IWUSR, show_pwm_interpolate, store_pwm_interpolate, 0, 0), }, { SENSOR_ATTR_2(fan2_input, S_IRUGO, show_fan, NULL, 0, 1), SENSOR_ATTR_2(fan2_full_speed, S_IRUGO|S_IWUSR, show_fan_full_speed, store_fan_full_speed, 0, 1), SENSOR_ATTR_2(fan2_alarm, S_IRUGO, show_fan_alarm, NULL, 0, 1), SENSOR_ATTR_2(pwm2, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 0, 1), SENSOR_ATTR_2(pwm2_enable, S_IRUGO|S_IWUSR, show_pwm_enable, store_pwm_enable, 0, 1), SENSOR_ATTR_2(pwm2_interpolate, S_IRUGO|S_IWUSR, show_pwm_interpolate, store_pwm_interpolate, 0, 1), }, { SENSOR_ATTR_2(fan3_input, S_IRUGO, show_fan, NULL, 0, 2), SENSOR_ATTR_2(fan3_full_speed, S_IRUGO|S_IWUSR, show_fan_full_speed, store_fan_full_speed, 0, 2), SENSOR_ATTR_2(fan3_alarm, S_IRUGO, show_fan_alarm, NULL, 0, 2), SENSOR_ATTR_2(pwm3, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 0, 2), SENSOR_ATTR_2(pwm3_enable, S_IRUGO|S_IWUSR, show_pwm_enable, store_pwm_enable, 0, 2), SENSOR_ATTR_2(pwm3_interpolate, S_IRUGO|S_IWUSR, show_pwm_interpolate, store_pwm_interpolate, 0, 2), }, { SENSOR_ATTR_2(fan4_input, S_IRUGO, show_fan, NULL, 0, 3), SENSOR_ATTR_2(fan4_full_speed, S_IRUGO|S_IWUSR, show_fan_full_speed, store_fan_full_speed, 0, 3), SENSOR_ATTR_2(fan4_alarm, S_IRUGO, show_fan_alarm, NULL, 0, 3), SENSOR_ATTR_2(pwm4, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 0, 3), SENSOR_ATTR_2(pwm4_enable, S_IRUGO|S_IWUSR, show_pwm_enable, store_pwm_enable, 0, 3), SENSOR_ATTR_2(pwm4_interpolate, S_IRUGO|S_IWUSR, show_pwm_interpolate, store_pwm_interpolate, 0, 3), } }; /* Attr for models which can beep on Fan alarm */ static struct sensor_device_attribute_2 fxxxx_fan_beep_attr[] = { SENSOR_ATTR_2(fan1_beep, S_IRUGO|S_IWUSR, show_fan_beep, store_fan_beep, 0, 0), SENSOR_ATTR_2(fan2_beep, S_IRUGO|S_IWUSR, show_fan_beep, store_fan_beep, 0, 1), SENSOR_ATTR_2(fan3_beep, S_IRUGO|S_IWUSR, show_fan_beep, store_fan_beep, 0, 2), SENSOR_ATTR_2(fan4_beep, S_IRUGO|S_IWUSR, show_fan_beep, store_fan_beep, 0, 3), }; /* PWM attr for the f71862fg, fewer pwms and fewer zones per pwm than the f71858fg / f71882fg / f71889fg */ static struct sensor_device_attribute_2 f71862fg_auto_pwm_attr[] = { SENSOR_ATTR_2(pwm1_auto_channels_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_channel, store_pwm_auto_point_channel, 0, 0), SENSOR_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO|S_IWUSR, 
show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 1, 0), SENSOR_ATTR_2(pwm1_auto_point2_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 4, 0), SENSOR_ATTR_2(pwm1_auto_point1_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 0, 0), SENSOR_ATTR_2(pwm1_auto_point2_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 3, 0), SENSOR_ATTR_2(pwm1_auto_point1_temp_hyst, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp_hyst, store_pwm_auto_point_temp_hyst, 0, 0), SENSOR_ATTR_2(pwm1_auto_point2_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 3, 0), SENSOR_ATTR_2(pwm2_auto_channels_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_channel, store_pwm_auto_point_channel, 0, 1), SENSOR_ATTR_2(pwm2_auto_point1_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 1, 1), SENSOR_ATTR_2(pwm2_auto_point2_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 4, 1), SENSOR_ATTR_2(pwm2_auto_point1_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 0, 1), SENSOR_ATTR_2(pwm2_auto_point2_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 3, 1), SENSOR_ATTR_2(pwm2_auto_point1_temp_hyst, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp_hyst, store_pwm_auto_point_temp_hyst, 0, 1), SENSOR_ATTR_2(pwm2_auto_point2_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 3, 1), SENSOR_ATTR_2(pwm3_auto_channels_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_channel, store_pwm_auto_point_channel, 0, 2), SENSOR_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 1, 2), SENSOR_ATTR_2(pwm3_auto_point2_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 4, 2), SENSOR_ATTR_2(pwm3_auto_point1_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 0, 2), SENSOR_ATTR_2(pwm3_auto_point2_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 3, 2), SENSOR_ATTR_2(pwm3_auto_point1_temp_hyst, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp_hyst, store_pwm_auto_point_temp_hyst, 0, 2), SENSOR_ATTR_2(pwm3_auto_point2_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 3, 2), }; /* PWM attr common to the f71858fg, f71882fg and f71889fg */ static struct sensor_device_attribute_2 fxxxx_auto_pwm_attr[4][14] = { { SENSOR_ATTR_2(pwm1_auto_channels_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_channel, store_pwm_auto_point_channel, 0, 0), SENSOR_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 0, 0), SENSOR_ATTR_2(pwm1_auto_point2_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 1, 0), SENSOR_ATTR_2(pwm1_auto_point3_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 2, 0), SENSOR_ATTR_2(pwm1_auto_point4_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 3, 0), SENSOR_ATTR_2(pwm1_auto_point5_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 4, 0), SENSOR_ATTR_2(pwm1_auto_point1_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 0, 0), SENSOR_ATTR_2(pwm1_auto_point2_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 1, 0), SENSOR_ATTR_2(pwm1_auto_point3_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 2, 0), SENSOR_ATTR_2(pwm1_auto_point4_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 3, 0), SENSOR_ATTR_2(pwm1_auto_point1_temp_hyst, S_IRUGO|S_IWUSR, 
show_pwm_auto_point_temp_hyst, store_pwm_auto_point_temp_hyst, 0, 0), SENSOR_ATTR_2(pwm1_auto_point2_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 1, 0), SENSOR_ATTR_2(pwm1_auto_point3_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 2, 0), SENSOR_ATTR_2(pwm1_auto_point4_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 3, 0), }, { SENSOR_ATTR_2(pwm2_auto_channels_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_channel, store_pwm_auto_point_channel, 0, 1), SENSOR_ATTR_2(pwm2_auto_point1_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 0, 1), SENSOR_ATTR_2(pwm2_auto_point2_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 1, 1), SENSOR_ATTR_2(pwm2_auto_point3_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 2, 1), SENSOR_ATTR_2(pwm2_auto_point4_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 3, 1), SENSOR_ATTR_2(pwm2_auto_point5_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 4, 1), SENSOR_ATTR_2(pwm2_auto_point1_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 0, 1), SENSOR_ATTR_2(pwm2_auto_point2_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 1, 1), SENSOR_ATTR_2(pwm2_auto_point3_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 2, 1), SENSOR_ATTR_2(pwm2_auto_point4_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 3, 1), SENSOR_ATTR_2(pwm2_auto_point1_temp_hyst, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp_hyst, store_pwm_auto_point_temp_hyst, 0, 1), SENSOR_ATTR_2(pwm2_auto_point2_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 1, 1), SENSOR_ATTR_2(pwm2_auto_point3_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 2, 1), SENSOR_ATTR_2(pwm2_auto_point4_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 3, 1), }, { SENSOR_ATTR_2(pwm3_auto_channels_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_channel, store_pwm_auto_point_channel, 0, 2), SENSOR_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 0, 2), SENSOR_ATTR_2(pwm3_auto_point2_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 1, 2), SENSOR_ATTR_2(pwm3_auto_point3_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 2, 2), SENSOR_ATTR_2(pwm3_auto_point4_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 3, 2), SENSOR_ATTR_2(pwm3_auto_point5_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 4, 2), SENSOR_ATTR_2(pwm3_auto_point1_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 0, 2), SENSOR_ATTR_2(pwm3_auto_point2_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 1, 2), SENSOR_ATTR_2(pwm3_auto_point3_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 2, 2), SENSOR_ATTR_2(pwm3_auto_point4_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 3, 2), SENSOR_ATTR_2(pwm3_auto_point1_temp_hyst, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp_hyst, store_pwm_auto_point_temp_hyst, 0, 2), SENSOR_ATTR_2(pwm3_auto_point2_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 1, 2), SENSOR_ATTR_2(pwm3_auto_point3_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 2, 2), SENSOR_ATTR_2(pwm3_auto_point4_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 3, 2), }, { SENSOR_ATTR_2(pwm4_auto_channels_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_channel, 
store_pwm_auto_point_channel, 0, 3), SENSOR_ATTR_2(pwm4_auto_point1_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 0, 3), SENSOR_ATTR_2(pwm4_auto_point2_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 1, 3), SENSOR_ATTR_2(pwm4_auto_point3_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 2, 3), SENSOR_ATTR_2(pwm4_auto_point4_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 3, 3), SENSOR_ATTR_2(pwm4_auto_point5_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 4, 3), SENSOR_ATTR_2(pwm4_auto_point1_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 0, 3), SENSOR_ATTR_2(pwm4_auto_point2_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 1, 3), SENSOR_ATTR_2(pwm4_auto_point3_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 2, 3), SENSOR_ATTR_2(pwm4_auto_point4_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 3, 3), SENSOR_ATTR_2(pwm4_auto_point1_temp_hyst, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp_hyst, store_pwm_auto_point_temp_hyst, 0, 3), SENSOR_ATTR_2(pwm4_auto_point2_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 1, 3), SENSOR_ATTR_2(pwm4_auto_point3_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 2, 3), SENSOR_ATTR_2(pwm4_auto_point4_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 3, 3), } }; /* Fan attr specific to the f8000 (4th fan input can only measure speed) */ static struct sensor_device_attribute_2 f8000_fan_attr[] = { SENSOR_ATTR_2(fan4_input, S_IRUGO, show_fan, NULL, 0, 3), }; /* PWM attr for the f8000, zones mapped to temp instead of to pwm! Also the register block at offset A0 maps to TEMP1 (so our temp2, as the F8000 starts counting temps at 0), B0 maps the TEMP2 and C0 maps to TEMP0 */ static struct sensor_device_attribute_2 f8000_auto_pwm_attr[] = { SENSOR_ATTR_2(pwm1_auto_channels_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_channel, store_pwm_auto_point_channel, 0, 0), SENSOR_ATTR_2(temp1_auto_point1_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 0, 2), SENSOR_ATTR_2(temp1_auto_point2_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 1, 2), SENSOR_ATTR_2(temp1_auto_point3_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 2, 2), SENSOR_ATTR_2(temp1_auto_point4_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 3, 2), SENSOR_ATTR_2(temp1_auto_point5_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 4, 2), SENSOR_ATTR_2(temp1_auto_point1_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 0, 2), SENSOR_ATTR_2(temp1_auto_point2_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 1, 2), SENSOR_ATTR_2(temp1_auto_point3_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 2, 2), SENSOR_ATTR_2(temp1_auto_point4_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 3, 2), SENSOR_ATTR_2(temp1_auto_point1_temp_hyst, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp_hyst, store_pwm_auto_point_temp_hyst, 0, 2), SENSOR_ATTR_2(temp1_auto_point2_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 1, 2), SENSOR_ATTR_2(temp1_auto_point3_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 2, 2), SENSOR_ATTR_2(temp1_auto_point4_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 3, 2), SENSOR_ATTR_2(pwm2_auto_channels_temp, 
S_IRUGO|S_IWUSR, show_pwm_auto_point_channel, store_pwm_auto_point_channel, 0, 1), SENSOR_ATTR_2(temp2_auto_point1_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 0, 0), SENSOR_ATTR_2(temp2_auto_point2_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 1, 0), SENSOR_ATTR_2(temp2_auto_point3_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 2, 0), SENSOR_ATTR_2(temp2_auto_point4_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 3, 0), SENSOR_ATTR_2(temp2_auto_point5_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 4, 0), SENSOR_ATTR_2(temp2_auto_point1_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 0, 0), SENSOR_ATTR_2(temp2_auto_point2_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 1, 0), SENSOR_ATTR_2(temp2_auto_point3_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 2, 0), SENSOR_ATTR_2(temp2_auto_point4_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 3, 0), SENSOR_ATTR_2(temp2_auto_point1_temp_hyst, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp_hyst, store_pwm_auto_point_temp_hyst, 0, 0), SENSOR_ATTR_2(temp2_auto_point2_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 1, 0), SENSOR_ATTR_2(temp2_auto_point3_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 2, 0), SENSOR_ATTR_2(temp2_auto_point4_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 3, 0), SENSOR_ATTR_2(pwm3_auto_channels_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_channel, store_pwm_auto_point_channel, 0, 2), SENSOR_ATTR_2(temp3_auto_point1_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 0, 1), SENSOR_ATTR_2(temp3_auto_point2_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 1, 1), SENSOR_ATTR_2(temp3_auto_point3_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 2, 1), SENSOR_ATTR_2(temp3_auto_point4_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 3, 1), SENSOR_ATTR_2(temp3_auto_point5_pwm, S_IRUGO|S_IWUSR, show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, 4, 1), SENSOR_ATTR_2(temp3_auto_point1_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 0, 1), SENSOR_ATTR_2(temp3_auto_point2_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 1, 1), SENSOR_ATTR_2(temp3_auto_point3_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 2, 1), SENSOR_ATTR_2(temp3_auto_point4_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp, store_pwm_auto_point_temp, 3, 1), SENSOR_ATTR_2(temp3_auto_point1_temp_hyst, S_IRUGO|S_IWUSR, show_pwm_auto_point_temp_hyst, store_pwm_auto_point_temp_hyst, 0, 1), SENSOR_ATTR_2(temp3_auto_point2_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 1, 1), SENSOR_ATTR_2(temp3_auto_point3_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 2, 1), SENSOR_ATTR_2(temp3_auto_point4_temp_hyst, S_IRUGO, show_pwm_auto_point_temp_hyst, NULL, 3, 1), }; /* Super I/O functions */ static inline int superio_inb(int base, int reg) { outb(reg, base); return inb(base + 1); } static int superio_inw(int base, int reg) { int val; val = superio_inb(base, reg) << 8; val |= superio_inb(base, reg + 1); return val; } static inline int superio_enter(int base) { /* Don't step on other drivers' I/O space by accident */ if (!request_muxed_region(base, 2, DRVNAME)) { printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n", 
base); return -EBUSY; } /* according to the datasheet the key must be send twice! */ outb(SIO_UNLOCK_KEY, base); outb(SIO_UNLOCK_KEY, base); return 0; } static inline void superio_select(int base, int ld) { outb(SIO_REG_LDSEL, base); outb(ld, base + 1); } static inline void superio_exit(int base) { outb(SIO_LOCK_KEY, base); release_region(base, 2); } static inline int fan_from_reg(u16 reg) { return reg ? (1500000 / reg) : 0; } static inline u16 fan_to_reg(int fan) { return fan ? (1500000 / fan) : 0; } static u8 f71882fg_read8(struct f71882fg_data *data, u8 reg) { u8 val; outb(reg, data->addr + ADDR_REG_OFFSET); val = inb(data->addr + DATA_REG_OFFSET); return val; } static u16 f71882fg_read16(struct f71882fg_data *data, u8 reg) { u16 val; val = f71882fg_read8(data, reg) << 8; val |= f71882fg_read8(data, reg + 1); return val; } static void f71882fg_write8(struct f71882fg_data *data, u8 reg, u8 val) { outb(reg, data->addr + ADDR_REG_OFFSET); outb(val, data->addr + DATA_REG_OFFSET); } static void f71882fg_write16(struct f71882fg_data *data, u8 reg, u16 val) { f71882fg_write8(data, reg, val >> 8); f71882fg_write8(data, reg + 1, val & 0xff); } static u16 f71882fg_read_temp(struct f71882fg_data *data, int nr) { if (data->type == f71858fg) return f71882fg_read16(data, F71882FG_REG_TEMP(nr)); else return f71882fg_read8(data, F71882FG_REG_TEMP(nr)); } static struct f71882fg_data *f71882fg_update_device(struct device *dev) { struct f71882fg_data *data = dev_get_drvdata(dev); int nr, reg = 0, reg2; int nr_fans = (data->type == f71882fg) ? 4 : 3; int nr_ins = (data->type == f71858fg || data->type == f8000) ? 3 : 9; mutex_lock(&data->update_lock); /* Update once every 60 seconds */ if (time_after(jiffies, data->last_limits + 60 * HZ) || !data->valid) { if (data->type == f71882fg || data->type == f71889fg) { data->in1_max = f71882fg_read8(data, F71882FG_REG_IN1_HIGH); data->in_beep = f71882fg_read8(data, F71882FG_REG_IN_BEEP); } /* Get High & boundary temps*/ for (nr = data->temp_start; nr < 3 + data->temp_start; nr++) { data->temp_ovt[nr] = f71882fg_read8(data, F71882FG_REG_TEMP_OVT(nr)); data->temp_high[nr] = f71882fg_read8(data, F71882FG_REG_TEMP_HIGH(nr)); } if (data->type != f8000) { data->temp_hyst[0] = f71882fg_read8(data, F71882FG_REG_TEMP_HYST(0)); data->temp_hyst[1] = f71882fg_read8(data, F71882FG_REG_TEMP_HYST(1)); } if (data->type == f71862fg || data->type == f71882fg || data->type == f71889fg) { data->fan_beep = f71882fg_read8(data, F71882FG_REG_FAN_BEEP); data->temp_beep = f71882fg_read8(data, F71882FG_REG_TEMP_BEEP); /* Have to hardcode type, because temp1 is special */ reg = f71882fg_read8(data, F71882FG_REG_TEMP_TYPE); data->temp_type[2] = (reg & 0x04) ? 2 : 4; data->temp_type[3] = (reg & 0x08) ? 2 : 4; } /* Determine temp index 1 sensor type */ if (data->type == f71889fg) { reg2 = f71882fg_read8(data, F71882FG_REG_START); switch ((reg2 & 0x60) >> 5) { case 0x00: /* BJT / Thermistor */ data->temp_type[1] = (reg & 0x02) ? 2 : 4; break; case 0x01: /* AMDSI */ data->temp_type[1] = 5; break; case 0x02: /* PECI */ case 0x03: /* Ibex Peak ?? Report as PECI for now */ data->temp_type[1] = 6; break; } } else { reg2 = f71882fg_read8(data, F71882FG_REG_PECI); if ((reg2 & 0x03) == 0x01) data->temp_type[1] = 6; /* PECI */ else if ((reg2 & 0x03) == 0x02) data->temp_type[1] = 5; /* AMDSI */ else if (data->type == f71862fg || data->type == f71882fg) data->temp_type[1] = (reg & 0x02) ? 
2 : 4; else /* f71858fg and f8000 only support BJT */ data->temp_type[1] = 2; } data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE); data->pwm_auto_point_hyst[0] = f71882fg_read8(data, F71882FG_REG_FAN_HYST(0)); data->pwm_auto_point_hyst[1] = f71882fg_read8(data, F71882FG_REG_FAN_HYST(1)); for (nr = 0; nr < nr_fans; nr++) { data->pwm_auto_point_mapping[nr] = f71882fg_read8(data, F71882FG_REG_POINT_MAPPING(nr)); if (data->type != f71862fg) { int point; for (point = 0; point < 5; point++) { data->pwm_auto_point_pwm[nr][point] = f71882fg_read8(data, F71882FG_REG_POINT_PWM (nr, point)); } for (point = 0; point < 4; point++) { data->pwm_auto_point_temp[nr][point] = f71882fg_read8(data, F71882FG_REG_POINT_TEMP (nr, point)); } } else { data->pwm_auto_point_pwm[nr][1] = f71882fg_read8(data, F71882FG_REG_POINT_PWM (nr, 1)); data->pwm_auto_point_pwm[nr][4] = f71882fg_read8(data, F71882FG_REG_POINT_PWM (nr, 4)); data->pwm_auto_point_temp[nr][0] = f71882fg_read8(data, F71882FG_REG_POINT_TEMP (nr, 0)); data->pwm_auto_point_temp[nr][3] = f71882fg_read8(data, F71882FG_REG_POINT_TEMP (nr, 3)); } } data->last_limits = jiffies; } /* Update every second */ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { data->temp_status = f71882fg_read8(data, F71882FG_REG_TEMP_STATUS); data->temp_diode_open = f71882fg_read8(data, F71882FG_REG_TEMP_DIODE_OPEN); for (nr = data->temp_start; nr < 3 + data->temp_start; nr++) data->temp[nr] = f71882fg_read_temp(data, nr); data->fan_status = f71882fg_read8(data, F71882FG_REG_FAN_STATUS); for (nr = 0; nr < nr_fans; nr++) { data->fan[nr] = f71882fg_read16(data, F71882FG_REG_FAN(nr)); data->fan_target[nr] = f71882fg_read16(data, F71882FG_REG_FAN_TARGET(nr)); data->fan_full_speed[nr] = f71882fg_read16(data, F71882FG_REG_FAN_FULL_SPEED(nr)); data->pwm[nr] = f71882fg_read8(data, F71882FG_REG_PWM(nr)); } /* The f8000 can monitor 1 more fan, but has no pwm for it */ if (data->type == f8000) data->fan[3] = f71882fg_read16(data, F71882FG_REG_FAN(3)); if (data->type == f71882fg || data->type == f71889fg) data->in_status = f71882fg_read8(data, F71882FG_REG_IN_STATUS); for (nr = 0; nr < nr_ins; nr++) data->in[nr] = f71882fg_read8(data, F71882FG_REG_IN(nr)); data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } /* Sysfs Interface */ static ssize_t show_fan(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; int speed = fan_from_reg(data->fan[nr]); if (speed == FAN_MIN_DETECT) speed = 0; return sprintf(buf, "%d\n", speed); } static ssize_t show_fan_full_speed(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; int speed = fan_from_reg(data->fan_full_speed[nr]); return sprintf(buf, "%d\n", speed); } static ssize_t store_fan_full_speed(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, nr = to_sensor_dev_attr_2(devattr)->index; long val; err = strict_strtol(buf, 10, &val); if (err) return err; val = SENSORS_LIMIT(val, 23, 1500000); val = fan_to_reg(val); mutex_lock(&data->update_lock); f71882fg_write16(data, F71882FG_REG_FAN_FULL_SPEED(nr), val); data->fan_full_speed[nr] = val; mutex_unlock(&data->update_lock); return count; } static ssize_t show_fan_beep(struct device *dev, struct 
device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; if (data->fan_beep & (1 << nr)) return sprintf(buf, "1\n"); else return sprintf(buf, "0\n"); } static ssize_t store_fan_beep(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, nr = to_sensor_dev_attr_2(devattr)->index; unsigned long val; err = strict_strtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->fan_beep = f71882fg_read8(data, F71882FG_REG_FAN_BEEP); if (val) data->fan_beep |= 1 << nr; else data->fan_beep &= ~(1 << nr); f71882fg_write8(data, F71882FG_REG_FAN_BEEP, data->fan_beep); mutex_unlock(&data->update_lock); return count; } static ssize_t show_fan_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; if (data->fan_status & (1 << nr)) return sprintf(buf, "1\n"); else return sprintf(buf, "0\n"); } static ssize_t show_in(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; return sprintf(buf, "%d\n", data->in[nr] * 8); } static ssize_t show_in_max(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); return sprintf(buf, "%d\n", data->in1_max * 8); } static ssize_t store_in_max(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err; long val; err = strict_strtol(buf, 10, &val); if (err) return err; val /= 8; val = SENSORS_LIMIT(val, 0, 255); mutex_lock(&data->update_lock); f71882fg_write8(data, F71882FG_REG_IN1_HIGH, val); data->in1_max = val; mutex_unlock(&data->update_lock); return count; } static ssize_t show_in_beep(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; if (data->in_beep & (1 << nr)) return sprintf(buf, "1\n"); else return sprintf(buf, "0\n"); } static ssize_t store_in_beep(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, nr = to_sensor_dev_attr_2(devattr)->index; unsigned long val; err = strict_strtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->in_beep = f71882fg_read8(data, F71882FG_REG_IN_BEEP); if (val) data->in_beep |= 1 << nr; else data->in_beep &= ~(1 << nr); f71882fg_write8(data, F71882FG_REG_IN_BEEP, data->in_beep); mutex_unlock(&data->update_lock); return count; } static ssize_t show_in_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; if (data->in_status & (1 << nr)) return sprintf(buf, "1\n"); else return sprintf(buf, "0\n"); } static ssize_t show_temp(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; int sign, temp; if (data->type == f71858fg) { /* TEMP_TABLE_SEL 1 or 3 ? 
*/ if (data->temp_config & 1) { sign = data->temp[nr] & 0x0001; temp = (data->temp[nr] >> 5) & 0x7ff; } else { sign = data->temp[nr] & 0x8000; temp = (data->temp[nr] >> 5) & 0x3ff; } temp *= 125; if (sign) temp -= 128000; } else temp = data->temp[nr] * 1000; return sprintf(buf, "%d\n", temp); } static ssize_t show_temp_max(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; return sprintf(buf, "%d\n", data->temp_high[nr] * 1000); } static ssize_t store_temp_max(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, nr = to_sensor_dev_attr_2(devattr)->index; long val; err = strict_strtol(buf, 10, &val); if (err) return err; val /= 1000; val = SENSORS_LIMIT(val, 0, 255); mutex_lock(&data->update_lock); f71882fg_write8(data, F71882FG_REG_TEMP_HIGH(nr), val); data->temp_high[nr] = val; mutex_unlock(&data->update_lock); return count; } static ssize_t show_temp_max_hyst(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; int temp_max_hyst; mutex_lock(&data->update_lock); if (nr & 1) temp_max_hyst = data->temp_hyst[nr / 2] >> 4; else temp_max_hyst = data->temp_hyst[nr / 2] & 0x0f; temp_max_hyst = (data->temp_high[nr] - temp_max_hyst) * 1000; mutex_unlock(&data->update_lock); return sprintf(buf, "%d\n", temp_max_hyst); } static ssize_t store_temp_max_hyst(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, nr = to_sensor_dev_attr_2(devattr)->index; ssize_t ret = count; u8 reg; long val; err = strict_strtol(buf, 10, &val); if (err) return err; val /= 1000; mutex_lock(&data->update_lock); /* convert abs to relative and check */ data->temp_high[nr] = f71882fg_read8(data, F71882FG_REG_TEMP_HIGH(nr)); val = SENSORS_LIMIT(val, data->temp_high[nr] - 15, data->temp_high[nr]); val = data->temp_high[nr] - val; /* convert value to register contents */ reg = f71882fg_read8(data, F71882FG_REG_TEMP_HYST(nr / 2)); if (nr & 1) reg = (reg & 0x0f) | (val << 4); else reg = (reg & 0xf0) | val; f71882fg_write8(data, F71882FG_REG_TEMP_HYST(nr / 2), reg); data->temp_hyst[nr / 2] = reg; mutex_unlock(&data->update_lock); return ret; } static ssize_t show_temp_crit(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; return sprintf(buf, "%d\n", data->temp_ovt[nr] * 1000); } static ssize_t store_temp_crit(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, nr = to_sensor_dev_attr_2(devattr)->index; long val; err = strict_strtol(buf, 10, &val); if (err) return err; val /= 1000; val = SENSORS_LIMIT(val, 0, 255); mutex_lock(&data->update_lock); f71882fg_write8(data, F71882FG_REG_TEMP_OVT(nr), val); data->temp_ovt[nr] = val; mutex_unlock(&data->update_lock); return count; } static ssize_t show_temp_crit_hyst(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; int temp_crit_hyst; mutex_lock(&data->update_lock); if (nr & 1) temp_crit_hyst = data->temp_hyst[nr / 2] >> 4; else 
temp_crit_hyst = data->temp_hyst[nr / 2] & 0x0f; temp_crit_hyst = (data->temp_ovt[nr] - temp_crit_hyst) * 1000; mutex_unlock(&data->update_lock); return sprintf(buf, "%d\n", temp_crit_hyst); } static ssize_t show_temp_type(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; return sprintf(buf, "%d\n", data->temp_type[nr]); } static ssize_t show_temp_beep(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; if (data->temp_beep & (1 << nr)) return sprintf(buf, "1\n"); else return sprintf(buf, "0\n"); } static ssize_t store_temp_beep(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, nr = to_sensor_dev_attr_2(devattr)->index; unsigned long val; err = strict_strtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp_beep = f71882fg_read8(data, F71882FG_REG_TEMP_BEEP); if (val) data->temp_beep |= 1 << nr; else data->temp_beep &= ~(1 << nr); f71882fg_write8(data, F71882FG_REG_TEMP_BEEP, data->temp_beep); mutex_unlock(&data->update_lock); return count; } static ssize_t show_temp_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; if (data->temp_status & (1 << nr)) return sprintf(buf, "1\n"); else return sprintf(buf, "0\n"); } static ssize_t show_temp_fault(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; if (data->temp_diode_open & (1 << nr)) return sprintf(buf, "1\n"); else return sprintf(buf, "0\n"); } static ssize_t show_pwm(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = f71882fg_update_device(dev); int val, nr = to_sensor_dev_attr_2(devattr)->index; mutex_lock(&data->update_lock); if (data->pwm_enable & (1 << (2 * nr))) /* PWM mode */ val = data->pwm[nr]; else { /* RPM mode */ val = 255 * fan_from_reg(data->fan_target[nr]) / fan_from_reg(data->fan_full_speed[nr]); } mutex_unlock(&data->update_lock); return sprintf(buf, "%d\n", val); } static ssize_t store_pwm(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, nr = to_sensor_dev_attr_2(devattr)->index; long val; err = strict_strtol(buf, 10, &val); if (err) return err; val = SENSORS_LIMIT(val, 0, 255); mutex_lock(&data->update_lock); data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE); if ((data->type == f8000 && ((data->pwm_enable >> 2 * nr) & 3) != 2) || (data->type != f8000 && !((data->pwm_enable >> 2 * nr) & 2))) { count = -EROFS; goto leave; } if (data->pwm_enable & (1 << (2 * nr))) { /* PWM mode */ f71882fg_write8(data, F71882FG_REG_PWM(nr), val); data->pwm[nr] = val; } else { /* RPM mode */ int target, full_speed; full_speed = f71882fg_read16(data, F71882FG_REG_FAN_FULL_SPEED(nr)); target = fan_to_reg(val * fan_from_reg(full_speed) / 255); f71882fg_write16(data, F71882FG_REG_FAN_TARGET(nr), target); data->fan_target[nr] = target; data->fan_full_speed[nr] = full_speed; } leave: mutex_unlock(&data->update_lock); return count; } static ssize_t show_pwm_enable(struct device *dev, 
struct device_attribute *devattr, char *buf) { int result = 0; struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; switch ((data->pwm_enable >> 2 * nr) & 3) { case 0: case 1: result = 2; /* Normal auto mode */ break; case 2: result = 1; /* Manual mode */ break; case 3: if (data->type == f8000) result = 3; /* Thermostat mode */ else result = 1; /* Manual mode */ break; } return sprintf(buf, "%d\n", result); } static ssize_t store_pwm_enable(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, nr = to_sensor_dev_attr_2(devattr)->index; long val; err = strict_strtol(buf, 10, &val); if (err) return err; /* Special case for F8000 pwm channel 3 which only does auto mode */ if (data->type == f8000 && nr == 2 && val != 2) return -EINVAL; mutex_lock(&data->update_lock); data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE); /* Special case for F8000 auto PWM mode / Thermostat mode */ if (data->type == f8000 && ((data->pwm_enable >> 2 * nr) & 1)) { switch (val) { case 2: data->pwm_enable &= ~(2 << (2 * nr)); break; /* Normal auto mode */ case 3: data->pwm_enable |= 2 << (2 * nr); break; /* Thermostat mode */ default: count = -EINVAL; goto leave; } } else { switch (val) { case 1: /* The f71858fg does not support manual RPM mode */ if (data->type == f71858fg && ((data->pwm_enable >> (2 * nr)) & 1)) { count = -EINVAL; goto leave; } data->pwm_enable |= 2 << (2 * nr); break; /* Manual */ case 2: data->pwm_enable &= ~(2 << (2 * nr)); break; /* Normal auto mode */ default: count = -EINVAL; goto leave; } } f71882fg_write8(data, F71882FG_REG_PWM_ENABLE, data->pwm_enable); leave: mutex_unlock(&data->update_lock); return count; } static ssize_t show_pwm_auto_point_pwm(struct device *dev, struct device_attribute *devattr, char *buf) { int result; struct f71882fg_data *data = f71882fg_update_device(dev); int pwm = to_sensor_dev_attr_2(devattr)->index; int point = to_sensor_dev_attr_2(devattr)->nr; mutex_lock(&data->update_lock); if (data->pwm_enable & (1 << (2 * pwm))) { /* PWM mode */ result = data->pwm_auto_point_pwm[pwm][point]; } else { /* RPM mode */ result = 32 * 255 / (32 + data->pwm_auto_point_pwm[pwm][point]); } mutex_unlock(&data->update_lock); return sprintf(buf, "%d\n", result); } static ssize_t store_pwm_auto_point_pwm(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, pwm = to_sensor_dev_attr_2(devattr)->index; int point = to_sensor_dev_attr_2(devattr)->nr; long val; err = strict_strtol(buf, 10, &val); if (err) return err; val = SENSORS_LIMIT(val, 0, 255); mutex_lock(&data->update_lock); data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE); if (data->pwm_enable & (1 << (2 * pwm))) { /* PWM mode */ } else { /* RPM mode */ if (val < 29) /* Prevent negative numbers */ val = 255; else val = (255 - val) * 32 / val; } f71882fg_write8(data, F71882FG_REG_POINT_PWM(pwm, point), val); data->pwm_auto_point_pwm[pwm][point] = val; mutex_unlock(&data->update_lock); return count; } static ssize_t show_pwm_auto_point_temp_hyst(struct device *dev, struct device_attribute *devattr, char *buf) { int result = 0; struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; int point = to_sensor_dev_attr_2(devattr)->nr; mutex_lock(&data->update_lock); if (nr & 1) result = data->pwm_auto_point_hyst[nr / 2] >> 4; else 
result = data->pwm_auto_point_hyst[nr / 2] & 0x0f; result = 1000 * (data->pwm_auto_point_temp[nr][point] - result); mutex_unlock(&data->update_lock); return sprintf(buf, "%d\n", result); } static ssize_t store_pwm_auto_point_temp_hyst(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, nr = to_sensor_dev_attr_2(devattr)->index; int point = to_sensor_dev_attr_2(devattr)->nr; u8 reg; long val; err = strict_strtol(buf, 10, &val); if (err) return err; val /= 1000; mutex_lock(&data->update_lock); data->pwm_auto_point_temp[nr][point] = f71882fg_read8(data, F71882FG_REG_POINT_TEMP(nr, point)); val = SENSORS_LIMIT(val, data->pwm_auto_point_temp[nr][point] - 15, data->pwm_auto_point_temp[nr][point]); val = data->pwm_auto_point_temp[nr][point] - val; reg = f71882fg_read8(data, F71882FG_REG_FAN_HYST(nr / 2)); if (nr & 1) reg = (reg & 0x0f) | (val << 4); else reg = (reg & 0xf0) | val; f71882fg_write8(data, F71882FG_REG_FAN_HYST(nr / 2), reg); data->pwm_auto_point_hyst[nr / 2] = reg; mutex_unlock(&data->update_lock); return count; } static ssize_t show_pwm_interpolate(struct device *dev, struct device_attribute *devattr, char *buf) { int result; struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; result = (data->pwm_auto_point_mapping[nr] >> 4) & 1; return sprintf(buf, "%d\n", result); } static ssize_t store_pwm_interpolate(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, nr = to_sensor_dev_attr_2(devattr)->index; unsigned long val; err = strict_strtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->pwm_auto_point_mapping[nr] = f71882fg_read8(data, F71882FG_REG_POINT_MAPPING(nr)); if (val) val = data->pwm_auto_point_mapping[nr] | (1 << 4); else val = data->pwm_auto_point_mapping[nr] & (~(1 << 4)); f71882fg_write8(data, F71882FG_REG_POINT_MAPPING(nr), val); data->pwm_auto_point_mapping[nr] = val; mutex_unlock(&data->update_lock); return count; } static ssize_t show_pwm_auto_point_channel(struct device *dev, struct device_attribute *devattr, char *buf) { int result; struct f71882fg_data *data = f71882fg_update_device(dev); int nr = to_sensor_dev_attr_2(devattr)->index; result = 1 << ((data->pwm_auto_point_mapping[nr] & 3) - data->temp_start); return sprintf(buf, "%d\n", result); } static ssize_t store_pwm_auto_point_channel(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, nr = to_sensor_dev_attr_2(devattr)->index; long val; err = strict_strtol(buf, 10, &val); if (err) return err; switch (val) { case 1: val = 0; break; case 2: val = 1; break; case 4: val = 2; break; default: return -EINVAL; } val += data->temp_start; mutex_lock(&data->update_lock); data->pwm_auto_point_mapping[nr] = f71882fg_read8(data, F71882FG_REG_POINT_MAPPING(nr)); val = (data->pwm_auto_point_mapping[nr] & 0xfc) | val; f71882fg_write8(data, F71882FG_REG_POINT_MAPPING(nr), val); data->pwm_auto_point_mapping[nr] = val; mutex_unlock(&data->update_lock); return count; } static ssize_t show_pwm_auto_point_temp(struct device *dev, struct device_attribute *devattr, char *buf) { int result; struct f71882fg_data *data = f71882fg_update_device(dev); int pwm = to_sensor_dev_attr_2(devattr)->index; int point = to_sensor_dev_attr_2(devattr)->nr; result = 
data->pwm_auto_point_temp[pwm][point]; return sprintf(buf, "%d\n", 1000 * result); } static ssize_t store_pwm_auto_point_temp(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); int err, pwm = to_sensor_dev_attr_2(devattr)->index; int point = to_sensor_dev_attr_2(devattr)->nr; long val; err = strict_strtol(buf, 10, &val); if (err) return err; val /= 1000; if (data->type == f71889fg) val = SENSORS_LIMIT(val, -128, 127); else val = SENSORS_LIMIT(val, 0, 127); mutex_lock(&data->update_lock); f71882fg_write8(data, F71882FG_REG_POINT_TEMP(pwm, point), val); data->pwm_auto_point_temp[pwm][point] = val; mutex_unlock(&data->update_lock); return count; } static ssize_t show_name(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71882fg_data *data = dev_get_drvdata(dev); return sprintf(buf, "%s\n", f71882fg_names[data->type]); } static int __devinit f71882fg_create_sysfs_files(struct platform_device *pdev, struct sensor_device_attribute_2 *attr, int count) { int err, i; for (i = 0; i < count; i++) { err = device_create_file(&pdev->dev, &attr[i].dev_attr); if (err) return err; } return 0; } static void f71882fg_remove_sysfs_files(struct platform_device *pdev, struct sensor_device_attribute_2 *attr, int count) { int i; for (i = 0; i < count; i++) device_remove_file(&pdev->dev, &attr[i].dev_attr); } static int __devinit f71882fg_probe(struct platform_device *pdev) { struct f71882fg_data *data; struct f71882fg_sio_data *sio_data = pdev->dev.platform_data; int err, i, nr_fans = (sio_data->type == f71882fg) ? 4 : 3; u8 start_reg; data = kzalloc(sizeof(struct f71882fg_data), GFP_KERNEL); if (!data) return -ENOMEM; data->addr = platform_get_resource(pdev, IORESOURCE_IO, 0)->start; data->type = sio_data->type; data->temp_start = (data->type == f71858fg || data->type == f8000) ? 0 : 1; mutex_init(&data->update_lock); platform_set_drvdata(pdev, data); start_reg = f71882fg_read8(data, F71882FG_REG_START); if (start_reg & 0x04) { dev_warn(&pdev->dev, "Hardware monitor is powered down\n"); err = -ENODEV; goto exit_free; } if (!(start_reg & 0x03)) { dev_warn(&pdev->dev, "Hardware monitoring not activated\n"); err = -ENODEV; goto exit_free; } /* Register sysfs interface files */ err = device_create_file(&pdev->dev, &dev_attr_name); if (err) goto exit_unregister_sysfs; if (start_reg & 0x01) { switch (data->type) { case f71858fg: data->temp_config = f71882fg_read8(data, F71882FG_REG_TEMP_CONFIG); if (data->temp_config & 0x10) /* The f71858fg temperature alarms behave as the f8000 alarms in this mode */ err = f71882fg_create_sysfs_files(pdev, f8000_in_temp_attr, ARRAY_SIZE(f8000_in_temp_attr)); else err = f71882fg_create_sysfs_files(pdev, f71858fg_in_temp_attr, ARRAY_SIZE(f71858fg_in_temp_attr)); break; case f71882fg: case f71889fg: err = f71882fg_create_sysfs_files(pdev, fxxxx_in1_alarm_attr, ARRAY_SIZE(fxxxx_in1_alarm_attr)); if (err) goto exit_unregister_sysfs; /* fall through! 
*/ case f71862fg: err = f71882fg_create_sysfs_files(pdev, fxxxx_in_temp_attr, ARRAY_SIZE(fxxxx_in_temp_attr)); break; case f8000: err = f71882fg_create_sysfs_files(pdev, f8000_in_temp_attr, ARRAY_SIZE(f8000_in_temp_attr)); break; } if (err) goto exit_unregister_sysfs; } if (start_reg & 0x02) { data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE); /* Sanity check the pwm settings */ switch (data->type) { case f71858fg: err = 0; for (i = 0; i < nr_fans; i++) if (((data->pwm_enable >> (i * 2)) & 3) == 3) err = 1; break; case f71862fg: err = (data->pwm_enable & 0x15) != 0x15; break; case f71882fg: case f71889fg: err = 0; break; case f8000: err = data->pwm_enable & 0x20; break; } if (err) { dev_err(&pdev->dev, "Invalid (reserved) pwm settings: 0x%02x\n", (unsigned int)data->pwm_enable); err = -ENODEV; goto exit_unregister_sysfs; } err = f71882fg_create_sysfs_files(pdev, &fxxxx_fan_attr[0][0], ARRAY_SIZE(fxxxx_fan_attr[0]) * nr_fans); if (err) goto exit_unregister_sysfs; if (data->type == f71862fg || data->type == f71882fg || data->type == f71889fg) { err = f71882fg_create_sysfs_files(pdev, fxxxx_fan_beep_attr, nr_fans); if (err) goto exit_unregister_sysfs; } switch (data->type) { case f71862fg: err = f71882fg_create_sysfs_files(pdev, f71862fg_auto_pwm_attr, ARRAY_SIZE(f71862fg_auto_pwm_attr)); break; case f8000: err = f71882fg_create_sysfs_files(pdev, f8000_fan_attr, ARRAY_SIZE(f8000_fan_attr)); if (err) goto exit_unregister_sysfs; err = f71882fg_create_sysfs_files(pdev, f8000_auto_pwm_attr, ARRAY_SIZE(f8000_auto_pwm_attr)); break; case f71889fg: for (i = 0; i < nr_fans; i++) { data->pwm_auto_point_mapping[i] = f71882fg_read8(data, F71882FG_REG_POINT_MAPPING(i)); if (data->pwm_auto_point_mapping[i] & 0x80) break; } if (i != nr_fans) { dev_warn(&pdev->dev, "Auto pwm controlled by raw digital " "data, disabling pwm auto_point " "sysfs attributes\n"); break; } /* fall through */ default: /* f71858fg / f71882fg */ err = f71882fg_create_sysfs_files(pdev, &fxxxx_auto_pwm_attr[0][0], ARRAY_SIZE(fxxxx_auto_pwm_attr[0]) * nr_fans); } if (err) goto exit_unregister_sysfs; for (i = 0; i < nr_fans; i++) dev_info(&pdev->dev, "Fan: %d is in %s mode\n", i + 1, (data->pwm_enable & (1 << 2 * i)) ? "duty-cycle" : "RPM"); } data->hwmon_dev = hwmon_device_register(&pdev->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); data->hwmon_dev = NULL; goto exit_unregister_sysfs; } return 0; exit_unregister_sysfs: f71882fg_remove(pdev); /* Will unregister the sysfs files for us */ return err; /* f71882fg_remove() also frees our data */ exit_free: kfree(data); return err; } static int f71882fg_remove(struct platform_device *pdev) { struct f71882fg_data *data = platform_get_drvdata(pdev); int nr_fans = (data->type == f71882fg) ? 4 : 3; u8 start_reg = f71882fg_read8(data, F71882FG_REG_START); if (data->hwmon_dev) hwmon_device_unregister(data->hwmon_dev); device_remove_file(&pdev->dev, &dev_attr_name); if (start_reg & 0x01) { switch (data->type) { case f71858fg: if (data->temp_config & 0x10) f71882fg_remove_sysfs_files(pdev, f8000_in_temp_attr, ARRAY_SIZE(f8000_in_temp_attr)); else f71882fg_remove_sysfs_files(pdev, f71858fg_in_temp_attr, ARRAY_SIZE(f71858fg_in_temp_attr)); break; case f71882fg: case f71889fg: f71882fg_remove_sysfs_files(pdev, fxxxx_in1_alarm_attr, ARRAY_SIZE(fxxxx_in1_alarm_attr)); /* fall through! 
*/ case f71862fg: f71882fg_remove_sysfs_files(pdev, fxxxx_in_temp_attr, ARRAY_SIZE(fxxxx_in_temp_attr)); break; case f8000: f71882fg_remove_sysfs_files(pdev, f8000_in_temp_attr, ARRAY_SIZE(f8000_in_temp_attr)); break; } } if (start_reg & 0x02) { f71882fg_remove_sysfs_files(pdev, &fxxxx_fan_attr[0][0], ARRAY_SIZE(fxxxx_fan_attr[0]) * nr_fans); if (data->type == f71862fg || data->type == f71882fg || data->type == f71889fg) f71882fg_remove_sysfs_files(pdev, fxxxx_fan_beep_attr, nr_fans); switch (data->type) { case f71862fg: f71882fg_remove_sysfs_files(pdev, f71862fg_auto_pwm_attr, ARRAY_SIZE(f71862fg_auto_pwm_attr)); break; case f8000: f71882fg_remove_sysfs_files(pdev, f8000_fan_attr, ARRAY_SIZE(f8000_fan_attr)); f71882fg_remove_sysfs_files(pdev, f8000_auto_pwm_attr, ARRAY_SIZE(f8000_auto_pwm_attr)); break; default: /* f71858fg / f71882fg / f71889fg */ f71882fg_remove_sysfs_files(pdev, &fxxxx_auto_pwm_attr[0][0], ARRAY_SIZE(fxxxx_auto_pwm_attr[0]) * nr_fans); } } platform_set_drvdata(pdev, NULL); kfree(data); return 0; } static int __init f71882fg_find(int sioaddr, unsigned short *address, struct f71882fg_sio_data *sio_data) { u16 devid; int err = superio_enter(sioaddr); if (err) return err; devid = superio_inw(sioaddr, SIO_REG_MANID); if (devid != SIO_FINTEK_ID) { pr_debug(DRVNAME ": Not a Fintek device\n"); err = -ENODEV; goto exit; } devid = force_id ? force_id : superio_inw(sioaddr, SIO_REG_DEVID); switch (devid) { case SIO_F71858_ID: sio_data->type = f71858fg; break; case SIO_F71862_ID: sio_data->type = f71862fg; break; case SIO_F71882_ID: sio_data->type = f71882fg; break; case SIO_F71889_ID: sio_data->type = f71889fg; break; case SIO_F8000_ID: sio_data->type = f8000; break; default: printk(KERN_INFO DRVNAME ": Unsupported Fintek device: %04x\n", (unsigned int)devid); err = -ENODEV; goto exit; } if (sio_data->type == f71858fg) superio_select(sioaddr, SIO_F71858FG_LD_HWM); else superio_select(sioaddr, SIO_F71882FG_LD_HWM); if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) { printk(KERN_WARNING DRVNAME ": Device not activated\n"); err = -ENODEV; goto exit; } *address = superio_inw(sioaddr, SIO_REG_ADDR); if (*address == 0) { printk(KERN_WARNING DRVNAME ": Base address not set\n"); err = -ENODEV; goto exit; } *address &= ~(REGION_LENGTH - 1); /* Ignore 3 LSB */ err = 0; printk(KERN_INFO DRVNAME ": Found %s chip at %#x, revision %d\n", f71882fg_names[sio_data->type], (unsigned int)*address, (int)superio_inb(sioaddr, SIO_REG_DEVREV)); exit: superio_exit(sioaddr); return err; } static int __init f71882fg_device_add(unsigned short address, const struct f71882fg_sio_data *sio_data) { struct resource res = { .start = address, .end = address + REGION_LENGTH - 1, .flags = IORESOURCE_IO, }; int err; f71882fg_pdev = platform_device_alloc(DRVNAME, address); if (!f71882fg_pdev) return -ENOMEM; res.name = f71882fg_pdev->name; err = acpi_check_resource_conflict(&res); if (err) goto exit_device_put; err = platform_device_add_resources(f71882fg_pdev, &res, 1); if (err) { printk(KERN_ERR DRVNAME ": Device resource addition failed\n"); goto exit_device_put; } err = platform_device_add_data(f71882fg_pdev, sio_data, sizeof(struct f71882fg_sio_data)); if (err) { printk(KERN_ERR DRVNAME ": Platform data allocation failed\n"); goto exit_device_put; } err = platform_device_add(f71882fg_pdev); if (err) { printk(KERN_ERR DRVNAME ": Device addition failed\n"); goto exit_device_put; } return 0; exit_device_put: platform_device_put(f71882fg_pdev); return err; } static int __init f71882fg_init(void) { int err = 
-ENODEV;
        unsigned short address;
        struct f71882fg_sio_data sio_data;

        memset(&sio_data, 0, sizeof(sio_data));

        if (f71882fg_find(0x2e, &address, &sio_data) &&
            f71882fg_find(0x4e, &address, &sio_data))
                goto exit;

        err = platform_driver_register(&f71882fg_driver);
        if (err)
                goto exit;

        err = f71882fg_device_add(address, &sio_data);
        if (err)
                goto exit_driver;

        return 0;

exit_driver:
        platform_driver_unregister(&f71882fg_driver);
exit:
        return err;
}

static void __exit f71882fg_exit(void)
{
        platform_device_unregister(f71882fg_pdev);
        platform_driver_unregister(&f71882fg_driver);
}

MODULE_DESCRIPTION("F71882FG Hardware Monitoring Driver");
MODULE_AUTHOR("Hans Edgington, Hans de Goede (hdegoede@redhat.com)");
MODULE_LICENSE("GPL");

module_init(f71882fg_init);
module_exit(f71882fg_exit);
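The fan_from_reg()/fan_to_reg() helpers earlier in this file drive every fan-speed sysfs conversion in the driver: the register holds a count derived from a 1,500,000 constant, so the mapping is its own inverse for non-zero values. A minimal, hypothetical user-space sketch of that same arithmetic (not part of the driver) follows; the helper names here are made up for illustration.

/* Hypothetical stand-alone sketch of the driver's count <-> RPM conversion;
 * the 1500000 constant is taken from fan_from_reg()/fan_to_reg() above. */
#include <stdio.h>

static unsigned int sketch_fan_from_reg(unsigned int reg)
{
        return reg ? (1500000 / reg) : 0;       /* register count -> RPM */
}

static unsigned int sketch_fan_to_reg(unsigned int rpm)
{
        return rpm ? (1500000 / rpm) : 0;       /* RPM -> register count */
}

int main(void)
{
        /* A count of 1500 corresponds to 1000 RPM, and vice versa. */
        printf("%u RPM\n", sketch_fan_from_reg(1500));
        printf("%u counts\n", sketch_fan_to_reg(1000));
        return 0;
}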
steppnasty/platform_kernel_msm7x30
drivers/hwmon/f71882fg.c
C
gpl-2.0
75,324
/*
 * This file is part of the coreboot project.
 *
 * Copyright 2016 Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <assert.h>
#include <console/console.h>
#include <ec/google/chromeec/ec.h>
#include <vendorcode/google/chromeos/chromeos.h>

#define VBOOT_HASH_VSLOT 0
#define VBOOT_HASH_VSLOT_MASK (1 << (VBOOT_HASH_VSLOT))

int vboot_save_hash(void *digest, size_t digest_size)
{
        const int slot = VBOOT_HASH_VSLOT;
        uint32_t lock_status;
        int num_slots;

        /* Ensure the digests being saved match the EC's slot size. */
        assert(digest_size == EC_VSTORE_SLOT_SIZE);

        if (google_chromeec_vstore_write(slot, digest, digest_size))
                return -1;

        /* Assert the slot is locked on successful write. */
        num_slots = google_chromeec_vstore_info(&lock_status);
        /* Normalize to be 0 based. If num_slots returned 0 then it'll be -1. */
        num_slots--;
        if (num_slots < slot) {
                printk(BIOS_ERR, "Not enough vstore slots for vboot hash: %d\n",
                       num_slots + 1);
                return -1;
        }

        if ((lock_status & VBOOT_HASH_VSLOT_MASK) == 0) {
                printk(BIOS_ERR, "Vstore slot not locked after write.\n");
                return -1;
        }

        return 0;
}

int vboot_retrieve_hash(void *digest, size_t digest_size)
{
        /* Ensure the digests being retrieved match the EC's slot size. */
        assert(digest_size == EC_VSTORE_SLOT_SIZE);

        return google_chromeec_vstore_read(VBOOT_HASH_VSLOT, digest);
}
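A hypothetical caller-side sketch of the two helpers above (not part of the coreboot file): it assumes only what the file itself already includes, namely EC_VSTORE_SLOT_SIZE from the Chrome EC headers and printk(BIOS_ERR, ...) from console/console.h. vboot_save_hash() clearly returns non-zero on failure; the same convention is assumed for vboot_retrieve_hash() here.

/* Hypothetical usage sketch; the digest contents are placeholders and the
 * buffer size mirrors the assert()s above (EC_VSTORE_SLOT_SIZE). */
static void vboot_hash_roundtrip_example(void)
{
        uint8_t digest[EC_VSTORE_SLOT_SIZE] = { 0 };

        if (vboot_save_hash(digest, sizeof(digest)))
                printk(BIOS_ERR, "example: saving vboot hash failed\n");

        /* Assumption: a non-zero return signals failure here as well. */
        if (vboot_retrieve_hash(digest, sizeof(digest)))
                printk(BIOS_ERR, "example: retrieving vboot hash failed\n");
}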
coolstar/coreboot
src/ec/google/chromeec/vboot_storage.c
C
gpl-2.0
1,784
<?php /* Icinga Web 2 | (c) 2013-2015 Icinga Development Team | GPLv2+ */ namespace Icinga\Module\Monitoring\Timeline; use DateTime; use Exception; use ArrayIterator; use Icinga\Exception\IcingaException; use IteratorAggregate; use Icinga\Data\Filter\Filter; use Icinga\Web\Hook; use Icinga\Web\Session\SessionNamespace; use Icinga\Module\Monitoring\DataView\DataView; /** * Represents a set of events in a specific range of time */ class TimeLine implements IteratorAggregate { /** * The resultset returned by the dataview * * @var array */ private $resultset; /** * The groups this timeline uses for display purposes * * @var array */ private $displayGroups; /** * The session to use * * @var SessionNamespace */ protected $session; /** * The base that is used to calculate each circle's diameter * * @var float */ protected $calculationBase; /** * The dataview to fetch entries from * * @var DataView */ protected $dataview; /** * The names by which to group entries * * @var array */ protected $identifiers; /** * The range of time for which to display entries * * @var TimeRange */ protected $displayRange; /** * The range of time for which to calculate forecasts * * @var TimeRange */ protected $forecastRange; /** * The maximum diameter each circle can have * * @var float */ protected $circleDiameter = 100.0; /** * The minimum diameter each circle can have * * @var float */ protected $minCircleDiameter = 1.0; /** * The unit of a circle's diameter * * @var string */ protected $diameterUnit = 'px'; /** * Return a iterator for this timeline * * @return ArrayIterator */ public function getIterator() { return new ArrayIterator($this->toArray()); } /** * Create a new timeline * * The given dataview must provide the following columns: * - name A string identifying an entry (Corresponds to the keys of "$identifiers") * - time A unix timestamp that defines where to place an entry on the timeline * * @param DataView $dataview The dataview to fetch entries from * @param array $identifiers The names by which to group entries */ public function __construct(DataView $dataview, array $identifiers) { $this->dataview = $dataview; $this->identifiers = $identifiers; } /** * Set the session to use * * @param SessionNamespace $session The session to use */ public function setSession(SessionNamespace $session) { $this->session = $session; } /** * Set the range of time for which to display elements * * @param TimeRange $range The range of time for which to display elements */ public function setDisplayRange(TimeRange $range) { $this->displayRange = $range; } /** * Set the range of time for which to calculate forecasts * * @param TimeRange $range The range of time for which to calculate forecasts */ public function setForecastRange(TimeRange $range) { $this->forecastRange = $range; } /** * Set the maximum diameter each circle can have * * @param string $width The diameter to set, suffixed with its unit * * @throws Exception If the given diameter is invalid */ public function setMaximumCircleWidth($width) { $matches = array(); if (preg_match('#([\d|\.]+)([a-z]+|%)#', $width, $matches)) { $this->circleDiameter = floatval($matches[1]); $this->diameterUnit = $matches[2]; } else { throw new IcingaException( 'Width "%s" is not a valid width', $width ); } } /** * Set the minimum diameter each circle can have * * @param string $width The diameter to set, suffixed with its unit * * @throws Exception If the given diameter is invalid or its unit differs from the maximum */ public function setMinimumCircleWidth($width) { $matches = 
array(); if (preg_match('#([\d|\.]+)([a-z]+|%)#', $width, $matches)) { if ($matches[2] === $this->diameterUnit) { $this->minCircleDiameter = floatval($matches[1]); } else { throw new IcingaException( 'Unit needs to be in "%s"', $this->diameterUnit ); } } else { throw new IcingaException( 'Width "%s" is not a valid width', $width ); } } /** * Return all known group types (identifiers) with their respective labels and colors as array * * @return array */ public function getGroupInfo() { $groupInfo = array(); foreach ($this->identifiers as $name => $attributes) { $groupInfo[$name]['label'] = $attributes['label']; $groupInfo[$name]['color'] = $attributes['color']; } return $groupInfo; } /** * Return the circle's diameter for the given event group * * @param TimeEntry $group The group for which to return a circle width * @param int $precision Amount of decimal places to preserve * * @return string */ public function calculateCircleWidth(TimeEntry $group, $precision = 0) { $base = $this->getCalculationBase(true); $factor = log($group->getValue() * $group->getWeight(), $base) / 100; $width = $this->circleDiameter * $factor; return sprintf( '%.' . $precision . 'F%s', $width > $this->minCircleDiameter ? $width : $this->minCircleDiameter, $this->diameterUnit ); } /** * Return an extrapolated circle width for the given event group * * @param TimeEntry $group The event group for which to return an extrapolated circle width * @param int $precision Amount of decimal places to preserve * * @return string */ public function getExtrapolatedCircleWidth(TimeEntry $group, $precision = 0) { $eventCount = 0; foreach ($this->displayGroups as $groups) { if (array_key_exists($group->getName(), $groups)) { $eventCount += $groups[$group->getName()]->getValue(); } } $extrapolatedCount = (int) $eventCount / count($this->displayGroups); if ($extrapolatedCount < $group->getValue()) { return $this->calculateCircleWidth($group, $precision); } return $this->calculateCircleWidth( TimeEntry::fromArray( array( 'value' => $extrapolatedCount, 'weight' => $group->getWeight() ) ), $precision ); } /** * Return the base that should be used to calculate circle widths * * @param bool $create Whether to generate a new base if none is known yet * * @return float|null */ public function getCalculationBase($create) { if ($this->calculationBase === null) { $calculationBase = $this->session !== null ? 
$this->session->get('calculationBase') : null; if ($create) { $new = $this->generateCalculationBase(); if ($new > $calculationBase) { $this->calculationBase = $new; if ($this->session !== null) { $this->session->calculationBase = $new; } } else { $this->calculationBase = $calculationBase; } } else { return $calculationBase; } } return $this->calculationBase; } /** * Generate a new base to calculate circle widths with * * @return float */ protected function generateCalculationBase() { $allEntries = $this->groupEntries( array_merge( $this->fetchEntries(), $this->fetchForecasts() ), new TimeRange( $this->displayRange->getStart(), $this->forecastRange->getEnd(), $this->displayRange->getInterval() ) ); $highestValue = 0; foreach ($allEntries as $groups) { foreach ($groups as $group) { if ($group->getValue() * $group->getWeight() > $highestValue) { $highestValue = $group->getValue() * $group->getWeight(); } } } return pow($highestValue, 1 / 100); // 100 == 100% } /** * Fetch all entries and forecasts by using the dataview associated with this timeline * * @return array The dataview's result */ private function fetchResults() { $hookResults = array(); foreach (Hook::all('timeline') as $timelineProvider) { $hookResults = array_merge( $hookResults, $timelineProvider->fetchEntries($this->displayRange), $timelineProvider->fetchForecasts($this->forecastRange) ); foreach ($timelineProvider->getIdentifiers() as $identifier => $attributes) { if (!array_key_exists($identifier, $this->identifiers)) { $this->identifiers[$identifier] = $attributes; } } } $query = $this->dataview; $filter = Filter::matchAll( Filter::where('type', array_keys($this->identifiers)), Filter::expression('timestamp', '<=', $this->displayRange->getStart()->getTimestamp()), Filter::expression('timestamp', '>', $this->displayRange->getEnd()->getTimestamp()) ); $query->applyFilter($filter); return array_merge($query->getQuery()->fetchAll(), $hookResults); } /** * Fetch all entries * * @return array The entries to display on the timeline */ protected function fetchEntries() { if ($this->resultset === null) { $this->resultset = $this->fetchResults(); } $range = $this->displayRange; return array_filter( $this->resultset, function ($e) use ($range) { return $range->validateTime($e->time); } ); } /** * Fetch all forecasts * * @return array The entries to calculate forecasts with */ protected function fetchForecasts() { if ($this->resultset === null) { $this->resultset = $this->fetchResults(); } $range = $this->forecastRange; return array_filter( $this->resultset, function ($e) use ($range) { return $range->validateTime($e->time); } ); } /** * Return the given entries grouped together * * @param array $entries The entries to group * @param TimeRange $timeRange The range of time to group by * * @return array displayGroups The grouped entries */ protected function groupEntries(array $entries, TimeRange $timeRange) { $counts = array(); foreach ($entries as $entry) { $entryTime = new DateTime(); $entryTime->setTimestamp($entry->time); $timestamp = $timeRange->findTimeframe($entryTime, true); if ($timestamp !== null) { if (array_key_exists($entry->name, $counts)) { if (array_key_exists($timestamp, $counts[$entry->name])) { $counts[$entry->name][$timestamp] += 1; } else { $counts[$entry->name][$timestamp] = 1; } } else { $counts[$entry->name][$timestamp] = 1; } } } $groups = array(); foreach ($counts as $name => $data) { foreach ($data as $timestamp => $count) { $dateTime = new DateTime(); $dateTime->setTimestamp($timestamp); 
$groups[$timestamp][$name] = TimeEntry::fromArray( array_merge( $this->identifiers[$name], array( 'name' => $name, 'value' => $count, 'dateTime' => $dateTime ) ) ); } } return $groups; } /** * Return the contents of this timeline as array * * @return array */ protected function toArray() { $this->displayGroups = $this->groupEntries($this->fetchEntries(), $this->displayRange); $array = array(); foreach ($this->displayRange as $timestamp => $timeframe) { $array[] = array( $timeframe, array_key_exists($timestamp, $this->displayGroups) ? $this->displayGroups[$timestamp] : array() ); } return $array; } }
JakobGM/icingaweb2
modules/monitoring/library/Monitoring/Timeline/TimeLine.php
PHP
gpl-2.0
13,721
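The TimeLine class above is driven entirely through its constructor, the range/width setters, and iteration. A minimal usage sketch follows; $eventView, $displayRange, $forecastRange and the identifier entries are placeholders for illustration and are not taken from this file, but the call shapes (constructor arguments, setters, the per-step [timeframe, groups] pairs) follow the code shown above.

<?php
// Hypothetical wiring of the TimeLine class shown above. $eventView, $displayRange
// and $forecastRange are placeholders; the identifier entries are made up, but their
// shape (label/color per name) follows getGroupInfo() above.
use Icinga\Module\Monitoring\Timeline\TimeLine;

$timeline = new TimeLine(
    $eventView, // any DataView exposing the documented "name" and "time" columns
    array(
        'notify'     => array('label' => 'Notifications',      'color' => '#3a71ea'),
        'hard_state' => array('label' => 'Hard state changes', 'color' => '#ff7000')
    )
);
$timeline->setDisplayRange($displayRange);   // TimeRange instances, see the setters above
$timeline->setForecastRange($forecastRange);
$timeline->setMaximumCircleWidth('100px');
$timeline->setMinimumCircleWidth('1px');

foreach ($timeline as $step) {
    list($timeframe, $groups) = $step;       // one timeframe plus its grouped entries
    foreach ($groups as $group) {
        echo $group->getName(), ' => ', $timeline->calculateCircleWidth($group, 2), "\n";
    }
}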
<?PHP // $Id: lib.php,v 1.1.2.1 2006/01/03 16:01:22 andreas Exp $ function show_edit_dropdown($item, $usehtmleditor = false) { $item->presentation=empty($item->presentation)?'':$item->presentation; ?> <table> <tr> <th colspan="2"><?php print_string('dropdownlist', 'feedback');?> &nbsp;(<input type="checkbox" name="required" value="1" <?php $item->required=isset($item->required)?$item->required:0; echo ($item->required == 1?'checked="checked"':''); ?> />&nbsp;<?php print_string('required', 'feedback');?>) </th> </tr> <tr> <td><?php print_string('item_name', 'feedback');?></td> <td><input type="text" id="itemname" name="itemname" size="40" maxlength="255" value="<?php echo isset($item->name)?stripslashes_safe($item->name):'';?>" /></td> </tr> <tr> <td> <?php print_string('dropdown_values', 'feedback');?> <?php print_string('use_one_line_for_each_value', 'feedback');?> </td> <td> <?php $itemvalues = str_replace('|', "\n", stripslashes_safe($item->presentation)); ?> <textarea name="itemvalues" cols="30" rows="5"><?php echo $itemvalues;?></textarea> </td> </tr> </table> <?php } //returns a one-dimensional array with three values (typ, name, XXX) //XXX is a one-dimensional array (number of answers for the DropDown type); each element is a structure (answertext, answercount) function get_analysed_dropdown($item, $groupid = false) { $analysedItem = array(); $analysedItem[] = $item->typ; $analysedItem[] = $item->name; //extract the possible answers $answers = null; $answers = explode ("|", stripslashes_safe($item->presentation)); if(!is_array($answers)) return null; //fetch the values //$values = get_records('feedback_value', 'item', $item->id); $values = get_feedback_group_values($item, $groupid); if(!$values) return null; //loop over the values and over the possible answers $analysedAnswer = array(); for($i = 1; $i <= sizeof($answers); $i++) { $ans = null; $ans->answertext = $answers[$i-1]; $ans->answercount = 0; foreach($values as $value) { //is the answer equal to the answer index + 1? if ($value->value == $i) { $ans->answercount++; } } $ans->quotient = $ans->answercount / sizeof($values); $analysedAnswer[] = $ans; } $analysedItem[] = $analysedAnswer; return $analysedItem; } function get_feedback_printval_dropdown($item, $value) { $printval = ''; $presentation = explode ("|", stripslashes_safe($item->presentation)); $index = 1; foreach($presentation as $pres){ if($value->value == $index){ $printval = $pres; break; } $index++; } return $printval; } function print_analysed_dropdown($item, $itemnr = 0, $groupid = false) { $analysedItem = get_analysed_dropdown($item, $groupid); if($analysedItem) { //echo '<table>'; $itemnr++; echo '<tr><th colspan="2">'. $itemnr . '.)&nbsp;' . $analysedItem[1] .'</th></tr>'; $analysedVals = $analysedItem[2]; $pixnr = 0; foreach($analysedVals as $val) { if( function_exists("bcmod")) { $pix = 'pics/' . bcmod($pixnr, 10) . '.gif'; }else { $pix = 'pics/0.gif'; } $pixnr++; $pixwidth = intval($val->quotient * feedback_MAX_PIX_LENGTH); $quotient = number_format(($val->quotient * 100), 2, ',', '.'); echo '<tr><td align="right"><b>' . trim($val->answertext) . ':</b></td><td align="left"><img style=" vertical-align: baseline;" src="'.$pix.'" height="5" width="'.$pixwidth.'" />&nbsp;' . $val->answercount . (($val->quotient > 0)?'&nbsp;('. $quotient . 
'&nbsp;%)':'').'</td></tr>'; } //echo '</table>'; } return $itemnr; } function excelprint_item_dropdown(&$worksheet, $rowOffset, $item, $groupid) { $analysed_item = get_analysed_dropdown($item, $groupid); $data = $analysed_item[2]; $worksheet->setFormat("<l><f><ro2><vo><c:green>"); //write the question $worksheet->write_string($rowOffset, 0, $analysed_item[1]); if(is_array($data)) { for($i = 0; $i < sizeof($data); $i++) { $aData = $data[$i]; $worksheet->setFormat("<l><f><ro2><vo><c:blue>"); $worksheet->write_string($rowOffset, $i + 1, trim($aData->answertext)); $worksheet->setFormat("<l><vo>"); $worksheet->write_number($rowOffset + 1, $i + 1, $aData->answercount); $worksheet->setFormat("<l><f><vo><pr>"); $worksheet->write_number($rowOffset + 2, $i + 1, $aData->quotient); } } $rowOffset +=3 ; return $rowOffset; } function print_feedback_dropdown($item, $value = false, $readonly = false){ $presentation = explode ("|", stripslashes_safe($item->presentation)); $requiredmark = ($item->required == 1)?'<font color="red">*</font>':''; ?> <td valign="top" align="left"><?php echo text_to_html(stripslashes_safe($item->name) . $requiredmark, true, false, false);?></td> <td valign="top" align="left"> <?php $index = 1; $selected = ''; if($readonly){ foreach($presentation as $dropdown){ if($value == $index){ print_simple_box_start('left'); echo text_to_html($dropdown, true, false, false); print_simple_box_end(); break; } $index++; } } else { ?> <select name="<?php echo $item->typ .'_' . $item->id;?>" size="1"> <option value="0">&nbsp;</option> <?php foreach($presentation as $dropdown){ if($value == $index){ $selected = 'selected="selected"'; }else{ $selected = ''; } ?> <option value="<?php echo $index;?>" <?php echo $selected;?>><?php echo text_to_html($dropdown, true, false, false);?></option> <?php $index++; } ?> </select> <?php } ?> </td> <?php } function check_feedback_value_dropdown($value) { if($value == 0)return false; return true; } function create_feedback_value_dropdown($data) { return $data; } function get_presentation_dropdown($data) { $present = str_replace("\n", '|', trim($data->itemvalues)); return $present; } function get_feedback_hasvalue_dropdown() { return 1; } ?>
ewout/moodle-atpusp
theme/aardvark15/pix/mod/feedback/item/dropdown/lib.php
PHP
gpl-2.0
6,458
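The pipe-separated presentation string is the piece of state every function above shares: the editor joins one answer per textarea line with '|' (get_presentation_dropdown), and the analysis side splits it again and counts 1-based answer indexes against it. The standalone sketch below illustrates that format; the sample values and variable names are local to the sketch, not part of the module.

<?php
// Standalone illustration of the dropdown item's data format; sample data is made up.
$itemvalues   = "red\ngreen\nblue";                        // one answer per textarea line
$presentation = str_replace("\n", '|', trim($itemvalues)); // stored as "red|green|blue"

$answers   = explode('|', $presentation);
$counts    = array_fill(1, count($answers), 0);            // responses are 1-based indexes
$responses = array(1, 3, 3, 2, 3);                         // pretend feedback_value rows
foreach ($responses as $value) {
    $counts[$value]++;
}
foreach ($answers as $i => $text) {
    printf("%s: %d (%.1f%%)\n", $text, $counts[$i + 1], 100 * $counts[$i + 1] / count($responses));
}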
/* * arch/arm/mach-tegra/sysfs-cluster.c * * Copyright (c) 2010-2013, NVIDIA CORPORATION, All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ /* * This driver creates the /sys/kernel/cluster node and attributes for CPU * switch testing. Node attributes: * * active: currently active CPU (G or LP) * write: 'g' = switch to G CPU * 'lp' = switch to LP CPU * 'toggle' = switch to the other CPU * read: returns the currently active CPU (g or lp) * * force: force switch even if already on target CPU * write: '0' = do not perform switch if * active CPU == target CPU (default) * '1' = force switch regardless of * currently active CPU * read: returns the current status of the force flag * * immediate: request immediate wake-up from switch request * write: '0' = non-immediate wake-up on next interrupt (default) * '1' = immediate wake-up * read: returns the current status of the immediate flag * * power_mode: power mode to use for switch (LP1 or LP2) * write: '1' = use LP1 power mode * '2' = use LP2 power mode (default) * read: returns the current status of the immediate flag * * wake_ms: wake time (in milliseconds) -- ignored if immediate==1 * write: '0' = wake up at the next non-timer interrupt * 'n' = (n > 0) wake-up after 'n' milliseconds or the * next non-timer interrupt (whichever comes first) * read: returns the current wake_ms value * * power_gate: additional power gate partitions * write: 'none' = no additional partitions * 'noncpu' = CxNC partition * 'crail' = CRAIL partition (implies noncpu also, default) * read: returns the current power_gate value * * Writing the force, immediate and wake_ms attributes simply updates the * state of internal variables that will be used for the next switch request. * Writing to the active attribute initates a switch request using the * current values of the force, immediate, and wake_ms attributes. * * The OS tick timer is not a valid interrupt source for waking up following * a switch request. This is because the kernel uses local timers that are * part of the CPU complex. These get shut down when the CPU complex is * placed into reset by the switch request. If you want a timed wake up * from a switch, you must specify a positive wake_ms value. This will * ensure that a non-local timer is programmed to fire an interrupt * after the desired interval. 
* */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sysfs.h> #include <linux/kobject.h> #include <linux/smp.h> #include <linux/io.h> #include <linux/clk.h> #include <mach/iomap.h> #include "clock.h" #include "sleep.h" #include "pm.h" #define SYSFS_CLUSTER_PRINTS 1 /* Nonzero: enable status prints */ #define SYSFS_CLUSTER_TRACE_PRINTS 0 /* Nonzero: enable trace prints */ #define SYSFS_CLUSTER_POWER_MODE 1 /* Nonzero: use power modes other than LP2*/ #if SYSFS_CLUSTER_TRACE_PRINTS #define TRACE_CLUSTER(x) printk x #else #define TRACE_CLUSTER(x) #endif #if SYSFS_CLUSTER_PRINTS #define PRINT_CLUSTER(x) printk x #else #define PRINT_CLUSTER(x) #endif static struct kobject *cluster_kobj; static spinlock_t cluster_lock; static unsigned int flags = 0; static unsigned int wake_ms = 0; static ssize_t sysfscluster_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf); static ssize_t sysfscluster_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count); /* Active CPU: "G", "LP", "toggle" */ static struct kobj_attribute cluster_active_attr = __ATTR(active, 0640, sysfscluster_show, sysfscluster_store); /* Immediate wake-up when performing switch: 0, 1 */ static struct kobj_attribute cluster_immediate_attr = __ATTR(immediate, 0640, sysfscluster_show, sysfscluster_store); /* Force power transition even if already on the desired CPU: 0, 1 */ static struct kobj_attribute cluster_force_attr = __ATTR(force, 0640, sysfscluster_show, sysfscluster_store); /* Wake time (in milliseconds) */ static struct kobj_attribute cluster_wake_ms_attr = __ATTR(wake_ms, 0640, sysfscluster_show, sysfscluster_store); #if defined(CONFIG_PM_SLEEP) && SYSFS_CLUSTER_POWER_MODE /* LPx power mode to use when switching CPUs: 1=LP1, 2=LP2 */ static unsigned int power_mode = 2; static struct kobj_attribute cluster_powermode_attr = __ATTR(power_mode, 0640, sysfscluster_show, sysfscluster_store); #endif #ifdef CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE /* Additional partitions to power gate. 
*/ static unsigned int power_gate = TEGRA_POWER_CLUSTER_PART_CRAIL; static struct kobj_attribute cluster_powergate_attr = __ATTR(power_gate, 0640, sysfscluster_show, sysfscluster_store); static const char *decode_power_gate(unsigned int mode) { if (mode & TEGRA_POWER_CLUSTER_PART_CRAIL) return "crail"; else if (mode & TEGRA_POWER_CLUSTER_PART_NONCPU) return "noncpu"; else return "none"; } #endif #if DEBUG_CLUSTER_SWITCH unsigned int tegra_cluster_debug = 0; static struct kobj_attribute cluster_debug_attr = __ATTR(debug, 0640, sysfscluster_show, sysfscluster_store); #endif typedef enum { ClusterAttr_Invalid = 0, ClusterAttr_Active, ClusterAttr_Immediate, ClusterAttr_Force, ClusterAttr_WakeMs, #if defined(CONFIG_PM_SLEEP) && SYSFS_CLUSTER_POWER_MODE ClusterAttr_PowerMode, #endif #ifdef CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE ClusterAttr_PowerGate, #endif #if DEBUG_CLUSTER_SWITCH ClusterAttr_Debug #endif } ClusterAttr; static ClusterAttr GetClusterAttr(const char *name) { if (!strcmp(name, "active")) return ClusterAttr_Active; if (!strcmp(name, "immediate")) return ClusterAttr_Immediate; if (!strcmp(name, "force")) return ClusterAttr_Force; if (!strcmp(name, "wake_ms")) return ClusterAttr_WakeMs; #if defined(CONFIG_PM_SLEEP) && SYSFS_CLUSTER_POWER_MODE if (!strcmp(name, "power_mode")) return ClusterAttr_PowerMode; #endif #ifdef CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE if (!strcmp(name, "power_gate")) return ClusterAttr_PowerGate; #endif #if DEBUG_CLUSTER_SWITCH if (!strcmp(name, "debug")) return ClusterAttr_Debug; #endif TRACE_CLUSTER(("GetClusterAttr(%s): invalid\n", name)); return ClusterAttr_Invalid; } static ssize_t sysfscluster_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { ClusterAttr type; ssize_t len; TRACE_CLUSTER(("+sysfscluster_show\n")); type = GetClusterAttr(attr->attr.name); switch (type) { case ClusterAttr_Active: len = sprintf(buf, "%s\n", is_lp_cluster() ? "LP" : "G"); break; case ClusterAttr_Immediate: len = sprintf(buf, "%d\n", ((flags & TEGRA_POWER_CLUSTER_IMMEDIATE) != 0)); break; case ClusterAttr_Force: len = sprintf(buf, "%d\n", ((flags & TEGRA_POWER_CLUSTER_FORCE) != 0)); break; case ClusterAttr_WakeMs: len = sprintf(buf, "%d\n", wake_ms); break; #if defined(CONFIG_PM_SLEEP) && SYSFS_CLUSTER_POWER_MODE case ClusterAttr_PowerMode: len = sprintf(buf, "%d\n", power_mode); break; #endif #ifdef CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE case ClusterAttr_PowerGate: len = sprintf(buf, "%s\n", decode_power_gate(power_gate)); break; #endif #if DEBUG_CLUSTER_SWITCH case ClusterAttr_Debug: len = sprintf(buf, "%d\n", tegra_cluster_debug); break; #endif default: len = sprintf(buf, "invalid\n"); break; } TRACE_CLUSTER(("-sysfscluster_show\n")); return len; } static ssize_t sysfscluster_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { ClusterAttr type; ssize_t ret = count--; unsigned request; int e; int tmp; int cnt; struct clk *cpu_clk = tegra_get_clock_by_name("cpu"); struct clk *cpu_g_clk = tegra_get_clock_by_name("cpu_g"); struct clk *cpu_lp_clk = tegra_get_clock_by_name("cpu_lp"); struct clk *new_parent = NULL; if (!cpu_clk || !cpu_g_clk || !cpu_lp_clk) { ret = -ENOSYS; goto fail; } TRACE_CLUSTER(("+sysfscluster_store: %p, %d\n", buf, count)); /* The count includes data bytes follow by a line feed character. 
*/ if (!buf || (count < 1)) { ret = -EINVAL; goto fail; } type = GetClusterAttr(attr->attr.name); spin_lock(&cluster_lock); switch (type) { case ClusterAttr_Active: if (!strncasecmp(buf, "g", count)) { flags &= ~TEGRA_POWER_CLUSTER_MASK; flags |= TEGRA_POWER_CLUSTER_G; } else if (!strncasecmp(buf, "lp", count)) { flags &= ~TEGRA_POWER_CLUSTER_MASK; flags |= TEGRA_POWER_CLUSTER_LP; } else if (!strncasecmp(buf, "toggle", count)) { flags &= ~TEGRA_POWER_CLUSTER_MASK; if (is_lp_cluster()) flags |= TEGRA_POWER_CLUSTER_G; else flags |= TEGRA_POWER_CLUSTER_LP; } else { PRINT_CLUSTER(("cluster/active: '%*.*s' invalid, " " must be g, lp, or toggle\n", count, count, buf)); ret = -EINVAL; break; } PRINT_CLUSTER(("cluster/active -> %s\n", (flags & TEGRA_POWER_CLUSTER_G) ? "G" : "LP")); request = flags; #ifdef CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE request |= power_gate; #endif #if defined(CONFIG_PM_SLEEP) && SYSFS_CLUSTER_POWER_MODE if (power_mode == 1) { request |= TEGRA_POWER_SDRAM_SELFREFRESH; } #endif tegra_cluster_switch_set_parameters(wake_ms * 1000, request); new_parent = (flags & TEGRA_POWER_CLUSTER_LP) ? cpu_lp_clk : cpu_g_clk; break; case ClusterAttr_Immediate: if ((count == 1) && (*buf == '0')) flags &= ~TEGRA_POWER_CLUSTER_IMMEDIATE; else if ((count == 1) && *buf == '1') flags |= TEGRA_POWER_CLUSTER_IMMEDIATE; else { PRINT_CLUSTER(("cluster/immediate: '%*.*s' invalid, " "must be 0 or 1\n", count, count, buf)); ret = -EINVAL; break; } PRINT_CLUSTER(("cluster/immediate -> %c\n", (flags & TEGRA_POWER_CLUSTER_IMMEDIATE) ? '1' : '0')); break; case ClusterAttr_Force: if ((count == 1) && (*buf == '0')) flags &= ~TEGRA_POWER_CLUSTER_FORCE; else if ((count == 1) && (*buf == '1')) flags |= TEGRA_POWER_CLUSTER_FORCE; else { PRINT_CLUSTER(("cluster/force: '%*.*s' invalid, " "must be 0 or 1\n", count, count, buf)); ret = -EINVAL; break; } PRINT_CLUSTER(("cluster/force -> %c\n", (flags & TEGRA_POWER_CLUSTER_FORCE) ? 
'1' : '0')); break; case ClusterAttr_WakeMs: tmp = 0; cnt = sscanf(buf, "%d\n", &tmp); if ((cnt != 1) || (tmp < 0)) { PRINT_CLUSTER(("cluster/wake_ms: '%*.*s' is invalid\n", count, count, buf)); ret = -EINVAL; break; } wake_ms = tmp; PRINT_CLUSTER(("cluster/wake_ms -> %d\n", wake_ms)); break; #if defined(CONFIG_PM_SLEEP) && SYSFS_CLUSTER_POWER_MODE case ClusterAttr_PowerMode: if ((count == 1) && (*buf == '2')) power_mode = 2; else if ((count == 1) && *buf == '1') power_mode = 1; else { PRINT_CLUSTER(("cluster/power_mode: '%*.*s' invalid, " "must be 2 or 1\n", count, count, buf)); ret = -EINVAL; break; } PRINT_CLUSTER(("cluster/power_mode -> %d\n", power_mode)); break; #endif #ifdef CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE case ClusterAttr_PowerGate: if (!strncasecmp(buf, "crail", count)) power_gate = TEGRA_POWER_CLUSTER_PART_CRAIL; else if (!strncasecmp(buf, "noncpu", count)) power_gate = TEGRA_POWER_CLUSTER_PART_NONCPU; else if (!strncasecmp(buf, "none", count)) power_gate = 0; else { PRINT_CLUSTER(("cluster/power_gate: '%*.*s' invalid, " "must be 'none', 'crail', or 'noncpu'\n", count, count, buf)); ret = -EINVAL; break; } PRINT_CLUSTER(("cluster/power_gate -> %s\n", decode_power_gate(power_gate))); break; #endif #if DEBUG_CLUSTER_SWITCH case ClusterAttr_Debug: if ((count == 1) && (*buf == '0')) tegra_cluster_debug = 0; else if ((count == 1) && (*buf == '1')) tegra_cluster_debug = 1; else { PRINT_CLUSTER(("cluster/debug: '%*.*s' invalid, " "must be 0 or 1\n", count, count, buf)); ret = -EINVAL; break; } PRINT_CLUSTER(("cluster/debug -> %d\n",tegra_cluster_debug)); break; #endif default: ret = -ENOENT; break; } spin_unlock(&cluster_lock); if (new_parent) { e = tegra_cluster_switch(cpu_clk, new_parent); if (e) { PRINT_CLUSTER(("cluster/active: request failed (%d)\n", e)); ret = e; } } fail: TRACE_CLUSTER(("-sysfscluster_store: %d\n", count)); return ret; } #define CREATE_FILE(x) \ do { \ e = sysfs_create_file(cluster_kobj, &cluster_##x##_attr.attr); \ if (e) { \ TRACE_CLUSTER(("cluster/" __stringify(x) \ ": sysfs_create_file failed!\n")); \ goto fail; \ } \ } while (0) static int __init sysfscluster_init(void) { int e; TRACE_CLUSTER(("+sysfscluster_init\n")); spin_lock_init(&cluster_lock); cluster_kobj = kobject_create_and_add("cluster", kernel_kobj); CREATE_FILE(active); CREATE_FILE(immediate); CREATE_FILE(force); CREATE_FILE(wake_ms); #if defined(CONFIG_PM_SLEEP) && SYSFS_CLUSTER_POWER_MODE CREATE_FILE(powermode); #endif #ifdef CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE CREATE_FILE(powergate); #endif #if DEBUG_CLUSTER_SWITCH CREATE_FILE(debug); #endif spin_lock(&cluster_lock); if (is_lp_cluster()) flags |= TEGRA_POWER_CLUSTER_LP; else flags |= TEGRA_POWER_CLUSTER_G; spin_unlock(&cluster_lock); fail: TRACE_CLUSTER(("-sysfscluster_init\n")); return e; } #define REMOVE_FILE(x) \ sysfs_remove_file(cluster_kobj, &cluster_##x##_attr.attr) static void __exit sysfscluster_exit(void) { TRACE_CLUSTER(("+sysfscluster_exit\n")); #if DEBUG_CLUSTER_SWITCH REMOVE_FILE(debug); #endif #if defined(CONFIG_PM_SLEEP) && SYSFS_CLUSTER_POWER_MODE REMOVE_FILE(powermode); #endif REMOVE_FILE(wake_ms); REMOVE_FILE(force); REMOVE_FILE(immediate); REMOVE_FILE(active); kobject_del(cluster_kobj); TRACE_CLUSTER(("-sysfscluster_exit\n")); } module_init(sysfscluster_init); module_exit(sysfscluster_exit); MODULE_LICENSE("GPL");
pershoot/android_kernel_asus_tf701t
arch/arm/mach-tegra/sysfs-cluster.c
C
gpl-2.0
14,155
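The header comment above documents the /sys/kernel/cluster attributes precisely enough to drive a cluster switch from userspace. The sketch below walks through one such request; it is written in PHP only to keep every example in this section in a single language (plain echo/cat from a shell behaves identically), and it assumes the driver is loaded and the caller has permission to write the nodes.

<?php
// Userspace walk-through of the sysfs interface documented above; values are illustrative.
$base = '/sys/kernel/cluster/';
file_put_contents($base . 'immediate', "0\n");         // wake on the next non-timer interrupt
file_put_contents($base . 'wake_ms',   "500\n");       // ...but no later than 500 ms
file_put_contents($base . 'force',     "0\n");         // skip the switch if already on the target CPU
file_put_contents($base . 'active',    "toggle\n");    // request the G <-> LP switch
echo trim(file_get_contents($base . 'active')), "\n";  // reports "G" or "LP"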
<?php namespace React\Tests\Socket; use React\Socket\Server; use React\EventLoop\StreamSelectLoop; use React\Stream\Stream; class ServerTest extends TestCase { private $loop; private $server; private $port; private function createLoop() { return new StreamSelectLoop(); } /** * @covers React\Socket\Server::__construct * @covers React\Socket\Server::listen * @covers React\Socket\Server::getPort */ public function setUp() { $this->loop = $this->createLoop(); $this->server = new Server($this->loop); $this->server->listen(0); $this->port = $this->server->getPort(); } /** * @covers React\EventLoop\StreamSelectLoop::tick * @covers React\Socket\Server::handleConnection * @covers React\Socket\Server::createConnection */ public function testConnection() { $client = stream_socket_client('tcp://localhost:'.$this->port); $this->server->on('connection', $this->expectCallableOnce()); $this->loop->tick(); } /** * @covers React\EventLoop\StreamSelectLoop::tick * @covers React\Socket\Server::handleConnection * @covers React\Socket\Server::createConnection */ public function testConnectionWithManyClients() { $client1 = stream_socket_client('tcp://localhost:'.$this->port); $client2 = stream_socket_client('tcp://localhost:'.$this->port); $client3 = stream_socket_client('tcp://localhost:'.$this->port); $this->server->on('connection', $this->expectCallableExactly(3)); $this->loop->tick(); $this->loop->tick(); $this->loop->tick(); } /** * @covers React\EventLoop\StreamSelectLoop::tick * @covers React\Socket\Connection::handleData */ public function testDataEventWillNotBeEmittedWhenClientSendsNoData() { $client = stream_socket_client('tcp://localhost:'.$this->port); $mock = $this->expectCallableNever(); $this->server->on('connection', function ($conn) use ($mock) { $conn->on('data', $mock); }); $this->loop->tick(); $this->loop->tick(); } /** * @covers React\EventLoop\StreamSelectLoop::tick * @covers React\Socket\Connection::handleData */ public function testDataWillBeEmittedWithDataClientSends() { $client = stream_socket_client('tcp://localhost:'.$this->port); fwrite($client, "foo\n"); $mock = $this->expectCallableOnceWith("foo\n"); $this->server->on('connection', function ($conn) use ($mock) { $conn->on('data', $mock); }); $this->loop->tick(); $this->loop->tick(); } /** * @covers React\EventLoop\StreamSelectLoop::tick * @covers React\Socket\Connection::handleData */ public function testDataWillBeEmittedEvenWhenClientShutsDownAfterSending() { $client = stream_socket_client('tcp://localhost:' . $this->port); fwrite($client, "foo\n"); stream_socket_shutdown($client, STREAM_SHUT_WR); $mock = $this->expectCallableOnceWith("foo\n"); $this->server->on('connection', function ($conn) use ($mock) { $conn->on('data', $mock); }); $this->loop->tick(); $this->loop->tick(); } public function testDataWillBeFragmentedToBufferSize() { $client = stream_socket_client('tcp://localhost:' . $this->port); fwrite($client, "Hello World!\n"); $mock = $this->expectCallableOnceWith("He"); $this->server->on('connection', function ($conn) use ($mock) { $conn->bufferSize = 2; $conn->on('data', $mock); }); $this->loop->tick(); $this->loop->tick(); } public function testLoopWillEndWhenServerIsShutDown() { // explicitly unset server because we already call shutdown() $this->server->shutdown(); $this->server = null; $this->loop->run(); } public function testLoopWillEndWhenServerIsShutDownAfterSingleConnection() { $client = stream_socket_client('tcp://localhost:' . 
$this->port); // explicitly unset server because we only accept a single connection // and then already call shutdown() $server = $this->server; $this->server = null; $server->on('connection', function ($conn) use ($server) { $conn->close(); $server->shutdown(); }); $this->loop->run(); } public function testDataWillBeEmittedInMultipleChunksWhenClientSendsExcessiveAmounts() { $client = stream_socket_client('tcp://localhost:' . $this->port); $stream = new Stream($client, $this->loop); $bytes = 1024 * 1024; $stream->end(str_repeat('*', $bytes)); $mock = $this->expectCallableOnce(); // explicitly unset server because we only accept a single connection // and then already call shutdown() $server = $this->server; $this->server = null; $received = 0; $server->on('connection', function ($conn) use ($mock, &$received, $server) { // count number of bytes received $conn->on('data', function ($data) use (&$received) { $received += strlen($data); }); $conn->on('end', $mock); // do not await any further connections in order to let the loop terminate $server->shutdown(); }); $this->loop->run(); $this->assertEquals($bytes, $received); } /** * @covers React\EventLoop\StreamSelectLoop::tick */ public function testConnectionDoesNotEndWhenClientDoesNotClose() { $client = stream_socket_client('tcp://localhost:'.$this->port); $mock = $this->expectCallableNever(); $this->server->on('connection', function ($conn) use ($mock) { $conn->on('end', $mock); }); $this->loop->tick(); $this->loop->tick(); } /** * @covers React\EventLoop\StreamSelectLoop::tick * @covers React\Socket\Connection::end */ public function testConnectionDoesEndWhenClientCloses() { $client = stream_socket_client('tcp://localhost:'.$this->port); fclose($client); $mock = $this->expectCallableOnce(); $this->server->on('connection', function ($conn) use ($mock) { $conn->on('end', $mock); }); $this->loop->tick(); $this->loop->tick(); } /** * @expectedException React\Socket\ConnectionException */ public function testListenOnBusyPortThrows() { $another = new Server($this->loop); $another->listen($this->port); } /** * @covers React\Socket\Server::shutdown */ public function tearDown() { if ($this->server) { $this->server->shutdown(); } } }
cuongnd/banhangonline88_joomla
libraries/vendor/react/socket/tests/ServerTest.php
PHP
gpl-2.0
7,009
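The test case above exercises a Server instance with raw stream_socket_client() calls; for context, a minimal standalone server against the same 0.3-era react/socket API looks roughly like the sketch below. The autoload path and the echo behaviour are assumptions for illustration, not taken from the test.

<?php
// Minimal echo server using the same Server/Connection API the tests rely on;
// 'vendor/autoload.php' is an assumed Composer autoload path.
require 'vendor/autoload.php';

$loop   = new React\EventLoop\StreamSelectLoop();
$server = new React\Socket\Server($loop);

$server->on('connection', function ($conn) {
    $conn->on('data', function ($data) use ($conn) {
        $conn->write($data); // echo back whatever the client sent
    });
});

$server->listen(0); // port 0 lets the OS pick a free port
echo 'Listening on port ', $server->getPort(), "\n";
$loop->run();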
/* Copyright (c) 2008-2009, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include "mdp.h" #if defined(CONFIG_MACH_JENA) extern unsigned long mdp_timer_duration; extern boolean mdp_continues_display; #endif /* mdp primary csc limit vector */ uint32 mdp_plv[] = { 0x10, 0xeb, 0x10, 0xf0 }; /* Color Coefficient matrix for YUV -> RGB */ struct mdp_ccs mdp_ccs_yuv2rgb = { MDP_CCS_YUV2RGB, { 0x254, 0x000, 0x331, 0x254, 0xff38, 0xfe61, 0x254, 0x409, 0x000, }, { #ifdef CONFIG_FB_MSM_MDP31 0x1f0, 0x180, 0x180 #else 0x10, 0x80, 0x80 #endif } }; /* Color Coefficient matrix for RGB -> YUV */ struct mdp_ccs mdp_ccs_rgb2yuv = { MDP_CCS_RGB2YUV, { 0x83, 0x102, 0x32, 0xffb5, 0xff6c, 0xe1, 0xe1, 0xff45, 0xffdc, }, #ifdef CONFIG_FB_MSM_MDP31 { 0x10, 0x80, 0x80 } #endif }; static void mdp_load_lut_param(void) { outpdw(MDP_BASE + 0x40800, 0x0); outpdw(MDP_BASE + 0x40804, 0x151515); outpdw(MDP_BASE + 0x40808, 0x1d1d1d); outpdw(MDP_BASE + 0x4080c, 0x232323); outpdw(MDP_BASE + 0x40810, 0x272727); outpdw(MDP_BASE + 0x40814, 0x2b2b2b); outpdw(MDP_BASE + 0x40818, 0x2f2f2f); outpdw(MDP_BASE + 0x4081c, 0x333333); outpdw(MDP_BASE + 0x40820, 0x363636); outpdw(MDP_BASE + 0x40824, 0x393939); outpdw(MDP_BASE + 0x40828, 0x3b3b3b); outpdw(MDP_BASE + 0x4082c, 0x3e3e3e); outpdw(MDP_BASE + 0x40830, 0x404040); outpdw(MDP_BASE + 0x40834, 0x434343); outpdw(MDP_BASE + 0x40838, 0x454545); outpdw(MDP_BASE + 0x4083c, 0x474747); outpdw(MDP_BASE + 0x40840, 0x494949); outpdw(MDP_BASE + 0x40844, 0x4b4b4b); outpdw(MDP_BASE + 0x40848, 0x4d4d4d); outpdw(MDP_BASE + 0x4084c, 0x4f4f4f); outpdw(MDP_BASE + 0x40850, 0x515151); outpdw(MDP_BASE + 0x40854, 0x535353); outpdw(MDP_BASE + 0x40858, 0x555555); outpdw(MDP_BASE + 0x4085c, 0x565656); outpdw(MDP_BASE + 0x40860, 0x585858); outpdw(MDP_BASE + 0x40864, 0x5a5a5a); outpdw(MDP_BASE + 0x40868, 0x5b5b5b); outpdw(MDP_BASE + 0x4086c, 0x5d5d5d); outpdw(MDP_BASE + 0x40870, 0x5e5e5e); outpdw(MDP_BASE + 0x40874, 0x606060); outpdw(MDP_BASE + 0x40878, 0x616161); outpdw(MDP_BASE + 0x4087c, 0x636363); outpdw(MDP_BASE + 0x40880, 0x646464); outpdw(MDP_BASE + 0x40884, 0x666666); outpdw(MDP_BASE + 0x40888, 0x676767); outpdw(MDP_BASE + 0x4088c, 0x686868); outpdw(MDP_BASE + 0x40890, 0x6a6a6a); outpdw(MDP_BASE + 0x40894, 0x6b6b6b); outpdw(MDP_BASE + 0x40898, 0x6c6c6c); outpdw(MDP_BASE + 0x4089c, 0x6e6e6e); outpdw(MDP_BASE + 0x408a0, 0x6f6f6f); outpdw(MDP_BASE + 0x408a4, 0x707070); outpdw(MDP_BASE + 0x408a8, 0x717171); outpdw(MDP_BASE + 0x408ac, 0x727272); outpdw(MDP_BASE + 0x408b0, 0x747474); outpdw(MDP_BASE + 0x408b4, 0x757575); outpdw(MDP_BASE + 0x408b8, 0x767676); outpdw(MDP_BASE + 0x408bc, 0x777777); outpdw(MDP_BASE + 0x408c0, 0x787878); outpdw(MDP_BASE + 0x408c4, 0x797979); outpdw(MDP_BASE + 0x408c8, 0x7a7a7a); outpdw(MDP_BASE + 0x408cc, 0x7c7c7c); outpdw(MDP_BASE + 0x408d0, 0x7d7d7d); outpdw(MDP_BASE + 0x408d4, 0x7e7e7e); outpdw(MDP_BASE + 0x408d8, 0x7f7f7f); outpdw(MDP_BASE + 0x408dc, 0x808080); outpdw(MDP_BASE + 0x408e0, 0x818181); outpdw(MDP_BASE + 0x408e4, 0x828282); outpdw(MDP_BASE + 0x408e8, 0x838383); outpdw(MDP_BASE + 0x408ec, 0x848484); outpdw(MDP_BASE + 
0x408f0, 0x858585); outpdw(MDP_BASE + 0x408f4, 0x868686); outpdw(MDP_BASE + 0x408f8, 0x878787); outpdw(MDP_BASE + 0x408fc, 0x888888); outpdw(MDP_BASE + 0x40900, 0x898989); outpdw(MDP_BASE + 0x40904, 0x8a8a8a); outpdw(MDP_BASE + 0x40908, 0x8b8b8b); outpdw(MDP_BASE + 0x4090c, 0x8c8c8c); outpdw(MDP_BASE + 0x40910, 0x8d8d8d); outpdw(MDP_BASE + 0x40914, 0x8e8e8e); outpdw(MDP_BASE + 0x40918, 0x8f8f8f); outpdw(MDP_BASE + 0x4091c, 0x8f8f8f); outpdw(MDP_BASE + 0x40920, 0x909090); outpdw(MDP_BASE + 0x40924, 0x919191); outpdw(MDP_BASE + 0x40928, 0x929292); outpdw(MDP_BASE + 0x4092c, 0x939393); outpdw(MDP_BASE + 0x40930, 0x949494); outpdw(MDP_BASE + 0x40934, 0x959595); outpdw(MDP_BASE + 0x40938, 0x969696); outpdw(MDP_BASE + 0x4093c, 0x969696); outpdw(MDP_BASE + 0x40940, 0x979797); outpdw(MDP_BASE + 0x40944, 0x989898); outpdw(MDP_BASE + 0x40948, 0x999999); outpdw(MDP_BASE + 0x4094c, 0x9a9a9a); outpdw(MDP_BASE + 0x40950, 0x9b9b9b); outpdw(MDP_BASE + 0x40954, 0x9c9c9c); outpdw(MDP_BASE + 0x40958, 0x9c9c9c); outpdw(MDP_BASE + 0x4095c, 0x9d9d9d); outpdw(MDP_BASE + 0x40960, 0x9e9e9e); outpdw(MDP_BASE + 0x40964, 0x9f9f9f); outpdw(MDP_BASE + 0x40968, 0xa0a0a0); outpdw(MDP_BASE + 0x4096c, 0xa0a0a0); outpdw(MDP_BASE + 0x40970, 0xa1a1a1); outpdw(MDP_BASE + 0x40974, 0xa2a2a2); outpdw(MDP_BASE + 0x40978, 0xa3a3a3); outpdw(MDP_BASE + 0x4097c, 0xa4a4a4); outpdw(MDP_BASE + 0x40980, 0xa4a4a4); outpdw(MDP_BASE + 0x40984, 0xa5a5a5); outpdw(MDP_BASE + 0x40988, 0xa6a6a6); outpdw(MDP_BASE + 0x4098c, 0xa7a7a7); outpdw(MDP_BASE + 0x40990, 0xa7a7a7); outpdw(MDP_BASE + 0x40994, 0xa8a8a8); outpdw(MDP_BASE + 0x40998, 0xa9a9a9); outpdw(MDP_BASE + 0x4099c, 0xaaaaaa); outpdw(MDP_BASE + 0x409a0, 0xaaaaaa); outpdw(MDP_BASE + 0x409a4, 0xababab); outpdw(MDP_BASE + 0x409a8, 0xacacac); outpdw(MDP_BASE + 0x409ac, 0xadadad); outpdw(MDP_BASE + 0x409b0, 0xadadad); outpdw(MDP_BASE + 0x409b4, 0xaeaeae); outpdw(MDP_BASE + 0x409b8, 0xafafaf); outpdw(MDP_BASE + 0x409bc, 0xafafaf); outpdw(MDP_BASE + 0x409c0, 0xb0b0b0); outpdw(MDP_BASE + 0x409c4, 0xb1b1b1); outpdw(MDP_BASE + 0x409c8, 0xb2b2b2); outpdw(MDP_BASE + 0x409cc, 0xb2b2b2); outpdw(MDP_BASE + 0x409d0, 0xb3b3b3); outpdw(MDP_BASE + 0x409d4, 0xb4b4b4); outpdw(MDP_BASE + 0x409d8, 0xb4b4b4); outpdw(MDP_BASE + 0x409dc, 0xb5b5b5); outpdw(MDP_BASE + 0x409e0, 0xb6b6b6); outpdw(MDP_BASE + 0x409e4, 0xb6b6b6); outpdw(MDP_BASE + 0x409e8, 0xb7b7b7); outpdw(MDP_BASE + 0x409ec, 0xb8b8b8); outpdw(MDP_BASE + 0x409f0, 0xb8b8b8); outpdw(MDP_BASE + 0x409f4, 0xb9b9b9); outpdw(MDP_BASE + 0x409f8, 0xbababa); outpdw(MDP_BASE + 0x409fc, 0xbababa); outpdw(MDP_BASE + 0x40a00, 0xbbbbbb); outpdw(MDP_BASE + 0x40a04, 0xbcbcbc); outpdw(MDP_BASE + 0x40a08, 0xbcbcbc); outpdw(MDP_BASE + 0x40a0c, 0xbdbdbd); outpdw(MDP_BASE + 0x40a10, 0xbebebe); outpdw(MDP_BASE + 0x40a14, 0xbebebe); outpdw(MDP_BASE + 0x40a18, 0xbfbfbf); outpdw(MDP_BASE + 0x40a1c, 0xc0c0c0); outpdw(MDP_BASE + 0x40a20, 0xc0c0c0); outpdw(MDP_BASE + 0x40a24, 0xc1c1c1); outpdw(MDP_BASE + 0x40a28, 0xc1c1c1); outpdw(MDP_BASE + 0x40a2c, 0xc2c2c2); outpdw(MDP_BASE + 0x40a30, 0xc3c3c3); outpdw(MDP_BASE + 0x40a34, 0xc3c3c3); outpdw(MDP_BASE + 0x40a38, 0xc4c4c4); outpdw(MDP_BASE + 0x40a3c, 0xc5c5c5); outpdw(MDP_BASE + 0x40a40, 0xc5c5c5); outpdw(MDP_BASE + 0x40a44, 0xc6c6c6); outpdw(MDP_BASE + 0x40a48, 0xc6c6c6); outpdw(MDP_BASE + 0x40a4c, 0xc7c7c7); outpdw(MDP_BASE + 0x40a50, 0xc8c8c8); outpdw(MDP_BASE + 0x40a54, 0xc8c8c8); outpdw(MDP_BASE + 0x40a58, 0xc9c9c9); outpdw(MDP_BASE + 0x40a5c, 0xc9c9c9); outpdw(MDP_BASE + 0x40a60, 0xcacaca); outpdw(MDP_BASE + 0x40a64, 0xcbcbcb); 
outpdw(MDP_BASE + 0x40a68, 0xcbcbcb); outpdw(MDP_BASE + 0x40a6c, 0xcccccc); outpdw(MDP_BASE + 0x40a70, 0xcccccc); outpdw(MDP_BASE + 0x40a74, 0xcdcdcd); outpdw(MDP_BASE + 0x40a78, 0xcecece); outpdw(MDP_BASE + 0x40a7c, 0xcecece); outpdw(MDP_BASE + 0x40a80, 0xcfcfcf); outpdw(MDP_BASE + 0x40a84, 0xcfcfcf); outpdw(MDP_BASE + 0x40a88, 0xd0d0d0); outpdw(MDP_BASE + 0x40a8c, 0xd0d0d0); outpdw(MDP_BASE + 0x40a90, 0xd1d1d1); outpdw(MDP_BASE + 0x40a94, 0xd2d2d2); outpdw(MDP_BASE + 0x40a98, 0xd2d2d2); outpdw(MDP_BASE + 0x40a9c, 0xd3d3d3); outpdw(MDP_BASE + 0x40aa0, 0xd3d3d3); outpdw(MDP_BASE + 0x40aa4, 0xd4d4d4); outpdw(MDP_BASE + 0x40aa8, 0xd4d4d4); outpdw(MDP_BASE + 0x40aac, 0xd5d5d5); outpdw(MDP_BASE + 0x40ab0, 0xd6d6d6); outpdw(MDP_BASE + 0x40ab4, 0xd6d6d6); outpdw(MDP_BASE + 0x40ab8, 0xd7d7d7); outpdw(MDP_BASE + 0x40abc, 0xd7d7d7); outpdw(MDP_BASE + 0x40ac0, 0xd8d8d8); outpdw(MDP_BASE + 0x40ac4, 0xd8d8d8); outpdw(MDP_BASE + 0x40ac8, 0xd9d9d9); outpdw(MDP_BASE + 0x40acc, 0xd9d9d9); outpdw(MDP_BASE + 0x40ad0, 0xdadada); outpdw(MDP_BASE + 0x40ad4, 0xdbdbdb); outpdw(MDP_BASE + 0x40ad8, 0xdbdbdb); outpdw(MDP_BASE + 0x40adc, 0xdcdcdc); outpdw(MDP_BASE + 0x40ae0, 0xdcdcdc); outpdw(MDP_BASE + 0x40ae4, 0xdddddd); outpdw(MDP_BASE + 0x40ae8, 0xdddddd); outpdw(MDP_BASE + 0x40aec, 0xdedede); outpdw(MDP_BASE + 0x40af0, 0xdedede); outpdw(MDP_BASE + 0x40af4, 0xdfdfdf); outpdw(MDP_BASE + 0x40af8, 0xdfdfdf); outpdw(MDP_BASE + 0x40afc, 0xe0e0e0); outpdw(MDP_BASE + 0x40b00, 0xe0e0e0); outpdw(MDP_BASE + 0x40b04, 0xe1e1e1); outpdw(MDP_BASE + 0x40b08, 0xe1e1e1); outpdw(MDP_BASE + 0x40b0c, 0xe2e2e2); outpdw(MDP_BASE + 0x40b10, 0xe3e3e3); outpdw(MDP_BASE + 0x40b14, 0xe3e3e3); outpdw(MDP_BASE + 0x40b18, 0xe4e4e4); outpdw(MDP_BASE + 0x40b1c, 0xe4e4e4); outpdw(MDP_BASE + 0x40b20, 0xe5e5e5); outpdw(MDP_BASE + 0x40b24, 0xe5e5e5); outpdw(MDP_BASE + 0x40b28, 0xe6e6e6); outpdw(MDP_BASE + 0x40b2c, 0xe6e6e6); outpdw(MDP_BASE + 0x40b30, 0xe7e7e7); outpdw(MDP_BASE + 0x40b34, 0xe7e7e7); outpdw(MDP_BASE + 0x40b38, 0xe8e8e8); outpdw(MDP_BASE + 0x40b3c, 0xe8e8e8); outpdw(MDP_BASE + 0x40b40, 0xe9e9e9); outpdw(MDP_BASE + 0x40b44, 0xe9e9e9); outpdw(MDP_BASE + 0x40b48, 0xeaeaea); outpdw(MDP_BASE + 0x40b4c, 0xeaeaea); outpdw(MDP_BASE + 0x40b50, 0xebebeb); outpdw(MDP_BASE + 0x40b54, 0xebebeb); outpdw(MDP_BASE + 0x40b58, 0xececec); outpdw(MDP_BASE + 0x40b5c, 0xececec); outpdw(MDP_BASE + 0x40b60, 0xededed); outpdw(MDP_BASE + 0x40b64, 0xededed); outpdw(MDP_BASE + 0x40b68, 0xeeeeee); outpdw(MDP_BASE + 0x40b6c, 0xeeeeee); outpdw(MDP_BASE + 0x40b70, 0xefefef); outpdw(MDP_BASE + 0x40b74, 0xefefef); outpdw(MDP_BASE + 0x40b78, 0xf0f0f0); outpdw(MDP_BASE + 0x40b7c, 0xf0f0f0); outpdw(MDP_BASE + 0x40b80, 0xf1f1f1); outpdw(MDP_BASE + 0x40b84, 0xf1f1f1); outpdw(MDP_BASE + 0x40b88, 0xf2f2f2); outpdw(MDP_BASE + 0x40b8c, 0xf2f2f2); outpdw(MDP_BASE + 0x40b90, 0xf2f2f2); outpdw(MDP_BASE + 0x40b94, 0xf3f3f3); outpdw(MDP_BASE + 0x40b98, 0xf3f3f3); outpdw(MDP_BASE + 0x40b9c, 0xf4f4f4); outpdw(MDP_BASE + 0x40ba0, 0xf4f4f4); outpdw(MDP_BASE + 0x40ba4, 0xf5f5f5); outpdw(MDP_BASE + 0x40ba8, 0xf5f5f5); outpdw(MDP_BASE + 0x40bac, 0xf6f6f6); outpdw(MDP_BASE + 0x40bb0, 0xf6f6f6); outpdw(MDP_BASE + 0x40bb4, 0xf7f7f7); outpdw(MDP_BASE + 0x40bb8, 0xf7f7f7); outpdw(MDP_BASE + 0x40bbc, 0xf8f8f8); outpdw(MDP_BASE + 0x40bc0, 0xf8f8f8); outpdw(MDP_BASE + 0x40bc4, 0xf9f9f9); outpdw(MDP_BASE + 0x40bc8, 0xf9f9f9); outpdw(MDP_BASE + 0x40bcc, 0xfafafa); outpdw(MDP_BASE + 0x40bd0, 0xfafafa); outpdw(MDP_BASE + 0x40bd4, 0xfafafa); outpdw(MDP_BASE + 0x40bd8, 0xfbfbfb); outpdw(MDP_BASE + 
0x40bdc, 0xfbfbfb); outpdw(MDP_BASE + 0x40be0, 0xfcfcfc); outpdw(MDP_BASE + 0x40be4, 0xfcfcfc); outpdw(MDP_BASE + 0x40be8, 0xfdfdfd); outpdw(MDP_BASE + 0x40bec, 0xfdfdfd); outpdw(MDP_BASE + 0x40bf0, 0xfefefe); outpdw(MDP_BASE + 0x40bf4, 0xfefefe); outpdw(MDP_BASE + 0x40bf8, 0xffffff); outpdw(MDP_BASE + 0x40bfc, 0xffffff); outpdw(MDP_BASE + 0x40c00, 0x0); outpdw(MDP_BASE + 0x40c04, 0x0); outpdw(MDP_BASE + 0x40c08, 0x0); outpdw(MDP_BASE + 0x40c0c, 0x0); outpdw(MDP_BASE + 0x40c10, 0x0); outpdw(MDP_BASE + 0x40c14, 0x0); outpdw(MDP_BASE + 0x40c18, 0x0); outpdw(MDP_BASE + 0x40c1c, 0x0); outpdw(MDP_BASE + 0x40c20, 0x0); outpdw(MDP_BASE + 0x40c24, 0x0); outpdw(MDP_BASE + 0x40c28, 0x0); outpdw(MDP_BASE + 0x40c2c, 0x0); outpdw(MDP_BASE + 0x40c30, 0x0); outpdw(MDP_BASE + 0x40c34, 0x0); outpdw(MDP_BASE + 0x40c38, 0x0); outpdw(MDP_BASE + 0x40c3c, 0x0); outpdw(MDP_BASE + 0x40c40, 0x10101); outpdw(MDP_BASE + 0x40c44, 0x10101); outpdw(MDP_BASE + 0x40c48, 0x10101); outpdw(MDP_BASE + 0x40c4c, 0x10101); outpdw(MDP_BASE + 0x40c50, 0x10101); outpdw(MDP_BASE + 0x40c54, 0x10101); outpdw(MDP_BASE + 0x40c58, 0x10101); outpdw(MDP_BASE + 0x40c5c, 0x10101); outpdw(MDP_BASE + 0x40c60, 0x10101); outpdw(MDP_BASE + 0x40c64, 0x10101); outpdw(MDP_BASE + 0x40c68, 0x20202); outpdw(MDP_BASE + 0x40c6c, 0x20202); outpdw(MDP_BASE + 0x40c70, 0x20202); outpdw(MDP_BASE + 0x40c74, 0x20202); outpdw(MDP_BASE + 0x40c78, 0x20202); outpdw(MDP_BASE + 0x40c7c, 0x20202); outpdw(MDP_BASE + 0x40c80, 0x30303); outpdw(MDP_BASE + 0x40c84, 0x30303); outpdw(MDP_BASE + 0x40c88, 0x30303); outpdw(MDP_BASE + 0x40c8c, 0x30303); outpdw(MDP_BASE + 0x40c90, 0x30303); outpdw(MDP_BASE + 0x40c94, 0x40404); outpdw(MDP_BASE + 0x40c98, 0x40404); outpdw(MDP_BASE + 0x40c9c, 0x40404); outpdw(MDP_BASE + 0x40ca0, 0x40404); outpdw(MDP_BASE + 0x40ca4, 0x40404); outpdw(MDP_BASE + 0x40ca8, 0x50505); outpdw(MDP_BASE + 0x40cac, 0x50505); outpdw(MDP_BASE + 0x40cb0, 0x50505); outpdw(MDP_BASE + 0x40cb4, 0x50505); outpdw(MDP_BASE + 0x40cb8, 0x60606); outpdw(MDP_BASE + 0x40cbc, 0x60606); outpdw(MDP_BASE + 0x40cc0, 0x60606); outpdw(MDP_BASE + 0x40cc4, 0x70707); outpdw(MDP_BASE + 0x40cc8, 0x70707); outpdw(MDP_BASE + 0x40ccc, 0x70707); outpdw(MDP_BASE + 0x40cd0, 0x70707); outpdw(MDP_BASE + 0x40cd4, 0x80808); outpdw(MDP_BASE + 0x40cd8, 0x80808); outpdw(MDP_BASE + 0x40cdc, 0x80808); outpdw(MDP_BASE + 0x40ce0, 0x90909); outpdw(MDP_BASE + 0x40ce4, 0x90909); outpdw(MDP_BASE + 0x40ce8, 0xa0a0a); outpdw(MDP_BASE + 0x40cec, 0xa0a0a); outpdw(MDP_BASE + 0x40cf0, 0xa0a0a); outpdw(MDP_BASE + 0x40cf4, 0xb0b0b); outpdw(MDP_BASE + 0x40cf8, 0xb0b0b); outpdw(MDP_BASE + 0x40cfc, 0xb0b0b); outpdw(MDP_BASE + 0x40d00, 0xc0c0c); outpdw(MDP_BASE + 0x40d04, 0xc0c0c); outpdw(MDP_BASE + 0x40d08, 0xd0d0d); outpdw(MDP_BASE + 0x40d0c, 0xd0d0d); outpdw(MDP_BASE + 0x40d10, 0xe0e0e); outpdw(MDP_BASE + 0x40d14, 0xe0e0e); outpdw(MDP_BASE + 0x40d18, 0xe0e0e); outpdw(MDP_BASE + 0x40d1c, 0xf0f0f); outpdw(MDP_BASE + 0x40d20, 0xf0f0f); outpdw(MDP_BASE + 0x40d24, 0x101010); outpdw(MDP_BASE + 0x40d28, 0x101010); outpdw(MDP_BASE + 0x40d2c, 0x111111); outpdw(MDP_BASE + 0x40d30, 0x111111); outpdw(MDP_BASE + 0x40d34, 0x121212); outpdw(MDP_BASE + 0x40d38, 0x121212); outpdw(MDP_BASE + 0x40d3c, 0x131313); outpdw(MDP_BASE + 0x40d40, 0x131313); outpdw(MDP_BASE + 0x40d44, 0x141414); outpdw(MDP_BASE + 0x40d48, 0x151515); outpdw(MDP_BASE + 0x40d4c, 0x151515); outpdw(MDP_BASE + 0x40d50, 0x161616); outpdw(MDP_BASE + 0x40d54, 0x161616); outpdw(MDP_BASE + 0x40d58, 0x171717); outpdw(MDP_BASE + 0x40d5c, 0x171717); outpdw(MDP_BASE + 
0x40d60, 0x181818); outpdw(MDP_BASE + 0x40d64, 0x191919); outpdw(MDP_BASE + 0x40d68, 0x191919); outpdw(MDP_BASE + 0x40d6c, 0x1a1a1a); outpdw(MDP_BASE + 0x40d70, 0x1b1b1b); outpdw(MDP_BASE + 0x40d74, 0x1b1b1b); outpdw(MDP_BASE + 0x40d78, 0x1c1c1c); outpdw(MDP_BASE + 0x40d7c, 0x1c1c1c); outpdw(MDP_BASE + 0x40d80, 0x1d1d1d); outpdw(MDP_BASE + 0x40d84, 0x1e1e1e); outpdw(MDP_BASE + 0x40d88, 0x1f1f1f); outpdw(MDP_BASE + 0x40d8c, 0x1f1f1f); outpdw(MDP_BASE + 0x40d90, 0x202020); outpdw(MDP_BASE + 0x40d94, 0x212121); outpdw(MDP_BASE + 0x40d98, 0x212121); outpdw(MDP_BASE + 0x40d9c, 0x222222); outpdw(MDP_BASE + 0x40da0, 0x232323); outpdw(MDP_BASE + 0x40da4, 0x242424); outpdw(MDP_BASE + 0x40da8, 0x242424); outpdw(MDP_BASE + 0x40dac, 0x252525); outpdw(MDP_BASE + 0x40db0, 0x262626); outpdw(MDP_BASE + 0x40db4, 0x272727); outpdw(MDP_BASE + 0x40db8, 0x272727); outpdw(MDP_BASE + 0x40dbc, 0x282828); outpdw(MDP_BASE + 0x40dc0, 0x292929); outpdw(MDP_BASE + 0x40dc4, 0x2a2a2a); outpdw(MDP_BASE + 0x40dc8, 0x2b2b2b); outpdw(MDP_BASE + 0x40dcc, 0x2c2c2c); outpdw(MDP_BASE + 0x40dd0, 0x2c2c2c); outpdw(MDP_BASE + 0x40dd4, 0x2d2d2d); outpdw(MDP_BASE + 0x40dd8, 0x2e2e2e); outpdw(MDP_BASE + 0x40ddc, 0x2f2f2f); outpdw(MDP_BASE + 0x40de0, 0x303030); outpdw(MDP_BASE + 0x40de4, 0x313131); outpdw(MDP_BASE + 0x40de8, 0x323232); outpdw(MDP_BASE + 0x40dec, 0x333333); outpdw(MDP_BASE + 0x40df0, 0x333333); outpdw(MDP_BASE + 0x40df4, 0x343434); outpdw(MDP_BASE + 0x40df8, 0x353535); outpdw(MDP_BASE + 0x40dfc, 0x363636); outpdw(MDP_BASE + 0x40e00, 0x373737); outpdw(MDP_BASE + 0x40e04, 0x383838); outpdw(MDP_BASE + 0x40e08, 0x393939); outpdw(MDP_BASE + 0x40e0c, 0x3a3a3a); outpdw(MDP_BASE + 0x40e10, 0x3b3b3b); outpdw(MDP_BASE + 0x40e14, 0x3c3c3c); outpdw(MDP_BASE + 0x40e18, 0x3d3d3d); outpdw(MDP_BASE + 0x40e1c, 0x3e3e3e); outpdw(MDP_BASE + 0x40e20, 0x3f3f3f); outpdw(MDP_BASE + 0x40e24, 0x404040); outpdw(MDP_BASE + 0x40e28, 0x414141); outpdw(MDP_BASE + 0x40e2c, 0x424242); outpdw(MDP_BASE + 0x40e30, 0x434343); outpdw(MDP_BASE + 0x40e34, 0x444444); outpdw(MDP_BASE + 0x40e38, 0x464646); outpdw(MDP_BASE + 0x40e3c, 0x474747); outpdw(MDP_BASE + 0x40e40, 0x484848); outpdw(MDP_BASE + 0x40e44, 0x494949); outpdw(MDP_BASE + 0x40e48, 0x4a4a4a); outpdw(MDP_BASE + 0x40e4c, 0x4b4b4b); outpdw(MDP_BASE + 0x40e50, 0x4c4c4c); outpdw(MDP_BASE + 0x40e54, 0x4d4d4d); outpdw(MDP_BASE + 0x40e58, 0x4f4f4f); outpdw(MDP_BASE + 0x40e5c, 0x505050); outpdw(MDP_BASE + 0x40e60, 0x515151); outpdw(MDP_BASE + 0x40e64, 0x525252); outpdw(MDP_BASE + 0x40e68, 0x535353); outpdw(MDP_BASE + 0x40e6c, 0x545454); outpdw(MDP_BASE + 0x40e70, 0x565656); outpdw(MDP_BASE + 0x40e74, 0x575757); outpdw(MDP_BASE + 0x40e78, 0x585858); outpdw(MDP_BASE + 0x40e7c, 0x595959); outpdw(MDP_BASE + 0x40e80, 0x5b5b5b); outpdw(MDP_BASE + 0x40e84, 0x5c5c5c); outpdw(MDP_BASE + 0x40e88, 0x5d5d5d); outpdw(MDP_BASE + 0x40e8c, 0x5e5e5e); outpdw(MDP_BASE + 0x40e90, 0x606060); outpdw(MDP_BASE + 0x40e94, 0x616161); outpdw(MDP_BASE + 0x40e98, 0x626262); outpdw(MDP_BASE + 0x40e9c, 0x646464); outpdw(MDP_BASE + 0x40ea0, 0x656565); outpdw(MDP_BASE + 0x40ea4, 0x666666); outpdw(MDP_BASE + 0x40ea8, 0x686868); outpdw(MDP_BASE + 0x40eac, 0x696969); outpdw(MDP_BASE + 0x40eb0, 0x6a6a6a); outpdw(MDP_BASE + 0x40eb4, 0x6c6c6c); outpdw(MDP_BASE + 0x40eb8, 0x6d6d6d); outpdw(MDP_BASE + 0x40ebc, 0x6f6f6f); outpdw(MDP_BASE + 0x40ec0, 0x707070); outpdw(MDP_BASE + 0x40ec4, 0x717171); outpdw(MDP_BASE + 0x40ec8, 0x737373); outpdw(MDP_BASE + 0x40ecc, 0x747474); outpdw(MDP_BASE + 0x40ed0, 0x767676); outpdw(MDP_BASE + 0x40ed4, 0x777777); 
outpdw(MDP_BASE + 0x40ed8, 0x797979); outpdw(MDP_BASE + 0x40edc, 0x7a7a7a); outpdw(MDP_BASE + 0x40ee0, 0x7c7c7c); outpdw(MDP_BASE + 0x40ee4, 0x7d7d7d); outpdw(MDP_BASE + 0x40ee8, 0x7f7f7f); outpdw(MDP_BASE + 0x40eec, 0x808080); outpdw(MDP_BASE + 0x40ef0, 0x828282); outpdw(MDP_BASE + 0x40ef4, 0x838383); outpdw(MDP_BASE + 0x40ef8, 0x858585); outpdw(MDP_BASE + 0x40efc, 0x868686); outpdw(MDP_BASE + 0x40f00, 0x888888); outpdw(MDP_BASE + 0x40f04, 0x898989); outpdw(MDP_BASE + 0x40f08, 0x8b8b8b); outpdw(MDP_BASE + 0x40f0c, 0x8d8d8d); outpdw(MDP_BASE + 0x40f10, 0x8e8e8e); outpdw(MDP_BASE + 0x40f14, 0x909090); outpdw(MDP_BASE + 0x40f18, 0x919191); outpdw(MDP_BASE + 0x40f1c, 0x939393); outpdw(MDP_BASE + 0x40f20, 0x959595); outpdw(MDP_BASE + 0x40f24, 0x969696); outpdw(MDP_BASE + 0x40f28, 0x989898); outpdw(MDP_BASE + 0x40f2c, 0x9a9a9a); outpdw(MDP_BASE + 0x40f30, 0x9b9b9b); outpdw(MDP_BASE + 0x40f34, 0x9d9d9d); outpdw(MDP_BASE + 0x40f38, 0x9f9f9f); outpdw(MDP_BASE + 0x40f3c, 0xa1a1a1); outpdw(MDP_BASE + 0x40f40, 0xa2a2a2); outpdw(MDP_BASE + 0x40f44, 0xa4a4a4); outpdw(MDP_BASE + 0x40f48, 0xa6a6a6); outpdw(MDP_BASE + 0x40f4c, 0xa7a7a7); outpdw(MDP_BASE + 0x40f50, 0xa9a9a9); outpdw(MDP_BASE + 0x40f54, 0xababab); outpdw(MDP_BASE + 0x40f58, 0xadadad); outpdw(MDP_BASE + 0x40f5c, 0xafafaf); outpdw(MDP_BASE + 0x40f60, 0xb0b0b0); outpdw(MDP_BASE + 0x40f64, 0xb2b2b2); outpdw(MDP_BASE + 0x40f68, 0xb4b4b4); outpdw(MDP_BASE + 0x40f6c, 0xb6b6b6); outpdw(MDP_BASE + 0x40f70, 0xb8b8b8); outpdw(MDP_BASE + 0x40f74, 0xbababa); outpdw(MDP_BASE + 0x40f78, 0xbbbbbb); outpdw(MDP_BASE + 0x40f7c, 0xbdbdbd); outpdw(MDP_BASE + 0x40f80, 0xbfbfbf); outpdw(MDP_BASE + 0x40f84, 0xc1c1c1); outpdw(MDP_BASE + 0x40f88, 0xc3c3c3); outpdw(MDP_BASE + 0x40f8c, 0xc5c5c5); outpdw(MDP_BASE + 0x40f90, 0xc7c7c7); outpdw(MDP_BASE + 0x40f94, 0xc9c9c9); outpdw(MDP_BASE + 0x40f98, 0xcbcbcb); outpdw(MDP_BASE + 0x40f9c, 0xcdcdcd); outpdw(MDP_BASE + 0x40fa0, 0xcfcfcf); outpdw(MDP_BASE + 0x40fa4, 0xd1d1d1); outpdw(MDP_BASE + 0x40fa8, 0xd3d3d3); outpdw(MDP_BASE + 0x40fac, 0xd5d5d5); outpdw(MDP_BASE + 0x40fb0, 0xd7d7d7); outpdw(MDP_BASE + 0x40fb4, 0xd9d9d9); outpdw(MDP_BASE + 0x40fb8, 0xdbdbdb); outpdw(MDP_BASE + 0x40fbc, 0xdddddd); outpdw(MDP_BASE + 0x40fc0, 0xdfdfdf); outpdw(MDP_BASE + 0x40fc4, 0xe1e1e1); outpdw(MDP_BASE + 0x40fc8, 0xe3e3e3); outpdw(MDP_BASE + 0x40fcc, 0xe5e5e5); outpdw(MDP_BASE + 0x40fd0, 0xe7e7e7); outpdw(MDP_BASE + 0x40fd4, 0xe9e9e9); outpdw(MDP_BASE + 0x40fd8, 0xebebeb); outpdw(MDP_BASE + 0x40fdc, 0xeeeeee); outpdw(MDP_BASE + 0x40fe0, 0xf0f0f0); outpdw(MDP_BASE + 0x40fe4, 0xf2f2f2); outpdw(MDP_BASE + 0x40fe8, 0xf4f4f4); outpdw(MDP_BASE + 0x40fec, 0xf6f6f6); outpdw(MDP_BASE + 0x40ff0, 0xf8f8f8); outpdw(MDP_BASE + 0x40ff4, 0xfbfbfb); outpdw(MDP_BASE + 0x40ff8, 0xfdfdfd); outpdw(MDP_BASE + 0x40ffc, 0xffffff); } #define IRQ_EN_1__MDP_IRQ___M 0x00000800 void mdp_hw_init(void) { int i; #if defined(CONFIG_MACH_JENA) mdp_timer_duration = (100 * HZ); /* 100 sec */ #endif /* MDP cmd block enable */ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE); /* debug interface write access */ outpdw(MDP_BASE + 0x60, 1); outp32(MDP_INTR_ENABLE, MDP_ANY_INTR_MASK); outp32(MDP_EBI2_PORTMAP_MODE, 0x3); outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x01f8, 0x0); outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x01fc, 0x0); outpdw(MDP_BASE + 0x60, 0x1); mdp_load_lut_param(); /* * clear up unused fg/main registers */ /* comp.plane 2&3 ystride */ MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0120, 0x0); /* unpacked pattern */ MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x012c, 0x0); /* 
unpacked pattern */ MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0130, 0x0); /* unpacked pattern */ MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0134, 0x0); MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0158, 0x0); MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x15c, 0x0); MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0160, 0x0); MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0170, 0x0); MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0174, 0x0); MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x017c, 0x0); /* comp.plane 2 */ MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0114, 0x0); /* comp.plane 3 */ MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0118, 0x0); /* clear up unused bg registers */ MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01c8, 0); MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01d0, 0); MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01dc, 0); MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01e0, 0); MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01e4, 0); #ifndef CONFIG_FB_MSM_MDP22 #if defined(CONFIG_MACH_JENA) /* skip the code to avoid LCDC is to be disable */ if (!mdp_continues_display) #endif { MDP_OUTP(MDP_BASE + 0xE0000, 0); MDP_OUTP(MDP_BASE + 0x100, 0xffffffff); MDP_OUTP(MDP_BASE + 0x90070, 0); MDP_OUTP(MDP_BASE + 0x94010, 1); MDP_OUTP(MDP_BASE + 0x9401c, 2); } #endif /* * limit vector * pre gets applied before color matrix conversion * post is after ccs */ writel(mdp_plv[0], MDP_CSC_PRE_LV1n(0)); writel(mdp_plv[1], MDP_CSC_PRE_LV1n(1)); writel(mdp_plv[2], MDP_CSC_PRE_LV1n(2)); writel(mdp_plv[3], MDP_CSC_PRE_LV1n(3)); #ifdef CONFIG_FB_MSM_MDP31 writel(mdp_plv[2], MDP_CSC_PRE_LV1n(4)); writel(mdp_plv[3], MDP_CSC_PRE_LV1n(5)); writel(0, MDP_CSC_POST_LV1n(0)); writel(0xff, MDP_CSC_POST_LV1n(1)); writel(0, MDP_CSC_POST_LV1n(2)); writel(0xff, MDP_CSC_POST_LV1n(3)); writel(0, MDP_CSC_POST_LV1n(4)); writel(0xff, MDP_CSC_POST_LV1n(5)); writel(0, MDP_CSC_PRE_LV2n(0)); writel(0xff, MDP_CSC_PRE_LV2n(1)); writel(0, MDP_CSC_PRE_LV2n(2)); writel(0xff, MDP_CSC_PRE_LV2n(3)); writel(0, MDP_CSC_PRE_LV2n(4)); writel(0xff, MDP_CSC_PRE_LV2n(5)); writel(mdp_plv[0], MDP_CSC_POST_LV2n(0)); writel(mdp_plv[1], MDP_CSC_POST_LV2n(1)); writel(mdp_plv[2], MDP_CSC_POST_LV2n(2)); writel(mdp_plv[3], MDP_CSC_POST_LV2n(3)); writel(mdp_plv[2], MDP_CSC_POST_LV2n(4)); writel(mdp_plv[3], MDP_CSC_POST_LV2n(5)); #endif /* primary forward matrix */ for (i = 0; i < MDP_CCS_SIZE; i++) writel(mdp_ccs_rgb2yuv.ccs[i], MDP_CSC_PFMVn(i)); #ifdef CONFIG_FB_MSM_MDP31 for (i = 0; i < MDP_BV_SIZE; i++) writel(mdp_ccs_rgb2yuv.bv[i], MDP_CSC_POST_BV2n(i)); writel(0, MDP_CSC_PRE_BV2n(0)); writel(0, MDP_CSC_PRE_BV2n(1)); writel(0, MDP_CSC_PRE_BV2n(2)); #endif /* primary reverse matrix */ for (i = 0; i < MDP_CCS_SIZE; i++) writel(mdp_ccs_yuv2rgb.ccs[i], MDP_CSC_PRMVn(i)); for (i = 0; i < MDP_BV_SIZE; i++) writel(mdp_ccs_yuv2rgb.bv[i], MDP_CSC_PRE_BV1n(i)); #ifdef CONFIG_FB_MSM_MDP31 writel(0, MDP_CSC_POST_BV1n(0)); writel(0, MDP_CSC_POST_BV1n(1)); writel(0, MDP_CSC_POST_BV1n(2)); outpdw(MDP_BASE + 0x30010, 0x03e0); outpdw(MDP_BASE + 0x30014, 0x0360); outpdw(MDP_BASE + 0x30018, 0x0120); outpdw(MDP_BASE + 0x3001c, 0x0140); #endif mdp_init_scale_table(); #ifndef CONFIG_FB_MSM_MDP31 MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0104, ((16 << 6) << 16) | (16) << 6); #endif /* MDP cmd block disable */ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE); }
loxdegio/Android_kernel_samsung_msm7x27a-1
drivers/video/msm/mdp_hw_init.c
C
gpl-2.0
25,296
/* * Copyright (C) 2008-2013 TrinityCore <http://www.trinitycore.org/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ /* ScriptData Name: gm_commandscript %Complete: 100 Comment: All gm related commands Category: commandscripts EndScriptData */ #include "ScriptMgr.h" #include "ObjectMgr.h" #include "Chat.h" #include "AccountMgr.h" #include "Language.h" #include "World.h" #include "Player.h" #include "Opcodes.h" class gm_commandscript : public CommandScript { public: gm_commandscript() : CommandScript("gm_commandscript") { } ChatCommand* GetCommands() const { static ChatCommand gmCommandTable[] = { { "chat", SEC_MODERATOR, false, &HandleGMChatCommand, "", NULL }, { "fly", SEC_ADMINISTRATOR, false, &HandleGMFlyCommand, "", NULL }, { "ingame", SEC_PLAYER, true, &HandleGMListIngameCommand, "", NULL }, { "list", SEC_ADMINISTRATOR, true, &HandleGMListFullCommand, "", NULL }, { "visible", SEC_MODERATOR, false, &HandleGMVisibleCommand, "", NULL }, { "", SEC_MODERATOR, false, &HandleGMCommand, "", NULL }, { NULL, 0, false, NULL, "", NULL } }; static ChatCommand commandTable[] = { { "gm", SEC_MODERATOR, false, NULL, "", gmCommandTable }, { NULL, 0, false, NULL, "", NULL } }; return commandTable; } // Enables or disables hiding of the staff badge static bool HandleGMChatCommand(ChatHandler* handler, char const* args) { if (!*args) { WorldSession* session = handler->GetSession(); if (!AccountMgr::IsPlayerAccount(session->GetSecurity()) && session->GetPlayer()->isGMChat()) session->SendNotification(LANG_GM_CHAT_ON); else session->SendNotification(LANG_GM_CHAT_OFF); return true; } std::string param = (char*)args; if (param == "on") { handler->GetSession()->GetPlayer()->SetGMChat(true); handler->GetSession()->SendNotification(LANG_GM_CHAT_ON); return true; } if (param == "off") { handler->GetSession()->GetPlayer()->SetGMChat(false); handler->GetSession()->SendNotification(LANG_GM_CHAT_OFF); return true; } handler->SendSysMessage(LANG_USE_BOL); handler->SetSentErrorMessage(true); return false; } static bool HandleGMFlyCommand(ChatHandler* handler, char const* args) { if (!*args) return false; Player* target = handler->getSelectedPlayer(); if (!target) target = handler->GetSession()->GetPlayer(); WorldPacket data; if (strncmp(args, "on", 3) == 0) { target->AddUnitMovementFlag(MOVEMENTFLAG_CAN_FLY); target->SendMovementCanFlyChange(); } else if (strncmp(args, "off", 4) == 0) { target->RemoveUnitMovementFlag(MOVEMENTFLAG_CAN_FLY); target->SendMovementCanFlyChange(); } else { handler->SendSysMessage(LANG_USE_BOL); return false; } handler->PSendSysMessage(LANG_COMMAND_FLYMODE_STATUS, handler->GetNameLink(target).c_str(), args); return true; } static bool HandleGMListIngameCommand(ChatHandler* handler, char const* /*args*/) { bool first = true; bool footer = false; TRINITY_READ_GUARD(HashMapHolder<Player>::LockType, *HashMapHolder<Player>::GetLock()); HashMapHolder<Player>::MapType const& m = sObjectAccessor->GetPlayers(); for 
(HashMapHolder<Player>::MapType::const_iterator itr = m.begin(); itr != m.end(); ++itr) { AccountTypes itrSec = itr->second->GetSession()->GetSecurity(); if ((itr->second->isGameMaster() || (!AccountMgr::IsPlayerAccount(itrSec) && itrSec <= AccountTypes(sWorld->getIntConfig(CONFIG_GM_LEVEL_IN_GM_LIST)))) && (!handler->GetSession() || itr->second->IsVisibleGloballyFor(handler->GetSession()->GetPlayer()))) { if (first) { first = false; footer = true; handler->SendSysMessage(LANG_GMS_ON_SRV); handler->SendSysMessage("========================"); } std::string const& name = itr->second->GetName(); uint8 size = name.size(); uint8 security = itrSec; uint8 max = ((16 - size) / 2); uint8 max2 = max; if ((max + max2 + size) == 16) max2 = max - 1; if (handler->GetSession()) handler->PSendSysMessage("| %s GMLevel %u", name.c_str(), security); else handler->PSendSysMessage("|%*s%s%*s| %u |", max, " ", name.c_str(), max2, " ", security); } } if (footer) handler->SendSysMessage("========================"); if (first) handler->SendSysMessage(LANG_GMS_NOT_LOGGED); return true; } /// Display the list of GMs static bool HandleGMListFullCommand(ChatHandler* handler, char const* /*args*/) { ///- Get the accounts with GM Level >0 PreparedStatement* stmt = LoginDatabase.GetPreparedStatement(LOGIN_SEL_GM_ACCOUNTS); stmt->setUInt8(0, uint8(SEC_MODERATOR)); stmt->setInt32(1, int32(realmID)); PreparedQueryResult result = LoginDatabase.Query(stmt); if (result) { handler->SendSysMessage(LANG_GMLIST); handler->SendSysMessage("========================"); ///- Cycle through them. Display username and GM level do { Field* fields = result->Fetch(); char const* name = fields[0].GetCString(); uint8 security = fields[1].GetUInt8(); uint8 max = (16 - strlen(name)) / 2; uint8 max2 = max; if ((max + max2 + strlen(name)) == 16) max2 = max - 1; if (handler->GetSession()) handler->PSendSysMessage("| %s GMLevel %u", name, security); else handler->PSendSysMessage("|%*s%s%*s| %u |", max, " ", name, max2, " ", security); } while (result->NextRow()); handler->SendSysMessage("========================"); } else handler->PSendSysMessage(LANG_GMLIST_EMPTY); return true; } //Enable\Disable Invisible mode static bool HandleGMVisibleCommand(ChatHandler* handler, char const* args) { Player* _player = handler->GetSession()->GetPlayer(); if (!*args) { handler->PSendSysMessage(LANG_YOU_ARE, _player->isGMVisible() ? handler->GetTrinityString(LANG_VISIBLE) : handler->GetTrinityString(LANG_INVISIBLE)); return true; } const uint32 VISUAL_AURA = 37800; std::string param = (char*)args; if (param == "on") { if (_player->HasAura(VISUAL_AURA, 0)) _player->RemoveAurasDueToSpell(VISUAL_AURA); _player->SetGMVisible(true); _player->UpdateObjectVisibility(); handler->GetSession()->SendNotification(LANG_INVISIBLE_VISIBLE); return true; } if (param == "off") { _player->AddAura(VISUAL_AURA, _player); _player->SetGMVisible(false); _player->UpdateObjectVisibility(); handler->GetSession()->SendNotification(LANG_INVISIBLE_INVISIBLE); return true; } handler->SendSysMessage(LANG_USE_BOL); handler->SetSentErrorMessage(true); return false; } //Enable\Disable GM Mode static bool HandleGMCommand(ChatHandler* handler, char const* args) { Player* _player = handler->GetSession()->GetPlayer(); if (!*args) { handler->GetSession()->SendNotification(_player->isGameMaster() ? 
LANG_GM_ON : LANG_GM_OFF); return true; } std::string param = (char*)args; if (param == "on") { _player->SetGameMaster(true); handler->GetSession()->SendNotification(LANG_GM_ON); _player->UpdateTriggerVisibility(); #ifdef _DEBUG_VMAPS VMAP::IVMapManager* vMapManager = VMAP::VMapFactory::createOrGetVMapManager(); vMapManager->processCommand("stoplog"); #endif return true; } if (param == "off") { _player->SetGameMaster(false); handler->GetSession()->SendNotification(LANG_GM_OFF); _player->UpdateTriggerVisibility(); #ifdef _DEBUG_VMAPS VMAP::IVMapManager* vMapManager = VMAP::VMapFactory::createOrGetVMapManager(); vMapManager->processCommand("startlog"); #endif return true; } handler->SendSysMessage(LANG_USE_BOL); handler->SetSentErrorMessage(true); return false; } }; void AddSC_gm_commandscript() { new gm_commandscript(); }
Ginfred/CataCore
src/server/scripts/Commands/cs_gm.cpp
C++
gpl-2.0
10,138
/***************************************************************** | | Neptune - Trust Anchors | | This file is automatically generated by a script, do not edit! | | Copyright (c) 2002-2010, Axiomatic Systems, LLC. | All rights reserved. | | Redistribution and use in source and binary forms, with or without | modification, are permitted provided that the following conditions are met: | * Redistributions of source code must retain the above copyright | notice, this list of conditions and the following disclaimer. | * Redistributions in binary form must reproduce the above copyright | notice, this list of conditions and the following disclaimer in the | documentation and/or other materials provided with the distribution. | * Neither the name of Axiomatic Systems nor the | names of its contributors may be used to endorse or promote products | derived from this software without specific prior written permission. | | THIS SOFTWARE IS PROVIDED BY AXIOMATIC SYSTEMS ''AS IS'' AND ANY | EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | DISCLAIMED. IN NO EVENT SHALL AXIOMATIC SYSTEMS BE LIABLE FOR ANY | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ****************************************************************/ /* Firmaprofesional Root CA */ const unsigned char NptTlsTrustAnchor_Base_0099_Data[1115] = { 0x30,0x82,0x04,0x57,0x30,0x82,0x03,0x3f ,0xa0,0x03,0x02,0x01,0x02,0x02,0x01,0x01 ,0x30,0x0d,0x06,0x09,0x2a,0x86,0x48,0x86 ,0xf7,0x0d,0x01,0x01,0x05,0x05,0x00,0x30 ,0x81,0x9d,0x31,0x0b,0x30,0x09,0x06,0x03 ,0x55,0x04,0x06,0x13,0x02,0x45,0x53,0x31 ,0x22,0x30,0x20,0x06,0x03,0x55,0x04,0x07 ,0x13,0x19,0x43,0x2f,0x20,0x4d,0x75,0x6e ,0x74,0x61,0x6e,0x65,0x72,0x20,0x32,0x34 ,0x34,0x20,0x42,0x61,0x72,0x63,0x65,0x6c ,0x6f,0x6e,0x61,0x31,0x42,0x30,0x40,0x06 ,0x03,0x55,0x04,0x03,0x13,0x39,0x41,0x75 ,0x74,0x6f,0x72,0x69,0x64,0x61,0x64,0x20 ,0x64,0x65,0x20,0x43,0x65,0x72,0x74,0x69 ,0x66,0x69,0x63,0x61,0x63,0x69,0x6f,0x6e ,0x20,0x46,0x69,0x72,0x6d,0x61,0x70,0x72 ,0x6f,0x66,0x65,0x73,0x69,0x6f,0x6e,0x61 ,0x6c,0x20,0x43,0x49,0x46,0x20,0x41,0x36 ,0x32,0x36,0x33,0x34,0x30,0x36,0x38,0x31 ,0x26,0x30,0x24,0x06,0x09,0x2a,0x86,0x48 ,0x86,0xf7,0x0d,0x01,0x09,0x01,0x16,0x17 ,0x63,0x61,0x40,0x66,0x69,0x72,0x6d,0x61 ,0x70,0x72,0x6f,0x66,0x65,0x73,0x69,0x6f ,0x6e,0x61,0x6c,0x2e,0x63,0x6f,0x6d,0x30 ,0x1e,0x17,0x0d,0x30,0x31,0x31,0x30,0x32 ,0x34,0x32,0x32,0x30,0x30,0x30,0x30,0x5a ,0x17,0x0d,0x31,0x33,0x31,0x30,0x32,0x34 ,0x32,0x32,0x30,0x30,0x30,0x30,0x5a,0x30 ,0x81,0x9d,0x31,0x0b,0x30,0x09,0x06,0x03 ,0x55,0x04,0x06,0x13,0x02,0x45,0x53,0x31 ,0x22,0x30,0x20,0x06,0x03,0x55,0x04,0x07 ,0x13,0x19,0x43,0x2f,0x20,0x4d,0x75,0x6e ,0x74,0x61,0x6e,0x65,0x72,0x20,0x32,0x34 ,0x34,0x20,0x42,0x61,0x72,0x63,0x65,0x6c ,0x6f,0x6e,0x61,0x31,0x42,0x30,0x40,0x06 ,0x03,0x55,0x04,0x03,0x13,0x39,0x41,0x75 ,0x74,0x6f,0x72,0x69,0x64,0x61,0x64,0x20 ,0x64,0x65,0x20,0x43,0x65,0x72,0x74,0x69 ,0x66,0x69,0x63,0x61,0x63,0x69,0x6f,0x6e ,0x20,0x46,0x69,0x72,0x6d,0x61,0x70,0x72 ,0x6f,0x66,0x65,0x73,0x69,0x6f,0x6e,0x61 ,0x6c,0x20,0x43,0x49,0x46,0x20,0x41,0x36 
,0x32,0x36,0x33,0x34,0x30,0x36,0x38,0x31 ,0x26,0x30,0x24,0x06,0x09,0x2a,0x86,0x48 ,0x86,0xf7,0x0d,0x01,0x09,0x01,0x16,0x17 ,0x63,0x61,0x40,0x66,0x69,0x72,0x6d,0x61 ,0x70,0x72,0x6f,0x66,0x65,0x73,0x69,0x6f ,0x6e,0x61,0x6c,0x2e,0x63,0x6f,0x6d,0x30 ,0x82,0x01,0x22,0x30,0x0d,0x06,0x09,0x2a ,0x86,0x48,0x86,0xf7,0x0d,0x01,0x01,0x01 ,0x05,0x00,0x03,0x82,0x01,0x0f,0x00,0x30 ,0x82,0x01,0x0a,0x02,0x82,0x01,0x01,0x00 ,0xe7,0x23,0x03,0x6f,0x6f,0x23,0xa5,0x5e ,0x78,0xce,0x95,0x2c,0xed,0x94,0x1e,0x6e ,0x0a,0x9e,0x01,0xc7,0xea,0x30,0xd1,0x2c ,0x9d,0xdd,0x37,0xe8,0x9b,0x98,0x79,0x56 ,0xd3,0xfc,0x73,0xdf,0xd0,0x8a,0xde,0x55 ,0x8f,0x51,0xf9,0x5a,0xea,0xde,0xb5,0x70 ,0xc4,0xed,0xa4,0xed,0xff,0xa3,0x0d,0x6e ,0x0f,0x64,0x50,0x31,0xaf,0x01,0x27,0x58 ,0xae,0xfe,0x6c,0xa7,0x4a,0x2f,0x17,0x2d ,0xd3,0x73,0xd5,0x13,0x1c,0x8f,0x59,0xa5 ,0x34,0x2c,0x1d,0x54,0x04,0x45,0xcd,0x68 ,0xb8,0xa0,0xc0,0x03,0xa5,0xcf,0x85,0x42 ,0x47,0x95,0x28,0x5b,0xcf,0xef,0x80,0x6c ,0xe0,0x90,0x97,0x8a,0x01,0x3c,0x1d,0xf3 ,0x87,0x10,0x30,0x26,0x48,0x7d,0xd7,0xfc ,0xe9,0x9d,0x91,0x71,0xff,0x41,0x9a,0xa9 ,0x40,0xb5,0x37,0x9c,0x29,0x20,0x4f,0x1f ,0x52,0xe3,0xa0,0x7d,0x13,0x6d,0x54,0xb7 ,0x0a,0xde,0xe9,0x6a,0x4e,0x07,0xac,0xac ,0x19,0x5f,0xdc,0x7e,0x62,0x74,0xf6,0xb2 ,0x05,0x00,0xba,0x85,0xa0,0xfd,0x1d,0x38 ,0x6e,0xcb,0x5a,0xbb,0x86,0xbc,0x94,0x67 ,0x33,0x35,0x83,0x2c,0x1f,0x23,0xcd,0xf8 ,0xc8,0x91,0x71,0xcc,0x97,0x8b,0xef,0xae ,0x0f,0xdc,0x29,0x03,0x1b,0xc0,0x39,0xeb ,0x70,0xed,0xc1,0x6e,0x0e,0xd8,0x67,0x0b ,0x89,0xa9,0xbc,0x35,0xe4,0xef,0xb6,0x34 ,0xb4,0xa5,0xb6,0xc4,0x2d,0xa5,0xbe,0xd0 ,0xc3,0x94,0x24,0x48,0xdb,0xdf,0x96,0xd3 ,0x00,0xb5,0x66,0x1a,0x8b,0x66,0x05,0x0f ,0xdd,0x3f,0x3f,0xcb,0x3f,0xaa,0x5e,0x9a ,0x4a,0xf8,0xb4,0x4a,0xef,0x95,0x37,0x1b ,0x02,0x03,0x01,0x00,0x01,0xa3,0x81,0x9f ,0x30,0x81,0x9c,0x30,0x2a,0x06,0x03,0x55 ,0x1d,0x11,0x04,0x23,0x30,0x21,0x86,0x1f ,0x68,0x74,0x74,0x70,0x3a,0x2f,0x2f,0x77 ,0x77,0x77,0x2e,0x66,0x69,0x72,0x6d,0x61 ,0x70,0x72,0x6f,0x66,0x65,0x73,0x69,0x6f ,0x6e,0x61,0x6c,0x2e,0x63,0x6f,0x6d,0x30 ,0x12,0x06,0x03,0x55,0x1d,0x13,0x01,0x01 ,0xff,0x04,0x08,0x30,0x06,0x01,0x01,0xff ,0x02,0x01,0x01,0x30,0x2b,0x06,0x03,0x55 ,0x1d,0x10,0x04,0x24,0x30,0x22,0x80,0x0f ,0x32,0x30,0x30,0x31,0x31,0x30,0x32,0x34 ,0x32,0x32,0x30,0x30,0x30,0x30,0x5a,0x81 ,0x0f,0x32,0x30,0x31,0x33,0x31,0x30,0x32 ,0x34,0x32,0x32,0x30,0x30,0x30,0x30,0x5a ,0x30,0x0e,0x06,0x03,0x55,0x1d,0x0f,0x01 ,0x01,0xff,0x04,0x04,0x03,0x02,0x01,0x06 ,0x30,0x1d,0x06,0x03,0x55,0x1d,0x0e,0x04 ,0x16,0x04,0x14,0x33,0x0b,0xa0,0x66,0xd1 ,0xea,0xda,0xce,0xde,0x62,0x93,0x04,0x28 ,0x52,0xb5,0x14,0x7f,0x38,0x68,0xb7,0x30 ,0x0d,0x06,0x09,0x2a,0x86,0x48,0x86,0xf7 ,0x0d,0x01,0x01,0x05,0x05,0x00,0x03,0x82 ,0x01,0x01,0x00,0x47,0x73,0xfe,0x8d,0x27 ,0x54,0xf0,0xf5,0xd4,0x77,0x9c,0x27,0x79 ,0x57,0x57,0xb7,0x15,0x56,0xec,0xc7,0xd8 ,0x58,0xb7,0x01,0x02,0xf4,0x33,0xed,0x93 ,0x50,0x88,0x9e,0x7c,0x46,0xb1,0xbd,0x3f ,0x14,0x6f,0xf1,0xb3,0x47,0x48,0x8b,0x8c ,0x97,0x06,0xd7,0xea,0x7e,0xa3,0x5c,0x2a ,0xbb,0x4d,0x2f,0x47,0xe2,0xf8,0x39,0x06 ,0xc9,0x9c,0x2e,0x31,0x1a,0x03,0x78,0xf4 ,0xbc,0x38,0xc6,0x22,0x8b,0x33,0x31,0xf0 ,0x16,0x04,0x04,0x7d,0xf9,0x76,0xe4,0x4b ,0xd7,0xc0,0xe6,0x83,0xec,0x59,0xcc,0x3f ,0xde,0xff,0x4f,0x6b,0xb7,0x67,0x7e,0xa6 ,0x86,0x81,0x32,0x23,0x03,0x9d,0xc8,0xf7 ,0x5f,0xc1,0x4a,0x60,0xa5,0x92,0xa9,0xb1 ,0xa4,0xa0,0x60,0xc3,0x78,0x87,0xb3,0x22 ,0xf3,0x2a,0xeb,0x5b,0xa9,0xed,0x05,0xab ,0x37,0x0f,0xb1,0xe2,0xd3,0x95,0x76,0x63 ,0x56,0x74,0x8c,0x58,0x72,0x1b,0x37,0xe5 ,0x64,0xa1,0xbe,0x4d,0x0c,0x93,0x98,0x0c ,0x97,0xf6,0x87,0x6d,0xb3,0x3f,0xe7,0xcb 
,0x80,0xa6,0xed,0x88,0xc7,0x5f,0x50,0x62 ,0x02,0xe8,0x99,0x74,0x16,0xd0,0xe6,0xb4 ,0x39,0xf1,0x27,0xcb,0xc8,0x40,0xd6,0xe3 ,0x86,0x10,0xa9,0x23,0x12,0x92,0xe0,0x69 ,0x41,0x63,0xa7,0xaf,0x25,0x0b,0xc0,0xc5 ,0x92,0xcb,0x1e,0x98,0xa3,0x5a,0xba,0xc5 ,0x33,0x0f,0xa0,0x97,0x01,0xdd,0x7f,0xe0 ,0x7b,0xd6,0x06,0x54,0xcf,0xa1,0xe2,0x4d ,0x38,0xeb,0x4b,0x50,0xb5,0xcb,0x26,0xf4 ,0xca,0xda,0x70,0x4a,0x6a,0xa1,0xe2,0x79 ,0xaa,0xe1,0xa7,0x33,0xf6,0xfd,0x4a,0x1f ,0xf6,0xd9,0x60}; const unsigned int NptTlsTrustAnchor_Base_0099_Size = 1115;
atupone/xbmc
lib/libUPnP/Neptune/Source/Data/TLS/Base/NptTlsTrustAnchor_Base_0099.cpp
C++
gpl-2.0
7,650
/* * Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device. * * This is a new flat driver which is based on the original emac_lite * driver from John Williams <john.williams@xilinx.com>. * * 2007 - 2013 (c) Xilinx, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/uaccess.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/of_address.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/phy.h> #include <linux/interrupt.h> #define DRIVER_NAME "xilinx_emaclite" /* Register offsets for the EmacLite Core */ #define XEL_TXBUFF_OFFSET 0x0 /* Transmit Buffer */ #define XEL_MDIOADDR_OFFSET 0x07E4 /* MDIO Address Register */ #define XEL_MDIOWR_OFFSET 0x07E8 /* MDIO Write Data Register */ #define XEL_MDIORD_OFFSET 0x07EC /* MDIO Read Data Register */ #define XEL_MDIOCTRL_OFFSET 0x07F0 /* MDIO Control Register */ #define XEL_GIER_OFFSET 0x07F8 /* GIE Register */ #define XEL_TSR_OFFSET 0x07FC /* Tx status */ #define XEL_TPLR_OFFSET 0x07F4 /* Tx packet length */ #define XEL_RXBUFF_OFFSET 0x1000 /* Receive Buffer */ #define XEL_RPLR_OFFSET 0x100C /* Rx packet length */ #define XEL_RSR_OFFSET 0x17FC /* Rx status */ #define XEL_BUFFER_OFFSET 0x0800 /* Next Tx/Rx buffer's offset */ /* MDIO Address Register Bit Masks */ #define XEL_MDIOADDR_REGADR_MASK 0x0000001F /* Register Address */ #define XEL_MDIOADDR_PHYADR_MASK 0x000003E0 /* PHY Address */ #define XEL_MDIOADDR_PHYADR_SHIFT 5 #define XEL_MDIOADDR_OP_MASK 0x00000400 /* RD/WR Operation */ /* MDIO Write Data Register Bit Masks */ #define XEL_MDIOWR_WRDATA_MASK 0x0000FFFF /* Data to be Written */ /* MDIO Read Data Register Bit Masks */ #define XEL_MDIORD_RDDATA_MASK 0x0000FFFF /* Data to be Read */ /* MDIO Control Register Bit Masks */ #define XEL_MDIOCTRL_MDIOSTS_MASK 0x00000001 /* MDIO Status Mask */ #define XEL_MDIOCTRL_MDIOEN_MASK 0x00000008 /* MDIO Enable */ /* Global Interrupt Enable Register (GIER) Bit Masks */ #define XEL_GIER_GIE_MASK 0x80000000 /* Global Enable */ /* Transmit Status Register (TSR) Bit Masks */ #define XEL_TSR_XMIT_BUSY_MASK 0x00000001 /* Tx complete */ #define XEL_TSR_PROGRAM_MASK 0x00000002 /* Program the MAC address */ #define XEL_TSR_XMIT_IE_MASK 0x00000008 /* Tx interrupt enable bit */ #define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000 /* Buffer is active, SW bit * only. 
This is not documented * in the HW spec */ /* Define for programming the MAC address into the EmacLite */ #define XEL_TSR_PROG_MAC_ADDR (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK) /* Receive Status Register (RSR) */ #define XEL_RSR_RECV_DONE_MASK 0x00000001 /* Rx complete */ #define XEL_RSR_RECV_IE_MASK 0x00000008 /* Rx interrupt enable bit */ /* Transmit Packet Length Register (TPLR) */ #define XEL_TPLR_LENGTH_MASK 0x0000FFFF /* Tx packet length */ /* Receive Packet Length Register (RPLR) */ #define XEL_RPLR_LENGTH_MASK 0x0000FFFF /* Rx packet length */ #define XEL_HEADER_OFFSET 12 /* Offset to length field */ #define XEL_HEADER_SHIFT 16 /* Shift value for length */ /* General Ethernet Definitions */ #define XEL_ARP_PACKET_SIZE 28 /* Max ARP packet size */ #define XEL_HEADER_IP_LENGTH_OFFSET 16 /* IP Length Offset */ #define TX_TIMEOUT (60*HZ) /* Tx timeout is 60 seconds. */ #define ALIGNMENT 4 /* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */ #define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT) /** * struct net_local - Our private per device data * @ndev: instance of the network device * @tx_ping_pong: indicates whether Tx Pong buffer is configured in HW * @rx_ping_pong: indicates whether Rx Pong buffer is configured in HW * @next_tx_buf_to_use: next Tx buffer to write to * @next_rx_buf_to_use: next Rx buffer to read from * @base_addr: base address of the Emaclite device * @reset_lock: lock used for synchronization * @deferred_skb: holds an skb (for transmission at a later time) when the * Tx buffer is not free * @phy_dev: pointer to the PHY device * @phy_node: pointer to the PHY device node * @mii_bus: pointer to the MII bus * @mdio_irqs: IRQs table for MDIO bus * @last_link: last link status * @has_mdio: indicates whether MDIO is included in the HW */ struct net_local { struct net_device *ndev; bool tx_ping_pong; bool rx_ping_pong; u32 next_tx_buf_to_use; u32 next_rx_buf_to_use; void __iomem *base_addr; spinlock_t reset_lock; struct sk_buff *deferred_skb; struct phy_device *phy_dev; struct device_node *phy_node; struct mii_bus *mii_bus; int mdio_irqs[PHY_MAX_ADDR]; int last_link; bool has_mdio; }; /*************************/ /* EmacLite driver calls */ /*************************/ /** * xemaclite_enable_interrupts - Enable the interrupts for the EmacLite device * @drvdata: Pointer to the Emaclite device private data * * This function enables the Tx and Rx interrupts for the Emaclite device along * with the Global Interrupt Enable. */ static void xemaclite_enable_interrupts(struct net_local *drvdata) { u32 reg_data; /* Enable the Tx interrupts for the first Buffer */ reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET); __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK, drvdata->base_addr + XEL_TSR_OFFSET); /* Enable the Rx interrupts for the first buffer */ __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET); /* Enable the Global Interrupt Enable */ __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); } /** * xemaclite_disable_interrupts - Disable the interrupts for the EmacLite device * @drvdata: Pointer to the Emaclite device private data * * This function disables the Tx and Rx interrupts for the Emaclite device, * along with the Global Interrupt Enable. 
*/ static void xemaclite_disable_interrupts(struct net_local *drvdata) { u32 reg_data; /* Disable the Global Interrupt Enable */ __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); /* Disable the Tx interrupts for the first buffer */ reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET); __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), drvdata->base_addr + XEL_TSR_OFFSET); /* Disable the Rx interrupts for the first buffer */ reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET); __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), drvdata->base_addr + XEL_RSR_OFFSET); } /** * xemaclite_aligned_write - Write from 16-bit aligned to 32-bit aligned address * @src_ptr: Void pointer to the 16-bit aligned source address * @dest_ptr: Pointer to the 32-bit aligned destination address * @length: Number bytes to write from source to destination * * This function writes data from a 16-bit aligned buffer to a 32-bit aligned * address in the EmacLite device. */ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr, unsigned length) { u32 align_buffer; u32 *to_u32_ptr; u16 *from_u16_ptr, *to_u16_ptr; to_u32_ptr = dest_ptr; from_u16_ptr = src_ptr; align_buffer = 0; for (; length > 3; length -= 4) { to_u16_ptr = (u16 *)&align_buffer; *to_u16_ptr++ = *from_u16_ptr++; *to_u16_ptr++ = *from_u16_ptr++; /* This barrier resolves occasional issues seen around * cases where the data is not properly flushed out * from the processor store buffers to the destination * memory locations. */ wmb(); /* Output a word */ *to_u32_ptr++ = align_buffer; } if (length) { u8 *from_u8_ptr, *to_u8_ptr; /* Set up to output the remaining data */ align_buffer = 0; to_u8_ptr = (u8 *) &align_buffer; from_u8_ptr = (u8 *) from_u16_ptr; /* Output the remaining data */ for (; length > 0; length--) *to_u8_ptr++ = *from_u8_ptr++; /* This barrier resolves occasional issues seen around * cases where the data is not properly flushed out * from the processor store buffers to the destination * memory locations. */ wmb(); *to_u32_ptr = align_buffer; } } /** * xemaclite_aligned_read - Read from 32-bit aligned to 16-bit aligned buffer * @src_ptr: Pointer to the 32-bit aligned source address * @dest_ptr: Pointer to the 16-bit aligned destination address * @length: Number bytes to read from source to destination * * This function reads data from a 32-bit aligned address in the EmacLite device * to a 16-bit aligned buffer. */ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr, unsigned length) { u16 *to_u16_ptr, *from_u16_ptr; u32 *from_u32_ptr; u32 align_buffer; from_u32_ptr = src_ptr; to_u16_ptr = (u16 *) dest_ptr; for (; length > 3; length -= 4) { /* Copy each word into the temporary buffer */ align_buffer = *from_u32_ptr++; from_u16_ptr = (u16 *)&align_buffer; /* Read data from source */ *to_u16_ptr++ = *from_u16_ptr++; *to_u16_ptr++ = *from_u16_ptr++; } if (length) { u8 *to_u8_ptr, *from_u8_ptr; /* Set up to read the remaining data */ to_u8_ptr = (u8 *) to_u16_ptr; align_buffer = *from_u32_ptr++; from_u8_ptr = (u8 *) &align_buffer; /* Read the remaining data */ for (; length > 0; length--) *to_u8_ptr = *from_u8_ptr; } } /** * xemaclite_send_data - Send an Ethernet frame * @drvdata: Pointer to the Emaclite device private data * @data: Pointer to the data to be sent * @byte_count: Total frame size, including header * * This function checks if the Tx buffer of the Emaclite device is free to send * data. If so, it fills the Tx buffer with data for transmission. Otherwise, it * returns an error. 
* * Return: 0 upon success or -1 if the buffer(s) are full. * * Note: The maximum Tx packet size can not be more than Ethernet header * (14 Bytes) + Maximum MTU (1500 bytes). This is excluding FCS. */ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, unsigned int byte_count) { u32 reg_data; void __iomem *addr; /* Determine the expected Tx buffer address */ addr = drvdata->base_addr + drvdata->next_tx_buf_to_use; /* If the length is too large, truncate it */ if (byte_count > ETH_FRAME_LEN) byte_count = ETH_FRAME_LEN; /* Check if the expected buffer is available */ reg_data = __raw_readl(addr + XEL_TSR_OFFSET); if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK)) == 0) { /* Switch to next buffer if configured */ if (drvdata->tx_ping_pong != 0) drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET; } else if (drvdata->tx_ping_pong != 0) { /* If the expected buffer is full, try the other buffer, * if it is configured in HW */ addr = (void __iomem __force *)((u32 __force)addr ^ XEL_BUFFER_OFFSET); reg_data = __raw_readl(addr + XEL_TSR_OFFSET); if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK)) != 0) return -1; /* Buffers were full, return failure */ } else return -1; /* Buffer was full, return failure */ /* Write the frame to the buffer */ xemaclite_aligned_write(data, (u32 __force *) addr, byte_count); __raw_writel((byte_count & XEL_TPLR_LENGTH_MASK), addr + XEL_TPLR_OFFSET); /* Update the Tx Status Register to indicate that there is a * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which * is used by the interrupt handler to check whether a frame * has been transmitted */ reg_data = __raw_readl(addr + XEL_TSR_OFFSET); reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK); __raw_writel(reg_data, addr + XEL_TSR_OFFSET); return 0; } /** * xemaclite_recv_data - Receive a frame * @drvdata: Pointer to the Emaclite device private data * @data: Address where the data is to be received * * This function is intended to be called from the interrupt context or * with a wrapper which waits for the receive frame to be available. * * Return: Total number of bytes received */ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) { void __iomem *addr; u16 length, proto_type; u32 reg_data; /* Determine the expected buffer address */ addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use); /* Verify which buffer has valid data */ reg_data = __raw_readl(addr + XEL_RSR_OFFSET); if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) { if (drvdata->rx_ping_pong != 0) drvdata->next_rx_buf_to_use ^= XEL_BUFFER_OFFSET; } else { /* The instance is out of sync, try other buffer if other * buffer is configured, return 0 otherwise. 
If the instance is * out of sync, do not update the 'next_rx_buf_to_use' since it * will correct on subsequent calls */ if (drvdata->rx_ping_pong != 0) addr = (void __iomem __force *)((u32 __force)addr ^ XEL_BUFFER_OFFSET); else return 0; /* No data was available */ /* Verify that buffer has valid data */ reg_data = __raw_readl(addr + XEL_RSR_OFFSET); if ((reg_data & XEL_RSR_RECV_DONE_MASK) != XEL_RSR_RECV_DONE_MASK) return 0; /* No data was available */ } /* Get the protocol type of the ethernet frame that arrived */ proto_type = ((ntohl(__raw_readl(addr + XEL_HEADER_OFFSET + XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) & XEL_RPLR_LENGTH_MASK); /* Check if received ethernet frame is a raw ethernet frame * or an IP packet or an ARP packet */ if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) { if (proto_type == ETH_P_IP) { length = ((ntohl(__raw_readl(addr + XEL_HEADER_IP_LENGTH_OFFSET + XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) & XEL_RPLR_LENGTH_MASK); length += ETH_HLEN + ETH_FCS_LEN; } else if (proto_type == ETH_P_ARP) length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN; else /* Field contains type other than IP or ARP, use max * frame size and let user parse it */ length = ETH_FRAME_LEN + ETH_FCS_LEN; } else /* Use the length in the frame, plus the header and trailer */ length = proto_type + ETH_HLEN + ETH_FCS_LEN; /* Read from the EmacLite device */ xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET), data, length); /* Acknowledge the frame */ reg_data = __raw_readl(addr + XEL_RSR_OFFSET); reg_data &= ~XEL_RSR_RECV_DONE_MASK; __raw_writel(reg_data, addr + XEL_RSR_OFFSET); return length; } /** * xemaclite_update_address - Update the MAC address in the device * @drvdata: Pointer to the Emaclite device private data * @address_ptr:Pointer to the MAC address (MAC address is a 48-bit value) * * Tx must be idle and Rx should be idle for deterministic results. * It is recommended that this function should be called after the * initialization and before transmission of any packets from the device. * The MAC address can be programmed using any of the two transmit * buffers (if configured). */ static void xemaclite_update_address(struct net_local *drvdata, u8 *address_ptr) { void __iomem *addr; u32 reg_data; /* Determine the expected Tx buffer address */ addr = drvdata->base_addr + drvdata->next_tx_buf_to_use; xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN); __raw_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET); /* Update the MAC address in the EmacLite */ reg_data = __raw_readl(addr + XEL_TSR_OFFSET); __raw_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET); /* Wait for EmacLite to finish with the MAC address update */ while ((__raw_readl(addr + XEL_TSR_OFFSET) & XEL_TSR_PROG_MAC_ADDR) != 0) ; } /** * xemaclite_set_mac_address - Set the MAC address for this device * @dev: Pointer to the network device instance * @addr: Void pointer to the sockaddr structure * * This function copies the HW address from the sockaddr strucutre to the * net_device structure and updates the address in HW. 
* * Return: Error if the net device is busy or 0 if the addr is set * successfully */ static int xemaclite_set_mac_address(struct net_device *dev, void *address) { struct net_local *lp = netdev_priv(dev); struct sockaddr *addr = address; if (netif_running(dev)) return -EBUSY; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); xemaclite_update_address(lp, dev->dev_addr); return 0; } /** * xemaclite_tx_timeout - Callback for Tx Timeout * @dev: Pointer to the network device * * This function is called when Tx time out occurs for Emaclite device. */ static void xemaclite_tx_timeout(struct net_device *dev) { struct net_local *lp = netdev_priv(dev); unsigned long flags; dev_err(&lp->ndev->dev, "Exceeded transmit timeout of %lu ms\n", TX_TIMEOUT * 1000UL / HZ); dev->stats.tx_errors++; /* Reset the device */ spin_lock_irqsave(&lp->reset_lock, flags); /* Shouldn't really be necessary, but shouldn't hurt */ netif_stop_queue(dev); xemaclite_disable_interrupts(lp); xemaclite_enable_interrupts(lp); if (lp->deferred_skb) { dev_kfree_skb(lp->deferred_skb); lp->deferred_skb = NULL; dev->stats.tx_errors++; } /* To exclude tx timeout */ dev->trans_start = jiffies; /* prevent tx timeout */ /* We're all ready to go. Start the queue */ netif_wake_queue(dev); spin_unlock_irqrestore(&lp->reset_lock, flags); } /**********************/ /* Interrupt Handlers */ /**********************/ /** * xemaclite_tx_handler - Interrupt handler for frames sent * @dev: Pointer to the network device * * This function updates the number of packets transmitted and handles the * deferred skb, if there is one. */ static void xemaclite_tx_handler(struct net_device *dev) { struct net_local *lp = netdev_priv(dev); dev->stats.tx_packets++; if (lp->deferred_skb) { if (xemaclite_send_data(lp, (u8 *) lp->deferred_skb->data, lp->deferred_skb->len) != 0) return; else { dev->stats.tx_bytes += lp->deferred_skb->len; dev_kfree_skb_irq(lp->deferred_skb); lp->deferred_skb = NULL; dev->trans_start = jiffies; /* prevent tx timeout */ netif_wake_queue(dev); } } } /** * xemaclite_rx_handler- Interrupt handler for frames received * @dev: Pointer to the network device * * This function allocates memory for a socket buffer, fills it with data * received and hands it over to the TCP/IP stack. */ static void xemaclite_rx_handler(struct net_device *dev) { struct net_local *lp = netdev_priv(dev); struct sk_buff *skb; unsigned int align; u32 len; len = ETH_FRAME_LEN + ETH_FCS_LEN; skb = netdev_alloc_skb(dev, len + ALIGNMENT); if (!skb) { /* Couldn't get memory. */ dev->stats.rx_dropped++; dev_err(&lp->ndev->dev, "Could not allocate receive buffer\n"); return; } /* * A new skb should have the data halfword aligned, but this code is * here just in case that isn't true. 
Calculate how many * bytes we should reserve to get the data to start on a word * boundary */ align = BUFFER_ALIGN(skb->data); if (align) skb_reserve(skb, align); skb_reserve(skb, 2); len = xemaclite_recv_data(lp, (u8 *) skb->data); if (!len) { dev->stats.rx_errors++; dev_kfree_skb_irq(skb); return; } skb_put(skb, len); /* Tell the skb how much data we got */ skb->protocol = eth_type_trans(skb, dev); skb_checksum_none_assert(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += len; if (!skb_defer_rx_timestamp(skb)) netif_rx(skb); /* Send the packet upstream */ } /** * xemaclite_interrupt - Interrupt handler for this driver * @irq: Irq of the Emaclite device * @dev_id: Void pointer to the network device instance used as callback * reference * * This function handles the Tx and Rx interrupts of the EmacLite device. */ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id) { bool tx_complete = false; struct net_device *dev = dev_id; struct net_local *lp = netdev_priv(dev); void __iomem *base_addr = lp->base_addr; u32 tx_status; /* Check if there is Rx Data available */ if ((__raw_readl(base_addr + XEL_RSR_OFFSET) & XEL_RSR_RECV_DONE_MASK) || (__raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET) & XEL_RSR_RECV_DONE_MASK)) xemaclite_rx_handler(dev); /* Check if the Transmission for the first buffer is completed */ tx_status = __raw_readl(base_addr + XEL_TSR_OFFSET); if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; __raw_writel(tx_status, base_addr + XEL_TSR_OFFSET); tx_complete = true; } /* Check if the Transmission for the second buffer is completed */ tx_status = __raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; __raw_writel(tx_status, base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); tx_complete = true; } /* If there was a Tx interrupt, call the Tx Handler */ if (tx_complete != 0) xemaclite_tx_handler(dev); return IRQ_HANDLED; } /**********************/ /* MDIO Bus functions */ /**********************/ /** * xemaclite_mdio_wait - Wait for the MDIO to be ready to use * @lp: Pointer to the Emaclite device private data * * This function waits till the device is ready to accept a new MDIO * request. * * Return: 0 for success or ETIMEDOUT for a timeout */ static int xemaclite_mdio_wait(struct net_local *lp) { unsigned long end = jiffies + 2; /* wait for the MDIO interface to not be busy or timeout after some time. */ while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) & XEL_MDIOCTRL_MDIOSTS_MASK) { if (time_before_eq(end, jiffies)) { WARN_ON(1); return -ETIMEDOUT; } msleep(1); } return 0; } /** * xemaclite_mdio_read - Read from a given MII management register * @bus: the mii_bus struct * @phy_id: the phy address * @reg: register number to read from * * This function waits till the device is ready to accept a new MDIO * request and then writes the phy address to the MDIO Address register * and reads data from MDIO Read Data register, when its available. * * Return: Value read from the MII management register */ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg) { struct net_local *lp = bus->priv; u32 ctrl_reg; u32 rc; if (xemaclite_mdio_wait(lp)) return -ETIMEDOUT; /* Write the PHY address, register number and set the OP bit in the * MDIO Address register. 
Set the Status bit in the MDIO Control * register to start a MDIO read transaction. */ ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); __raw_writel(XEL_MDIOADDR_OP_MASK | ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), lp->base_addr + XEL_MDIOADDR_OFFSET); __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, lp->base_addr + XEL_MDIOCTRL_OFFSET); if (xemaclite_mdio_wait(lp)) return -ETIMEDOUT; rc = __raw_readl(lp->base_addr + XEL_MDIORD_OFFSET); dev_dbg(&lp->ndev->dev, "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n", phy_id, reg, rc); return rc; } /** * xemaclite_mdio_write - Write to a given MII management register * @bus: the mii_bus struct * @phy_id: the phy address * @reg: register number to write to * @val: value to write to the register number specified by reg * * This function waits till the device is ready to accept a new MDIO * request and then writes the val to the MDIO Write Data register. */ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val) { struct net_local *lp = bus->priv; u32 ctrl_reg; dev_dbg(&lp->ndev->dev, "xemaclite_mdio_write(phy_id=%i, reg=%x, val=%x)\n", phy_id, reg, val); if (xemaclite_mdio_wait(lp)) return -ETIMEDOUT; /* Write the PHY address, register number and clear the OP bit in the * MDIO Address register and then write the value into the MDIO Write * Data register. Finally, set the Status bit in the MDIO Control * register to start a MDIO write transaction. */ ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); __raw_writel(~XEL_MDIOADDR_OP_MASK & ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), lp->base_addr + XEL_MDIOADDR_OFFSET); __raw_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET); __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, lp->base_addr + XEL_MDIOCTRL_OFFSET); return 0; } /** * xemaclite_mdio_setup - Register mii_bus for the Emaclite device * @lp: Pointer to the Emaclite device private data * @ofdev: Pointer to OF device structure * * This function enables MDIO bus in the Emaclite device and registers a * mii_bus. * * Return: 0 upon success or a negative error upon failure */ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) { struct mii_bus *bus; int rc; struct resource res; struct device_node *np = of_get_parent(lp->phy_node); struct device_node *npp; /* Don't register the MDIO bus if the phy_node or its parent node * can't be found. */ if (!np) { dev_err(dev, "Failed to register mdio bus.\n"); return -ENODEV; } npp = of_get_parent(np); of_address_to_resource(npp, 0, &res); if (lp->ndev->mem_start != res.start) { struct phy_device *phydev; phydev = of_phy_find_device(lp->phy_node); if (!phydev) dev_info(dev, "MDIO of the phy is not registered yet\n"); return 0; } /* Enable the MDIO bus by asserting the enable bit in MDIO Control * register. 
*/ __raw_writel(XEL_MDIOCTRL_MDIOEN_MASK, lp->base_addr + XEL_MDIOCTRL_OFFSET); bus = mdiobus_alloc(); if (!bus) { dev_err(dev, "Failed to allocate mdiobus\n"); return -ENOMEM; } snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx", (unsigned long long)res.start); bus->priv = lp; bus->name = "Xilinx Emaclite MDIO"; bus->read = xemaclite_mdio_read; bus->write = xemaclite_mdio_write; bus->parent = dev; bus->irq = lp->mdio_irqs; /* preallocated IRQ table */ lp->mii_bus = bus; rc = of_mdiobus_register(bus, np); if (rc) { dev_err(dev, "Failed to register mdio bus.\n"); goto err_register; } return 0; err_register: mdiobus_free(bus); return rc; } /** * xemaclite_adjust_link - Link state callback for the Emaclite device * @ndev: pointer to net_device struct * * There's nothing in the Emaclite device to be configured when the link * state changes. We just print the status. */ static void xemaclite_adjust_link(struct net_device *ndev) { struct net_local *lp = netdev_priv(ndev); struct phy_device *phy = lp->phy_dev; int link_state; /* hash together the state values to decide if something has changed */ link_state = phy->speed | (phy->duplex << 1) | phy->link; if (lp->last_link != link_state) { lp->last_link = link_state; phy_print_status(phy); } } /** * xemaclite_open - Open the network device * @dev: Pointer to the network device * * This function sets the MAC address, requests an IRQ and enables interrupts * for the Emaclite device and starts the Tx queue. * It also connects to the phy device, if MDIO is included in Emaclite device. */ static int xemaclite_open(struct net_device *dev) { struct net_local *lp = netdev_priv(dev); int retval; /* Just to be safe, stop the device first */ xemaclite_disable_interrupts(lp); if (lp->phy_node) { u32 bmcr; lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node, xemaclite_adjust_link, 0, PHY_INTERFACE_MODE_MII); if (!lp->phy_dev) { dev_err(&lp->ndev->dev, "of_phy_connect() failed\n"); return -ENODEV; } /* EmacLite doesn't support giga-bit speeds */ lp->phy_dev->supported &= (PHY_BASIC_FEATURES); lp->phy_dev->advertising = lp->phy_dev->supported; /* Don't advertise 1000BASE-T Full/Half duplex speeds */ phy_write(lp->phy_dev, MII_CTRL1000, 0); /* Advertise only 10 and 100mbps full/half duplex speeds */ phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL | ADVERTISE_CSMA); /* Restart auto negotiation */ bmcr = phy_read(lp->phy_dev, MII_BMCR); bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); phy_write(lp->phy_dev, MII_BMCR, bmcr); phy_start(lp->phy_dev); } /* Set the MAC address each time opened */ xemaclite_update_address(lp, dev->dev_addr); /* Grab the IRQ */ retval = request_irq(dev->irq, xemaclite_interrupt, 0, dev->name, dev); if (retval) { dev_err(&lp->ndev->dev, "Could not allocate interrupt %d\n", dev->irq); if (lp->phy_dev) phy_disconnect(lp->phy_dev); lp->phy_dev = NULL; return retval; } /* Enable Interrupts */ xemaclite_enable_interrupts(lp); /* We're ready to go */ netif_start_queue(dev); return 0; } /** * xemaclite_close - Close the network device * @dev: Pointer to the network device * * This function stops the Tx queue, disables interrupts and frees the IRQ for * the Emaclite device. * It also disconnects the phy device associated with the Emaclite device. 
*/ static int xemaclite_close(struct net_device *dev) { struct net_local *lp = netdev_priv(dev); netif_stop_queue(dev); xemaclite_disable_interrupts(lp); free_irq(dev->irq, dev); if (lp->phy_dev) phy_disconnect(lp->phy_dev); lp->phy_dev = NULL; return 0; } /** * xemaclite_send - Transmit a frame * @orig_skb: Pointer to the socket buffer to be transmitted * @dev: Pointer to the network device * * This function checks if the Tx buffer of the Emaclite device is free to send * data. If so, it fills the Tx buffer with data from socket buffer data, * updates the stats and frees the socket buffer. The Tx completion is signaled * by an interrupt. If the Tx buffer isn't free, then the socket buffer is * deferred and the Tx queue is stopped so that the deferred socket buffer can * be transmitted when the Emaclite device is free to transmit data. * * Return: 0, always. */ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) { struct net_local *lp = netdev_priv(dev); struct sk_buff *new_skb; unsigned int len; unsigned long flags; len = orig_skb->len; new_skb = orig_skb; spin_lock_irqsave(&lp->reset_lock, flags); if (xemaclite_send_data(lp, (u8 *) new_skb->data, len) != 0) { /* If the Emaclite Tx buffer is busy, stop the Tx queue and * defer the skb for transmission during the ISR, after the * current transmission is complete */ netif_stop_queue(dev); lp->deferred_skb = new_skb; /* Take the time stamp now, since we can't do this in an ISR. */ skb_tx_timestamp(new_skb); spin_unlock_irqrestore(&lp->reset_lock, flags); return 0; } spin_unlock_irqrestore(&lp->reset_lock, flags); skb_tx_timestamp(new_skb); dev->stats.tx_bytes += len; dev_consume_skb_any(new_skb); return 0; } /** * xemaclite_remove_ndev - Free the network device * @ndev: Pointer to the network device to be freed * * This function un maps the IO region of the Emaclite device and frees the net * device. */ static void xemaclite_remove_ndev(struct net_device *ndev) { if (ndev) { free_netdev(ndev); } } /** * get_bool - Get a parameter from the OF device * @ofdev: Pointer to OF device structure * @s: Property to be retrieved * * This function looks for a property in the device node and returns the value * of the property if its found or 0 if the property is not found. * * Return: Value of the parameter if the parameter is found, or 0 otherwise */ static bool get_bool(struct platform_device *ofdev, const char *s) { u32 *p = (u32 *)of_get_property(ofdev->dev.of_node, s, NULL); if (p) { return (bool)*p; } else { dev_warn(&ofdev->dev, "Parameter %s not found," "defaulting to false\n", s); return false; } } static struct net_device_ops xemaclite_netdev_ops; /** * xemaclite_of_probe - Probe method for the Emaclite device. * @ofdev: Pointer to OF device structure * @match: Pointer to the structure used for matching a device * * This function probes for the Emaclite device in the device tree. * It initializes the driver data structure and the hardware, sets the MAC * address and registers the network device. * It also registers a mii_bus for the Emaclite device, if MDIO is included * in the device. * * Return: 0, if the driver is bound to the Emaclite device, or * a negative error if there is failure. 
*/ static int xemaclite_of_probe(struct platform_device *ofdev) { struct resource *res; struct net_device *ndev = NULL; struct net_local *lp = NULL; struct device *dev = &ofdev->dev; const void *mac_address; int rc = 0; dev_info(dev, "Device Tree Probing\n"); /* Create an ethernet device instance */ ndev = alloc_etherdev(sizeof(struct net_local)); if (!ndev) return -ENOMEM; dev_set_drvdata(dev, ndev); SET_NETDEV_DEV(ndev, &ofdev->dev); lp = netdev_priv(ndev); lp->ndev = ndev; /* Get IRQ for the device */ res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0); if (!res) { dev_err(dev, "no IRQ found\n"); rc = -ENXIO; goto error; } ndev->irq = res->start; res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); lp->base_addr = devm_ioremap_resource(&ofdev->dev, res); if (IS_ERR(lp->base_addr)) { rc = PTR_ERR(lp->base_addr); goto error; } ndev->mem_start = res->start; ndev->mem_end = res->end; spin_lock_init(&lp->reset_lock); lp->next_tx_buf_to_use = 0x0; lp->next_rx_buf_to_use = 0x0; lp->tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong"); lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong"); mac_address = of_get_mac_address(ofdev->dev.of_node); if (mac_address) /* Set the MAC address. */ memcpy(ndev->dev_addr, mac_address, ETH_ALEN); else dev_warn(dev, "No MAC address found\n"); /* Clear the Tx CSR's in case this is a restart */ __raw_writel(0, lp->base_addr + XEL_TSR_OFFSET); __raw_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); /* Set the MAC address in the EmacLite device */ xemaclite_update_address(lp, ndev->dev_addr); lp->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0); rc = xemaclite_mdio_setup(lp, &ofdev->dev); if (rc) dev_warn(&ofdev->dev, "error registering MDIO bus\n"); dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr); ndev->netdev_ops = &xemaclite_netdev_ops; ndev->flags &= ~IFF_MULTICAST; ndev->watchdog_timeo = TX_TIMEOUT; /* Finally, register the device */ rc = register_netdev(ndev); if (rc) { dev_err(dev, "Cannot register network device, aborting\n"); goto error; } dev_info(dev, "Xilinx EmacLite at 0x%08X mapped to 0x%08X, irq=%d\n", (unsigned int __force)ndev->mem_start, (unsigned int __force)lp->base_addr, ndev->irq); return 0; error: xemaclite_remove_ndev(ndev); return rc; } /** * xemaclite_of_remove - Unbind the driver from the Emaclite device. * @of_dev: Pointer to OF device structure * * This function is called if a device is physically removed from the system or * if the driver module is being unloaded. It frees any resources allocated to * the device. * * Return: 0, always. 
*/ static int xemaclite_of_remove(struct platform_device *of_dev) { struct net_device *ndev = platform_get_drvdata(of_dev); struct net_local *lp = netdev_priv(ndev); /* Un-register the mii_bus, if configured */ if (lp->has_mdio) { mdiobus_unregister(lp->mii_bus); kfree(lp->mii_bus->irq); mdiobus_free(lp->mii_bus); lp->mii_bus = NULL; } unregister_netdev(ndev); of_node_put(lp->phy_node); lp->phy_node = NULL; xemaclite_remove_ndev(ndev); return 0; } #ifdef CONFIG_NET_POLL_CONTROLLER static void xemaclite_poll_controller(struct net_device *ndev) { disable_irq(ndev->irq); xemaclite_interrupt(ndev->irq, ndev); enable_irq(ndev->irq); } #endif static struct net_device_ops xemaclite_netdev_ops = { .ndo_open = xemaclite_open, .ndo_stop = xemaclite_close, .ndo_start_xmit = xemaclite_send, .ndo_set_mac_address = xemaclite_set_mac_address, .ndo_tx_timeout = xemaclite_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = xemaclite_poll_controller, #endif }; /* Match table for OF platform binding */ static const struct of_device_id xemaclite_of_match[] = { { .compatible = "xlnx,opb-ethernetlite-1.01.a", }, { .compatible = "xlnx,opb-ethernetlite-1.01.b", }, { .compatible = "xlnx,xps-ethernetlite-1.00.a", }, { .compatible = "xlnx,xps-ethernetlite-2.00.a", }, { .compatible = "xlnx,xps-ethernetlite-2.01.a", }, { .compatible = "xlnx,xps-ethernetlite-3.00.a", }, { /* end of list */ }, }; MODULE_DEVICE_TABLE(of, xemaclite_of_match); static struct platform_driver xemaclite_of_driver = { .driver = { .name = DRIVER_NAME, .of_match_table = xemaclite_of_match, }, .probe = xemaclite_of_probe, .remove = xemaclite_of_remove, }; module_platform_driver(xemaclite_of_driver); MODULE_AUTHOR("Xilinx, Inc."); MODULE_DESCRIPTION("Xilinx Ethernet MAC Lite driver"); MODULE_LICENSE("GPL");
kbukin1/pnotify-linux-4.1.6
drivers/net/ethernet/xilinx/xilinx_emaclite.c
C
gpl-2.0
36,848
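The xemaclite_aligned_write()/xemaclite_aligned_read() helpers in the EmacLite driver above stage half-words through an aligned temporary so that only 32-bit accesses ever reach the device buffers. The following is a minimal user-space sketch of that staging idea, added for illustration only: pack_aligned() and the sample frame are made-up names, there is no wmb() or __raw_writel(), and the printed words are endianness-dependent.

#include <stdint.h>
#include <stdio.h>

/* Illustration only: pack a 16-bit-aligned source buffer into 32-bit words by
 * staging through an aligned temporary, mirroring the loop structure of
 * xemaclite_aligned_write() above (the driver additionally issues wmb() and
 * writes to device memory). The half-word pointer casts copy the driver's
 * approach; a stricter version could stage the bytes with memcpy() instead. */
static void pack_aligned(const void *src, uint32_t *dst, unsigned int length)
{
	const uint16_t *from = src;
	uint32_t word = 0;

	for (; length > 3; length -= 4) {
		uint16_t *half = (uint16_t *)&word;

		*half++ = *from++;		/* low half-word */
		*half = *from++;		/* high half-word */
		*dst++ = word;			/* single aligned 32-bit store */
	}

	if (length) {				/* 1..3 trailing bytes */
		const uint8_t *from8 = (const uint8_t *)from;
		uint8_t *to8 = (uint8_t *)&word;

		word = 0;
		while (length--)
			*to8++ = *from8++;
		*dst = word;
	}
}

int main(void)
{
	uint8_t frame[10] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
	uint32_t out[3] = { 0, 0, 0 };

	pack_aligned(frame, out, sizeof(frame));
	for (unsigned int i = 0; i < 3; i++)
		printf("word %u: 0x%08x\n", i, (unsigned int)out[i]);
	return 0;
}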
/* linux/arch/arm/mach-s5pv210/pm.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com * * S5PV210 - Power Management support * * Based on arch/arm/mach-s3c2410/pm.c * Copyright (c) 2006 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/suspend.h> #include <linux/syscore_ops.h> #include <linux/io.h> #include <plat/cpu.h> #include <plat/pm.h> #include <mach/regs-irq.h> #include <mach/regs-clock.h> static struct sleep_save s5pv210_core_save[] = { /* Clock source */ SAVE_ITEM(S5P_CLK_SRC0), SAVE_ITEM(S5P_CLK_SRC1), SAVE_ITEM(S5P_CLK_SRC2), SAVE_ITEM(S5P_CLK_SRC3), SAVE_ITEM(S5P_CLK_SRC4), SAVE_ITEM(S5P_CLK_SRC5), SAVE_ITEM(S5P_CLK_SRC6), /* Clock source Mask */ SAVE_ITEM(S5P_CLK_SRC_MASK0), SAVE_ITEM(S5P_CLK_SRC_MASK1), /* Clock Divider */ SAVE_ITEM(S5P_CLK_DIV0), SAVE_ITEM(S5P_CLK_DIV1), SAVE_ITEM(S5P_CLK_DIV2), SAVE_ITEM(S5P_CLK_DIV3), SAVE_ITEM(S5P_CLK_DIV4), SAVE_ITEM(S5P_CLK_DIV5), SAVE_ITEM(S5P_CLK_DIV6), SAVE_ITEM(S5P_CLK_DIV7), /* Clock Main Gate */ SAVE_ITEM(S5P_CLKGATE_MAIN0), SAVE_ITEM(S5P_CLKGATE_MAIN1), SAVE_ITEM(S5P_CLKGATE_MAIN2), /* Clock source Peri Gate */ SAVE_ITEM(S5P_CLKGATE_PERI0), SAVE_ITEM(S5P_CLKGATE_PERI1), /* Clock source SCLK Gate */ SAVE_ITEM(S5P_CLKGATE_SCLK0), SAVE_ITEM(S5P_CLKGATE_SCLK1), /* Clock IP Clock gate */ SAVE_ITEM(S5P_CLKGATE_IP0), SAVE_ITEM(S5P_CLKGATE_IP1), SAVE_ITEM(S5P_CLKGATE_IP2), SAVE_ITEM(S5P_CLKGATE_IP3), SAVE_ITEM(S5P_CLKGATE_IP4), /* Clock Blcok and Bus gate */ SAVE_ITEM(S5P_CLKGATE_BLOCK), SAVE_ITEM(S5P_CLKGATE_BUS0), /* Clock ETC */ SAVE_ITEM(S5P_CLK_OUT), SAVE_ITEM(S5P_MDNIE_SEL), }; static int s5pv210_cpu_suspend(unsigned long arg) { unsigned long tmp; /* issue the standby signal into the pm unit. 
Note, we * issue a write-buffer drain just in case */ tmp = 0; asm("b 1f\n\t" ".align 5\n\t" "1:\n\t" "mcr p15, 0, %0, c7, c10, 5\n\t" "mcr p15, 0, %0, c7, c10, 4\n\t" "wfi" : : "r" (tmp)); pr_info("Failed to suspend the system\n"); return 1; /* Aborting suspend */ } static void s5pv210_pm_prepare(void) { unsigned int tmp; /* ensure at least INFORM0 has the resume address */ __raw_writel(virt_to_phys(s3c_cpu_resume), S5P_INFORM0); tmp = __raw_readl(S5P_SLEEP_CFG); tmp &= ~(S5P_SLEEP_CFG_OSC_EN | S5P_SLEEP_CFG_USBOSC_EN); __raw_writel(tmp, S5P_SLEEP_CFG); /* WFI for SLEEP mode configuration by SYSCON */ tmp = __raw_readl(S5P_PWR_CFG); tmp &= S5P_CFG_WFI_CLEAN; tmp |= S5P_CFG_WFI_SLEEP; __raw_writel(tmp, S5P_PWR_CFG); /* SYSCON interrupt handling disable */ tmp = __raw_readl(S5P_OTHERS); tmp |= S5P_OTHER_SYSC_INTOFF; __raw_writel(tmp, S5P_OTHERS); s3c_pm_do_save(s5pv210_core_save, ARRAY_SIZE(s5pv210_core_save)); } static int s5pv210_pm_add(struct device *dev, struct subsys_interface *sif) { pm_cpu_prep = s5pv210_pm_prepare; pm_cpu_sleep = s5pv210_cpu_suspend; return 0; } static struct subsys_interface s5pv210_pm_interface = { .name = "s5pv210_pm", .subsys = &s5pv210_subsys, .add_dev = s5pv210_pm_add, }; static __init int s5pv210_pm_drvinit(void) { return subsys_interface_register(&s5pv210_pm_interface); } arch_initcall(s5pv210_pm_drvinit); static void s5pv210_pm_resume(void) { u32 tmp; tmp = __raw_readl(S5P_OTHERS); tmp |= (S5P_OTHERS_RET_IO | S5P_OTHERS_RET_CF |\ S5P_OTHERS_RET_MMC | S5P_OTHERS_RET_UART); __raw_writel(tmp , S5P_OTHERS); s3c_pm_do_restore_core(s5pv210_core_save, ARRAY_SIZE(s5pv210_core_save)); } static struct syscore_ops s5pv210_pm_syscore_ops = { .resume = s5pv210_pm_resume, }; static __init int s5pv210_pm_syscore_init(void) { register_syscore_ops(&s5pv210_pm_syscore_ops); return 0; } arch_initcall(s5pv210_pm_syscore_init);
kyupltd/linux
arch/arm/mach-s5pv210/pm.c
C
gpl-2.0
3,964
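The s5pv210_core_save[] table above relies on the generic sleep_save machinery (SAVE_ITEM(), s3c_pm_do_save(), s3c_pm_do_restore_core()) declared in plat/pm.h, which is not part of this file. The sketch below is an assumed analog of what that save/restore pass does; demo_save, demo_do_save() and the fake register variables are invented for the example and stand in for SoC registers such as S5P_CLK_SRC0.

#include <stdint.h>
#include <stdio.h>

/* Assumed analog of the sleep_save pattern: remember each register's value
 * before suspend and write it back on resume. */
struct demo_save {
	volatile uint32_t *reg;		/* register to preserve */
	uint32_t val;			/* value cached across sleep */
};

static void demo_do_save(struct demo_save *tab, int count)
{
	for (int i = 0; i < count; i++)
		tab[i].val = *tab[i].reg;
}

static void demo_do_restore(struct demo_save *tab, int count)
{
	for (int i = 0; i < count; i++)
		*tab[i].reg = tab[i].val;
}

int main(void)
{
	static volatile uint32_t clk_src0 = 0x1111, clk_div0 = 0x2222;
	struct demo_save tab[] = { { &clk_src0, 0 }, { &clk_div0, 0 } };

	demo_do_save(tab, 2);
	clk_src0 = 0;			/* registers lose state during sleep */
	clk_div0 = 0;
	demo_do_restore(tab, 2);
	printf("restored: 0x%x 0x%x\n", (unsigned int)clk_src0,
	       (unsigned int)clk_div0);
	return 0;
}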
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Copyright (C) IBM Corp. 2006 * * Authors: Hollis Blanchard <hollisb@us.ibm.com> */ #include <linux/gfp.h> #include <linux/mm.h> #include <asm/page.h> #include <xen/xencomm.h> #include <xen/interface/xen.h> #include <asm/xen/xencomm.h> /* for xencomm_is_phys_contiguous() */ static int xencomm_init(struct xencomm_desc *desc, void *buffer, unsigned long bytes) { unsigned long recorded = 0; int i = 0; while ((recorded < bytes) && (i < desc->nr_addrs)) { unsigned long vaddr = (unsigned long)buffer + recorded; unsigned long paddr; int offset; int chunksz; offset = vaddr % PAGE_SIZE; /* handle partial pages */ chunksz = min(PAGE_SIZE - offset, bytes - recorded); paddr = xencomm_vtop(vaddr); if (paddr == ~0UL) { printk(KERN_DEBUG "%s: couldn't translate vaddr %lx\n", __func__, vaddr); return -EINVAL; } desc->address[i++] = paddr; recorded += chunksz; } if (recorded < bytes) { printk(KERN_DEBUG "%s: could only translate %ld of %ld bytes\n", __func__, recorded, bytes); return -ENOSPC; } /* mark remaining addresses invalid (just for safety) */ while (i < desc->nr_addrs) desc->address[i++] = XENCOMM_INVALID; desc->magic = XENCOMM_MAGIC; return 0; } static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask, void *buffer, unsigned long bytes) { struct xencomm_desc *desc; unsigned long buffer_ulong = (unsigned long)buffer; unsigned long start = buffer_ulong & PAGE_MASK; unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK; unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT; unsigned long size = sizeof(*desc) + sizeof(desc->address[0]) * nr_addrs; /* * slab allocator returns at least sizeof(void*) aligned pointer. * When sizeof(*desc) > sizeof(void*), struct xencomm_desc might * cross page boundary. */ if (sizeof(*desc) > sizeof(void *)) { unsigned long order = get_order(size); desc = (struct xencomm_desc *)__get_free_pages(gfp_mask, order); if (desc == NULL) return NULL; desc->nr_addrs = ((PAGE_SIZE << order) - sizeof(struct xencomm_desc)) / sizeof(*desc->address); } else { desc = kmalloc(size, gfp_mask); if (desc == NULL) return NULL; desc->nr_addrs = nr_addrs; } return desc; } void xencomm_free(struct xencomm_handle *desc) { if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG)) { struct xencomm_desc *desc__ = (struct xencomm_desc *)desc; if (sizeof(*desc__) > sizeof(void *)) { unsigned long size = sizeof(*desc__) + sizeof(desc__->address[0]) * desc__->nr_addrs; unsigned long order = get_order(size); free_pages((unsigned long)__va(desc), order); } else kfree(__va(desc)); } } static int xencomm_create(void *buffer, unsigned long bytes, struct xencomm_desc **ret, gfp_t gfp_mask) { struct xencomm_desc *desc; int rc; pr_debug("%s: %p[%ld]\n", __func__, buffer, bytes); if (bytes == 0) { /* don't create a descriptor; Xen recognizes NULL. 
*/ BUG_ON(buffer != NULL); *ret = NULL; return 0; } BUG_ON(buffer == NULL); /* 'bytes' is non-zero */ desc = xencomm_alloc(gfp_mask, buffer, bytes); if (!desc) { printk(KERN_DEBUG "%s failure\n", "xencomm_alloc"); return -ENOMEM; } rc = xencomm_init(desc, buffer, bytes); if (rc) { printk(KERN_DEBUG "%s failure: %d\n", "xencomm_init", rc); xencomm_free((struct xencomm_handle *)__pa(desc)); return rc; } *ret = desc; return 0; } static struct xencomm_handle *xencomm_create_inline(void *ptr) { unsigned long paddr; BUG_ON(!xencomm_is_phys_contiguous((unsigned long)ptr)); paddr = (unsigned long)xencomm_pa(ptr); BUG_ON(paddr & XENCOMM_INLINE_FLAG); return (struct xencomm_handle *)(paddr | XENCOMM_INLINE_FLAG); } /* "mini" routine, for stack-based communications: */ static int xencomm_create_mini(void *buffer, unsigned long bytes, struct xencomm_mini *xc_desc, struct xencomm_desc **ret) { int rc = 0; struct xencomm_desc *desc; BUG_ON(((unsigned long)xc_desc) % sizeof(*xc_desc) != 0); desc = (void *)xc_desc; desc->nr_addrs = XENCOMM_MINI_ADDRS; rc = xencomm_init(desc, buffer, bytes); if (!rc) *ret = desc; return rc; } struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes) { int rc; struct xencomm_desc *desc; if (xencomm_is_phys_contiguous((unsigned long)ptr)) return xencomm_create_inline(ptr); rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL); if (rc || desc == NULL) return NULL; return xencomm_pa(desc); } struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes, struct xencomm_mini *xc_desc) { int rc; struct xencomm_desc *desc = NULL; if (xencomm_is_phys_contiguous((unsigned long)ptr)) return xencomm_create_inline(ptr); rc = xencomm_create_mini(ptr, bytes, xc_desc, &desc); if (rc) return NULL; return xencomm_pa(desc); }
yakantosat/linux-2.6.32
drivers/xen/xencomm.c
C
gpl-2.0
5,559
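xencomm_alloc() in the file above sizes its address[] table from how many pages the caller's buffer spans. That arithmetic is easy to miss in the flattened text, so here is a standalone sketch of just that calculation; DEMO_PAGE_* and pages_spanned() are invented names and a 4 KiB page size is assumed.

#include <stdio.h>

#define DEMO_PAGE_SHIFT 12			/* assume 4 KiB pages */
#define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)
#define DEMO_PAGE_MASK  (~(DEMO_PAGE_SIZE - 1))

/* Same calculation xencomm_alloc() uses for nr_addrs: round the buffer out to
 * page boundaries and count how many pages lie in between. */
static unsigned long pages_spanned(unsigned long buf, unsigned long bytes)
{
	unsigned long start = buf & DEMO_PAGE_MASK;
	unsigned long end = (buf + bytes) | ~DEMO_PAGE_MASK;

	return (end - start + 1) >> DEMO_PAGE_SHIFT;
}

int main(void)
{
	/* A 6000-byte buffer starting 4000 bytes into a page touches three
	 * pages, so three address slots would be needed. */
	printf("%lu\n", pages_spanned(0x10000UL + 4000, 6000));
	return 0;
}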
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>

#include <linux/gpio.h>

#include "gpiolib.h"

void gpio_free(unsigned gpio)
{
	gpiod_free(gpio_to_desc(gpio));
}
EXPORT_SYMBOL_GPL(gpio_free);

/**
 * gpio_request_one - request a single GPIO with initial configuration
 * @gpio: the GPIO number
 * @flags: GPIO configuration as specified by GPIOF_*
 * @label: a literal description string of this GPIO
 */
int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
{
	struct gpio_desc *desc;
	int err;

	desc = gpio_to_desc(gpio);

	/* Compatibility: assume unavailable "valid" GPIOs will appear later */
	if (!desc && gpio_is_valid(gpio))
		return -EPROBE_DEFER;

	err = gpiod_request(desc, label);
	if (err)
		return err;

	if (flags & GPIOF_OPEN_DRAIN)
		set_bit(FLAG_OPEN_DRAIN, &desc->flags);

	if (flags & GPIOF_OPEN_SOURCE)
		set_bit(FLAG_OPEN_SOURCE, &desc->flags);

	if (flags & GPIOF_ACTIVE_LOW)
		set_bit(FLAG_ACTIVE_LOW, &desc->flags);

	if (flags & GPIOF_DIR_IN)
		err = gpiod_direction_input(desc);
	else
		err = gpiod_direction_output_raw(desc,
				(flags & GPIOF_INIT_HIGH) ? 1 : 0);

	if (err)
		goto free_gpio;

	if (flags & GPIOF_EXPORT) {
		err = gpiod_export(desc, flags & GPIOF_EXPORT_CHANGEABLE);
		if (err)
			goto free_gpio;
	}

	return 0;

free_gpio:
	gpiod_free(desc);
	return err;
}
EXPORT_SYMBOL_GPL(gpio_request_one);

int gpio_request(unsigned gpio, const char *label)
{
	struct gpio_desc *desc = gpio_to_desc(gpio);

	/* Compatibility: assume unavailable "valid" GPIOs will appear later */
	if (!desc && gpio_is_valid(gpio))
		return -EPROBE_DEFER;

	return gpiod_request(desc, label);
}
EXPORT_SYMBOL_GPL(gpio_request);

/**
 * gpio_request_array - request multiple GPIOs in a single call
 * @array: array of the 'struct gpio'
 * @num: how many GPIOs in the array
 */
int gpio_request_array(const struct gpio *array, size_t num)
{
	int i, err;

	for (i = 0; i < num; i++, array++) {
		err = gpio_request_one(array->gpio, array->flags, array->label);
		if (err)
			goto err_free;
	}
	return 0;

err_free:
	while (i--)
		gpio_free((--array)->gpio);
	return err;
}
EXPORT_SYMBOL_GPL(gpio_request_array);

/**
 * gpio_free_array - release multiple GPIOs in a single call
 * @array: array of the 'struct gpio'
 * @num: how many GPIOs in the array
 */
void gpio_free_array(const struct gpio *array, size_t num)
{
	while (num--)
		gpio_free((array++)->gpio);
}
EXPORT_SYMBOL_GPL(gpio_free_array);
rajat1994/linux
drivers/gpio/gpiolib-legacy.c
C
gpl-2.0
2,457
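As a usage sketch for the legacy integer-based GPIO API in the record above: a board file might claim several lines in one call and release them together. The GPIO numbers and labels below are invented for illustration; the gpio_* calls and GPIOF_* flags are the ones wrapped by gpiolib-legacy.c.

/* Hypothetical board code; GPIO numbers 42/43 and the labels are made up. */
#include <linux/gpio.h>

static const struct gpio board_gpios[] = {
	{ 42, GPIOF_OUT_INIT_LOW, "status-led" },  /* driven low at request time */
	{ 43, GPIOF_IN,           "mode-switch" }, /* configured as an input */
};

static int board_claim_gpios(void)
{
	int err;

	/* Request and configure both lines in one call; on failure the
	 * helper releases whatever it had already claimed. */
	err = gpio_request_array(board_gpios, ARRAY_SIZE(board_gpios));
	if (err)
		return err;

	gpio_set_value(42, 1);	/* turn the status LED on */
	return 0;
}

static void board_release_gpios(void)
{
	gpio_free_array(board_gpios, ARRAY_SIZE(board_gpios));
}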
/* * This program is used to generate definitions needed by * assembly language modules. * * We use the technique used in the OSF Mach kernel code: * generate asm statements containing #defines, * compile this file to assembler, and then extract the * #defines from the assembly-language output. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/suspend.h> #include <linux/hrtimer.h> #ifdef CONFIG_PPC64 #include <linux/time.h> #include <linux/hardirq.h> #endif #include <linux/kbuild.h> #include <asm/io.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/cputable.h> #include <asm/thread_info.h> #include <asm/rtas.h> #include <asm/vdso_datapage.h> #ifdef CONFIG_PPC64 #include <asm/paca.h> #include <asm/lppaca.h> #include <asm/cache.h> #include <asm/compat.h> #include <asm/mmu.h> #include <asm/hvcall.h> #include <asm/xics.h> #endif #ifdef CONFIG_PPC_POWERNV #include <asm/opal.h> #endif #if defined(CONFIG_KVM) || defined(CONFIG_KVM_GUEST) #include <linux/kvm_host.h> #endif #if defined(CONFIG_KVM) && defined(CONFIG_PPC_BOOK3S) #include <asm/kvm_book3s.h> #endif #ifdef CONFIG_PPC32 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) #include "head_booke.h" #endif #endif #if defined(CONFIG_PPC_FSL_BOOK3E) #include "../mm/mmu_decl.h" #endif int main(void) { DEFINE(THREAD, offsetof(struct task_struct, thread)); DEFINE(MM, offsetof(struct task_struct, mm)); DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id)); #ifdef CONFIG_PPC64 DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context)); DEFINE(SIGSEGV, SIGSEGV); DEFINE(NMI_MASK, NMI_MASK); DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr)); DEFINE(THREAD_DSCR_INHERIT, offsetof(struct thread_struct, dscr_inherit)); #else DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); #endif /* CONFIG_PPC64 */ DEFINE(KSP, offsetof(struct thread_struct, ksp)); DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit)); DEFINE(PT_REGS, offsetof(struct thread_struct, regs)); #ifdef CONFIG_BOOKE DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0])); #endif DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode)); DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0])); DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr)); #ifdef CONFIG_ALTIVEC DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0])); DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave)); DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr)); DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr)); #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_VSX DEFINE(THREAD_VSR0, offsetof(struct thread_struct, fpr)); DEFINE(THREAD_USED_VSR, offsetof(struct thread_struct, used_vsr)); #endif /* CONFIG_VSX */ #ifdef CONFIG_PPC64 DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid)); #else /* CONFIG_PPC64 */ DEFINE(PGDIR, offsetof(struct thread_struct, pgdir)); #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0)); #endif #ifdef CONFIG_SPE DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0])); DEFINE(THREAD_ACC, 
offsetof(struct thread_struct, acc)); DEFINE(THREAD_SPEFSCR, offsetof(struct thread_struct, spefscr)); DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe)); #endif /* CONFIG_SPE */ #endif /* CONFIG_PPC64 */ #ifdef CONFIG_KVM_BOOK3S_32_HANDLER DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu)); #endif DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags)); DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); DEFINE(TI_TASK, offsetof(struct thread_info, task)); DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); #ifdef CONFIG_PPC64 DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size)); DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_dline_size)); DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, dlines_per_page)); DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size)); DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size)); DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page)); /* paca */ DEFINE(PACA_SIZE, sizeof(struct paca_struct)); DEFINE(PACA_LOCK_TOKEN, offsetof(struct paca_struct, lock_token)); DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index)); DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start)); DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack)); DEFINE(PACACURRENT, offsetof(struct paca_struct, __current)); DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr)); DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr)); DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1)); DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc)); DEFINE(PACAKBASE, offsetof(struct paca_struct, kernelbase)); DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr)); DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled)); DEFINE(PACAIRQHAPPENED, offsetof(struct paca_struct, irq_happened)); DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id)); #ifdef CONFIG_PPC_MM_SLICES DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct, context.low_slices_psize)); DEFINE(PACAHIGHSLICEPSIZE, offsetof(struct paca_struct, context.high_slices_psize)); DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def)); #endif /* CONFIG_PPC_MM_SLICES */ #ifdef CONFIG_PPC_BOOK3E DEFINE(PACAPGD, offsetof(struct paca_struct, pgd)); DEFINE(PACA_KERNELPGD, offsetof(struct paca_struct, kernel_pgd)); DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen)); DEFINE(PACA_EXTLB, offsetof(struct paca_struct, extlb)); DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc)); DEFINE(PACA_EXCRIT, offsetof(struct paca_struct, excrit)); DEFINE(PACA_EXDBG, offsetof(struct paca_struct, exdbg)); DEFINE(PACA_MC_STACK, offsetof(struct paca_struct, mc_kstack)); DEFINE(PACA_CRIT_STACK, offsetof(struct paca_struct, crit_kstack)); DEFINE(PACA_DBG_STACK, offsetof(struct paca_struct, dbg_kstack)); #endif /* CONFIG_PPC_BOOK3E */ #ifdef CONFIG_PPC_STD_MMU_64 DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real)); DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr)); DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache)); DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr)); DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp)); #ifdef CONFIG_PPC_MM_SLICES DEFINE(MMUPSIZESLLP, offsetof(struct mmu_psize_def, sllp)); #else DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp)); #endif /* CONFIG_PPC_MM_SLICES */ DEFINE(PACA_EXGEN, 
offsetof(struct paca_struct, exgen)); DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc)); DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb)); DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr)); DEFINE(PACA_SLBSHADOWPTR, offsetof(struct paca_struct, slb_shadow_ptr)); DEFINE(SLBSHADOW_STACKVSID, offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid)); DEFINE(SLBSHADOW_STACKESID, offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid)); DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area)); DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0)); DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1)); DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int)); DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int)); DEFINE(LPPACA_PMCINUSE, offsetof(struct lppaca, pmcregs_in_use)); DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx)); DEFINE(LPPACA_YIELDCOUNT, offsetof(struct lppaca, yield_count)); DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx)); #endif /* CONFIG_PPC_STD_MMU_64 */ DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp)); DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state)); DEFINE(PACA_STARTTIME, offsetof(struct paca_struct, starttime)); DEFINE(PACA_STARTTIME_USER, offsetof(struct paca_struct, starttime_user)); DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time)); DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time)); DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save)); DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost)); #endif /* CONFIG_PPC64 */ /* RTAS */ DEFINE(RTASBASE, offsetof(struct rtas_t, base)); DEFINE(RTASENTRY, offsetof(struct rtas_t, entry)); /* Interrupt register frame */ DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE); DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)); #ifdef CONFIG_PPC64 /* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. 
*/ DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); /* hcall statistics */ DEFINE(HCALL_STAT_SIZE, sizeof(struct hcall_stats)); DEFINE(HCALL_STAT_CALLS, offsetof(struct hcall_stats, num_calls)); DEFINE(HCALL_STAT_TB, offsetof(struct hcall_stats, tb_total)); DEFINE(HCALL_STAT_PURR, offsetof(struct hcall_stats, purr_total)); #endif /* CONFIG_PPC64 */ DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0])); DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1])); DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2])); DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3])); DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4])); DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5])); DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6])); DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7])); DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8])); DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9])); DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10])); DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11])); DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12])); DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13])); #ifndef CONFIG_PPC64 DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14])); DEFINE(GPR15, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[15])); DEFINE(GPR16, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[16])); DEFINE(GPR17, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[17])); DEFINE(GPR18, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[18])); DEFINE(GPR19, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[19])); DEFINE(GPR20, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[20])); DEFINE(GPR21, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[21])); DEFINE(GPR22, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[22])); DEFINE(GPR23, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[23])); DEFINE(GPR24, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[24])); DEFINE(GPR25, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[25])); DEFINE(GPR26, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[26])); DEFINE(GPR27, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[27])); DEFINE(GPR28, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[28])); DEFINE(GPR29, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[29])); DEFINE(GPR30, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[30])); DEFINE(GPR31, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[31])); #endif /* CONFIG_PPC64 */ /* * Note: these symbols include _ because they overlap with special * register names */ DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip)); DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr)); DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr)); DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link)); DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr)); DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer)); DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar)); DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr)); DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3)); DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result)); DEFINE(_TRAP, 
STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap)); #ifndef CONFIG_PPC64 DEFINE(_MQ, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, mq)); /* * The PowerPC 400-class & Book-E processors have neither the DAR * nor the DSISR SPRs. Hence, we overload them to hold the similar * DEAR and ESR SPRs for such processors. For critical interrupts * we use them to hold SRR0 and SRR1. */ DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar)); DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr)); #else /* CONFIG_PPC64 */ DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe)); /* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */ DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)); DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8); #endif /* CONFIG_PPC64 */ #if defined(CONFIG_PPC32) #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE); DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0)); /* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */ DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0)); DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1)); DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2)); DEFINE(MAS3, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3)); DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6)); DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7)); DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0)); DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1)); DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0)); DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1)); DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0)); DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1)); DEFINE(SAVED_KSP_LIMIT, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, saved_ksp_limit)); #endif #endif DEFINE(CLONE_VM, CLONE_VM); DEFINE(CLONE_UNTRACED, CLONE_UNTRACED); #ifndef CONFIG_PPC64 DEFINE(MM_PGD, offsetof(struct mm_struct, pgd)); #endif /* ! CONFIG_PPC64 */ /* About the CPU features table */ DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features)); DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup)); DEFINE(CPU_SPEC_RESTORE, offsetof(struct cpu_spec, cpu_restore)); DEFINE(pbe_address, offsetof(struct pbe, address)); DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address)); DEFINE(pbe_next, offsetof(struct pbe, next)); #ifndef CONFIG_PPC64 DEFINE(TASK_SIZE, TASK_SIZE); DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); #endif /* ! 
CONFIG_PPC64 */ /* datapage offsets for use by vdso */ DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct vdso_data, tb_orig_stamp)); DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct vdso_data, tb_ticks_per_sec)); DEFINE(CFG_TB_TO_XS, offsetof(struct vdso_data, tb_to_xs)); DEFINE(CFG_STAMP_XSEC, offsetof(struct vdso_data, stamp_xsec)); DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct vdso_data, tb_update_count)); DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct vdso_data, tz_minuteswest)); DEFINE(CFG_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime)); DEFINE(CFG_SYSCALL_MAP32, offsetof(struct vdso_data, syscall_map_32)); DEFINE(WTOM_CLOCK_SEC, offsetof(struct vdso_data, wtom_clock_sec)); DEFINE(WTOM_CLOCK_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); DEFINE(STAMP_XTIME, offsetof(struct vdso_data, stamp_xtime)); DEFINE(STAMP_SEC_FRAC, offsetof(struct vdso_data, stamp_sec_fraction)); DEFINE(CFG_ICACHE_BLOCKSZ, offsetof(struct vdso_data, icache_block_size)); DEFINE(CFG_DCACHE_BLOCKSZ, offsetof(struct vdso_data, dcache_block_size)); DEFINE(CFG_ICACHE_LOGBLOCKSZ, offsetof(struct vdso_data, icache_log_block_size)); DEFINE(CFG_DCACHE_LOGBLOCKSZ, offsetof(struct vdso_data, dcache_log_block_size)); #ifdef CONFIG_PPC64 DEFINE(CFG_SYSCALL_MAP64, offsetof(struct vdso_data, syscall_map_64)); DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec)); DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec)); DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec)); DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec)); DEFINE(TSPC64_TV_SEC, offsetof(struct timespec, tv_sec)); DEFINE(TSPC64_TV_NSEC, offsetof(struct timespec, tv_nsec)); DEFINE(TSPC32_TV_SEC, offsetof(struct compat_timespec, tv_sec)); DEFINE(TSPC32_TV_NSEC, offsetof(struct compat_timespec, tv_nsec)); #else DEFINE(TVAL32_TV_SEC, offsetof(struct timeval, tv_sec)); DEFINE(TVAL32_TV_USEC, offsetof(struct timeval, tv_usec)); DEFINE(TSPC32_TV_SEC, offsetof(struct timespec, tv_sec)); DEFINE(TSPC32_TV_NSEC, offsetof(struct timespec, tv_nsec)); #endif /* timeval/timezone offsets for use by vdso */ DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest)); DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime)); /* Other bits used by the vdso */ DEFINE(CLOCK_REALTIME, CLOCK_REALTIME); DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC); DEFINE(NSEC_PER_SEC, NSEC_PER_SEC); DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); #ifdef CONFIG_BUG DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry)); #endif DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE); DEFINE(PTE_SIZE, sizeof(pte_t)); #ifdef CONFIG_KVM DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack)); DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave)); DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr)); DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fpscr)); #ifdef CONFIG_ALTIVEC DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr)); DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vscr)); #endif #ifdef CONFIG_VSX DEFINE(VCPU_VSRS, offsetof(struct kvm_vcpu, arch.vsr)); #endif DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr)); DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc)); #ifdef CONFIG_KVM_BOOK3S_64_HV DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr)); DEFINE(VCPU_SRR0, offsetof(struct 
kvm_vcpu, arch.shregs.srr0)); DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1)); DEFINE(VCPU_SPRG0, offsetof(struct kvm_vcpu, arch.shregs.sprg0)); DEFINE(VCPU_SPRG1, offsetof(struct kvm_vcpu, arch.shregs.sprg1)); DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2)); DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3)); #endif DEFINE(VCPU_SHARED_SPRG4, offsetof(struct kvm_vcpu_arch_shared, sprg4)); DEFINE(VCPU_SHARED_SPRG5, offsetof(struct kvm_vcpu_arch_shared, sprg5)); DEFINE(VCPU_SHARED_SPRG6, offsetof(struct kvm_vcpu_arch_shared, sprg6)); DEFINE(VCPU_SHARED_SPRG7, offsetof(struct kvm_vcpu_arch_shared, sprg7)); DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid)); DEFINE(VCPU_SHADOW_PID1, offsetof(struct kvm_vcpu, arch.shadow_pid1)); DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared)); DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr)); DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr)); DEFINE(VCPU_SHARED_MAS0, offsetof(struct kvm_vcpu_arch_shared, mas0)); DEFINE(VCPU_SHARED_MAS1, offsetof(struct kvm_vcpu_arch_shared, mas1)); DEFINE(VCPU_SHARED_MAS2, offsetof(struct kvm_vcpu_arch_shared, mas2)); DEFINE(VCPU_SHARED_MAS7_3, offsetof(struct kvm_vcpu_arch_shared, mas7_3)); DEFINE(VCPU_SHARED_MAS4, offsetof(struct kvm_vcpu_arch_shared, mas4)); DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6)); /* book3s */ #ifdef CONFIG_KVM_BOOK3S_64_HV DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid)); DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1)); DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid)); DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr)); DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1)); DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock)); DEFINE(KVM_ONLINE_CPUS, offsetof(struct kvm, online_vcpus.counter)); DEFINE(KVM_LAST_VCPU, offsetof(struct kvm, arch.last_vcpu)); DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr)); DEFINE(KVM_RMOR, offsetof(struct kvm, arch.rmor)); DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v)); DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr)); DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar)); #endif #ifdef CONFIG_PPC_BOOK3S DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id)); DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr)); DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr)); DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr)); DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr)); DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor)); DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl)); DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr)); DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags)); DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec)); DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires)); DEFINE(VCPU_PENDING_EXC, offsetof(struct kvm_vcpu, arch.pending_exceptions)); DEFINE(VCPU_CEDED, offsetof(struct kvm_vcpu, arch.ceded)); DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded)); DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa)); DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr)); DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc)); DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb)); DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max)); DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr)); DEFINE(VCPU_LAST_CPU, offsetof(struct 
kvm_vcpu, arch.last_cpu)); DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr)); DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar)); DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst)); DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap)); DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid)); DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count)); DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count)); DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest)); DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads)); DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) - offsetof(struct kvmppc_vcpu_book3s, vcpu)); DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige)); DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv)); DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb)); #ifdef CONFIG_PPC_BOOK3S_64 #ifdef CONFIG_KVM_BOOK3S_PR # define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f)) #else # define SVCPU_FIELD(x, f) #endif # define HSTATE_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, kvm_hstate.f)) #else /* 32-bit */ # define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, f)) # define HSTATE_FIELD(x, f) DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, hstate.f)) #endif SVCPU_FIELD(SVCPU_CR, cr); SVCPU_FIELD(SVCPU_XER, xer); SVCPU_FIELD(SVCPU_CTR, ctr); SVCPU_FIELD(SVCPU_LR, lr); SVCPU_FIELD(SVCPU_PC, pc); SVCPU_FIELD(SVCPU_R0, gpr[0]); SVCPU_FIELD(SVCPU_R1, gpr[1]); SVCPU_FIELD(SVCPU_R2, gpr[2]); SVCPU_FIELD(SVCPU_R3, gpr[3]); SVCPU_FIELD(SVCPU_R4, gpr[4]); SVCPU_FIELD(SVCPU_R5, gpr[5]); SVCPU_FIELD(SVCPU_R6, gpr[6]); SVCPU_FIELD(SVCPU_R7, gpr[7]); SVCPU_FIELD(SVCPU_R8, gpr[8]); SVCPU_FIELD(SVCPU_R9, gpr[9]); SVCPU_FIELD(SVCPU_R10, gpr[10]); SVCPU_FIELD(SVCPU_R11, gpr[11]); SVCPU_FIELD(SVCPU_R12, gpr[12]); SVCPU_FIELD(SVCPU_R13, gpr[13]); SVCPU_FIELD(SVCPU_FAULT_DSISR, fault_dsisr); SVCPU_FIELD(SVCPU_FAULT_DAR, fault_dar); SVCPU_FIELD(SVCPU_LAST_INST, last_inst); SVCPU_FIELD(SVCPU_SHADOW_SRR1, shadow_srr1); #ifdef CONFIG_PPC_BOOK3S_32 SVCPU_FIELD(SVCPU_SR, sr); #endif #ifdef CONFIG_PPC64 SVCPU_FIELD(SVCPU_SLB, slb); SVCPU_FIELD(SVCPU_SLB_MAX, slb_max); #endif HSTATE_FIELD(HSTATE_HOST_R1, host_r1); HSTATE_FIELD(HSTATE_HOST_R2, host_r2); HSTATE_FIELD(HSTATE_HOST_MSR, host_msr); HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler); HSTATE_FIELD(HSTATE_SCRATCH0, scratch0); HSTATE_FIELD(HSTATE_SCRATCH1, scratch1); HSTATE_FIELD(HSTATE_IN_GUEST, in_guest); HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5); HSTATE_FIELD(HSTATE_NAPPING, napping); #ifdef CONFIG_KVM_BOOK3S_64_HV HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu); HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore); HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys); HSTATE_FIELD(HSTATE_MMCR, host_mmcr); HSTATE_FIELD(HSTATE_PMC, host_pmc); HSTATE_FIELD(HSTATE_PURR, host_purr); HSTATE_FIELD(HSTATE_SPURR, host_spurr); HSTATE_FIELD(HSTATE_DSCR, host_dscr); HSTATE_FIELD(HSTATE_DABR, dabr); HSTATE_FIELD(HSTATE_DECEXP, dec_expires); DEFINE(IPI_PRIORITY, IPI_PRIORITY); #endif /* CONFIG_KVM_BOOK3S_64_HV */ #else /* CONFIG_PPC_BOOK3S */ DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr)); DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc)); DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst)); DEFINE(VCPU_FAULT_DEAR, offsetof(struct 
kvm_vcpu, arch.fault_dear)); DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr)); #endif /* CONFIG_PPC_BOOK3S */ #endif /* CONFIG_KVM */ #ifdef CONFIG_KVM_GUEST DEFINE(KVM_MAGIC_SCRATCH1, offsetof(struct kvm_vcpu_arch_shared, scratch1)); DEFINE(KVM_MAGIC_SCRATCH2, offsetof(struct kvm_vcpu_arch_shared, scratch2)); DEFINE(KVM_MAGIC_SCRATCH3, offsetof(struct kvm_vcpu_arch_shared, scratch3)); DEFINE(KVM_MAGIC_INT, offsetof(struct kvm_vcpu_arch_shared, int_pending)); DEFINE(KVM_MAGIC_MSR, offsetof(struct kvm_vcpu_arch_shared, msr)); DEFINE(KVM_MAGIC_CRITICAL, offsetof(struct kvm_vcpu_arch_shared, critical)); DEFINE(KVM_MAGIC_SR, offsetof(struct kvm_vcpu_arch_shared, sr)); #endif #ifdef CONFIG_44x DEFINE(PGD_T_LOG2, PGD_T_LOG2); DEFINE(PTE_T_LOG2, PTE_T_LOG2); #endif #ifdef CONFIG_PPC_FSL_BOOK3E DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam)); DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0)); DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1)); DEFINE(TLBCAM_MAS2, offsetof(struct tlbcam, MAS2)); DEFINE(TLBCAM_MAS3, offsetof(struct tlbcam, MAS3)); DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7)); #endif #if defined(CONFIG_KVM) && defined(CONFIG_SPE) DEFINE(VCPU_EVR, offsetof(struct kvm_vcpu, arch.evr[0])); DEFINE(VCPU_ACC, offsetof(struct kvm_vcpu, arch.acc)); DEFINE(VCPU_SPEFSCR, offsetof(struct kvm_vcpu, arch.spefscr)); DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr)); #endif #ifdef CONFIG_KVM_EXIT_TIMING DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu, arch.timing_exit.tv32.tbu)); DEFINE(VCPU_TIMING_EXIT_TBL, offsetof(struct kvm_vcpu, arch.timing_exit.tv32.tbl)); DEFINE(VCPU_TIMING_LAST_ENTER_TBU, offsetof(struct kvm_vcpu, arch.timing_last_enter.tv32.tbu)); DEFINE(VCPU_TIMING_LAST_ENTER_TBL, offsetof(struct kvm_vcpu, arch.timing_last_enter.tv32.tbl)); #endif #ifdef CONFIG_PPC_POWERNV DEFINE(OPAL_MC_GPR3, offsetof(struct opal_machine_check_event, gpr3)); DEFINE(OPAL_MC_SRR0, offsetof(struct opal_machine_check_event, srr0)); DEFINE(OPAL_MC_SRR1, offsetof(struct opal_machine_check_event, srr1)); DEFINE(PACA_OPAL_MC_EVT, offsetof(struct paca_struct, opal_mc_evt)); #endif return 0; }
MWisBest/omap
arch/powerpc/kernel/asm-offsets.c
C
gpl-2.0
28,849
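For context on the record above: asm-offsets.c is never linked into the kernel. It is compiled to assembly, and the DEFINE() markers are scraped into the generated asm-offsets.h so that hand-written assembly can refer to C structure offsets by name. A rough sketch of the mechanism follows; the macro lives in include/linux/kbuild.h and its exact text may differ by kernel version, and the assembly line is illustrative only.

/* Paraphrase of the kbuild DEFINE() helper: each use emits a "->SYM value"
 * marker into the generated assembly, which the build turns into a
 * "#define SYM value" line in asm-offsets.h. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

/*
 * With KSP generated as offsetof(struct thread_struct, ksp), 32-bit
 * PowerPC assembly could then load it symbolically (illustrative only):
 *
 *	lwz	r4, KSP(r3)	# r3 holds a struct thread_struct pointer
 */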
/* * Export of symbols defined in assembler */ /* Tell string.h we don't want memcpy etc. as cpp defines */ #define EXPORT_SYMTAB_STROPS #include <linux/module.h> #include <linux/string.h> #include <linux/types.h> #include <asm/checksum.h> #include <asm/uaccess.h> #include <asm/ftrace.h> /* string functions */ EXPORT_SYMBOL(strlen); EXPORT_SYMBOL(strncmp); /* mem* functions */ extern void *__memscan_zero(void *, size_t); extern void *__memscan_generic(void *, int, size_t); extern void *__bzero(void *, size_t); EXPORT_SYMBOL(memscan); EXPORT_SYMBOL(__memscan_zero); EXPORT_SYMBOL(__memscan_generic); EXPORT_SYMBOL(memcmp); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(__bzero); /* Networking helper routines. */ EXPORT_SYMBOL(csum_partial); #ifdef CONFIG_MCOUNT EXPORT_SYMBOL(_mcount); #endif /* * sparc */ #ifdef CONFIG_SPARC32 extern int __ashrdi3(int, int); extern int __ashldi3(int, int); extern int __lshrdi3(int, int); extern int __muldi3(int, int); extern int __divdi3(int, int); extern void (*__copy_1page)(void *, const void *); extern void (*bzero_1page)(void *); extern void ___rw_read_enter(void); extern void ___rw_read_try(void); extern void ___rw_read_exit(void); extern void ___rw_write_enter(void); /* Networking helper routines. */ EXPORT_SYMBOL(__csum_partial_copy_sparc_generic); /* Special internal versions of library functions. */ EXPORT_SYMBOL(__copy_1page); EXPORT_SYMBOL(__memmove); EXPORT_SYMBOL(bzero_1page); /* Moving data to/from/in userspace. */ EXPORT_SYMBOL(__copy_user); /* Used by asm/spinlock.h */ #ifdef CONFIG_SMP EXPORT_SYMBOL(___rw_read_enter); EXPORT_SYMBOL(___rw_read_try); EXPORT_SYMBOL(___rw_read_exit); EXPORT_SYMBOL(___rw_write_enter); #endif EXPORT_SYMBOL(__ashrdi3); EXPORT_SYMBOL(__ashldi3); EXPORT_SYMBOL(__lshrdi3); EXPORT_SYMBOL(__muldi3); EXPORT_SYMBOL(__divdi3); #endif /* * sparc64 */ #ifdef CONFIG_SPARC64 /* Networking helper routines. */ EXPORT_SYMBOL(csum_partial_copy_nocheck); EXPORT_SYMBOL(__csum_partial_copy_from_user); EXPORT_SYMBOL(__csum_partial_copy_to_user); EXPORT_SYMBOL(ip_fast_csum); /* Moving data to/from/in userspace. */ EXPORT_SYMBOL(___copy_to_user); EXPORT_SYMBOL(___copy_from_user); EXPORT_SYMBOL(___copy_in_user); EXPORT_SYMBOL(__clear_user); /* Atomic counter implementation. */ EXPORT_SYMBOL(atomic_add); EXPORT_SYMBOL(atomic_add_ret); EXPORT_SYMBOL(atomic_sub); EXPORT_SYMBOL(atomic_sub_ret); EXPORT_SYMBOL(atomic64_add); EXPORT_SYMBOL(atomic64_add_ret); EXPORT_SYMBOL(atomic64_sub); EXPORT_SYMBOL(atomic64_sub_ret); EXPORT_SYMBOL(atomic64_dec_if_positive); /* Atomic bit operations. */ EXPORT_SYMBOL(test_and_set_bit); EXPORT_SYMBOL(test_and_clear_bit); EXPORT_SYMBOL(test_and_change_bit); EXPORT_SYMBOL(set_bit); EXPORT_SYMBOL(clear_bit); EXPORT_SYMBOL(change_bit); /* Special internal versions of library functions. 
*/ EXPORT_SYMBOL(_clear_page); EXPORT_SYMBOL(clear_user_page); EXPORT_SYMBOL(copy_user_page); /* RAID code needs this */ void VISenter(void); EXPORT_SYMBOL(VISenter); /* CRYPTO code needs this */ void VISenterhalf(void); EXPORT_SYMBOL(VISenterhalf); extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *, unsigned long *); extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *, unsigned long *, unsigned long *); extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *, unsigned long *, unsigned long *, unsigned long *); EXPORT_SYMBOL(xor_vis_2); EXPORT_SYMBOL(xor_vis_3); EXPORT_SYMBOL(xor_vis_4); EXPORT_SYMBOL(xor_vis_5); extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *); extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *, unsigned long *); extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *, unsigned long *, unsigned long *); extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *, unsigned long *, unsigned long *, unsigned long *); EXPORT_SYMBOL(xor_niagara_2); EXPORT_SYMBOL(xor_niagara_3); EXPORT_SYMBOL(xor_niagara_4); EXPORT_SYMBOL(xor_niagara_5); #endif
Team-Hydra/android_kernel_moto_shamu
arch/sparc/lib/ksyms.c
C
gpl-2.0
4,168
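For context on the export list above: EXPORT_SYMBOL() is what lets loadable modules link against these assembler-implemented routines at load time. A hypothetical out-of-tree module using one of the exported helpers (memscan) might look like this; the module itself is invented, only memscan comes from the list above.

/* Hypothetical module; demonstrates linking against an exported symbol. */
#include <linux/module.h>
#include <linux/string.h>

static int __init find_byte_init(void)
{
	static const char buf[] = "abc\0def";
	/* memscan() returns a pointer to the first matching byte. */
	void *p = memscan((void *)buf, 0, sizeof(buf));

	pr_info("NUL found at offset %td\n", (char *)p - buf);
	return 0;
}
module_init(find_byte_init);
MODULE_LICENSE("GPL");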
/* * Copyright 2012 Cisco Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/module.h> #include <linux/mempool.h> #include <linux/errno.h> #include <linux/spinlock.h> #include <linux/kallsyms.h> #include "fnic_io.h" #include "fnic.h" unsigned int trace_max_pages; static int fnic_max_trace_entries; static unsigned long fnic_trace_buf_p; static DEFINE_SPINLOCK(fnic_trace_lock); static fnic_trace_dbg_t fnic_trace_entries; int fnic_tracing_enabled = 1; /* * fnic_trace_get_buf - Give buffer pointer to user to fill up trace information * * Description: * This routine gets next available trace buffer entry location @wr_idx * from allocated trace buffer pages and gives that memory location * to the user to store the trace information. * * Return Value: * This routine returns a pointer to the next available trace entry * @fnic_buf_head for user to fill trace information. */ fnic_trace_data_t *fnic_trace_get_buf(void) { unsigned long fnic_buf_head; unsigned long flags; spin_lock_irqsave(&fnic_trace_lock, flags); /* * Get next available memory location for writing trace information * at @wr_idx and increment @wr_idx */ fnic_buf_head = fnic_trace_entries.page_offset[fnic_trace_entries.wr_idx]; fnic_trace_entries.wr_idx++; /* * Verify if trace buffer is full then change wr_idx to * start from zero */ if (fnic_trace_entries.wr_idx >= fnic_max_trace_entries) fnic_trace_entries.wr_idx = 0; /* * Verify if write index @wr_idx and read index @rd_idx are the same then * increment @rd_idx to move to next entry in trace buffer */ if (fnic_trace_entries.wr_idx == fnic_trace_entries.rd_idx) { fnic_trace_entries.rd_idx++; if (fnic_trace_entries.rd_idx >= fnic_max_trace_entries) fnic_trace_entries.rd_idx = 0; } spin_unlock_irqrestore(&fnic_trace_lock, flags); return (fnic_trace_data_t *)fnic_buf_head; } /* * fnic_get_trace_data - Copy trace buffer to a memory file * @fnic_dbgfs_t: pointer to debugfs trace buffer * * Description: * This routine gathers the fnic trace debugfs data from the fnic_trace_data_t * buffer and dumps it to fnic_dbgfs_t. It will start at the rd_idx entry in * the log and process the log until the end of the buffer. Then it will gather * from the beginning of the log and process until the current entry @wr_idx.
* * Return Value: * This routine returns the number of bytes that were dumped into fnic_dbgfs_t */ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt) { int rd_idx; int wr_idx; int len = 0; unsigned long flags; char str[KSYM_SYMBOL_LEN]; struct timespec val; fnic_trace_data_t *tbp; spin_lock_irqsave(&fnic_trace_lock, flags); rd_idx = fnic_trace_entries.rd_idx; wr_idx = fnic_trace_entries.wr_idx; if (wr_idx < rd_idx) { while (1) { /* Start from read index @rd_idx */ tbp = (fnic_trace_data_t *) fnic_trace_entries.page_offset[rd_idx]; if (!tbp) { spin_unlock_irqrestore(&fnic_trace_lock, flags); return 0; } /* Convert function pointer to function name */ if (sizeof(unsigned long) < 8) { sprint_symbol(str, tbp->fnaddr.low); jiffies_to_timespec(tbp->timestamp.low, &val); } else { sprint_symbol(str, tbp->fnaddr.val); jiffies_to_timespec(tbp->timestamp.val, &val); } /* * Dump trace buffer entry to memory file * and increment read index @rd_idx */ len += snprintf(fnic_dbgfs_prt->buffer + len, (trace_max_pages * PAGE_SIZE * 3) - len, "%16lu.%16lu %-50s %8x %8x %16llx %16llx " "%16llx %16llx %16llx\n", val.tv_sec, val.tv_nsec, str, tbp->host_no, tbp->tag, tbp->data[0], tbp->data[1], tbp->data[2], tbp->data[3], tbp->data[4]); rd_idx++; /* * If rd_idx has reached the maximum number of trace entries * then move rd_idx to zero */ if (rd_idx > (fnic_max_trace_entries-1)) rd_idx = 0; /* * Continue dumping trace buffer entries into * memory file until rd_idx reaches the write index */ if (rd_idx == wr_idx) break; } } else if (wr_idx > rd_idx) { while (1) { /* Start from read index @rd_idx */ tbp = (fnic_trace_data_t *) fnic_trace_entries.page_offset[rd_idx]; if (!tbp) { spin_unlock_irqrestore(&fnic_trace_lock, flags); return 0; } /* Convert function pointer to function name */ if (sizeof(unsigned long) < 8) { sprint_symbol(str, tbp->fnaddr.low); jiffies_to_timespec(tbp->timestamp.low, &val); } else { sprint_symbol(str, tbp->fnaddr.val); jiffies_to_timespec(tbp->timestamp.val, &val); } /* * Dump trace buffer entry to memory file * and increment read index @rd_idx */ len += snprintf(fnic_dbgfs_prt->buffer + len, (trace_max_pages * PAGE_SIZE * 3) - len, "%16lu.%16lu %-50s %8x %8x %16llx %16llx " "%16llx %16llx %16llx\n", val.tv_sec, val.tv_nsec, str, tbp->host_no, tbp->tag, tbp->data[0], tbp->data[1], tbp->data[2], tbp->data[3], tbp->data[4]); rd_idx++; /* * Continue dumping trace buffer entries into * memory file until rd_idx reaches the write index */ if (rd_idx == wr_idx) break; } } spin_unlock_irqrestore(&fnic_trace_lock, flags); return len; } /* * fnic_trace_buf_init - Initialize fnic trace buffer logging facility * * Description: * Initialize trace buffer data structure by allocating required memory and * setting page_offset information for every trace entry by adding trace entry * length to previous page_offset value.
*/ int fnic_trace_buf_init(void) { unsigned long fnic_buf_head; int i; int err = 0; trace_max_pages = fnic_trace_max_pages; fnic_max_trace_entries = (trace_max_pages * PAGE_SIZE)/ FNIC_ENTRY_SIZE_BYTES; fnic_trace_buf_p = (unsigned long)vmalloc((trace_max_pages * PAGE_SIZE)); if (!fnic_trace_buf_p) { printk(KERN_ERR PFX "Failed to allocate memory " "for fnic_trace_buf_p\n"); err = -ENOMEM; goto err_fnic_trace_buf_init; } memset((void *)fnic_trace_buf_p, 0, (trace_max_pages * PAGE_SIZE)); fnic_trace_entries.page_offset = vmalloc(fnic_max_trace_entries * sizeof(unsigned long)); if (!fnic_trace_entries.page_offset) { printk(KERN_ERR PFX "Failed to allocate memory for" " page_offset\n"); if (fnic_trace_buf_p) { vfree((void *)fnic_trace_buf_p); fnic_trace_buf_p = 0; } err = -ENOMEM; goto err_fnic_trace_buf_init; } memset((void *)fnic_trace_entries.page_offset, 0, (fnic_max_trace_entries * sizeof(unsigned long))); fnic_trace_entries.wr_idx = fnic_trace_entries.rd_idx = 0; fnic_buf_head = fnic_trace_buf_p; /* * Set page_offset field of fnic_trace_entries struct by * calculating memory location for every trace entry using * length of each trace entry */ for (i = 0; i < fnic_max_trace_entries; i++) { fnic_trace_entries.page_offset[i] = fnic_buf_head; fnic_buf_head += FNIC_ENTRY_SIZE_BYTES; } err = fnic_trace_debugfs_init(); if (err < 0) { printk(KERN_ERR PFX "Failed to initialize debugfs for tracing\n"); goto err_fnic_trace_debugfs_init; } printk(KERN_INFO PFX "Successfully Initialized Trace Buffer\n"); return err; err_fnic_trace_debugfs_init: fnic_trace_free(); err_fnic_trace_buf_init: return err; } /* * fnic_trace_free - Free memory of fnic trace data structures. */ void fnic_trace_free(void) { fnic_tracing_enabled = 0; fnic_trace_debugfs_terminate(); if (fnic_trace_entries.page_offset) { vfree((void *)fnic_trace_entries.page_offset); fnic_trace_entries.page_offset = NULL; } if (fnic_trace_buf_p) { vfree((void *)fnic_trace_buf_p); fnic_trace_buf_p = 0; } printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n"); }
arnavgosain/tomato
drivers/scsi/fnic/fnic_trace.c
C
gpl-2.0
8,325
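The wr_idx/rd_idx arithmetic in fnic_trace_get_buf() above implements a wrap-around ring of fixed-size slots in which the writer never fails: when it catches up with the reader it pushes the read index forward, overwriting the oldest unread entry. A standalone sketch of the same index discipline, with hypothetical names and without the spinlock the driver takes around it:

/* Minimal ring-index sketch (not part of the driver, no locking shown). */
#define RING_ENTRIES 8

static unsigned int wr_idx, rd_idx;

static unsigned int ring_claim_write_slot(void)
{
	unsigned int slot = wr_idx;

	wr_idx = (wr_idx + 1) % RING_ENTRIES;		/* wrap like the wr_idx check above */
	if (wr_idx == rd_idx)				/* writer caught the reader: */
		rd_idx = (rd_idx + 1) % RING_ENTRIES;	/* drop the oldest entry */
	return slot;					/* caller writes into this slot */
}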